gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Orchestrates the deployment of complete systems.
Usage: orchestrate systems deploy [OPTIONS] <SYSTEM,...>
"""
import logging
import optparse
import os
import pkgutil
import subprocess
import orchestrate
from orchestrate import base
log = logging.getLogger(__name__)
def run(command, pii=False):
    """Runs the given system command through the shell.

    Args:
      command: Command to run. Executed with shell=True, so the command string
        must be trusted -- never pass unsanitized user input.
      pii: Logs the command redacted to prevent any PII from leaking in plain
        text. By default it logs the command verbatim. Please make sure to set
        this to True if the command being executed contains passwords or any
        other sensitive information.

    Returns:
      The command's exit status as returned by subprocess.call (0 on success).
      Previously the status was silently discarded; surfacing it lets callers
      detect failed deployments. Callers that ignore the return value are
      unaffected.
    """
    message = command if not pii else '[redacted due to pii]'
    logging.getLogger(__name__).debug('Executing: %(message)s',
                                      dict(message=message))
    # shell=True is required because callers pass full shell command strings.
    return subprocess.call(command, shell=True)
def find_valid_systems(path):
    """Returns valid systems in the given path.

    A valid system is either a module, or a non-empty package.

    Args:
      path: Path to package to introspect.

    Returns:
      A dict that includes the module loader indexed by system name.
    """
    found = dict()
    for info in pkgutil.walk_packages([path]):
        key = info.name.replace('_', '-')
        if info.ispkg:
            # A package only counts as a system when it contains at least
            # one importable submodule of its own.
            subpath = os.path.sep.join([path, info.name])
            if any(True for _ in pkgutil.walk_packages([subpath])):
                found[key] = info
        else:
            found[key] = info
    return found
def find_all_valid_systems():
    """Returns dict of all valid systems found under orchestrate/systems."""
    package_dir = os.path.dirname(orchestrate.__file__)
    systems_path = os.path.abspath(os.path.sep.join([package_dir, 'systems']))
    return find_valid_systems(systems_path)
class ExecutionOptions:
    """Simple attribute bag: exposes a dict of option values as attributes."""

    def __init__(self, options):
        # Copy every entry of the mapping onto the instance namespace.
        for key, value in options.items():
            setattr(self, key, value)
# Discovered once at module import time: maps dash-separated system name to
# its pkgutil ModuleInfo, as returned by find_all_valid_systems().
SYSTEMS = find_all_valid_systems()
class Command(base.OrchestrateCommand):
    """Orchestrates the deployment of various complete systems."""

    # Cache of OrchestrateSystem subclasses already located, keyed by system
    # name, so repeated load_system() calls skip the module-loading work.
    system_classes = dict()

    @property
    def description(self):
        """Help text listing all discovered systems plus sample invocations."""
        system_names = ['- ' + name for name in sorted(SYSTEMS)]
        system_names = '\n'.join(system_names)
        return """Orchestrates the deployment of various complete systems.

Available systems:
{systems}

Sample usage:

1. Deploy an Elastfile cluster and a Virtual Studio layout with the prefix vfx
   using the default values for VPCs, DNS Zones, etc.:

   orchestrate systems deploy elastifile virtual-studio --prefix=vfx

2. Deploy an Elastifile cluster and a Virtual Studio layout with the prefix
   animation. Uses a custom IP range for the Elastifile cluster and its
   load balancer virtual IP:

   orchestrate systems deploy elastifile virtual-studio --prefix=animation \\
       --elastifile=cidr=172.16.1.0/24,ip=172.16.2.1
""".format(systems=system_names)

    @property
    def options(self):
        """Returns command parser options."""
        options = [
            optparse.Option('--help-system', action='store_true', help=(
                'Displays help for the deployment scripts for selected systems.')),
            optparse.Option('-x', '--prefix', default='', help=(
                'Prefix to use for orchestrated resources')),
            optparse.Option(
                '-d', '--dry-run', action='store_true', default=False, help=(
                    'Show what it would run but do not actually run it.')),
        ]
        # Every known system also gets its own --<system> option which takes
        # a comma-separated key=value list (parsed in initialize_systems).
        for system in SYSTEMS:
            options.append(optparse.Option('--' + system.replace('_', '-')))
        return options

    def run(self, options, arguments):
        """Executes command.

        Args:
          options: Command-line options for all systems organized by system name.
          arguments: Command-line positional arguments

        Returns:
          True if successful. False, otherwise.
        """
        log.debug('deploy %(options)s %(arguments)s', dict(
            options=options, arguments=arguments))
        system_names = arguments
        if not system_names:
            log.info('Please specify systems to deploy. See --help for more'
                     ' information.')
            return False
        try:
            # Reject any requested name that discovery did not find.
            unknown_systems = set(system_names).difference(set(SYSTEMS))
            if unknown_systems:
                log.error(
                    'The following systems are not available: %s.'
                    ' See --help for list of available systems.',
                    ', '.join(list(unknown_systems)))
                return False
            # Initialize systems with their options provided from the command-line
            systems = self.initialize_systems(options, system_names)
            # Deploy one at a time
            for system_name, system in systems.items():
                log.info('Deploying %s', system_name)
                if options.help_system:
                    # Only show the system's usage text instead of deploying.
                    log.info(system.usage)
                else:
                    system.run()
        except TypeError:
            # Raised by load_system() when a module defines no
            # OrchestrateSystem subclass.
            log.exception('Unexpected error deploying system')
            return False
        return True

    def load_system(self, name, force=False):
        """Locate and instantiate first instance of OrchestrateSystem.

        It caches the loaded class to expedite multiple calls for the same system.

        Args:
          name: Name of module to load.
          force: Uses cached class previously loaded for performance reasons when
            set to False (default). Locate module and load it otherwise.

        Returns:
          An instance of OrchestrateSystem.

        Raises:
          TypeError if no OrchestrateSystem subclass is found.
        """
        system_type = self.system_classes.get(name)
        if not system_type or force:
            try:
                # Locate and load module
                module_info = SYSTEMS[name]
                module_name = name.replace('-', '_')
                # NOTE(review): finder.find_module/load_module are deprecated
                # in newer Python in favour of find_spec/exec_module -- confirm
                # the supported interpreter versions before changing this.
                loader = module_info.module_finder.find_module(module_name)
                module = loader.load_module(module_name)
                # Locate and load OrchestrateSystem instance
                for _, class_type in module.__dict__.items():
                    if isinstance(class_type, type) and \
                            issubclass(class_type, base.OrchestrateSystem):
                        system_type = class_type
                        # Cache to expedite subsequent calls for the same system
                        self.system_classes[name] = system_type
                        break
                if not system_type:
                    raise TypeError()
            except TypeError:
                log.error('Could not find implementation of OrchestrateSystem %s', name)
                raise
        # Instantiate system
        system = system_type()
        return system

    def initialize_systems(self, options, system_names):
        """Consolidate options for all systems and override defaults for each one.

        Args:
          options: Command-line options as parsed by the option parser.
          system_names: Names of the systems to load and initialize.

        Returns:
          A dict with OrchestrateSystems instances indexed by system name.
        """
        # Load every known system (not just the requested ones) so each one
        # can see the options of all the others via `others` below.
        all_systems = dict()
        for system_name in SYSTEMS:
            all_systems[system_name] = self.load_system(system_name)
        # Get options applicable to all systems excluding select options intended
        # for Orchestrate itself and those that match the name of as supported
        # systems.
        global_options = dict()
        filtered_options = ['api_key', 'api_host', 'api_project', 'help_system'] + \
            list(SYSTEMS.keys())
        all_option_names = vars(options).keys()
        global_option_names = set(all_option_names).difference(filtered_options)
        for global_option_name in global_option_names:
            global_options[global_option_name] = getattr(options, global_option_name)
        # Add a namespace per system name
        # Split the system-specific key=value,... lists
        # e.g. turn: --system1=one=1,two=2,three=3
        # into: system1=dict(one=1, two=2, three=3)
        consolidated_options = dict()
        for system_name, system in all_systems.items():
            # override system's default values from provided command-line options
            system_options = dict(system.defaults if system else dict())
            provided_options = getattr(options, system_name.replace('-', '_'))
            if provided_options:
                for provided_option in provided_options.split(','):
                    key, value = provided_option.split('=')
                    key = key.replace('-', '_')
                    system_options[key] = value
            consolidated_options[system_name] = system_options
        # hydrate each system data members from options explicitly provided for
        # the system via the command-line plus global options
        for system_name, system in all_systems.items():
            vars(system).update(consolidated_options.get(system_name, dict()))
            vars(system).update(global_options)
            system.name = system_name
            system.others = dict(
                (name, options) for name, options in consolidated_options.items()
                if name != system_name)
        # Configure every loaded system, but only return the requested ones.
        systems = dict()
        for system_name, system in all_systems.items():
            system.configure()
            if system_name in system_names:
                systems[system_name] = system
        return systems
| |
import matplotlib,numpy
matplotlib.use('Agg')
from .retrieve import time_convert
from astropy.units import Quantity
from matplotlib import pyplot
from gwpy.table import EventTable
from gwpy.plotter import SegmentPlot,TimeSeriesPlot
from gwpy.plotter import FrequencySeriesPlot,SpectrogramPlot
from gwpy.segments import SegmentList
from gwpy.frequencyseries import FrequencySeries
from gwpy.spectrogram import Spectrogram
from gwpy.timeseries import TimeSeries
from pylab import *
from scipy import signal
def plot_activity(full_seglist):
    """
    Plot full activity period for station.

    Parameters
    ----------
    full_seglist : dictionary
        Continuous list of available data in the selected time period
    """
    # Build a segment figure and draw every availability segment on it.
    segment_figure = SegmentPlot()
    axis = segment_figure.gca()
    axis.plot(full_seglist)
    pyplot.savefig("activity.png", dpi=300)
def plot_asd(station, data):
    """
    Plot Amplitude Spectral Density. AGG complexity starts to complain
    with large numbers of points. And we somehow invoke precision issues
    that need to be ameliorated.
    """
    if station != 'fake':
        # Work around float-precision issues when coalescing: temporarily
        # rescale the x-axis metadata to integer units (the factor 500
        # presumably matches a 500 Hz sampling grid -- TODO confirm),
        # coalesce, then restore the 0.002 s spacing.
        for d in data:
            d.x0 = Quantity(int(d.x0.value * 500), d.xunit)
            d.dx = Quantity(1, d.xunit)
        data.coalesce()
        for d in data:
            d.x0 = Quantity(d.x0.value / 500, d.xunit)
            d.dx = Quantity(0.002, d.xunit)
    # Initialize plotting functionality
    plot = FrequencySeriesPlot()
    # Loop over all the time series
    for d in data:
        # Generate 8 seconds per FFT with 4 second (50%) overlap
        spectrum = d.asd(8, 4)
        # Create plotting axis
        ax = plot.gca()
        # Plot square root of the spectrum
        ax.plot(numpy.sqrt(spectrum))
    # Set x axis to log scale
    ax.set_xscale('log')
    ax.set_xlabel('Frequency [Hz]')
    # Set y axis to log scale
    ax.set_yscale('log')
    ax.set_ylabel('Amplitude [pT]')
    # Set x axis limits
    ax.set_xlim(1e-1, 500)
    import matplotlib.ticker as ticker
    x = ax.get_xticklabels()  # NOTE(review): result unused

    def myticks(x, pos):
        # Tick formatter rendering values as "coeff x 10^exponent".
        if x == 0: return "$0$"
        # NOTE(review): `np` is assumed to come from `from pylab import *`
        # at module level -- confirm it resolves at runtime.
        exponent = int(np.log10(x))
        coeff = x / 10**exponent
        if coeff == 1:
            return r"$10^{{ {:2d} }}$".format(exponent)
        else:
            return r"${:2.0f} \times 10^{{ {:2d} }}$".format(coeff,
                                                             exponent)
    ax.xaxis.set_major_formatter(ticker.FuncFormatter(myticks))
    # Save figure
    plot.savefig("asd.png", dpi=300)
def plot_time_series(data, station='station-name', t0=None, t1=None,
                     seglist=None, burst=None, fname='time_series'):
    """
    Generate a plot of the whole data time series.

    Fixes over the previous revision: comparisons against None now use
    ``is (not) None`` instead of ``!=``, ``type(...) ==`` is replaced by
    ``isinstance``, and the LaTeX title is a raw string so ``\\m`` is not
    treated as an invalid escape sequence.

    Parameters
    ----------
    data : list of TimeSeries, or array of float64 samples
        Data to plot. A bare float64 array is wrapped into a one-element
        TimeSeries list.
    station : str
        Station name used in the plot title.
    t0, t1 : optional
        Start/end times; when both are given the x axis is restricted.
    seglist : dict, optional
        Per-station data-availability segments, drawn as a state panel.
    burst : TimeSeries, optional
        Simulated signal overlaid in red.
    fname : str
        Output file name without the .png extension.
    """
    # A float64 first element means a bare array was passed: wrap it so the
    # plotting loop below is uniform.  NOTE(review): this path still reads
    # data.sample_rate / data.start_time, so the input must carry those
    # attributes -- confirm against callers.
    if isinstance(data[0], float64):
        data = [TimeSeries(data, sample_rate=data.sample_rate,
                           epoch=data.start_time)]
    plot = TimeSeriesPlot()
    ax = plot.gca()
    # Plot each contiguous data segment in black.
    for ts in data:
        ax.plot(ts, color='black')
    ax.set_title(r'$\mathrm{' + station + '}$')
    ax.set_ylabel('Magnetic Field')
    # Overlay the simulated burst, if provided.
    if burst is not None:
        ax.plot(burst, color='red')
    # Plot activity segments as a state panel below the axes.
    if seglist is not None:
        activity = SegmentList(seglist[station].active)
        plotargs = {'label': 'data present', 'facecolor': 'g', 'edgecolor': 'k'}
        plot.add_state_segments(activity, plotargs=plotargs)
    # Set x-axis limits when both edges are defined.
    if t0 is not None and t1 is not None:
        t0, t1 = time_convert(t0, t1)
        plot.axes[0].set_epoch(t0)
        plot.axes[1].set_epoch(t0)
        ax.set_xlim(t0, t1)
    # Fix exceeded cell block limit error in Agg for very long series.
    matplotlib.pyplot.rcParams['agg.path.chunksize'] = 20000
    # Save figure
    plot.savefig('%s.png' % fname)
def plot_whitening(data, station='station-name', t0=None, t1=None, stride=20,
                   fftlength=6, overlap=3, seglist=None):
    r"""
    Generate a spectrogram plot and normalized spectrogram.

    norm: \sqrt{S(f,t)} / \sqrt{\overbar{S(f)}}

    Fixes over the previous revision: comparisons against None now use
    ``is (not) None``, ``type(...) ==`` is replaced by ``isinstance``, and
    the docstring/titles are raw strings so the LaTeX backslash escapes are
    not mangled.
    """
    # Wrap a bare float64 array into a one-element TimeSeries list so the
    # loop below is uniform.  NOTE(review): this path reads data.sample_rate
    # and data.start_time -- confirm callers provide them.
    if isinstance(data[0], float64):
        data = [TimeSeries(data, sample_rate=data.sample_rate,
                           epoch=data.start_time)]
    # Setup plots
    plot = SpectrogramPlot()
    ax = plot.gca()
    white_plot = SpectrogramPlot()
    wax = white_plot.gca()
    # Loop through available time series
    for ts in data:
        # Segments shorter than one stride cannot produce a spectrogram
        # column; skip them.
        if (len(ts) * ts.dt).value < stride:
            continue
        spec = ts.spectrogram(stride, fftlength=fftlength, overlap=overlap)
        # Normalize by the median over time to whiten the spectrogram.
        wspec = spec.ratio('median')
        ax.plot(spec, cmap='jet')
        wax.plot(wspec, vmin=0.1, vmax=100, cmap='jet')
    # Define y axis and title
    ax.set_title(r'$\mathrm{' + station + '}$')
    ax.set_ylim(0.1, ts.sample_rate.value / 2.)
    ax.set_yscale('log')
    wax.set_title(r'$\mathrm{' + station + '}$')
    wax.set_ylim(0.1, ts.sample_rate.value / 2.)
    wax.set_yscale('log')
    plot.add_colorbar(label='Amplitude')
    white_plot.add_colorbar(label='Amplitude')
    # Plot activity panels for real data
    if seglist is not None:
        activity = SegmentList(seglist[station].active)
        plotargs = {'label': 'data present', 'facecolor': 'g', 'edgecolor': 'k'}
        plot.add_state_segments(activity, plotargs=plotargs)
        white_plot.add_state_segments(activity, plotargs=plotargs)
    # Set plotting limits of x axis if edges defined
    if t0 is not None and t1 is not None:
        t0, t1 = time_convert(t0, t1)
        plot.axes[0].set_epoch(t0)
        plot.axes[2].set_epoch(t0)
        white_plot.axes[0].set_epoch(t0)
        white_plot.axes[2].set_epoch(t0)
        ax.set_xlim(t0, t1)
        wax.set_xlim(t0, t1)
    # Save figures
    plot.savefig("spectrogram.png", dpi=300)
    white_plot.savefig("whitened.png", dpi=300)
def plot_triggers(filename='excesspower.xml.gz', fname='triggers.png'):
    """Plot excess-power event triggers from a LIGO_LW burst table,
    central frequency versus peak time coloured by SNR."""
    events = EventTable.read(filename, format='ligolw.sngl_burst')
    # Combine integer and nanosecond parts into a float peak time.
    time = events['peak_time'] + events['peak_time_ns'] * 1e-9
    events.add_column(events['peak_time'] + events['peak_time_ns'] * 1e-9,
                      name='time')
    plot = events.plot('time', 'central_freq', color='snr', edgecolor='none')
    start = int(min(time))
    plot.axes[0].set_epoch(start)
    plot.set_xlim((start, round(max(time))))
    plot.set_ylabel('Frequency [Hz]')
    plot.set_yscale('log')
    plot.add_colorbar(cmap='copper_r', label='Tile Energy')
    pyplot.savefig(fname, dpi=300)
def plot_bank(fdb):
    """Plot a single representative filter (index 2) from the
    frequency-domain filter bank and save it as bank.png."""
    pyplot.figure()
    for index, template in enumerate(fdb):
        if index == 2:
            pyplot.plot(template.frequencies, template, 'k-')
            break
    pyplot.grid()
    pyplot.xlabel("frequency [Hz]")
    pyplot.savefig('bank.png', dpi=300)
    pyplot.close()
def plot_filters(tdb, fmin, band):
    """Plot three representative time-domain filters (every third of the
    first eight), one stacked subplot per filter labelled with its
    central frequency."""
    pyplot.figure()
    pyplot.subplots_adjust(left=0.2, right=0.95, bottom=0.15,
                           top=0.95, hspace=0, wspace=1)
    for n, template in enumerate(tdb[:8:3]):
        axis = pyplot.subplot(3, 1, n + 1)
        axis.plot(template.times.value - 2.,
                  numpy.real_if_close(template.value), 'k-')
        center = fmin + band / 2 + 3 * (band * n) + 2.
        axis.set_xlabel("Time (s)")
        axis.set_ylabel("%d Hz" % center)
        axis.set_xlim(25.0, 31.0)
        peak = max(template.value)
        axis.set_ylim([-peak, peak])
    pyplot.savefig('filters.png', dpi=300)
    pyplot.close()
def plot_ts(ts, fname="ts.png"):
    """Plot a time series object (exposing delta_t/start_time/end_time)
    and save the figure to *fname*."""
    figure = TimeSeriesPlot()
    axis = figure.gca()
    series = TimeSeries(ts, sample_rate=1.0 / ts.delta_t, epoch=ts.start_time)
    axis.plot(series)
    axis.set_xlim(ts.start_time, ts.end_time)
    pyplot.savefig(fname)
    pyplot.close()
def plot_spectrum(fd_psd):
    """Plot a frequency-domain PSD on log-log axes and save as psd.png."""
    figure = FrequencySeriesPlot()
    axis = figure.gca()
    axis.plot(FrequencySeries(fd_psd, df=fd_psd.delta_f))
    pyplot.xlim(0.1, 500)
    pyplot.loglog()
    pyplot.savefig("psd.png", dpi=300)
    pyplot.close()
def plot_spectrogram(spec, dt, df, ymax, t0, t1, fname="specgram.png"):
    """Plot a 2-D spectrogram array with the given time/frequency spacing
    and save it to *fname*."""
    figure = SpectrogramPlot()
    axis = figure.gca()
    axis.plot(Spectrogram(spec, dt=dt, df=df, epoch=float(t0)), cmap='viridis')
    figure.add_colorbar(label='Amplitude')
    pyplot.xlim(t0, t1)
    pyplot.ylim(0, ymax)
    pyplot.savefig(fname)
    pyplot.close()
def plot_tiles_ts(tdb, ndof, df, sample_rate, t0, t1, fname="tiles.png"):
    """Plot every tile time series in *tdb* as one stacked subplot each,
    labelled only on the bottom panel."""
    figure = TimeSeriesPlot(figsize=(12, 12))
    figure.suptitle('%i channels, %i Hz bandwidth, %i DOF'
                    % (len(tdb), df, ndof))
    plt.subplots_adjust(left=0.03, right=0.97, bottom=0.07, top=0.95,
                        hspace=0, wspace=0)
    total = len(tdb)
    for index, tile in enumerate(tdb):
        series = TimeSeries(tile, epoch=float(t0), sample_rate=sample_rate)
        axis = figure.add_subplot(total, 1, total - index)
        axis.plot(series)
        axis.set_xlim(t0, t1)
        if index > 0:
            # Only the bottom panel keeps its tick labels.
            axis.get_xaxis().set_visible(False)
            axis.get_yaxis().set_visible(False)
    pyplot.savefig(fname)
    pyplot.close()
def plot_tiles_tf(tdb, ndof, df, ymax, sample_rate, t0, t1, fname="tiles.png"):
    """Save one spectrogram image per tile time series in tdb.

    BUG FIX: the previous version called ``quit()`` after the loop, which
    terminated the whole interpreter the first time this function was used
    from library code. It also built an unused TimeSeries per tile and
    called ``pyplot.show()`` (a no-op/warning under the Agg backend selected
    at import time); both have been removed.

    Args:
      tdb: iterable of tile data arrays.
      ndof: degrees of freedom (kept for signature parity; unused here).
      df: frequency resolution in Hz (kept for signature parity; unused here).
      ymax: upper frequency limit for the plots.
      sample_rate: sampling rate of the tile data, passed to scipy.
      t0, t1: start/end times (kept for signature parity; unused here).
      fname: template file name; '_NNN' is inserted before '.png' per tile.
    """
    for i, tdf in enumerate(tdb):
        f, t, Sxx = signal.spectrogram(tdf, sample_rate)
        pyplot.figure(figsize=(12, 8))
        pyplot.subplots_adjust(left=0.1, right=0.97, bottom=0.07, top=0.95,
                               hspace=0, wspace=0)
        pyplot.pcolormesh(t, f, Sxx)
        pyplot.ylabel('Frequency [Hz]')
        pyplot.xlabel('Time [sec]')
        pyplot.ylim(0, ymax)
        pyplot.savefig(fname.replace('.png', '_%03i.png' % i))
        pyplot.close()
def plot_spectrogram_from_ts(ts, fname='specgram.png', stride=20):
    """Compute and plot a spectrogram from a time series.

    BUG FIX: the previous version plotted an undefined name ``spec`` and
    always raised NameError. The spectrogram is now computed from ``ts``.

    Args:
      ts: input time series; assumed to be a gwpy TimeSeries providing
        .spectrogram(), consistent with plot_whitening() above -- TODO
        confirm against callers.
      fname: output image file name.
      stride: spectrogram stride in seconds (new optional parameter,
        defaulting to the same value plot_whitening uses).
    """
    plot = SpectrogramPlot()
    ax = plot.gca()
    ax.plot(ts.spectrogram(stride))
    pyplot.savefig(fname)
    pyplot.close()
def wavelet(ts_data, fname='wavelet.png'):
    """Morlet continuous-wavelet scalogram of a magnetic-field time series.

    Decimates the series to 1 sample/second, runs a CWT via mlpy, and saves
    a figure with the decimated time series on top and a log-frequency
    scalogram below.
    """
    import mlpy
    sample_rate = int(ts_data.sample_rate.value)
    z = numpy.array([float(i) for i in ts_data])
    t = numpy.array([float(i) for i in ts_data.times.value])
    # Decimate magnetic field data to 1 sample/second
    # (5*10*10 = 500; otherwise 8*8*8 = 512 is presumably the rate -- TODO
    # confirm the non-500 Hz stations really sample at 512 Hz).
    rate = [5, 10, 10] if sample_rate == 500 else [8, 8, 8]
    for i in rate:
        z = signal.decimate(z, i, zero_phase=True)
    # Extract time every 500 sample
    t = [t[n * sample_rate] for n in range(int(len(t) / sample_rate))]
    # Convert every timing points to scale (hr,min,sec) units
    s = 60.
    t = [(t[i] - t[0]) / s for i in range(len(t))]
    # Do wavelet analysis
    omega0 = 6
    fct = "morlet"
    scales = mlpy.wavelet.autoscales(N=len(z), dt=1, dj=0.05, wf=fct, p=omega0)
    spec = mlpy.wavelet.cwt(z, dt=1, scales=scales, wf=fct, p=omega0)
    # Convert wavelet scales to frequency (reported in mHz).
    freq = (omega0 + numpy.sqrt(2.0 + omega0 ** 2)) / \
        (4 * numpy.pi * scales[1:]) * 1000
    # Keep only the 0.1 - 1000 mHz band.
    idxs = numpy.where(numpy.logical_or(freq < 0.1, 1000 < freq))[0]
    spec = numpy.delete(spec, idxs, 0)
    freq = numpy.delete(freq, idxs, 0)
    # Initialise axis
    fig = figure(figsize=(12, 8))
    plt.subplots_adjust(left=0.1, right=1, bottom=0.1,
                        top=0.94, hspace=0, wspace=0)
    ax1 = fig.add_axes([0.10, 0.75, 0.70, 0.20])
    ax2 = fig.add_axes([0.10, 0.10, 0.70, 0.60], sharex=ax1)
    ax3 = fig.add_axes([0.83, 0.10, 0.03, 0.60])
    # Plot time series
    ax1.plot(t, abs(z) - numpy.average(abs(z)), 'k')
    ax1.set_ylabel('Magnetic Fields [uT]')
    # Set up axis range for spectrogram
    twin_ax = ax2.twinx()
    twin_ax.set_yscale('log')
    twin_ax.set_xlim(t[0], t[-1])
    twin_ax.set_ylim(freq[-1], freq[0])
    twin_ax.tick_params(which='both', labelleft=True,
                        left=True, labelright=False)
    # Plot spectrogram
    # NOTE(review): `cm` and `mpl` are assumed to come from the module's
    # `from pylab import *` -- confirm they resolve at runtime.
    img = ax2.imshow(numpy.abs(spec)**2, extent=[t[0], t[-1], freq[-1], freq[0]],
                     aspect='auto', interpolation='nearest',
                     cmap=cm.jet, norm=mpl.colors.LogNorm())  # cm.cubehelix
    ax2.tick_params(which='both', labelleft=False, left=False)
    ax2.set_xlabel('Time [mins]')
    ax2.set_ylabel('Frequency [mHz]', labelpad=50)
    fig.colorbar(img, cax=ax3)
    plt.savefig(fname, dpi=300)
| |
# Copyright (c) 2018, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Module authorship metadata.
__author__ = "Peter Ogden"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "pynq_support@xilinx.com"
import asyncio
import numpy as np
from pynq.xlnk import ContiguousArray
from pynq import DefaultIP, allocate, UnsupportedConfiguration
class _FrameCache:
def __init__(self, mode, capacity=5, cacheable=0):
self._cache = []
self._mode = mode
self._capacity = capacity
self._cacheable = cacheable
def getframe(self):
"""Retrieve a frame from the cache or create a new frame if the
cache is empty. The freebuffer method of the returned array is
overriden to return the object to the cache rather than freeing
the object.
"""
if self._cache:
frame = allocate(
shape=self._mode.shape, dtype='u1', cacheable=self._cacheable,
pointer=self._cache.pop(), cache=self)
else:
frame = allocate(
shape=self._mode.shape, dtype=np.uint8,
cacheable=self._cacheable, cache=self)
return frame
def return_pointer(self, pointer):
if len(self._cache) < self._capacity:
self._cache.append(pointer)
def clear(self):
self._cache.clear()
class AxiVDMA(DefaultIP):
"""Driver class for the Xilinx VideoDMA IP core
The driver is split into input and output channels are exposed using the
readchannel and writechannel attributes. Each channel has start and
stop methods to control the data transfer. All channels MUST be stopped
before reprogramming the bitstream or inconsistent behaviour may result.
The DMA uses a single ownership model of frames in that frames are either
owned by the DMA or the user code but not both. S2MMChannel.readframe
and MM2SChannel.newframe both return a frame to the user. It is the
user's responsibility to either free the frame using the freebuffer()
method or to hand ownership back to the DMA using MM2SChannel.writeframe.
Once ownership has been returned the user should not access the contents
of the frame as the underlying memory may be deleted without warning.
Attributes
----------
readchannel : AxiVDMA.S2MMChannel
Video input DMA channel
writechannel : AxiVDMA.MM2SChannel
Video output DMA channel
"""
    class _FrameList:
        """Internal helper class for handling the list of frames associated
        with a DMA channel. Assumes ownership of all frames it contains
        unless explicitly removed with takeownership
        """

        def __init__(self, parent, offset, count):
            # `parent` is the owning DMA channel: it supplies the MMIO window
            # and the reload() callback invoked after register updates.
            self._frames = [None] * count
            self._mmio = parent._mmio
            # Base offset of the channel's frame-address registers.
            self._offset = offset
            # Other _FrameLists mirroring our frames (tied output channels).
            self._slaves = set()
            self.count = count
            self.reload = parent.reload

        def __getitem__(self, index):
            frame = self._frames[index]
            return frame

        def takeownership(self, index):
            # Forget the frame without touching the hardware register: the
            # caller now owns the underlying buffer.
            self._frames[index] = None

        def __len__(self):
            return self.count

        def __setitem__(self, index, frame):
            # Store the frame and point the corresponding DMA register at its
            # physical address (0 when the slot is emptied), then reload the
            # DMA configuration so the change takes effect.
            self._frames[index] = frame
            if frame is not None:
                self._mmio.write(self._offset + 4 * index,
                                 frame.physical_address)
            else:
                self._mmio.write(self._offset + 4 * index, 0)
            self.reload()
            # Mirror the assignment into tied (slave) lists; slaves never own
            # the shared buffer, so ownership is immediately taken back.
            for s in self._slaves:
                s[index] = frame
                s.takeownership(index)

        def addslave(self, slave):
            """Mirror all current and future frames into *slave*."""
            self._slaves.add(slave)
            for i in range(len(self._frames)):
                slave[i] = self[i]
                slave.takeownership(i)
            slave.reload()

        def removeslave(self, slave):
            """Stop mirroring frames into *slave*."""
            self._slaves.remove(slave)
    class S2MMChannel:
        """Read channel of the Video DMA

        Brings frames from the video input into memory. Hands ownership of
        the read frames to the user code.

        Attributes
        ----------
        mode : VideoMode
            The video mode of the DMA channel
        cacheable_frames : bool
            Whether frames should be stored in cacheable or
            non-cacheable memory
        """

        def __init__(self, parent, interrupt):
            self._mmio = parent.mmio
            # S2MM frame-buffer address registers start at offset 0xAC
            # (per the AXI VDMA register map -- confirm against PG020).
            self._frames = AxiVDMA._FrameList(self, 0xAC, parent.framecount)
            self._interrupt = interrupt
            # Output channel tied via tie(), if any.
            self._sinkchannel = None
            self._mode = None
            self.cacheable_frames = True

        def _readframe_internal(self):
            # 0x34 is the channel status register; the bits in 0x8980 are
            # cleared by writing ones back.
            if self._mmio.read(0x34) & 0x8980:
                # Some spurious errors can occur at the start of transfers
                # let's ignore them for now
                self._mmio.write(0x34, 0x8980)
            self.irqframecount = 1
            nextframe = self._cache.getframe()
            # The slot two ahead of the active one holds the most recently
            # completed capture: swap in a fresh buffer and keep the old one.
            previous_frame = (self.activeframe + 2) % len(self._frames)
            captured = self._frames[previous_frame]
            self._frames.takeownership(previous_frame)
            self._frames[previous_frame] = nextframe
            # NOTE(review): result unused -- looks like leftover code.
            post_frame = (self.activeframe + 2) % len(self._frames)
            # Invalidate CPU caches so the caller sees the DMA-written data.
            captured.invalidate()
            return captured

        def readframe(self):
            """Read a frame from the channel and return to the user

            This function may block until a complete frame has been read. A
            single frame buffer is kept so the first frame read after a long
            pause in reading may return a stale frame. To ensure an up-to-date
            frame when starting processing video read an additional time
            before starting the processing loop.

            Returns
            -------
            numpy.ndarray of the video frame
            """
            if not self.running:
                raise RuntimeError('DMA channel not started')
            # Wait for the frame-count interrupt bit (0x1000) in the status
            # register, sleeping on the interrupt via the event loop.
            while self._mmio.read(0x34) & 0x1000 == 0:
                loop = asyncio.get_event_loop()
                loop.run_until_complete(
                    asyncio.ensure_future(self._interrupt.wait()))
                pass
            # Acknowledge the interrupt (write-one-to-clear).
            self._mmio.write(0x34, 0x1000)
            return self._readframe_internal()

        async def readframe_async(self):
            """Read a frame from the channel, yielding instead of blocking
            if no data is available. See readframe for more details
            """
            if not self.running:
                raise RuntimeError('DMA channel not started')
            while self._mmio.read(0x34) & 0x1000 == 0:
                await self._interrupt.wait()
            self._mmio.write(0x34, 0x1000)
            return self._readframe_internal()

        @property
        def activeframe(self):
            """The frame index currently being processed by the DMA

            This process requires clearing any error bits in the DMA channel
            """
            self._mmio.write(0x34, 0x4090)
            # Frame index is reported in bits [28:24] of register 0x28.
            return (self._mmio.read(0x28) >> 24) & 0x1F

        @property
        def desiredframe(self):
            """The next frame index to the processed by the DMA
            """
            return (self._mmio.read(0x28) >> 8) & 0x1F

        @desiredframe.setter
        def desiredframe(self, frame_number):
            if frame_number < 0 or frame_number >= len(self._frames):
                raise ValueError("Invalid frame index")
            # Read-modify-write bits [12:8] of register 0x28 only.
            register_value = self._mmio.read(0x28)
            mask = ~(0x1F << 8)
            register_value &= mask
            register_value |= (frame_number << 8)
            self._mmio.write(0x28, register_value)

        @property
        def mode(self):
            """The video mode of the DMA. Must be set prior to starting.
            Changing this while the DMA is running will result in the DMA
            being stopped.
            """
            return self._mode

        @mode.setter
        def mode(self, value):
            if self.running:
                self.stop()
            self._mode = value

        @property
        def running(self):
            """Is the DMA channel running
            """
            # Bit 0 of the status register (0x34) is the halted flag:
            # 0 means the channel is running.
            return (self._mmio.read(0x34) & 0x1) == 0

        @property
        def parked(self):
            """Is the channel parked or running in circular buffer mode
            """
            # Bit 1 of the control register (0x30): 0 = parked.
            return self._mmio.read(0x30) & 0x2 == 0

        @parked.setter
        def parked(self, value):
            register = self._mmio.read(0x30)
            if value:
                register &= ~0x2
            else:
                register |= 0x2
            self._mmio.write(0x30, register)

        @property
        def irqframecount(self):
            # Frames-per-interrupt count lives in bits [23:16] of 0x30.
            register = self._mmio.read(0x30)
            return (register >> 16) & 0xFF

        @irqframecount.setter
        def irqframecount(self, val):
            register = self._mmio.read(0x30)
            newregister = (register & 0xFF00FFFF) | (val << 16)
            # Skip the MMIO write when the value would be unchanged.
            if register != newregister:
                self._mmio.write(0x30, newregister)

        def start(self):
            """Start the DMA. The mode must be set prior to this being called
            """
            if not self._mode:
                raise RuntimeError("Video mode not set, channel not started")
            self.desiredframe = 0
            self._cache = _FrameCache(
                self._mode, cacheable=self.cacheable_frames)
            # Give every frame slot a buffer before starting the hardware.
            for i in range(len(self._frames)):
                self._frames[i] = self._cache.getframe()
            self._writemode()
            self.reload()
            self._mmio.write(0x30, 0x00011083)  # Start DMA
            self.irqframecount = 4  # Ensure all frames are written to
            self._mmio.write(0x34, 0x1000)  # Clear any interrupts
            # Busy-wait until the halted bit clears.
            while not self.running:
                pass
            self.reload()
            self.desiredframe = 1

        def stop(self):
            """Stops the DMA, clears the frame cache and unhooks any tied
            outputs
            """
            self.tie(None)
            self._mmio.write(0x30, 0x00011080)
            # Busy-wait until the channel reports halted.
            while self.running:
                pass
            # Release every frame buffer back to the cache.
            for i in range(len(self._frames)):
                self._frames[i] = None
            if hasattr(self, '_cache'):
                self._cache.clear()

        def _writemode(self):
            # Program bytes-per-line (0xA4) and line stride (0xA8).
            self._mmio.write(0xA4, self._mode.width *
                             self._mode.bytes_per_pixel)
            self._mmio.write(0xA8, self._mode.stride)

        def reload(self):
            """Reload the configuration of the DMA. Should only be called
            by the _FrameList class or if you really know what you are doing
            """
            # Writing the vertical size (0xA0) commits the geometry change.
            if self.running:
                self._mmio.write(0xA0, self._mode.height)

        def reset(self):
            """Soft reset the DMA. Finishes all transfers before starting
            the reset process
            """
            self.stop()
            self._mmio.write(0x30, 0x00011084)
            # Wait for the self-clearing reset bit (0x4) to drop.
            while self._mmio.read(0x30) & 0x4 == 4:
                pass

        def tie(self, channel):
            """Ties an output channel to this input channel. This is used
            to pass video from input to output without invoking the CPU
            for each frame. Main use case is when some slow processing is
            being done on a subset of frames while the video is passed
            through directly to the output. Only one output may be tied
            to an output. The tie is broken either by calling tie(None) or
            writing a frame to the tied output channel.
            """
            # Detach any previous sink before attaching the new one.
            if self._sinkchannel:
                self._frames.removeslave(self._sinkchannel._frames)
                self._sinkchannel.parked = True
                self._sinkchannel.sourcechannel = None
            self._sinkchannel = channel
            if self._sinkchannel:
                self._frames.addslave(self._sinkchannel._frames)
                self._sinkchannel.parked = False
                self._sinkchannel.framedelay = 1
                self._sinkchannel.sourcechannel = self
    class MM2SChannel:
        """DMA channel from memory to a video output.

        Will continually repeat the most recent frame written.

        Attributes
        ----------
        mode : VideoMode
            Video mode of the DMA channel
        cacheable_frames : bool
            Whether frames should be stored in cacheable or
            non-cacheable memory
        """

        def __init__(self, parent, interrupt):
            self._mmio = parent.mmio
            # MM2S frame-buffer address registers start at offset 0x5C
            # (per the AXI VDMA register map -- confirm against PG020).
            self._frames = AxiVDMA._FrameList(self, 0x5C, parent.framecount)
            self._interrupt = interrupt
            self._mode = None
            # Input channel tied to this output via S2MMChannel.tie(), if any.
            self.sourcechannel = None
            self.cacheable_frames = True
        def start(self):
            """Start the DMA channel with a blank screen. The mode must
            be set prior to calling or a RuntimeError will result.
            """
            if not self._mode:
                raise RuntimeError("Video mode not set, channel not started")
            self._cache = _FrameCache(
                self._mode, cacheable=self.cacheable_frames)
            # Prime slot 0 with a buffer so the DMA has something to output.
            self._frames[0] = self._cache.getframe()
            self._writemode()
            self.reload()
            # 0x00 is the MM2S control register; this value starts the channel.
            self._mmio.write(0x00, 0x00011089)
            # Busy-wait until the halted bit clears.
            while not self.running:
                pass
            self.reload()
            self.desiredframe = 0
            pass
        def stop(self):
            """Stop the DMA channel and empty the frame cache
            """
            self._mmio.write(0x00, 0x00011080)
            # Busy-wait until the channel reports halted.
            while self.running:
                pass
            # Release every frame buffer back to the cache.
            for i in range(len(self._frames)):
                self._frames[i] = None
            if hasattr(self, '_cache'):
                self._cache.clear()
        def reset(self):
            """Soft reset the DMA channel
            """
            self.stop()
            # Set the soft-reset bit and wait for it to self-clear.
            self._mmio.write(0x00, 0x00011084)
            while self._mmio.read(0x00) & 0x4 == 4:
                pass
        def _writeframe_internal(self, frame):
            # Writing a frame breaks any input-to-output tie.
            if self.sourcechannel:
                self.sourcechannel.tie(None)
            # Flush CPU caches so the DMA reads the final pixel data.
            frame.flush()
            # Schedule the frame in the next slot and park on it.
            next_frame = (self.desiredframe + 1) % len(self._frames)
            self._frames[next_frame] = frame
            self.desiredframe = next_frame
        def writeframe(self, frame):
            """Schedule the specified frame to be the next one displayed.
            Assumes ownership of frame which should no longer be modified
            by the user. May block if there is already a frame scheduled.
            """
            if not self.running:
                raise RuntimeError('DMA channel not started')
            # Wait for the frame-count interrupt bit (0x1000) in the status
            # register (0x04), sleeping on the interrupt via the event loop.
            while self._mmio.read(0x04) & 0x1000 == 0:
                loop = asyncio.get_event_loop()
                loop.run_until_complete(
                    asyncio.ensure_future(self._interrupt.wait()))
            # Acknowledge the interrupt (write-one-to-clear).
            self._mmio.write(0x04, 0x1000)
            self._writeframe_internal(frame)
        async def writeframe_async(self, frame):
            """Same as writeframe() but yields instead of blocking if a
            frame is already scheduled
            """
            if not self.running:
                raise RuntimeError('DMA channel not started')
            while self._mmio.read(0x04) & 0x1000 == 0:
                await self._interrupt.wait()
            self._mmio.write(0x04, 0x1000)
            self._writeframe_internal(frame)
        def setframe(self, frame):
            """Sets a frame without blocking or taking ownership. In most
            circumstances writeframe() is more appropriate
            """
            frameindex = self.desiredframe
            self._frames[frameindex] = frame
            # Caller keeps ownership of the buffer.
            self._frames.takeownership(frameindex)
def _writemode(self):
    # Program the geometry registers from the current video mode.
    # 0x54 (HSIZE): active line length in bytes.
    self._mmio.write(0x54, self._mode.width *
                     self._mode.bytes_per_pixel)
    # 0x58 (FRMDLY_STRIDE): preserve the frame-delay field, replace the
    # stride with the mode's stride.
    # NOTE(review): the mask keeps only bits 24-27; if the frame-delay
    # field is 5 bits wide (24-28) the top bit is dropped here — confirm
    # against the VDMA register map.
    register = self._mmio.read(0x58)
    register &= (0xF << 24)
    register |= self._mode.stride
    self._mmio.write(0x58, register)
def reload(self):
    """Reload the configuration of the DMA.

    Should only be called by the _FrameList class, or if you really
    know what you are doing.
    """
    # Writing VSIZE (0x50) commits the configuration; it is only
    # meaningful while the channel is running.
    if not self.running:
        return
    self._mmio.write(0x50, self._mode.height)
def newframe(self):
    """Return a frame sized for the current video mode.

    The contents of the returned buffer are undefined — do not assume
    it is black.

    Returns
    -------
    numpy.ndarray video frame
    """
    return self._cache.getframe()
@property
def activeframe(self):
    """Index of the frame currently being processed by the hardware."""
    # Clear latched status bits before sampling the park-pointer register.
    self._mmio.write(0x04, 0x4090)
    status = self._mmio.read(0x28)
    return (status >> 16) & 0x1F
@property
def desiredframe(self):
    """Frame index the DMA engine should move to next."""
    return self._mmio.read(0x28) & 0x1F

@desiredframe.setter
def desiredframe(self, frame_number):
    if not 0 <= frame_number < len(self._frames):
        raise ValueError("Invalid Frame Index")
    # Replace only the low 5 bits (frame pointer) of the park register.
    register_value = self._mmio.read(0x28)
    register_value &= ~0x1F
    register_value |= frame_number
    self._mmio.write(0x28, register_value)
@property
def running(self):
    """True while the DMA engine is not halted (status halt bit clear)."""
    return not (self._mmio.read(0x04) & 0x1)
@property
def mode(self):
    """Video mode of the channel.

    Must be set before the channel is started. Assigning a new mode
    while the channel is running stops the channel first.
    """
    return self._mode

@mode.setter
def mode(self, value):
    if self.running:
        self.stop()
    self._mode = value
@property
def parked(self):
    """True when the channel is parked on a single frame rather than
    cycling through the circular buffer.
    """
    return not (self._mmio.read(0x00) & 0x2)

@parked.setter
def parked(self, value):
    control = self._mmio.read(0x00)
    if value:
        # Park on the frame currently being output.
        self.desiredframe = self.activeframe
        control &= ~0x2
    else:
        control |= 0x2
    self._mmio.write(0x00, control)
@property
def framedelay(self):
    """Number of frames the output is delayed behind the input."""
    return self._mmio.read(0x58) >> 24

@framedelay.setter
def framedelay(self, value):
    register = self._mmio.read(0x58)
    # Keep the stride (low 16 bits), replace the delay field.
    register &= 0xFFFF
    register |= value << 24
    self._mmio.write(0x58, register)
def __init__(self, description, framecount=None):
    """Create a new instance of the AXI Video DMA driver

    Parameters
    ----------
    description : dict
        Entry from the IP dict describing the core this driver binds
        to. If it carries a 'parameters' section, channel availability
        and the frame count are read from it.
    framecount : int, optional
        Number of frame buffers to use when the IP parameters are not
        available; defaults to 4. Ignored (overridden by
        C_NUM_FSTORES) when 'parameters' is present.
    """
    super().__init__(description)
    if 'parameters' in description:
        parameters = description['parameters']
        has_s2mm = parameters['C_INCLUDE_S2MM'] == '1'
        has_mm2s = parameters['C_INCLUDE_MM2S'] == '1'
        # Frame count from the synthesized IP overrides the argument.
        framecount = int(parameters['C_NUM_FSTORES'])
        s2mm_addr_width = int(parameters['C_M_AXI_S2MM_ADDR_WIDTH'])
        mm2s_addr_width = int(parameters['C_M_AXI_MM2S_ADDR_WIDTH'])
        if ((has_s2mm and s2mm_addr_width > 32) or
                (has_mm2s and mm2s_addr_width > 32)):
            raise UnsupportedConfiguration(
                'VDMA driver only supports 32-bit addresses')
    else:
        # No parameters available: assume both channels exist.
        has_s2mm = True
        has_mm2s = True
        framecount = 4 if framecount is None else framecount
    self.framecount = framecount
    if has_s2mm:
        self.readchannel = AxiVDMA.S2MMChannel(self, self.s2mm_introut)
    if has_mm2s:
        self.writechannel = AxiVDMA.MM2SChannel(self, self.mm2s_introut)

# IP cores this driver binds to.
bindto = ['xilinx.com:ip:axi_vdma:6.2',
          'xilinx.com:ip:axi_vdma:6.3']
| |
"""Special bot library containing UploadRobot.
Do not import classes directly from here but from specialbots.
"""
#
# (C) Pywikibot team, 2003-2022
#
# Distributed under the terms of the MIT license.
#
import os
import tempfile
from contextlib import suppress
from http import HTTPStatus
from pathlib import Path
from typing import Optional, Union
from urllib.parse import urlparse
import requests
import pywikibot
import pywikibot.comms.http as http
from pywikibot import config
from pywikibot.backports import List
from pywikibot.bot import BaseBot, QuitKeyboardInterrupt
from pywikibot.exceptions import APIError, FatalServerError, NoPageError
from pywikibot.tools.formatter import color_format
class UploadRobot(BaseBot):

    """Upload bot."""

    def __init__(self, url: Union[List[str], str], *,
                 url_encoding=None,
                 description: str = '',
                 use_filename=None,
                 keep_filename: bool = False,
                 verify_description: bool = True,
                 ignore_warning: Union[bool, list] = False,
                 target_site=None,
                 aborts: Union[bool, list, None] = None,
                 chunk_size: int = 0,
                 asynchronous: bool = False,
                 summary: Optional[str] = None,
                 filename_prefix: Optional[str] = None,
                 force_if_shared: bool = False,
                 **kwargs) -> None:
        """Initializer.

        .. versionchanged:: 6.2
           asynchronous upload is used if *asynchronous* parameter is set

        .. versionchanged:: 6.4
           *force_if_shared* parameter was added

        :param url: path to url or local file, or list of urls or paths
            to local files.
        :param description: Description of file for its page. If multiple files
            are uploading the same description is used for every file.
        :type description: str
        :param use_filename: Specify title of the file's page. If multiple
            files are uploading it asks to change the name for second, third,
            etc. files, otherwise the last file will overwrite the other.
        :param keep_filename: Set to True to keep original names of urls and
            files, otherwise it will ask to enter a name for each file.
        :param summary: Summary of the upload
        :param verify_description: Set to True to proofread the description.
        :param ignore_warning: Set this to True to upload even if another file
            would be overwritten or another mistake would be risked. Set it to
            an array of warning codes to selectively ignore specific warnings.
        :param target_site: Set the site to upload to. If target site is not
            given it's taken from user-config.py.
        :type target_site: object
        :param aborts: List of the warning types to abort upload on. Set to
            True to abort on any warning.
        :param chunk_size: Upload the file in chunks (more overhead, but
            restartable) specified in bytes. If no value is specified the file
            will be uploaded as whole.
        :param asynchronous: Make potentially large file operations
            asynchronous on the server side when possible.
        :param filename_prefix: Specify prefix for the title of every
            file's page.
        :param force_if_shared: Upload the file even if it's currently
            shared to the target site (e.g. when moving from Commons to another
            wiki)
        :keyword always: Disables any input, requires that either
            ignore_warning or aborts are set to True and that the
            description is also set. It overwrites verify_description to
            False and keep_filename to True.
        :type always: bool
        """
        super().__init__(**kwargs)
        if self.opt.always:
            # A fully unattended run must carry a warning policy and a
            # description, otherwise the bot would have to prompt.
            if ignore_warning is not True and aborts is not True:
                raise ValueError(
                    'When always is set to True, '
                    'ignore_warning or aborts must be set to True.')
            if not description:
                raise ValueError(
                    'When always is set to True, the description must be set.')
        self.url = [url] if isinstance(url, str) else url
        self.url_encoding = url_encoding
        self.description = description
        self.use_filename = use_filename
        # 'always' implies no prompting: keep names, skip proofreading.
        self.keep_filename = keep_filename or self.opt.always
        self.verify_description = verify_description and not self.opt.always
        self.ignore_warning = ignore_warning
        self.aborts = aborts or []
        self.chunk_size = chunk_size
        self.asynchronous = asynchronous
        self.summary = summary
        self.filename_prefix = filename_prefix
        self.force_if_shared = force_if_shared

        if config.upload_to_commons:
            default_site = pywikibot.Site('commons:commons')
        else:
            default_site = pywikibot.Site()
        self.target_site = target_site or default_site

    def read_file_content(self, file_url: str):
        """Return name of temp file in which remote file is saved."""
        pywikibot.output('Reading file ' + file_url)
        # NOTE(review): the descriptor 'handle' returned by mkstemp() is
        # never closed; only the separately opened 'fd' is — confirm
        # whether a descriptor is leaked per download.
        handle, tempname = tempfile.mkstemp()
        path = Path(tempname)
        size = 0

        # Back-off intervals between retries; exhausting them aborts.
        dt_gen = (el for el in (15, 30, 45, 60, 120, 180, 240, 300))
        while True:
            file_len = path.stat().st_size
            if file_len:
                # Partial content already on disk: resume via HTTP Range.
                pywikibot.output('Download resumed.')
                headers = {'Range': 'bytes={}-'.format(file_len)}
            else:
                headers = {}

            with open(str(path), 'ab') as fd:  # T272345: Python 3.5 needs str
                os.lseek(handle, file_len, 0)
                try:
                    response = http.fetch(file_url, stream=True,
                                          headers=headers)
                    response.raise_for_status()

                    # get download info, if available
                    # Note: this is not enough to exclude pages
                    # e.g. 'application/json' is also not a media
                    if 'text/' in response.headers['Content-Type']:
                        raise FatalServerError('The requested URL was not '
                                               'found on server.')

                    size = max(size,
                               int(response.headers.get('Content-Length', 0)))

                    # stream content to temp file (in chunks of 1Mb)
                    for chunk in response.iter_content(chunk_size=1024 * 1024):
                        fd.write(chunk)

                # raised from connection lost during response.iter_content()
                except requests.ConnectionError:
                    fd.flush()
                    pywikibot.output('Connection closed at byte {}'
                                     .format(path.stat().st_size))

                # raised from response.raise_for_status()
                except requests.HTTPError as e:
                    # exit criteria if size is not available
                    # error on last iteration is OK, we're requesting
                    # {'Range': 'bytes=file_len-'}
                    err = HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE
                    if response.status_code == err and path.stat().st_size:
                        break
                    raise FatalServerError(str(e)) from e

            if size and size == path.stat().st_size:
                # Full content received.
                break

            try:
                dt = next(dt_gen)
                pywikibot.output('Sleeping for {} seconds ...'.format(dt))
                pywikibot.sleep(dt)
            except StopIteration:
                raise FatalServerError('Download failed, too many retries!')

        pywikibot.output('Downloaded {} bytes'.format(path.stat().st_size))
        return tempname

    def _handle_warning(self, warning: str) -> Optional[bool]:
        """Return whether the warning cause an abort or be ignored.

        :param warning: The warning name
        :return: False if this warning should cause an abort, True if it should
            be ignored or None if this warning has no default handler.
        """
        if self.aborts is not True:
            if warning in self.aborts:
                return False
        if self.ignore_warning is True or (self.ignore_warning is not False
                                           and warning in self.ignore_warning):
            return True
        # When aborts is True, any unhandled warning aborts.
        return None if self.aborts is not True else False

    def _handle_warnings(self, warnings):
        # Decide (from the configured policies, or by asking the user)
        # whether the given set of upload warnings should be ignored.
        messages = '\n'.join('{0.code}: {0.info}'.format(warning)
                             for warning in sorted(warnings,
                                                   key=lambda w: w.code))
        if len(warnings) > 1:
            messages = '\n' + messages
        pywikibot.output('We got the following warning(s): ' + messages)
        answer = True
        for warning in warnings:
            this_answer = self._handle_warning(warning.code)
            if this_answer is False:
                # A single 'abort' verdict outweighs everything else.
                answer = False
                break
            if this_answer is None:
                answer = None
        if answer is None:
            # No applicable policy: ask the user.
            answer = pywikibot.input_yn('Do you want to ignore?',
                                        default=False, automatic_quit=False)
        return answer

    def process_filename(self, file_url):
        """Return base filename portion of file_url."""
        # Isolate the pure name
        filename = file_url
        # Filename may be either a URL or a local file path
        if '://' in filename:
            # extract the path portion of the URL
            filename = urlparse(filename).path
        filename = os.path.basename(filename)
        if self.use_filename:
            filename = self.use_filename
        if self.filename_prefix:
            filename = self.filename_prefix + filename
        if not self.keep_filename:
            pywikibot.output(
                '\nThe filename on the target wiki will default to: {}\n'
                .format(filename))
            assert not self.opt.always
            newfn = pywikibot.input(
                'Enter a better name, or press enter to accept:')
            if newfn != '':
                filename = newfn

        # FIXME: these 2 belong somewhere else, presumably in family
        # forbidden characters are handled by pywikibot/page.py
        forbidden = ':*/\\'  # to be extended
        try:
            allowed_formats = self.target_site.siteinfo.get(
                'fileextensions', get_default=False)
        except KeyError:
            allowed_formats = []
        else:
            allowed_formats = [item['ext'] for item in allowed_formats]

        # ask until it's valid
        first_check = True
        while True:
            if not first_check:
                if self.opt.always:
                    filename = None
                else:
                    filename = pywikibot.input('Enter a better name, or press '
                                               'enter to skip the file:')
                if not filename:
                    return None
            first_check = False
            ext = os.path.splitext(filename)[1].lower().strip('.')
            # are any chars in forbidden also in filename?
            invalid = set(forbidden) & set(filename)
            if invalid:
                c = ''.join(invalid)
                pywikibot.output(
                    'Invalid character(s): {}. Please try again'.format(c))
                continue

            if allowed_formats and ext not in allowed_formats:
                if self.opt.always:
                    pywikibot.output('File format is not one of '
                                     '[{}]'.format(' '.join(allowed_formats)))
                    continue

                if not pywikibot.input_yn(
                        'File format is not one of [{}], but {!r}. Continue?'
                        .format(' '.join(allowed_formats), ext),
                        default=False):
                    continue

            potential_file_page = pywikibot.FilePage(self.target_site,
                                                     filename)
            if potential_file_page.exists():
                overwrite = self._handle_warning('exists')
                if overwrite is False:
                    pywikibot.output(
                        'File exists and you asked to abort. Skipping.')
                    return None

                if potential_file_page.has_permission():
                    if overwrite is None:
                        overwrite = not pywikibot.input_yn(
                            'File with name {} already exists. '
                            'Would you like to change the name? '
                            '(Otherwise file will be overwritten.)'
                            .format(filename), default=True,
                            automatic_quit=False)
                    if not overwrite:
                        continue
                    break

                pywikibot.output('File with name {} already exists and '
                                 'cannot be overwritten.'.format(filename))
                continue

            with suppress(NoPageError):
                if (not self.force_if_shared
                        and potential_file_page.file_is_shared()):
                    pywikibot.output(
                        'File with name {} already exists in shared '
                        'repository and cannot be overwritten.'
                        .format(filename))
                    continue
            break

        # A proper description for the submission.
        # Empty descriptions are not accepted.
        if self.description:
            pywikibot.output('The suggested description is:\n{}'
                             .format(self.description))

        while not self.description or self.verify_description:
            if not self.description:
                pywikibot.output(color_format(
                    '{lightred}It is not possible to upload a file '
                    'without a description.{default}'))
            assert not self.opt.always

            # if no description, ask if user want to add one or quit,
            # and loop until one is filled.
            # if self.verify_description, ask if user want to change it
            # or continue.
            if self.description:
                question = 'Do you want to change this description?'
            else:
                question = 'No description was given. Add one?'
            if pywikibot.input_yn(question, default=not self.description,
                                  automatic_quit=self.description):
                from pywikibot import editor as editarticle
                editor = editarticle.TextEditor()
                try:
                    new_description = editor.edit(self.description)
                except ImportError:
                    raise
                except Exception as e:
                    pywikibot.error(e)
                    continue

                # if user saved / didn't press Cancel
                if new_description:
                    self.description = new_description
            elif not self.description:
                # User declined to add a description: stop the run.
                raise QuitKeyboardInterrupt
            self.verify_description = False

        return filename

    def abort_on_warn(self, warn_code):
        """Determine if the warning message should cause an abort."""
        return self.aborts is True or warn_code in self.aborts

    def ignore_on_warn(self, warn_code: str):
        """
        Determine if the warning message should be ignored.

        :param warn_code: The warning message
        """
        return self.ignore_warning is True or warn_code in self.ignore_warning

    def upload_file(self, file_url):
        """
        Upload the image at file_url to the target wiki.

        :see: https://www.mediawiki.org/wiki/API:Upload

        Return the filename that was used to upload the image.
        If the upload fails, ask the user whether to try again or not.
        If the user chooses not to retry, return None.

        .. versionchanged:: 7.0
           If 'copyuploadbaddomain' API error occurred in first step,
           download the file and upload it afterwards
        """
        filename = self.process_filename(file_url)
        if not filename:
            return None

        site = self.target_site
        imagepage = pywikibot.FilePage(site, filename)  # normalizes filename
        imagepage.text = self.description

        pywikibot.output('Uploading file to {}...'.format(site))

        # When ignore_warning is not simply True, pass the bound
        # _handle_warnings method as a per-warning callback.
        ignore_warnings = self.ignore_warning is True or self._handle_warnings
        download = False
        while True:
            if '://' in file_url \
               and (not site.has_right('upload_by_url') or download):
                # The site cannot fetch the URL itself: download locally.
                try:
                    file_url = self.read_file_content(file_url)
                except FatalServerError:
                    pywikibot.exception()
                    return None

            try:
                success = imagepage.upload(file_url,
                                           ignore_warnings=ignore_warnings,
                                           chunk_size=self.chunk_size,
                                           asynchronous=self.asynchronous,
                                           comment=self.summary)
            except APIError as error:
                if error.code == 'uploaddisabled':
                    pywikibot.error(
                        'Upload error: Local file uploads are disabled on {}.'
                        .format(site))
                elif error.code == 'copyuploadbaddomain' and not download \
                        and '://' in file_url:
                    # Remote domain not allowed for upload-by-URL: retry
                    # with a local download instead.
                    pywikibot.exception()
                    pywikibot.output('Downloading the file and retry...')
                    download = True
                    continue
                else:
                    pywikibot.error('Upload error: ', exc_info=True)
            except Exception:
                pywikibot.error('Upload error: ', exc_info=True)
            else:
                if success:
                    # No warning, upload complete.
                    pywikibot.output('Upload of {} successful.'
                                     .format(filename))
                    self.counter['write'] += 1
                    return filename  # data['filename']
                pywikibot.output('Upload aborted.')
            break

        return None

    def skip_run(self) -> bool:
        """Check whether processing is to be skipped."""
        # early check that upload is enabled
        if self.target_site.is_uploaddisabled():
            pywikibot.error(
                'Upload error: Local file uploads are disabled on {}.'
                .format(self.target_site))
            return True

        # early check that user has proper rights to upload
        self.target_site.login()
        if not self.target_site.has_right('upload'):
            pywikibot.error(
                "User '{}' does not have upload rights on site {}."
                .format(self.target_site.user(), self.target_site))
            return True

        return False

    def run(self):
        """Run bot."""
        if self.skip_run():
            return
        try:
            for file_url in self.url:
                self.upload_file(file_url)
                self.counter['read'] += 1
        except QuitKeyboardInterrupt:
            pywikibot.output('\nUser quit {} bot run...'
                             .format(self.__class__.__name__))
        except KeyboardInterrupt:
            if config.verbose_output:
                raise
            pywikibot.output('\nKeyboardInterrupt during {} bot run...'
                             .format(self.__class__.__name__))
        finally:
            self.exit()
| |
import luigi
from luigi import configuration
from luigi.contrib import redshift
from luigi.s3 import S3Target, S3PathTask
from mortar.luigi import dbms
from mortar.luigi import mortartask
import logging
logger = logging.getLogger('luigi-interface')
"""
This luigi pipeline builds an Amazon Redshift data warehouse from Wikipedia
page view data stored in MySQL.
Instructions to Use:
1. Install the example wiki table into your MySQL database. You can download SQL
statements to create and populate the table from
https://s3.amazonaws.com/mortar-example-data/wikipedia-mysql/mysql-wiki-data.tar.gz
2. Ensure that you have setup your secure project configuration variables:
# Target Redshift database
mortar config:set HOST=<my-endpoint.redshift.amazonaws.com>
mortar config:set PORT=5439
mortar config:set DATABASE=<my-database-name>
mortar config:set USERNAME=<my-master-username>
mortar config:set PASSWORD=<my-master-username-password>
# Source MySQL database
mortar config:set MYSQL_DBNAME=<my-mysql-database-name>
mortar config:set MYSQL_HOST=<my-mysql-host-name>
mortar config:set MYSQL_USER=<my-mysql-username>
mortar config:set MYSQL_PASSWORD=<my-mysql-password>
3. Move the client.cfg.template with additional MySQL configuration items
into place:
cp luigiscripts/mysql.client.cfg.template luigiscripts/client.cfg.template
TaskOrder:
ExtractFromMySQL
TransformWikipediaDataTask
CopyToRedshiftTask
ShutdownClusters
To run the pipeline:
mortar luigi luigiscripts/wikipedia-luigi-mysql.py \
--output-base-path "s3://<your-bucket-name>/wiki" \
--table-name "pageviews"
"""
def create_full_path(base_path, sub_path):
    """Join *base_path* and *sub_path* with a single '/' separator."""
    return f'{base_path}/{sub_path}'
class WikipediaETLPigscriptTask(mortartask.MortarProjectPigscriptTask):
    """Common base class for the Mortar-related Luigi tasks in this
    pipeline.

    Extends the generic MortarProjectPigscriptTask with the defaults
    shared by every step: a common output location and cluster size.
    """

    # S3 base path under which all output data is written.
    output_base_path = luigi.Parameter()

    # Cluster size for Mortar jobs. 0 runs in Mortar's local mode
    # (fast and free, good for small samples); sizes >= 2 run on a
    # Hadoop cluster.
    cluster_size = luigi.IntParameter(default=5)

    def token_path(self):
        """Base path for the 'token' files Luigi writes to mark a task
        finished and trigger the next task in the dependency graph.
        """
        return self.output_base_path

    def default_parallel(self):
        """Number of reduce tasks to request from Hadoop.

        Defaults to the number of reduce slots in the cluster.
        """
        workers = self.cluster_size - 1
        if workers > 0:
            return workers * mortartask.NUM_REDUCE_SLOTS_PER_MACHINE
        return 1

    def number_of_files(self):
        """Number of output files to split the data into.

        Splitting the load across multiple files speeds up the
        Redshift copy.
        """
        workers = self.cluster_size - 1
        if workers > 0:
            return 2 * workers * mortartask.NUM_REDUCE_SLOTS_PER_MACHINE
        return 2
class TransformWikipediaDataTask(WikipediaETLPigscriptTask):
    """Runs the transformation script
    pigscripts/02-wiki-transform-data.pig.
    """

    # MySQL table holding the raw wiki page view data.
    mysql_table_name = luigi.Parameter(default='wiki')

    def requires(self):
        """Depend on the MySQL extraction step."""
        extract_path = create_full_path(self.output_base_path, 'extract')
        return [
            dbms.ExtractFromMySQL(
                table=self.mysql_table_name,
                columns='wiki_code, article, encoded_hourly_pageviews',
                output_path=extract_path,
                raw=True)
        ]

    def script_output(self):
        transform_path = create_full_path(self.output_base_path, 'transform')
        return [S3Target(transform_path)]

    def parameters(self):
        return {
            'OUTPUT_PATH': self.output_base_path,
            'REDSHIFT_PARALLELIZATION': self.number_of_files(),
        }

    def script(self):
        return '02-wiki-transform-data.pig'
class CopyToRedshiftTask(redshift.S3CopyToTable):
    """Copies the transformed data from S3 into Redshift."""

    # Redshift table the data is written to.
    table_name = luigi.Parameter()

    # No data files are produced; this S3 location only stores the
    # 'token' file marking the task as complete.
    output_base_path = luigi.Parameter()

    # Schema of the target Redshift table.
    columns = [
        ('wiki_code', 'text'),
        ('language', 'text'),
        ('wiki_type', 'text'),
        ('article', 'varchar(max)'),
        ('day', 'int'),
        ('hour', 'int'),
        ('pageviews', 'int'),
        ('PRIMARY KEY', '(article, day, hour)')]

    def requires(self):
        """Depend on the transformation step."""
        return [TransformWikipediaDataTask(output_base_path=self.output_base_path)]

    def redshift_credentials(self):
        """Return the Redshift connection settings from configuration."""
        config = configuration.get_config()
        section = 'redshift'
        fields = ('host', 'port', 'database', 'username', 'password',
                  'aws_access_key_id', 'aws_secret_access_key')
        return {name: config.get(section, name) for name in fields}

    def transform_path(self):
        """Root directory holding the transformed output to be copied."""
        return create_full_path(self.output_base_path, 'transform')

    def s3_load_path(self):
        """Load every 'part' file (the Hadoop output prefix) produced by
        the transform step.
        """
        return create_full_path(self.transform_path(), 'part')

    # Property accessors used by S3CopyToTable to connect to Redshift.
    @property
    def aws_access_key_id(self):
        return self.redshift_credentials()['aws_access_key_id']

    @property
    def aws_secret_access_key(self):
        return self.redshift_credentials()['aws_secret_access_key']

    @property
    def database(self):
        return self.redshift_credentials()['database']

    @property
    def user(self):
        return self.redshift_credentials()['username']

    @property
    def password(self):
        return self.redshift_credentials()['password']

    @property
    def host(self):
        credentials = self.redshift_credentials()
        return credentials['host'] + ':' + credentials['port']

    @property
    def table(self):
        return self.table_name

    @property
    def copy_options(self):
        return 'GZIP'
class ShutdownClusters(mortartask.MortarClusterShutdownTask):
    """Final task in the pipeline: shuts down all active clusters that
    are not currently running jobs.
    """

    # Not used by this task; passed through for earlier tasks to use.
    table_name = luigi.Parameter()

    # No output data is generated; this S3 location only stores the
    # 'token' file marking the task as complete.
    output_base_path = luigi.Parameter()

    def requires(self):
        """Run only after CopyToRedshiftTask has completed."""
        return [CopyToRedshiftTask(output_base_path=self.output_base_path,
                                   table_name=self.table_name)]

    def output(self):
        token = create_full_path(self.output_base_path, self.__class__.__name__)
        return [S3Target(token)]
if __name__ == "__main__":
    """
    We tell Luigi to run the last task in the task dependency graph. Luigi will then
    work backwards to find any tasks with its requirements met and start from there.
    The first time this pipeline is run the only task with its requirements met will be
    ExtractWikipediaDataTask which does not have any dependencies.
    """
    # Kick off the whole pipeline from its terminal task.
    luigi.run(main_task_cls=ShutdownClusters)
| |
import os
import csv
import datetime
# Timestamp captured once at module load time.
now = datetime.datetime.now()
def parseing(fileName):
debugCounter = 0
sectionTemplate = """
:doc:`{subject}{catNumber}`{SpTopic} {Term}
| Section {section} ({classNumber}) Credits: {units}; {mixture}; {component}
| Instructor: {Instructor}
|{Building}:{Room} {Location} {Days} {Time}
{description}
"""
sectionTemplateMultiRoom = """
:doc:`{subject}{catNumber}`{SpTopic} {Term}
| Section {section} ({classNumber}) Credits: {units}; {mixture}; {component}
| Instructor: {Instructor}
{multiRoom}
{description}
"""
sectionTemplateLab = """
:doc:`{subject}{catNumber}`{SpTopic} {Term}
| Section {section}/{labSection} ({classNumber}) Credits: {units}; {mixture}; {component}
| Instructor: {Instructor}
|{Building}:{Room} {Location} {Days} {Time}
|{labBuilding}: {labRoom} ({labLocation}) {labDay} {labTime} (lab)
{description}
"""
comp314_315Template = """
{subject}{catNumber} {Term} (Description: :doc:`comp314-315`)
| Section {section} ({classNumber}) Credits: {units}; {mixture}; {component}
| Instructor: {Instructor}
|{Building}:{Room} {Location} {Days} {Time}
{description}
"""
topicsSectionTemplate = """
{subject}{catNumber} Topic{topics} {Term}
| Section {section} ({classNumber}) Credits: {units}; {mixture}; {component}
| Instructor: {Instructor}
|{Building}:{Room} {Location} {Days} {Time}
| Description similar to: :doc:`{docName}`
{description}
"""
headerTemplate = """
{semester} Schedule {txtURLline} {where}
==========================================================================
{created}
The following courses will (tentatively) be held during the {semester} semester.
For open/full status and latest changes, see
`LOCUS <http://www.luc.edu/locus>`_.
**In case of conflict, information on LOCUS should be considered authoritative.**
See `Textbook Information {textBookURLline}`_.
Section titles lines link to the course description page,
except for some labeled special topics courses related to an existing course.
The 4-digit number in parentheses after the section is the LOCUS registration code.
Be sure to look at the section's notes or LOCUS for an 8-week courses with more than one schedule line:
Friday line(s) are likely to be isolated makeup days, not every week.
{graduateLink}
**View Campus Specific Courses below :**{pages}
.. _{season}_undergraduate_courses_list:
{udergradeTxt}
~~~~~~~~~~~~~~~~~~~~~
"""
gradHeadingTemplate = """
.. _{0}_graduate_courses_list_{1}:
Graduate Courses
~~~~~~~~~~~~~~~~~~~~~
"""
indepStudyTemplate = """
:doc:`{}` 1-6 credits
You cannot register
yourself for an independenst study course!
You must find a faculty member who
agrees to supervisor the work that you outline and schedule together. This
*supervisor arranges to get you registered*. Possible supervisors are: full-time department faculty
"""
classes = []
headerObject = {
"semester": "",
"textBookURLline": "<https://docs.google.com/spreadsheets/d/138_JN8WEP8Pv5uqFiPEO_Ftp0mzesnEF5IFU1685w3I/edit?usp=sharing>",
"created": "",
"graduateLink": "",
"campusURLTemplateCuneo": "",
"season": "",
"udergradeTxt": "Undergraduate Courses",
"where": "",
"pages": "",
"txtURLline": "",
}
object = {
"subject": "",
"catNumber": "",
"section": "",
"classNumber": "",
"title": "",
"component": "",
"units": "",
"topics": "",
"Building": "",
"Location": "",
"Room": "",
"Days": "",
"Time": "",
"Instructor": "",
"classCap": "",
"totalStudents": "",
"waitCap": "",
"waitTotal": "",
"minEnroll": "",
"Attributes": "",
"roomCharicteristics": "",
"CombinedSID": "",
"ClassEquiv": "",
"SpTopic": "",
"Term": "",
"mixture": "",
"description": "",
"hasLab": False,
"labBuilding": "",
"labRoom": "",
"labTime": "",
"labDay": "",
"labLocation": "",
"Term": "",
"isStudy": False,
"isMultiRoom": False,
"docName": "",
"multiRoom": "",
}
file = open(fileName, "r")
reader = csv.reader(file, delimiter=",")
LSBuildings = ["Cuneo", "Mundelein", "Crown", "Sullivan", "Life Science", "Dumbach"]
checker = 0
string = ""
appendToList = False
hasLab = False
semester = ""
season = ""
for row in reader:
# print(row)
for i in range(0, len(row)):
if "COMP" == row[0]:
object["subject"] = row[i].lower().strip()
object["catNumber"] = str(int(row[i + 1]))
if object["catNumber"] == "398":
object["isStudy"] = True
object["section"] = row[i + 2]
if "L" in row[i + 2]:
for m in range(0, len(classes)):
if (
classes[m]["catNumber"] == object["catNumber"]
and classes[m]["section"] in object["section"]
):
classes[m]["hasLab"] = True
hasLab = True
classes[m]["labSection"] = row[i + 2]
else:
object["section"] = row[i + 2][1:]
object["classNumber"] = row[i + 3]
object["title"] = row[i + 4]
object["component"] = row[i + 5]
object["units"] = row[i + 6]
if i + 7 <= len(row):
object["topics"] = row[i + 7]
break
break
elif " Fall " in row[0]:
season = "Fall"
semester = "Fall " + getYear(row[0].split(" "), season)
headerObject["season"] = season
headerObject["semester"] = semester
elif " Spring " in row[0]:
season = "Spring"
semester = "Spring " + getYear(row[0].split(" "), season)
headerObject["season"] = season
headerObject["semester"] = semester
elif " Summer " in row[0]:
season = "Summer"
semester = "Summer " + getYear(row[0].split(" "), season)
headerObject["season"] = season
headerObject["semester"] = semester
elif "Week" in row[0]:
object["Term"] = "[" + str(row[0]) + "]"
elif row[0] == "Bldg:" and not hasLab:
if object["Building"] != "":
object["Building"] = object["Building"] + " +" + str(row[i + 1])
object["Room"] = object["Room"] + " + " + row[i + 3]
object["Days"] = (
str(object["Days"]) + " + " + str(convertDays(row[i + 5]))
)
object["Time"] = object["Time"] + " + " + row[i + 7]
for j in range(0, len(LSBuildings)):
if LSBuildings[j] in row[i + 1]:
object["Location"] = (
"(Lake Shore)" + " +" + object["Location"]
)
break
if row[i + 1] == "TBA":
object["Location"] = "" + " + " + object["Location"]
break
if "Online" in row[i + 1]:
object["Location"] = "(Online)" + " +" + object["Location"]
break
if j == len(LSBuildings) - 1:
object["Location"] = (
"(Water Tower)" + " +" + object["Location"]
)
object["isMultiRoom"] = True
else:
object["Building"] = row[i + 1]
object["Room"] = row[i + 3]
object["Days"] = convertDays(row[i + 5])
object["Time"] = row[i + 7]
if 9 < len(row):
object["Instructor"] = row[9]
for j in range(0, len(LSBuildings)):
if LSBuildings[j] in object["Building"]:
object["Location"] = "(Lake Shore)"
break
elif object["Building"] == "TBA":
object["Location"] = ""
break
elif "Online" in object["Building"]:
object["Location"] = "(Online)"
break
else:
object["Location"] = "(Water Tower)"
if object["Instructor"] == "":
object["Instructor"] = "N/A"
break
elif row[0] == "Bldg:" and hasLab:
for m in range(0, len(classes)):
if (
classes[m]["catNumber"] == object["catNumber"]
and classes[m]["section"] in object["section"]
):
classes[m]["labBuilding"] = row[i + 1]
waterTower = True
for j in range(0, len(LSBuildings)):
if LSBuildings[j] in classes[m]["labBuilding"]:
classes[m]["labLocation"] = "Lake Shore"
waterTower = False
elif classes[m]["labBuilding"] == "TBA":
classes[m]["labLocation"] = ""
waterTower = False
elif classes[m]["labBuilding"] == "Online":
classes[m]["labLocation"] = "Online"
waterTower = False
if waterTower:
classes[m]["labLocation"] = "Water Tower"
classes[m]["labRoom"] = row[i + 3]
classes[m]["labDay"] = convertDays(row[i + 5])
classes[m]["labTime"] = row[i + 7]
break
elif row[0] == "Class Enrl Cap:":
object["classCap"] = row[i + 1]
object["totalStudents"] = row[i + 3]
object["waitCap"] = row[i + 5]
object["waitTotal"] = row[i + 7]
if i + 9 < len(row[i]):
object["minEnroll"] = row[i + 9]
break
break
elif row[0] == "Attributes:":
object["Attributes"] = row[1]
elif row[0] == "Room Characteristics:":
object["roomCharicteristics"] = row[i + 1]
break
elif row[0] == "Class Equivalents:":
if len(row) > i + 1:
object["ClassEquiv"] = row[i + 1]
elif "Combined with" in row[0]:
tempList = row[0].split()
for q in range(0, len(tempList)):
if "COMP" == tempList[q]:
tempSplit = tempList[q + 1].split("-")
classNum = tempSplit[0]
object["docName"] = tempList[q].lower() + classNum
elif "_______" in row[0]:
if string != "":
object["description"] = "**Notes**\n " + string
string = ""
if hasLab == False:
appendToList = True
hasLab = False
break
elif row[0] == "Combined Section ID:":
object["CombinedSID"] == row[1] + row[2] + row[3]
break
elif row[0] == "":
for q in range(0, len(row)):
if row[q] != "":
object["mixture"] = row[q]
if object["mixture"] == "(Online)":
object["Location"] = "(Online)"
object["Building"] = "Online"
else:
if row[i] != "":
string += row[i] + "\n "
if appendToList:
if "Report ID:" not in object["description"]:
classes.append(object)
# print(object)
# print(len(classes))
elif object["isStudy"]:
continue
object = object.fromkeys(object, "")
appendToList = False
object["Building"] = ""
object["Days"] = ""
object["Room"] = ""
object["Days"] = ""
object["Time"] = ""
object["Location"] = ""
mainRST = open("./checkFolder/" + season.lower() + ".rst", "w")
onlineRST = open("./checkFolder/online" + season.lower() + ".rst", "w")
lakeRST = open("./checkFolder/lakeshore" + season.lower() + ".rst", "w")
waterRST = open("./checkFolder/watertower" + season.lower() + ".rst", "w")
headerObject[
"pages"
] = """
* :doc:`lakeshore{0}`
* :doc:`watertower{0}`
* :doc:`online{0}`""".format(
season.lower()
)
mainRST.write(headerTemplate.format(**headerObject))
headerObject["where"] = "(Lake Shore)"
headerObject[
"pages"
] = """
* :doc:`{0}`
* :doc:`watetower{0}`
* :doc:`online{0}`""".format(
season.lower()
)
lakeRST.write(headerTemplate.format(**headerObject))
headerObject["where"] = "(Online)"
headerObject[
"pages"
] = """
* :doc:`lakeshore{0}`
* :doc:`watertower{0}`
* :doc:`{0}`""".format(
season.lower()
)
onlineRST.write(headerTemplate.format(**headerObject))
headerObject["where"] = "(Water Tower)"
headerObject[
"pages"
] = """
* :doc:`lakeshore{0}`
* :doc:`{0}`
* :doc:`online{0}`""".format(
season.lower()
)
waterRST.write(headerTemplate.format(**headerObject))
Check398 = True
Check499 = True
Check490 = True
for k in range(0, len(classes)):
currentLine = ""
if classes[k]["hasLab"]:
currentLine = sectionTemplateLab.format(**classes[k])
elif classes[k]["isMultiRoom"]:
multiRoom = ""
bList = classes[k]["Building"].split("+")
dList = classes[k]["Days"].split("+")
rList = classes[k]["Room"].split("+")
lList = classes[k]["Location"].split("+")
tList = classes[k]["Time"].split("+")
for times in range(0, len(bList)):
multiRoom = multiRoom + "| {0}: {1} {2} {3} {4} \n ".format(
bList[times], rList[times], lList[times], dList[times], tList[times]
)
classes[k]["multiRoom"] = multiRoom
currentLine = sectionTemplateMultiRoom.format(**classes[k])
elif classes[k]["catNumber"] == "314" or classes[k]["catNumber"] == "315":
currentLine = comp314_315Template.format(**classes[k])
elif classes[k]["catNumber"] == "388" or classes[k]["catNumber"] == "488":
currentLine = topicsSectionTemplate.format(**classes[k])
elif (
"398" in classes[k]["catNumber"]
or "499" in classes[k]["catNumber"]
or "490" in classes[k]["catNumber"]
):
if "398" in classes[k]["catNumber"]:
if Check398:
currentLine = indepStudyTemplate.format("398")
Check398 = False
else:
currentLine = 0
if "490" in classes[k]["catNumber"]:
if Check499:
currentLine = indepStudyTemplate.format("499")
Check499 = False
else:
currentLine = 0
if "499" in classes[k]["catNumber"]:
if Check490:
currentLine = indepStudyTemplate.format("490")
Check490 = False
else:
currentLine = 0
else:
currentLine = sectionTemplate.format(**classes[k])
createHeading = False
if (
int(classes[k - 1]["catNumber"]) < 400
and int(classes[k]["catNumber"]) >= 400
):
createHeading = True
if currentLine != 0:
if "Lake" in classes[k]["Location"]:
if createHeading:
lcurrentLine = (
gradHeadingTemplate.format(season, "Lake Shore")
+ "\n"
+ currentLine
)
lakeRST.write(lcurrentLine + "\n")
else:
lakeRST.write(currentLine + "\n")
if "Water" in classes[k]["Location"]:
if createHeading:
wcurrentLine = (
gradHeadingTemplate.format(season, "Water Tower")
+ "\n"
+ currentLine
)
waterRST.write(wcurrentLine + "\n")
else:
waterRST.write(currentLine + "\n")
if "Online" in classes[k]["Location"]:
if createHeading:
ocurrentLine = (
gradHeadingTemplate.format(season, "Online")
+ "\n"
+ currentLine
)
onlineRST.write(ocurrentLine + "\n")
else:
onlineRST.write(currentLine + "\n")
if createHeading:
fcurrentLine = (
gradHeadingTemplate.format(season, "Fall") + "\n" + currentLine
)
mainRST.write(fcurrentLine + "\n")
else:
mainRST.write(currentLine + "\n")
mainRST.close()
onlineRST.close()
lakeRST.close()
waterRST.close()
def convertDays(days):
    """Convert a compact day-abbreviation string to full day names.

    Generalizes the original hard-coded mapping: any concatenation of the
    abbreviations M, Tu, W, Th, F, Sa, Su is expanded (e.g. "MWF" ->
    "Monday, Wednesday, Friday"), so combinations the original did not
    enumerate (such as "MTuWThF") now work too.

    Args:
        days: Abbreviated day string as it appears in the schedule CSV.

    Returns:
        Comma-separated full day names, or None when the input is not a
        pure sequence of day abbreviations (e.g. "TBA"), matching the
        original's implicit None for unrecognized input.
    """
    names = {
        "M": "Monday", "Tu": "Tuesday", "W": "Wednesday", "Th": "Thursday",
        "F": "Friday", "Sa": "Saturday", "Su": "Sunday",
    }
    if not days:
        return None
    parts = []
    i = 0
    while i < len(days):
        # Two-letter abbreviations must be tried before one-letter ones.
        pair = days[i:i + 2]
        if pair in ("Tu", "Th", "Sa", "Su"):
            parts.append(names[pair])
            i += 2
        elif days[i] in ("M", "W", "F"):
            parts.append(names[days[i]])
            i += 1
        else:
            # Not a day string at all (e.g. "TBA") -- preserve None result.
            return None
    return ", ".join(parts)
# The first few rows of the CSV carry the semester header; once the parser
# reaches that row it scans for the season word and takes the next token
# as the year.
def getYear(words, season):
    """Return the year token that follows *season* in *words*.

    Args:
        words: Header row split on whitespace.
        season: Season name to look for ("Fall", "Spring", or "Summer").

    Returns:
        The token immediately after the season, or the current year as a
        fallback. The bounds check fixes an IndexError the original raised
        when the season happened to be the last token.
    """
    for i, word in enumerate(words):
        if word == season and i + 1 < len(words):
            return words[i + 1]
    # `now` is a module-level datetime defined elsewhere in this file.
    return str(now.year)
# Script entry point: prompt for the schedule CSV file name and parse it.
# No validation is performed on the entered name.
name = input("Enter file name.")
parseing(name)  # NOTE(review): "parseing" (sic) is defined earlier in this file
| |
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_raises)
from skimage.transform._geometric import _stackcopy
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
from skimage._shared._warnings import expected_warnings
# Eight 2-D source coordinates used as fixed test correspondences by the
# transform tests below.
SRC = np.array([
    [-12.3705, -10.5075],
    [-10.7865, 15.4305],
    [8.6985, 10.8675],
    [11.4975, -9.5715],
    [7.8435, 7.4835],
    [-5.3325, 6.5025],
    [6.7905, -6.3765],
    [-6.1695, -0.8235],
])
# Matching destination coordinates, in the same order as SRC.
DST = np.array([
    [0, 0],
    [0, 5800],
    [4900, 5800],
    [4900, 0],
    [4479, 4580],
    [1176, 3660],
    [3754, 790],
    [1024, 1931],
])
def test_stackcopy():
    """_stackcopy replicates a 2-D array into every layer of a 3-D stack."""
    depth = 4
    stack = np.empty((3, 3, depth))
    template = np.eye(3, 3)
    _stackcopy(stack, template)
    for layer in range(depth):
        assert_almost_equal(stack[..., layer], template)
def test_estimate_transform():
    """estimate_transform accepts all known names and rejects unknown ones."""
    for ttype in ('similarity', 'affine', 'projective', 'polynomial'):
        estimate_transform(ttype, SRC[:2, :], DST[:2, :])
    # an unrecognized transform name must raise ValueError
    assert_raises(ValueError, estimate_transform, 'foobar',
                  SRC[:2, :], DST[:2, :])
def test_matrix_transform():
    """matrix_transform with a raw parameter matrix matches the transform."""
    transform = AffineTransform(scale=(0.1, 0.5), rotation=2)
    assert_equal(transform(SRC), matrix_transform(SRC, transform.params))
def test_similarity_estimation():
    """Similarity estimation: exact, over-determined, and estimate()."""
    # two point pairs determine a similarity transform exactly
    exact = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
    assert_almost_equal(exact(SRC[:2, :]), DST[:2, :])
    assert_equal(exact.params[0, 0], exact.params[1, 1])
    assert_equal(exact.params[0, 1], - exact.params[1, 0])
    # least-squares solution over all correspondences
    overdet = estimate_transform('similarity', SRC, DST)
    assert_almost_equal(overdet.inverse(overdet(SRC)), SRC)
    assert_equal(overdet.params[0, 0], overdet.params[1, 1])
    assert_equal(overdet.params[0, 1], - overdet.params[1, 0])
    # the estimate() method must agree with estimate_transform()
    estimated = SimilarityTransform()
    estimated.estimate(SRC, DST)
    assert_almost_equal(estimated.params, overdet.params)
def test_similarity_init():
    """Implicit similarity parameters survive a matrix round-trip."""
    scale, rotation, translation = 0.1, 1, (1, 1)
    explicit = SimilarityTransform(scale=scale, rotation=rotation,
                                   translation=translation)
    assert_almost_equal(explicit.scale, scale)
    assert_almost_equal(explicit.rotation, rotation)
    assert_almost_equal(explicit.translation, translation)
    # rebuild from the homogeneous matrix and re-check every parameter
    from_matrix = SimilarityTransform(explicit.params)
    assert_almost_equal(from_matrix.scale, scale)
    assert_almost_equal(from_matrix.rotation, rotation)
    assert_almost_equal(from_matrix.translation, translation)
    # rotation == 0 and rotation == 90 degrees exercise the special-case
    # scale recovery paths
    for rotation in (0, np.pi / 2):
        special = SimilarityTransform(scale=0.1, rotation=rotation,
                                      translation=(1, 1))
        assert_almost_equal(special.scale, 0.1)
        assert_almost_equal(special.rotation, rotation)
        assert_almost_equal(special.translation, (1, 1))
def test_affine_estimation():
    """Affine estimation: exact, over-determined, and estimate()."""
    # three point pairs determine an affine transform exactly
    exact = estimate_transform('affine', SRC[:3, :], DST[:3, :])
    assert_almost_equal(exact(SRC[:3, :]), DST[:3, :])
    # least-squares fit over all points must invert consistently
    overdet = estimate_transform('affine', SRC, DST)
    assert_almost_equal(overdet.inverse(overdet(SRC)), SRC)
    # the estimate() method must agree with estimate_transform()
    estimated = AffineTransform()
    estimated.estimate(SRC, DST)
    assert_almost_equal(estimated.params, overdet.params)
def test_affine_init():
    """Implicit affine parameters survive a matrix round-trip."""
    scale, rotation, shear, translation = (0.1, 0.13), 1, 0.1, (1, 1)
    explicit = AffineTransform(scale=scale, rotation=rotation, shear=shear,
                               translation=translation)
    # rebuild from the homogeneous matrix; both must expose the same params
    from_matrix = AffineTransform(explicit.params)
    for transform in (explicit, from_matrix):
        assert_almost_equal(transform.scale, scale)
        assert_almost_equal(transform.rotation, rotation)
        assert_almost_equal(transform.shear, shear)
        assert_almost_equal(transform.translation, translation)
def test_piecewise_affine():
    """Each local affine piece is estimated exactly, forward and backward."""
    transform = PiecewiseAffineTransform()
    transform.estimate(SRC, DST)
    assert_almost_equal(transform(SRC), DST)
    assert_almost_equal(transform.inverse(DST), SRC)
def test_projective_estimation():
    """Projective estimation: exact, over-determined, and estimate()."""
    # four point pairs determine a projective transform exactly
    exact = estimate_transform('projective', SRC[:4, :], DST[:4, :])
    assert_almost_equal(exact(SRC[:4, :]), DST[:4, :])
    # least-squares fit over all points must invert consistently
    overdet = estimate_transform('projective', SRC, DST)
    assert_almost_equal(overdet.inverse(overdet(SRC)), SRC)
    # the estimate() method must agree with estimate_transform()
    estimated = ProjectiveTransform()
    estimated.estimate(SRC, DST)
    assert_almost_equal(estimated.params, overdet.params)
def test_projective_init():
    """A ProjectiveTransform rebuilt from params keeps them unchanged."""
    estimated = estimate_transform('projective', SRC, DST)
    rebuilt = ProjectiveTransform(estimated.params)
    assert_almost_equal(rebuilt.params, estimated.params)
def test_polynomial_estimation():
    """High-order polynomial fit maps SRC onto DST; estimate() agrees."""
    fitted = estimate_transform('polynomial', SRC, DST, order=10)
    assert_almost_equal(fitted(SRC), DST, 6)
    # the estimate() method must produce the same coefficients
    estimated = PolynomialTransform()
    estimated.estimate(SRC, DST, order=10)
    assert_almost_equal(estimated.params, fitted.params)
def test_polynomial_init():
    """Rebuilding a PolynomialTransform from coefficients preserves them."""
    estimated = estimate_transform('polynomial', SRC, DST, order=10)
    rebuilt = PolynomialTransform(estimated.params)
    assert_almost_equal(rebuilt.params, estimated.params)
def test_polynomial_default_order():
    """Omitting `order` must behave exactly like order=2."""
    implicit = estimate_transform('polynomial', SRC, DST)
    explicit = estimate_transform('polynomial', SRC, DST, order=2)
    assert_almost_equal(explicit.params, implicit.params)
def test_polynomial_inverse():
    """Polynomial transforms expose no closed-form inverse."""
    transform = PolynomialTransform()
    assert_raises(Exception, transform.inverse, 0)
def test_union():
    """Composition multiplies scales, adds rotations, and widens types."""
    first = SimilarityTransform(scale=0.1, rotation=0.3)
    second = SimilarityTransform(scale=0.1, rotation=0.9)
    expected = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
    assert_almost_equal((first + second).params, expected.params)
    # mixing an affine with a similarity yields a projective transform
    affine = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    expected = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
    combined = affine + second
    assert_almost_equal(combined.params, expected.params)
    assert combined.__class__ == ProjectiveTransform
    # composing a transform with its inverse gives the identity
    roundtrip = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    assert_almost_equal((roundtrip + roundtrip.inverse).params, np.eye(3))
def test_union_differing_types():
    """A polynomial transform cannot be composed with a matrix transform."""
    matrix_based = SimilarityTransform()
    polynomial = PolynomialTransform()
    assert_raises(TypeError, matrix_based.__add__, polynomial)
def test_geometric_tform():
    """The abstract base class refuses direct use of all its operations."""
    base = GeometricTransform()
    for operation in (base, base.inverse, base.__add__):
        assert_raises(NotImplementedError, operation, 0)
def test_invalid_input():
    """Malformed matrices and conflicting arguments raise ValueError."""
    # a 2x3 array is not a valid homogeneous transformation matrix
    for cls in (ProjectiveTransform, AffineTransform, SimilarityTransform):
        assert_raises(ValueError, cls, np.zeros((2, 3)))
    # an explicit matrix may not be combined with implicit parameters
    for cls in (AffineTransform, SimilarityTransform):
        assert_raises(ValueError, cls, matrix=np.zeros((2, 3)), scale=1)
    # polynomial coefficients must be a 2-row array
    assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3)))
def test_degenerate():
    """All-zero correspondences are unsolvable; params become NaN."""
    src = dst = np.zeros((10, 2))
    for cls in (SimilarityTransform, AffineTransform, ProjectiveTransform):
        transform = cls()
        transform.estimate(src, dst)
        assert np.all(np.isnan(transform.params))
if __name__ == "__main__":
    # Allow running this test module directly via numpy's legacy test runner.
    from numpy.testing import run_module_suite
    run_module_suite()
| |
from sqlalchemy.testing import eq_, assert_raises_message, is_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
from sqlalchemy import (
exc, sql, String, Integer, MetaData, and_, ForeignKey,
VARCHAR, INT, Sequence, func)
from sqlalchemy.testing.schema import Table, Column
class InsertExecTest(fixtures.TablesTest):
    """Tests of INSERT execution behavior: multi-VALUES inserts,
    executemany parameter validation, and the inserted_primary_key /
    lastrow accessors with and without implicit RETURNING.
    """
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # Single autoincrementing `users` table shared by the tests here.
        Table(
            'users', metadata,
            Column(
                'user_id', INT, primary_key=True,
                test_needs_autoincrement=True),
            Column('user_name', VARCHAR(20)),
            test_needs_acid=True
        )
    @testing.requires.multivalues_inserts
    def test_multivalues_insert(self):
        # Multi-VALUES insert, once with dict params and once with tuples.
        users = self.tables.users
        users.insert(
            values=[
                {'user_id': 7, 'user_name': 'jack'},
                {'user_id': 8, 'user_name': 'ed'}]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        eq_(rows[0], (7, 'jack'))
        eq_(rows[1], (8, 'ed'))
        users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        eq_(rows[2], (9, 'jack'))
        eq_(rows[3], (10, 'ed'))
    def test_insert_heterogeneous_params(self):
        """test that executemany parameters are asserted to match the
        parameter set of the first."""
        users = self.tables.users
        # a later parameter set missing a key that the first set
        # established raises StatementError
        assert_raises_message(
            exc.StatementError,
            r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
            "bind parameter 'user_name', in "
            "parameter group 2 "
            r"\[SQL: u?'INSERT INTO users",
            users.insert().execute,
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9}
        )
        # this succeeds however. We aren't yet doing
        # a length check on all subsequent parameters.
        users.insert().execute(
            {'user_id': 7},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9}
        )
    def _test_lastrow_accessor(self, table_, values, assertvalues):
        """Tests the inserted_primary_key and lastrow_has_id() functions."""
        def insert_values(engine, table_, values):
            """
            Inserts a row into a table, returns the full list of values
            INSERTed including defaults that fired off on the DB side and
            detects rows that had defaults and post-fetches.
            """
            # verify implicit_returning is working
            if engine.dialect.implicit_returning:
                ins = table_.insert()
                comp = ins.compile(engine, column_keys=list(values))
                if not set(values).issuperset(
                        c.key for c in table_.primary_key):
                    is_(bool(comp.returning), True)
            result = engine.execute(table_.insert(), **values)
            ret = values.copy()
            # NOTE(review): `id` shadows the builtin here; kept byte-identical.
            for col, id in zip(
                    table_.primary_key, result.inserted_primary_key):
                ret[col.key] = id
            if result.lastrow_has_defaults():
                # post-fetch the row to pick up server-side defaults
                criterion = and_(
                    *[
                        col == id for col, id in
                        zip(table_.primary_key, result.inserted_primary_key)])
                row = engine.execute(table_.select(criterion)).first()
                for c in table_.c:
                    ret[c.key] = row[c]
            return ret
        if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
            assert testing.db.dialect.implicit_returning
        # run the scenario both with and without implicit RETURNING when
        # the dialect supports it
        if testing.db.dialect.implicit_returning:
            test_engines = [
                engines.testing_engine(options={'implicit_returning': False}),
                engines.testing_engine(options={'implicit_returning': True}),
            ]
        else:
            test_engines = [testing.db]
        for engine in test_engines:
            try:
                table_.create(bind=engine, checkfirst=True)
                i = insert_values(engine, table_, values)
                eq_(i, assertvalues)
            finally:
                table_.drop(bind=engine)
    @testing.skip_if('sqlite')
    def test_lastrow_accessor_one(self):
        # composite pk: autoincrement integer + explicit string
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t1", metadata,
                Column(
                    'id', Integer, primary_key=True,
                    test_needs_autoincrement=True),
                Column('foo', String(30), primary_key=True)),
            {'foo': 'hi'},
            {'id': 1, 'foo': 'hi'}
        )
    @testing.skip_if('sqlite')
    def test_lastrow_accessor_two(self):
        # as above, plus a server-side default column to post-fetch
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t2", metadata,
                Column(
                    'id', Integer, primary_key=True,
                    test_needs_autoincrement=True),
                Column('foo', String(30), primary_key=True),
                Column('bar', String(30), server_default='hi')
            ),
            {'foo': 'hi'},
            {'id': 1, 'foo': 'hi', 'bar': 'hi'}
        )
    def test_lastrow_accessor_three(self):
        # fully explicit composite string pk, no autoincrement at all
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t3", metadata,
                Column("id", String(40), primary_key=True),
                Column('foo', String(30), primary_key=True),
                Column("bar", String(30))
            ),
            {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
            {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
        )
    def test_lastrow_accessor_four(self):
        # optional Sequence on the pk column, pk supplied explicitly
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t4", metadata,
                Column(
                    'id', Integer,
                    Sequence('t4_id_seq', optional=True),
                    primary_key=True),
                Column('foo', String(30), primary_key=True),
                Column('bar', String(30), server_default='hi')
            ),
            {'foo': 'hi', 'id': 1},
            {'id': 1, 'foo': 'hi', 'bar': 'hi'}
        )
    def test_lastrow_accessor_five(self):
        # single explicit string pk with a server default to post-fetch
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t5", metadata,
                Column('id', String(10), primary_key=True),
                Column('bar', String(30), server_default='hi')
            ),
            {'id': 'id1'},
            {'id': 'id1', 'bar': 'hi'},
        )
    @testing.skip_if('sqlite')
    def test_lastrow_accessor_six(self):
        # composite pk where the non-autoincrement part is an integer 0
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t6", metadata,
                Column(
                    'id', Integer, primary_key=True,
                    test_needs_autoincrement=True),
                Column('bar', Integer, primary_key=True)
            ),
            {'bar': 0},
            {'id': 1, 'bar': 0},
        )
    # TODO: why not in the sqlite suite?
    @testing.only_on('sqlite+pysqlite')
    @testing.provide_metadata
    def test_lastrowid_zero(self):
        # a lastrowid of 0 must still be reported as the primary key,
        # not treated as "no row id"
        from sqlalchemy.dialects import sqlite
        eng = engines.testing_engine()
        class ExcCtx(sqlite.base.SQLiteExecutionContext):
            def get_lastrowid(self):
                # force the dialect to see 0 as the generated row id
                return 0
        eng.dialect.execution_ctx_cls = ExcCtx
        t = Table(
            't', self.metadata, Column('x', Integer, primary_key=True),
            Column('y', Integer))
        t.create(eng)
        r = eng.execute(t.insert().values(y=5))
        eq_(r.inserted_primary_key, [0])
    @testing.fails_on(
        'sqlite', "sqlite autoincremnt doesn't work with composite pks")
    @testing.provide_metadata
    def test_misordered_lastrow(self):
        # composite pk where the autoincrement column is NOT first;
        # inserted_primary_key must present values in column order
        metadata = self.metadata
        related = Table(
            'related', metadata,
            Column('id', Integer, primary_key=True),
            mysql_engine='MyISAM'
        )
        t6 = Table(
            "t6", metadata,
            Column(
                'manual_id', Integer, ForeignKey('related.id'),
                primary_key=True),
            Column(
                'auto_id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            mysql_engine='MyISAM'
        )
        metadata.create_all()
        r = related.insert().values(id=12).execute()
        id_ = r.inserted_primary_key[0]
        eq_(id_, 12)
        r = t6.insert().values(manual_id=id_).execute()
        eq_(r.inserted_primary_key, [12, 1])
    def test_implicit_id_insert_select_columns(self):
        # INSERT ... FROM SELECT naming columns as Column objects;
        # must execute without error even when the pk is in the column list
        users = self.tables.users
        stmt = users.insert().from_select(
            (users.c.user_id, users.c.user_name),
            users.select().where(users.c.user_id == 20))
        testing.db.execute(stmt)
    def test_implicit_id_insert_select_keys(self):
        # same as above but naming the columns by string key
        users = self.tables.users
        stmt = users.insert().from_select(
            ["user_id", "user_name"],
            users.select().where(users.c.user_id == 20))
        testing.db.execute(stmt)
    @testing.requires.empty_inserts
    @testing.requires.returning
    def test_no_inserted_pk_on_returning(self):
        # inserted_primary_key is not available once returning() is used
        users = self.tables.users
        result = testing.db.execute(users.insert().returning(
            users.c.user_id, users.c.user_name))
        assert_raises_message(
            exc.InvalidRequestError,
            r"Can't call inserted_primary_key when returning\(\) is used.",
            getattr, result, 'inserted_primary_key'
        )
class TableInsertTest(fixtures.TablesTest):
    """test for consistent insert behavior across dialects
    regarding the inline=True flag, lower-case 't' tables.
    """
    # recreate tables for every test so generated ids always start at 1
    run_create_tables = 'each'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # full Table object with sequence-backed primary key
        Table(
            'foo', metadata,
            Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
            Column('data', String(50)),
            Column('x', Integer)
        )
    def _fixture(self, types=True):
        # lightweight lower-case sql.table() pointing at the same table;
        # carries no primary-key/autoincrement metadata
        if types:
            t = sql.table(
                'foo', sql.column('id', Integer),
                sql.column('data', String),
                sql.column('x', Integer))
        else:
            t = sql.table(
                'foo', sql.column('id'), sql.column('data'), sql.column('x'))
        return t
    def _test(self, stmt, row, returning=None, inserted_primary_key=False):
        # execute `stmt`, then verify RETURNING values or
        # inserted_primary_key, and finally the row actually stored
        r = testing.db.execute(stmt)
        if returning:
            returned = r.first()
            eq_(returned, returning)
        elif inserted_primary_key is not False:
            eq_(r.inserted_primary_key, inserted_primary_key)
        eq_(testing.db.execute(self.tables.foo.select()).first(), row)
    def _test_multi(self, stmt, rows, data):
        # executemany `rows` through `stmt` and compare the full table
        testing.db.execute(stmt, rows)
        eq_(
            testing.db.execute(
                self.tables.foo.select().
                order_by(self.tables.foo.c.id)).fetchall(),
            data)
    @testing.requires.sequences
    def test_expicit_sequence(self):
        # NOTE(review): "expicit" is a typo for "explicit"; renaming would
        # change the test id, so it is left as-is.
        t = self._fixture()
        self._test(
            t.insert().values(
                id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
            (1, 'data', 5)
        )
    def test_uppercase(self):
        # Table object + explicit id: pk is known without post-fetch
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    def test_uppercase_inline(self):
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    @testing.crashes(
        "mssql+pyodbc",
        "Pyodbc + SQL Server + Py3K, some decimal handling issue")
    def test_uppercase_inline_implicit(self):
        # inline insert cannot post-fetch the generated id -> [None]
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[None]
        )
    def test_uppercase_implicit(self):
        t = self.tables.foo
        self._test(
            t.insert().values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    def test_uppercase_direct_params(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    @testing.requires.returning
    def test_uppercase_direct_params_returning(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
            (1, 'data', 5),
            returning=(1, 5)
        )
    @testing.fails_on(
        'mssql', "lowercase table doesn't support identity insert disable")
    def test_direct_params(self):
        # lower-case table has no pk metadata -> empty inserted_primary_key
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
    @testing.fails_on(
        'mssql', "lowercase table doesn't support identity insert disable")
    @testing.requires.returning
    def test_direct_params_returning(self):
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
            (1, 'data', 5),
            returning=(1, 5)
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk(self):
        t = self._fixture()
        self._test(
            t.insert().values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk_multi_rows(self):
        t = self._fixture()
        self._test_multi(
            t.insert(),
            [
                {'data': 'd1', 'x': 5},
                {'data': 'd2', 'x': 6},
                {'data': 'd3', 'x': 7},
            ],
            [
                (1, 'd1', 5),
                (2, 'd2', 6),
                (3, 'd3', 7)
            ],
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk_inline(self):
        t = self._fixture()
        self._test(
            t.insert(inline=True).values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.util.tf_export import keras_export
# Names of the built-in `standardize` / `split` modes accepted by
# TextVectorization.
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
# Output-mode constants, aliased from the index_lookup layer so both
# layers accept the same values.
TFIDF = index_lookup.TFIDF
INT = index_lookup.INT
BINARY = index_lookup.BINARY
COUNT = index_lookup.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# Keys used when saving/tracking layer state.
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# Keys used inside the adapt() combiner accumulator.
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
    "keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
  """Text vectorization layer.

  This layer has basic options for managing text in a Keras model. It
  transforms a batch of strings (one sample = one string) into either a list of
  token indices (one sample = 1D tensor of integer token indices) or a dense
  representation (one sample = 1D tensor of float values representing data about
  the sample's tokens).

  If desired, the user can call this layer's adapt() method on a dataset.
  When this layer is adapted, it will analyze the dataset, determine the
  frequency of individual string values, and create a 'vocabulary' from them.
  This vocabulary can have unlimited size or be capped, depending on the
  configuration options for this layer; if there are more unique values in the
  input than the maximum vocabulary size, the most frequent terms will be used
  to create the vocabulary.

  The processing of each sample contains the following steps:
    1. standardize each sample (usually lowercasing + punctuation stripping)
    2. split each sample into substrings (usually words)
    3. recombine substrings into tokens (usually ngrams)
    4. index tokens (associate a unique int value with each token)
    5. transform each sample using this index, either into a vector of ints or
       a dense float vector.

  Some notes on passing Callables to customize splitting and normalization for
  this layer:
    1. Any callable can be passed to this Layer, but if you want to serialize
       this object you should only pass functions that are registered Keras
       serializables (see `tf.keras.utils.register_keras_serializable` for more
       details).
    2. When using a custom callable for `standardize`, the data received
       by the callable will be exactly as passed to this layer. The callable
       should return a tensor of the same shape as the input.
    3. When using a custom callable for `split`, the data received by the
       callable will have the 1st dimension squeezed out - instead of
       `[["string to split"], ["another string to split"]]`, the Callable will
       see `["string to split", "another string to split"]`. The callable should
       return a Tensor with the first dimension containing the split tokens -
       in this example, we should see something like `[["string", "to",
       "split"], ["another", "string", "to", "split"]]`. This makes the callable
       site natively compatible with `tf.strings.split()`.

  Args:
    max_tokens: The maximum size of the vocabulary for this layer. If None,
      there is no cap on the size of the vocabulary. Note that this vocabulary
      contains 1 OOV token, so the effective number of tokens is `(max_tokens -
      1 - (1 if output == "int" else 0))`.
    standardize: Optional specification for standardization to apply to the
      input text. Values can be None (no standardization),
      'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
      Callable. Default is 'lower_and_strip_punctuation'.
    split: Optional specification for splitting the input text. Values can be
      None (no splitting), 'whitespace' (split on ASCII whitespace), or a
      Callable. The default is 'whitespace'.
    ngrams: Optional specification for ngrams to create from the possibly-split
      input text. Values can be None, an integer or tuple of integers; passing
      an integer will create ngrams up to that integer, and passing a tuple of
      integers will create ngrams for the specified values in the tuple. Passing
      None means that no ngrams will be created.
    output_mode: Optional specification for the output of the layer. Values can
      be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
        "int": Outputs integer indices, one integer index per split string
          token. When output == "int", 0 is reserved for masked locations;
          this reduces the vocab size to max_tokens-2 instead of max_tokens-1
        "binary": Outputs a single int array per batch, of either vocab_size or
          max_tokens size, containing 1s in all elements where the token mapped
          to that index exists at least once in the batch item.
        "count": As "binary", but the int array contains a count of the number
          of times the token at that index appeared in the batch item.
        "tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
          value in each token slot.
    output_sequence_length: Only valid in INT mode. If set, the output will have
      its time dimension padded or truncated to exactly `output_sequence_length`
      values, resulting in a tensor of shape [batch_size,
      output_sequence_length] regardless of how many tokens resulted from the
      splitting step. Defaults to None.
    pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
      True, the output will have its feature axis padded to `max_tokens` even if
      the number of unique tokens in the vocabulary is less than max_tokens,
      resulting in a tensor of shape [batch_size, max_tokens] regardless of
      vocabulary size. Defaults to True.
    vocabulary: An optional list of vocabulary terms, or a path to a text file
      containing a vocabulary to load into this layer. The file should contain
      one token per line. If the list or file contains the same token multiple
      times, an error will be thrown.

  Example:

  This example instantiates a TextVectorization layer that lowercases text,
  splits on whitespace, strips punctuation, and outputs integer vocab indices.

  >>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"])
  >>> max_features = 5000  # Maximum vocab size.
  >>> max_len = 4  # Sequence length to pad the outputs to.
  >>> embedding_dims = 2
  >>>
  >>> # Create the layer.
  >>> vectorize_layer = TextVectorization(
  ...  max_tokens=max_features,
  ...  output_mode='int',
  ...  output_sequence_length=max_len)
  >>>
  >>> # Now that the vocab layer has been created, call `adapt` on the text-only
  >>> # dataset to create the vocabulary. You don't have to batch, but for large
  >>> # datasets this means we're not keeping spare copies of the dataset.
  >>> vectorize_layer.adapt(text_dataset.batch(64))
  >>>
  >>> # Create the model that uses the vectorize text layer
  >>> model = tf.keras.models.Sequential()
  >>>
  >>> # Start by creating an explicit input layer. It needs to have a shape of
  >>> # (1,) (because we need to guarantee that there is exactly one string
  >>> # input per batch), and the dtype needs to be 'string'.
  >>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
  >>>
  >>> # The first layer in our model is the vectorization layer. After this
  >>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
  >>> # indices.
  >>> model.add(vectorize_layer)
  >>>
  >>> # Now, the model can map strings to integers, and you can add an embedding
  >>> # layer to map these integers to learned embeddings.
  >>> input_data = [["foo qux bar"], ["qux baz"]]
  >>> model.predict(input_data)
  array([[2, 1, 4, 0],
         [1, 3, 0, 0]])

  Example:

  This example instantiates a TextVectorization layer by passing a list
  of vocabulary terms to the layer's __init__ method.

  >>> vocab_data = ["earth", "wind", "and", "fire"]
  >>> max_len = 4  # Sequence length to pad the outputs to.
  >>>
  >>> # Create the layer, passing the vocab directly. You can also pass the
  >>> # vocabulary arg a path to a file containing one vocabulary word per
  >>> # line.
  >>> vectorize_layer = TextVectorization(
  ...  max_tokens=max_features,
  ...  output_mode='int',
  ...  output_sequence_length=max_len,
  ...  vocabulary=vocab_data)
  >>>
  >>> # Because we've passed the vocabulary directly, we don't need to adapt
  >>> # the layer - the vocabulary is already set. The vocabulary contains the
  >>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
  >>> vectorize_layer.get_vocabulary()
  ['', '[UNK]', 'earth', 'wind', 'and', 'fire']
  """
  # TODO(momernick): Add an examples section to the docstring.

  def __init__(self,
               max_tokens=None,
               standardize=LOWER_AND_STRIP_PUNCTUATION,
               split=SPLIT_ON_WHITESPACE,
               ngrams=None,
               output_mode=INT,
               output_sequence_length=None,
               pad_to_max_tokens=True,
               vocabulary=None,
               **kwargs):
    # This layer only applies to string processing, and so should only have
    # a dtype of 'string'.
    if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
      raise ValueError("TextVectorization may only have a dtype of string.")
    elif "dtype" not in kwargs:
      kwargs["dtype"] = dtypes.string

    # 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
    layer_utils.validate_string_arg(
        standardize,
        allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
        layer_name="TextVectorization",
        arg_name="standardize",
        allow_none=True,
        allow_callables=True)

    # 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
    layer_utils.validate_string_arg(
        split,
        allowable_strings=(SPLIT_ON_WHITESPACE),
        layer_name="TextVectorization",
        arg_name="split",
        allow_none=True,
        allow_callables=True)

    # 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
    layer_utils.validate_string_arg(
        output_mode,
        allowable_strings=(INT, COUNT, BINARY, TFIDF),
        layer_name="TextVectorization",
        arg_name="output_mode",
        allow_none=True)

    # 'ngrams' must be one of (None, int, tuple(int))
    if not (ngrams is None or
            isinstance(ngrams, int) or
            isinstance(ngrams, tuple) and
            all(isinstance(item, int) for item in ngrams)):
      raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
                        "integers. Got %s") % (ngrams,))

    # 'output_sequence_length' must be one of (None, int) and is only
    # set if output_mode is INT.
    if (output_mode == INT and not (isinstance(output_sequence_length, int) or
                                    (output_sequence_length is None))):
      raise ValueError("`output_sequence_length` must be either None or an "
                       "integer when `output_mode` is 'int'. "
                       "Got %s" % output_sequence_length)

    if output_mode != INT and output_sequence_length is not None:
      raise ValueError("`output_sequence_length` must not be set if "
                       "`output_mode` is not 'int'.")

    # If max_tokens is set, the value must be greater than 1 - otherwise we
    # are creating a 0-element vocab, which doesn't make sense.
    # NOTE(review): the guard only rejects values < 1 while the message says
    # "> 1" - confirm whether max_tokens == 1 should also be rejected.
    if max_tokens is not None and max_tokens < 1:
      raise ValueError("max_tokens must be > 1.")

    self._max_tokens = max_tokens

    # In INT mode, the zero value is reserved for padding (per Keras standard
    # padding approaches). In non-INT modes, there is no padding so we can set
    # the OOV value to zero instead of one.
    self._oov_value = 1 if output_mode == INT else 0

    self._standardize = standardize
    self._split = split
    self._ngrams_arg = ngrams
    if isinstance(ngrams, int):
      # An int N means "all ngram orders 1..N".
      self._ngrams = tuple(range(1, ngrams + 1))
    else:
      self._ngrams = ngrams

    self._output_mode = output_mode
    self._output_sequence_length = output_sequence_length
    self._pad_to_max = pad_to_max_tokens
    vocab_size = 0
    # IndexLookup needs to keep track the current vocab size outside of its
    # layer weights. We persist it as a hidden part of the config during
    # serialization.
    if "vocab_size" in kwargs:
      vocab_size = kwargs["vocab_size"]
      del kwargs["vocab_size"]

    super(TextVectorization, self).__init__(
        combiner=None,
        **kwargs)
    base_preprocessing_layer.keras_kpl_gauge.get_cell(
        "TextVectorization").set(True)

    # In INT mode (or when output processing is disabled) the empty string
    # acts as the mask token for the underlying lookup layer.
    mask_token = "" if output_mode in [None, INT] else None
    self._index_lookup_layer = self._get_index_lookup_class()(
        max_tokens=max_tokens,
        mask_token=mask_token,
        vocabulary=vocabulary,
        pad_to_max_tokens=pad_to_max_tokens,
        output_mode=output_mode if output_mode is not None else INT,
        vocab_size=vocab_size)

  def _get_index_lookup_class(self):
    """Returns the lookup-layer class used for token->index mapping (V2)."""
    return string_lookup.StringLookup
  # End of V1/V2 shim points.

  def _assert_same_type(self, expected_type, values, value_name):
    """Raises RuntimeError if `values.dtype` differs from `expected_type`."""
    if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
      raise RuntimeError("Expected %s type %s, got %s" %
                         (value_name, expected_type, values.dtype))

  def compute_output_shape(self, input_shape):
    """Computes the output shape produced for `input_shape` inputs."""
    if self._output_mode != INT:
      # Non-INT modes emit one row of size max_tokens per batch item.
      return tensor_shape.TensorShape([input_shape[0], self._max_tokens])

    if self._output_mode == INT and self._split is None:
      if len(input_shape) == 1:
        input_shape = tuple(input_shape) + (1,)
      return tensor_shape.TensorShape(input_shape)

    if self._output_mode == INT and self._split is not None:
      input_shape = list(input_shape)
      if len(input_shape) == 1:
        input_shape = input_shape + [self._output_sequence_length]
      else:
        input_shape[1] = self._output_sequence_length
      return tensor_shape.TensorShape(input_shape)

  def compute_output_signature(self, input_spec):
    """Computes the output TensorSpec: int64 in INT mode, floatx otherwise."""
    output_shape = self.compute_output_shape(input_spec.shape.as_list())
    output_dtype = dtypes.int64 if self._output_mode == INT else K.floatx()
    return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)

  def adapt(self, data, reset_state=True):
    """Fits the state of the preprocessing layer to the dataset.

    Overrides the default adapt method to apply relevant preprocessing to the
    inputs before passing to the combiner.

    Args:
      data: The data to train on. It can be passed either as a tf.data Dataset,
        as a NumPy array, a string tensor, or as a list of texts.
      reset_state: Optional argument specifying whether to clear the state of
        the layer at the start of the call to `adapt`. This must be True for
        this layer, which does not support repeated calls to `adapt`.

    Raises:
      ValueError: If `reset_state` is False, or `data` is not a Dataset/array.
    """
    if not reset_state:
      raise ValueError("TextVectorization does not support streaming adapts.")

    # Build the layer explicitly with the original data shape instead of relying
    # on an implicit call to `build` in the base layer's `adapt`, since
    # preprocessing changes the input shape.
    if isinstance(data, (list, tuple, np.ndarray)):
      data = ops.convert_to_tensor_v2_with_dispatch(data)

    if isinstance(data, ops.Tensor):
      if data.shape.rank == 1:
        data = array_ops.expand_dims(data, axis=-1)
      self.build(data.shape)
      preprocessed_inputs = self._preprocess(data)
    elif isinstance(data, dataset_ops.DatasetV2):
      # TODO(momernick): Replace this with a more V2-friendly API.
      shape = dataset_ops.get_legacy_output_shapes(data)
      if not isinstance(shape, tensor_shape.TensorShape):
        raise ValueError("The dataset passed to 'adapt' must contain a single "
                         "tensor value.")
      if shape.rank == 0:
        data = data.map(lambda tensor: array_ops.expand_dims(tensor, 0))
        shape = dataset_ops.get_legacy_output_shapes(data)
      if shape.rank == 1:
        data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
      self.build(dataset_ops.get_legacy_output_shapes(data))
      preprocessed_inputs = data.map(self._preprocess)
    else:
      raise ValueError(
          "adapt() requires a Dataset or an array as input, got {}".format(
              type(data)))

    self._index_lookup_layer.adapt(preprocessed_inputs)

  def get_vocabulary(self):
    """Returns the current vocabulary of the underlying lookup layer."""
    return self._index_lookup_layer.get_vocabulary()

  def get_config(self):
    """Returns the serializable config of this layer."""
    # This does not include the 'vocabulary' arg, since if the vocab was passed
    # at init time it's now stored in variable state - we don't need to
    # pull it off disk again.
    config = {
        "max_tokens": self._index_lookup_layer.max_tokens,
        "standardize": self._standardize,
        "split": self._split,
        "ngrams": self._ngrams_arg,
        "output_mode": self._output_mode,
        "output_sequence_length": self._output_sequence_length,
        "pad_to_max_tokens": self._pad_to_max,
        "vocab_size": self._index_lookup_layer.vocab_size(),
    }
    base_config = super(TextVectorization, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def count_params(self):
    # This method counts the number of scalars in the weights of this layer.
    # Since this layer doesn't have any /actual/ weights (in that there's
    # nothing in this layer that can be trained - we only use the weight
    # abstraction for ease of saving!) we return 0.
    return 0

  def set_vocabulary(self, vocab, idf_weights=None):
    """Sets vocabulary (and optionally document frequency) data for this layer.

    This method sets the vocabulary and idf weights for this layer directly,
    instead of analyzing a dataset through 'adapt'. It should be used whenever
    the vocab (and optionally document frequency) information is already known.
    If vocabulary data is already present in the layer, this method will replace
    it.

    Args:
      vocab: An array of string tokens, or a path to a file containing one
        token per line.
      idf_weights: An array of document frequency data with equal length to
        vocab. Only necessary if the layer output_mode is TFIDF.

    Raises:
      ValueError: If there are too many inputs, the inputs do not match, or
        input data is missing.
      RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens when "binary", "count", and "tfidf" modes,
        if "pad_to_max_tokens" is False and the layer itself has already been
        called.
    """
    self._index_lookup_layer.set_vocabulary(vocab, idf_weights=idf_weights)

  def build(self, input_shape):
    # We have to use 'and not ==' here, because input_shape[1] !/== 1 can result
    # in None for undefined shape axes. If using 'and !=', this causes the
    # expression to evaluate to False instead of True if the shape is undefined;
    # the expression needs to evaluate to True in that case.
    if self._split is not None:
      if input_shape.ndims > 1 and not input_shape[-1] == 1:  # pylint: disable=g-comparison-negation
        raise RuntimeError(
            "When using TextVectorization to tokenize strings, the innermost "
            "dimension of the input array must be 1, got shape "
            "{}".format(input_shape))

    super(TextVectorization, self).build(input_shape)

  def _set_state_variables(self, updates):
    """Pushes vocab (and idf weights in TFIDF mode) from `updates` into state."""
    if not self.built:
      raise RuntimeError("_set_state_variables() must be called after build().")
    if self._output_mode == TFIDF:
      self.set_vocabulary(updates[_VOCAB_NAME], idf_weights=updates[_IDF_NAME])
    else:
      self.set_vocabulary(updates[_VOCAB_NAME])

  def _preprocess(self, inputs):
    """Applies standardization, splitting, and ngram creation to `inputs`."""
    if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
      if tf_utils.is_ragged(inputs):
        lowercase_inputs = ragged_functional_ops.map_flat_values(
            gen_string_ops.string_lower, inputs)
        # Depending on configuration, we may never touch the non-data tensor
        # in the ragged inputs tensor. If that is the case, and this is the
        # only layer in the keras model, running it will throw an error.
        # To get around this, we wrap the result in an identity.
        lowercase_inputs = array_ops.identity(lowercase_inputs)
      else:
        lowercase_inputs = gen_string_ops.string_lower(inputs)
      inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
                                        "")
    elif callable(self._standardize):
      inputs = self._standardize(inputs)
    elif self._standardize is not None:
      raise ValueError(("%s is not a supported standardization. "
                        "TextVectorization supports the following options "
                        "for `standardize`: None, "
                        "'lower_and_strip_punctuation', or a "
                        "Callable.") % self._standardize)

    if self._split is not None:
      # If we are splitting, we validate that the 1st axis is of dimension 1 and
      # so can be squeezed out. We do this here instead of after splitting for
      # performance reasons - it's more expensive to squeeze a ragged tensor.
      if inputs.shape.ndims > 1:
        inputs = array_ops.squeeze(inputs, axis=-1)
      if self._split == SPLIT_ON_WHITESPACE:
        # This treats multiple whitespaces as one whitespace, and strips leading
        # and trailing whitespace.
        inputs = ragged_string_ops.string_split_v2(inputs)
      elif callable(self._split):
        inputs = self._split(inputs)
      else:
        raise ValueError(
            ("%s is not a supported splitting."
             "TextVectorization supports the following options "
             "for `split`: None, 'whitespace', or a Callable.") % self._split)

    # Note that 'inputs' here can be either ragged or dense depending on the
    # configuration choices for this Layer. The strings.ngrams op, however, does
    # support both ragged and dense inputs.
    if self._ngrams is not None:
      inputs = ragged_string_ops.ngrams(
          inputs, ngram_width=self._ngrams, separator=" ")

    return inputs

  def call(self, inputs):
    """Transforms a batch of strings into the configured output representation."""
    if isinstance(inputs, (list, tuple, np.ndarray)):
      inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)

    inputs = self._preprocess(inputs)

    # If we're not doing any output processing, return right away.
    if self._output_mode is None:
      return inputs

    lookup_data = self._index_lookup_layer(inputs)
    if self._output_mode == INT:

      # Once we have the dense tensor, we can return it if we weren't given a
      # fixed output sequence length. If we were, though, we have to dynamically
      # choose whether to pad or trim it based on each tensor.

      # We need to convert to dense if we have a ragged tensor.
      if tf_utils.is_ragged(lookup_data):
        dense_data = lookup_data.to_tensor(default_value=0)
      else:
        dense_data = lookup_data

      if self._output_sequence_length is None:
        return dense_data
      else:
        sequence_len = K.shape(dense_data)[1]
        pad_amt = self._output_sequence_length - sequence_len
        pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
        slice_fn = lambda: dense_data[:, :self._output_sequence_length]
        output_tensor = control_flow_ops.cond(
            sequence_len < self._output_sequence_length,
            true_fn=pad_fn,
            false_fn=slice_fn)
        # cond() loses the static last-dim size; restore it explicitly.
        output_shape = output_tensor.shape.as_list()
        output_shape[-1] = self._output_sequence_length
        output_tensor.set_shape(tensor_shape.TensorShape(output_shape))
        return output_tensor

    return lookup_data
| |
import math
import time
from distutils.version import LooseVersion
from cassandra import FunctionFailure
from assertions import assert_invalid, assert_none, assert_one
from dtest import CASSANDRA_VERSION_FROM_BUILD, Tester, debug
from tools import since
@since('2.2')
class TestUserFunctions(Tester):
def __init__(self, *args, **kwargs):
if CASSANDRA_VERSION_FROM_BUILD >= '3.0':
kwargs['cluster_options'] = {'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'}
else:
kwargs['cluster_options'] = {'enable_user_defined_functions': 'true'}
Tester.__init__(self, *args, **kwargs)
def prepare(self, create_keyspace=True, nodes=1, rf=1):
cluster = self.cluster
cluster.populate(nodes).start()
node1 = cluster.nodelist()[0]
time.sleep(0.2)
session = self.patient_cql_connection(node1)
if create_keyspace:
self.create_ks(session, 'ks', rf)
return session
def test_migration(self):
""" Test migration of user functions """
cluster = self.cluster
# Uses 3 nodes just to make sure function mutations are correctly serialized
cluster.populate(3).start()
node1 = cluster.nodelist()[0]
node2 = cluster.nodelist()[1]
node3 = cluster.nodelist()[2]
time.sleep(0.2)
# The latter three sessions use a whitelist policy, and then don't wait for schema agreement
# So we create `schema_wait_session` to use for schema agreement blocking, and DDL changes
schema_wait_session = self.patient_cql_connection(node1)
self.create_ks(schema_wait_session, 'ks', 1)
schema_wait_session.cluster.control_connection.wait_for_schema_agreement()
node1_session = self.patient_exclusive_cql_connection(node1, keyspace='ks')
node2_session = self.patient_exclusive_cql_connection(node2, keyspace='ks')
node3_session = self.patient_exclusive_cql_connection(node3, keyspace='ks')
schema_wait_session.execute("""
CREATE TABLE udf_kv (
key int primary key,
value double
);
""")
schema_wait_session.cluster.control_connection.wait_for_schema_agreement()
node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(1, 1))
node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(2, 2))
node1_session.execute("INSERT INTO udf_kv (key, value) VALUES ({}, {})".format(3, 3))
schema_wait_session.execute("""
create or replace function x_sin ( input double ) called on null input
returns double language java as 'if (input==null) return null;
return Double.valueOf(Math.sin(input.doubleValue()));'
""")
schema_wait_session.execute("""
create or replace function x_cos ( input double ) called on null input
returns double language java as 'if (input==null) return null;
return Double.valueOf(Math.cos(input.doubleValue()));'
""")
schema_wait_session.execute("""
create or replace function x_tan ( input double ) called on null input
returns double language java as 'if (input==null) return null;
return Double.valueOf(Math.tan(input.doubleValue()));'
""")
schema_wait_session.cluster.control_connection.wait_for_schema_agreement()
assert_one(node1_session,
"SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 1,
[1, 1.0, 0.8414709848078965, 0.5403023058681398, 1.5574077246549023])
assert_one(node2_session,
"SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 2,
[2, 2.0, math.sin(2.0), math.cos(2.0), math.tan(2.0)])
assert_one(node3_session,
"SELECT key, value, x_sin(value), x_cos(value), x_tan(value) FROM ks.udf_kv where key = %d" % 3,
[3, 3.0, math.sin(3.0), math.cos(3.0), math.tan(3.0)])
session4 = self.patient_cql_connection(node1)
# check that functions are correctly confined to namespaces
assert_invalid(session4,
"SELECT key, value, sin(value), cos(value), tan(value) FROM ks.udf_kv where key = 4",
"Unknown function 'sin'")
# try giving existing function bad input, should error
assert_invalid(node1_session,
"SELECT key, value, x_sin(key), foo_cos(KEYy), foo_tan(key) FROM ks.udf_kv where key = 1",
"Type error: key cannot be passed as argument 0 of function ks.x_sin of type double")
node2_session.execute("drop function x_sin")
node3_session.execute("drop function x_cos")
node1_session.execute("drop function x_tan")
schema_wait_session.cluster.control_connection.wait_for_schema_agreement()
assert_invalid(node1_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
assert_invalid(node2_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
assert_invalid(node3_session, "SELECT key, value, sin(value), cos(value), tan(value) FROM udf_kv where key = 1")
# try creating function returning the wrong type, should error
assert_invalid(node1_session,
"CREATE FUNCTION bad_sin ( input double ) CALLED ON NULL INPUT RETURNS uuid LANGUAGE java AS 'return Math.sin(input);';",
"Type mismatch: cannot convert from double to UUID")
def udf_overload_test(self):
session = self.prepare(nodes=3)
session.execute("CREATE TABLE tab (k text PRIMARY KEY, v int)")
session.execute("INSERT INTO tab (k, v) VALUES ('foo' , 1);")
# create overloaded udfs
session.execute("CREATE FUNCTION overloaded(v varchar) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
session.execute("CREATE OR REPLACE FUNCTION overloaded(i int) called on null input RETURNS text LANGUAGE java AS 'return \"f2\";'")
session.execute("CREATE OR REPLACE FUNCTION overloaded(v1 text, v2 text) called on null input RETURNS text LANGUAGE java AS 'return \"f3\";'")
session.execute("CREATE OR REPLACE FUNCTION overloaded(v ascii) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'")
# ensure that works with correct specificity
assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded('foo')")
assert_none(session, "SELECT v FROM tab WHERE k = overloaded((text) 'foo')")
assert_none(session, "SELECT v FROM tab WHERE k = overloaded((ascii) 'foo')")
assert_none(session, "SELECT v FROM tab WHERE k = overloaded((varchar) 'foo')")
# try non-existent functions
assert_invalid(session, "DROP FUNCTION overloaded(boolean)")
assert_invalid(session, "DROP FUNCTION overloaded(bigint)")
# try dropping overloaded - should fail because ambiguous
assert_invalid(session, "DROP FUNCTION overloaded")
session.execute("DROP FUNCTION overloaded(varchar)")
assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo')")
session.execute("DROP FUNCTION overloaded(text, text)")
assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((text)'foo',(text)'bar')")
session.execute("DROP FUNCTION overloaded(ascii)")
assert_invalid(session, "SELECT v FROM tab WHERE k = overloaded((ascii)'foo')")
# should now work - unambiguous
session.execute("DROP FUNCTION overloaded")
def udf_scripting_test(self):
session = self.prepare()
session.execute("create table nums (key int primary key, val double);")
for x in range(1, 4):
session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))
session.execute("CREATE FUNCTION x_sin(val double) called on null input returns double language javascript as 'Math.sin(val)'")
assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 1, [1, 1.0, math.sin(1.0)])
assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 2, [2, 2.0, math.sin(2.0)])
assert_one(session, "SELECT key, val, x_sin(val) FROM nums where key = %d" % 3, [3, 3.0, math.sin(3.0)])
session.execute("create function y_sin(val double) called on null input returns double language javascript as 'Math.sin(val).toString()'")
assert_invalid(session, "select y_sin(val) from nums where key = 1", expected=FunctionFailure)
assert_invalid(session, "create function compilefail(key int) called on null input returns double language javascript as 'foo bar';")
session.execute("create function plustwo(key int) called on null input returns double language javascript as 'key+2'")
assert_one(session, "select plustwo(key) from nums where key = 3", [5])
def default_aggregate_test(self):
session = self.prepare()
session.execute("create table nums (key int primary key, val double);")
for x in range(1, 10):
session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, float(x)))
assert_one(session, "SELECT min(key) FROM nums", [1])
assert_one(session, "SELECT max(val) FROM nums", [9.0])
assert_one(session, "SELECT sum(key) FROM nums", [45])
assert_one(session, "SELECT avg(val) FROM nums", [5.0])
assert_one(session, "SELECT count(*) FROM nums", [9])
def aggregate_udf_test(self):
session = self.prepare()
session.execute("create table nums (key int primary key, val int);")
for x in range(1, 4):
session.execute("INSERT INTO nums (key, val) VALUES (%d, %d)" % (x, x))
session.execute("create function plus(key int, val int) called on null input returns int language java as 'return Integer.valueOf(key.intValue() + val.intValue());'")
session.execute("create function stri(key int) called on null input returns text language java as 'return key.toString();'")
session.execute("create aggregate suma (int) sfunc plus stype int finalfunc stri initcond 10")
assert_one(session, "select suma(val) from nums", ["16"])
session.execute("create function test(a int, b double) called on null input returns int language javascript as 'a + b;'")
session.execute("create aggregate aggy(double) sfunc test stype int")
assert_invalid(session, "create aggregate aggtwo(int) sfunc aggy stype int")
assert_invalid(session, "create aggregate aggthree(int) sfunc test stype int finalfunc aggtwo")
def udf_with_udt_test(self):
"""
Test UDFs that operate on non-frozen UDTs.
@jira_ticket CASSANDRA-7423
@since 3.6
"""
session = self.prepare()
session.execute("create type test (a text, b int);")
session.execute("create function funk(udt test) called on null input returns int language java as 'return Integer.valueOf(udt.getInt(\"b\"));';")
if LooseVersion(self.cluster.version()) >= LooseVersion('3.6'):
frozen_vals = (False, True)
else:
frozen_vals = (True,)
for frozen in frozen_vals:
debug("Using {} UDTs".format("frozen" if frozen else "non-frozen"))
table_name = "tab_frozen" if frozen else "tab"
column_type = "frozen<test>" if frozen else "test"
session.execute("create table {} (key int primary key, udt {});".format(table_name, column_type))
session.execute("insert into %s (key, udt) values (1, {a: 'un', b:1});" % (table_name,))
session.execute("insert into %s (key, udt) values (2, {a: 'deux', b:2});" % (table_name,))
session.execute("insert into %s (key, udt) values (3, {a: 'trois', b:3});" % (table_name,))
assert_one(session, "select sum(funk(udt)) from {}".format(table_name), [6])
assert_invalid(session, "drop type test;")
@since('2.2')
def udf_with_udt_keyspace_isolation_test(self):
    """
    Ensure functions dont allow a UDT from another keyspace
    @jira_ticket CASSANDRA-9409
    @since 2.2
    """
    session = self.prepare()
    # The UDT lives in the keyspace set up by prepare() (ks) ...
    session.execute("create type udt (a text, b int);")
    # ... while the session is switched to a different keyspace.
    self.create_ks(session, 'user_ks', 1)
    # ensure we cannot use a udt from another keyspace as function argument
    assert_invalid(
        session,
        "CREATE FUNCTION overloaded(v ks.udt) called on null input RETURNS text LANGUAGE java AS 'return \"f1\";'",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )
    # ensure we cannot use a udt from another keyspace as return value
    assert_invalid(
        session,
        ("CREATE FUNCTION test(v text) called on null input RETURNS ks.udt "
         "LANGUAGE java AS 'return null;';"),
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )
def aggregate_with_udt_keyspace_isolation_test(self):
    """
    Ensure aggregates dont allow a UDT from another keyspace
    @jira_ticket CASSANDRA-9409
    """
    session = self.prepare()
    # Type is created in the prepare() keyspace (ks), then we move to user_ks.
    session.execute("create type udt (a int);")
    self.create_ks(session, 'user_ks', 1)
    # Aggregate signature refers to ks.udt from user_ks -> must be rejected.
    assert_invalid(
        session,
        "create aggregate suma (ks.udt) sfunc plus stype int finalfunc stri initcond 10",
        "Statement on keyspace user_ks cannot refer to a user type in keyspace ks"
    )
| |
#!/usr/bin/python
import sqlite3
import os.path
from os import makedirs
import sys
import re
from subprocess import call
import argparse
import textwrap
import csv
import StringIO
def list_tasks(dbname="results.db"):
    """Return a list of all task names (tables) stored in the database."""
    cursor = connect_db(dbname).cursor()
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    names = []
    for row in cursor.fetchall():
        names.append(row[0])
    return names
def describe_tasks(tasks, dbname="results.db"):
    """Return the parameters ("name type" strings) shared by all given tasks.

    A single task name may be passed instead of a list.
    """
    db = connect_db(dbname)
    cursor = db.cursor()
    if not isinstance(tasks, list):
        tasks = [tasks]
    # Intersect the column lists task by task; only shared columns survive.
    shared_params = []
    for task in tasks:
        cursor.execute("pragma table_info([{}])".format(task))
        task_params = [" ".join((row[1], row[2])) for row in cursor.fetchall()]
        if not shared_params:
            shared_params = task_params
        else:
            shared_params = intersection(shared_params, task_params)
    return shared_params
def retrieve_data(x_param, y_param, filters, tasks, dbname="results.db"):
    """
    Return a list of selected parameters and a data structure (list of list of tuples),
    - 1st index corresponds to the task,
    - 2nd index corresponds to the row,
    - 3rd index corresponds to the selected parameter.
    The key parameters that define a benchmark are always selected.
    """
    db = connect_db(dbname)
    data = []
    # x/y params arrive as "name type" strings; only the name goes into SQL.
    cols_to_select = [x_param.split()[0], y_param.split()[0]]
    # always pass shared primary key information (they define a distinct benchmark)
    primary_keys = []
    for t in range(len(tasks)):
        if t == 0:
            primary_keys = retrieve_primary_keys(tasks[t], db)
        else:
            # keep only the key columns common to every task
            primary_keys = intersection(primary_keys, retrieve_primary_keys(tasks[t], db))
    for key in primary_keys:
        if key not in cols_to_select:
            cols_to_select.append(key)
    # also pass filter parameter value in
    for f in filters:
        if f.param not in cols_to_select:
            cols_to_select.append(f.param)
    sql_val_args = []
    filter_command = ""
    for t in range(len(tasks)):
        select_command = "SELECT DISTINCT {} FROM {} ".format(','.join(cols_to_select), task_name(tasks[t]))
        if filters:
            # first time, still need to populate sql_val_args and make filter_command
            # (the same WHERE clause and bound values are reused for every task)
            if t == 0:
                filter_command = "WHERE "
                for f in range(len(filters)):
                    filter_command += str(filters[f])
                    sql_val_args.extend(filters[f].args)
                    if f < len(filters) - 1:
                        filter_command += " AND "
            select_command += filter_command
        select_command += ';'
        print(select_command)
        cursor = db.cursor()
        cursor.execute(select_command, sql_val_args)
        data.append(tuple(tuple(row) for row in cursor.fetchall()));
    return cols_to_select, data
def export_data_csv(selected_cols, data):
    """
    Export retrieved data as in-memory csv files (a generator).

    Args:
        selected_cols: column names, written as the header row of each file.
        data: per-task row collections, as returned by retrieve_data.

    Yields:
        One in-memory file object per task, containing the header row
        followed by that task's rows.  Naming/placement is left to the caller.
    """
    # The StringIO *module* is Python-2 only; fall back to io.StringIO on
    # Python 3 so this helper works on both interpreters.
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    for task_data in data:
        csvf = StringIO()
        writer = csv.writer(csvf)
        # header information
        writer.writerow(selected_cols)
        for row in task_data:
            writer.writerow(row)
        yield csvf
def export_data_csv_todisk(selected_cols, data, tasks, dir="benchtracker_data"):
    """
    Export retrieved data as csv files on disk, one file per task.

    Each task is written to <dir>/<task name>.csv, with '/' in the task
    name replaced by '.' so the name stays a single path component.

    Args:
        selected_cols: column names for the csv header row.
        data: per-task row collections, as returned by retrieve_data.
        tasks: task names, parallel to data; used to build the file names.
        dir: output directory, created if it does not exist.
    """
    if not os.path.exists(dir):
        makedirs(dir)
    # enumerate keeps the file index in lockstep with the generated csv files
    for t, csvf in enumerate(export_data_csv(selected_cols, data)):
        with open("".join([dir, '/', tasks[t].replace('/', '.'), '.csv']), 'w') as f:
            csvf.seek(0)
            # copy in 1 MB chunks to keep memory bounded
            buf = csvf.read(1048576)
            while buf:
                f.write(buf)
                buf = csvf.read(1048576)
def describe_param(param, mode, tasks, dbname="results.db"):
    """
    Give back metainformation about a parameter to allow for easier filtering.
    Param would be an element of the list returned by describe_tasks - space separated name and type
    Returns a 2-tuple describing the parameter type and values for some tasks of a database.
    - 1st value is either 'range' or 'categorical'
    - 2nd value is either a 2-tuple for range types, or a n-tuple for categorical
    """
    db = connect_db(dbname)
    cursor = db.cursor()
    (param_name, param_type) = param.split()
    # TEXT columns can only be described categorically, whatever was asked for.
    if param_type == "TEXT":
        mode = 'categorical'
    elif mode not in {'categorical', 'range'}:
        raise ValueError
    subquery = ""
    min_param = "min_p"
    max_param = "max_p"
    if not isinstance(tasks, list):
        # single task: query its table directly
        subquery = task_name(tasks)
        min_param = max_param = param_name
    else:
        # multiple tasks: UNION ALL each per-table result into one derived table
        subquery += '('
        for t in range(len(tasks)):
            if mode == "categorical":
                subquery += "SELECT DISTINCT {} FROM {}".format(param_name, task_name(tasks[t]))
            else:
                subquery += "SELECT MIN({0}) as min_p, MAX({0}) as max_p FROM {1}".format(param_name, task_name(tasks[t]))
            if t < len(tasks) - 1:
                subquery += " UNION ALL "
        subquery += ')'
    print(subquery)
    # categorical data, return a list of all distinct values
    if mode == 'categorical':
        cursor.execute("SELECT DISTINCT {} FROM {};".format(param_name, subquery))
        return (mode, tuple(row[0] for row in cursor.fetchall()))
    # ranged data, return (min, max)
    else:
        cursor.execute("SELECT MIN({}), MAX({}) FROM {};".format(min_param, max_param, subquery))
        return (mode, tuple(cursor.fetchone()))
def connect_db(dbname="results.db"):
    """Open a connection to dbname, raising IOError if the file is missing."""
    if os.path.isfile(dbname):
        connection = sqlite3.connect(dbname)
        # Row factory lets callers access result columns by name as well.
        connection.row_factory = sqlite3.Row
        return connection
    print("{} does not exist".format(dbname))
    raise IOError(dbname)
# filter object
valid_filter_methods = {"IN", "BETWEEN", "LIKE", "=", "<>", "!=", ">", "<", ">=", "<="}

class Task_filter:
    """One SQL WHERE-clause fragment: a parameter, a comparison method, and
    the values to bind against its placeholders."""

    def __init__(self, param, method, args):
        self.param = param
        normalized = method.upper()
        if normalized not in valid_filter_methods:
            print(method, "is not a supported filter method")
            raise ValueError
        self.method = normalized
        self.args = args

    def __str__(self):
        # Placeholder shape depends on the method: BETWEEN binds exactly two
        # values as "? AND ?", IN wraps its placeholder list in parentheses,
        # everything else uses the plain comma-separated placeholders.
        if self.method == "BETWEEN":
            placeholders = "? AND ?"
        elif self.method == "IN":
            placeholders = '(' + sql_substitute(self.args) + ')'
        else:
            placeholders = sql_substitute(self.args)
        return "({} {} {})".format(self.param, self.method, placeholders)
# internal utilities
def task_name(task):
    """Bracket-quote a task name so it is a valid SQLite table identifier."""
    return "".join(("[", task, "]"))
def intersection(first, other):
    """Return the items of `first` that also occur in `other`.

    The result preserves the order (and duplicates) of `first`.
    """
    members = set(other)
    return [item for item in first if item in members]
def sql_substitute(args):
    """Return a comma-separated string of '?' placeholders, one per argument."""
    return ",".join("?" * len(args))
def retrieve_primary_keys(task, db):
    """Return the names of the primary-key columns of the given task table."""
    cursor = db.cursor()
    cursor.execute("PRAGMA table_info(%s)" % task_name(task))
    column_info = cursor.fetchall()
    primary_keys = []
    for info in column_info:
        print(info)
        # table_info column 5 is 'pk': non-zero for primary-key members.
        if info[5] != 0:
            print("key param:", info[1])
            # column 1 is the column name
            primary_keys.append(info[1])
    return primary_keys
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_database_advisor_request(
    resource_group_name: str,
    server_name: str,
    database_name: str,
    advisor_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing recommended actions of a database advisor.

    NOTE: AutoRest-generated; edits will be lost on regeneration.
    """
    api_version = "2020-11-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}/recommendedActions')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serverName": _SERIALIZER.url("server_name", server_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "advisorName": _SERIALIZER.url("advisor_name", advisor_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    server_name: str,
    database_name: str,
    advisor_name: str,
    recommended_action_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single database recommended action.

    NOTE: AutoRest-generated; edits will be lost on regeneration.
    """
    api_version = "2020-11-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}/recommendedActions/{recommendedActionName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serverName": _SERIALIZER.url("server_name", server_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "advisorName": _SERIALIZER.url("advisor_name", advisor_name, 'str'),
        "recommendedActionName": _SERIALIZER.url("recommended_action_name", recommended_action_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_update_request(
    resource_group_name: str,
    server_name: str,
    database_name: str,
    advisor_name: str,
    recommended_action_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request updating a database recommended action.

    NOTE: AutoRest-generated; edits will be lost on regeneration.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}/recommendedActions/{recommendedActionName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serverName": _SERIALIZER.url("server_name", server_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "advisorName": _SERIALIZER.url("advisor_name", advisor_name, 'str'),
        "recommendedActionName": _SERIALIZER.url("recommended_action_name", recommended_action_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when a body serialization was chosen.
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
class DatabaseRecommendedActionsOperations(object):
    """DatabaseRecommendedActionsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.sql.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list_by_database_advisor(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        advisor_name: str,
        **kwargs: Any
    ) -> List["_models.RecommendedAction"]:
        """Gets list of Database Recommended Actions.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param advisor_name: The name of the Database Advisor.
        :type advisor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of RecommendedAction, or the result of cls(response)
        :rtype: list[~azure.mgmt.sql.models.RecommendedAction]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.RecommendedAction"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # caller-supplied error_map entries override the defaults above
        error_map.update(kwargs.pop('error_map', {}))

        request = build_list_by_database_advisor_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            database_name=database_name,
            advisor_name=advisor_name,
            subscription_id=self._config.subscription_id,
            template_url=self.list_by_database_advisor.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[RecommendedAction]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list_by_database_advisor.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}/recommendedActions'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        advisor_name: str,
        recommended_action_name: str,
        **kwargs: Any
    ) -> "_models.RecommendedAction":
        """Gets a database recommended action.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param advisor_name: The name of the Database Advisor.
        :type advisor_name: str
        :param recommended_action_name: The name of Database Recommended Action.
        :type recommended_action_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RecommendedAction, or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.RecommendedAction
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecommendedAction"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            database_name=database_name,
            advisor_name=advisor_name,
            recommended_action_name=recommended_action_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RecommendedAction', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}/recommendedActions/{recommendedActionName}'}  # type: ignore

    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        advisor_name: str,
        recommended_action_name: str,
        parameters: "_models.RecommendedAction",
        **kwargs: Any
    ) -> "_models.RecommendedAction":
        """Updates a database recommended action.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param advisor_name: The name of the Database Advisor.
        :type advisor_name: str
        :param recommended_action_name: The name of Database Recommended Action.
        :type recommended_action_name: str
        :param parameters: The requested recommended action resource state.
        :type parameters: ~azure.mgmt.sql.models.RecommendedAction
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RecommendedAction, or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.RecommendedAction
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecommendedAction"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # serialize the desired state as the PATCH body
        _json = self._serialize.body(parameters, 'RecommendedAction')

        request = build_update_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            database_name=database_name,
            advisor_name=advisor_name,
            recommended_action_name=recommended_action_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RecommendedAction', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}/recommendedActions/{recommendedActionName}'}  # type: ignore
| |
#!/usr/bin/env python
# test_copy.py - unit test for COPY support
#
# Copyright (C) 2010-2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import sys
import string
from .testutils import unittest, ConnectingTestCase, decorate_all_tests
from .testutils import skip_if_no_iobase, skip_before_postgres
from io import StringIO
from itertools import cycle
from subprocess import Popen, PIPE
import psycopg2
import psycopg2.extensions
from .testutils import skip_copy_if_green, script_to_py3
from .testconfig import dsn
if sys.version_info[0] < 3:
_base = object
else:
from io import TextIOBase as _base
class MinimalRead(_base):
    """A file wrapper exposing the minimal interface to copy from."""

    def __init__(self, f):
        # wrapped file-like object; all calls are delegated to it
        self.f = f

    def read(self, size):
        return self.f.read(size)

    def readline(self):
        return self.f.readline()
class MinimalWrite(_base):
    """A file wrapper exposing the minimal interface to copy to."""

    def __init__(self, f):
        # wrapped file-like object; write() is delegated to it
        self.f = f

    def write(self, data):
        return self.f.write(data)
class CopyTests(ConnectingTestCase):
    """Tests for cursor.copy_from / copy_to / copy_expert."""

    def setUp(self):
        ConnectingTestCase.setUp(self)
        self._create_temp_table()

    def _create_temp_table(self):
        # Per-session scratch table; TEMPORARY tables vanish with the connection.
        curs = self.conn.cursor()
        curs.execute('''
            CREATE TEMPORARY TABLE tcopy (
              id serial PRIMARY KEY,
              data text
            )''')

    def test_copy_from(self):
        curs = self.conn.cursor()
        try:
            self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={})
        finally:
            curs.close()

    def test_copy_from_insane_size(self):
        # Trying to trigger a "would block" error
        curs = self.conn.cursor()
        try:
            self._copy_from(curs, nrecs=10*1024, srec=10*1024,
                copykw={'size': 20*1024*1024})
        finally:
            curs.close()

    def test_copy_from_cols(self):
        # COPY into a subset of columns; the others should come back NULL.
        curs = self.conn.cursor()
        f = StringIO()
        for i in range(10):
            f.write("%s\n" % (i,))

        f.seek(0)
        curs.copy_from(MinimalRead(f), "tcopy", columns=['id'])

        curs.execute("select * from tcopy order by id")
        self.assertEqual([(i, None) for i in range(10)], curs.fetchall())

    def test_copy_from_cols_err(self):
        curs = self.conn.cursor()
        f = StringIO()
        for i in range(10):
            f.write("%s\n" % (i,))

        f.seek(0)

        def cols():
            # generator that raises before yielding anything: the error must
            # surface from copy_from while it iterates the columns argument
            raise ZeroDivisionError()
            yield 'id'

        self.assertRaises(ZeroDivisionError,
            curs.copy_from, MinimalRead(f), "tcopy", columns=cols())

    def test_copy_to(self):
        curs = self.conn.cursor()
        try:
            self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={})
            self._copy_to(curs, srec=10*1024)
        finally:
            curs.close()

    @skip_if_no_iobase
    def test_copy_text(self):
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn
        if sys.version_info[0] < 3:
            abin = ''.join(map(chr, list(range(32, 127)) + list(range(160, 256))))
            about = abin.decode('latin1').replace('\\', '\\\\')
        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
            about = abin.replace('\\', '\\\\')

        curs = self.conn.cursor()
        curs.execute('insert into tcopy values (%s, %s)',
            (42, abin))

        import io
        f = io.StringIO()
        curs.copy_to(f, 'tcopy', columns=('data',))
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)

    @skip_if_no_iobase
    def test_copy_bytes(self):
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn
        if sys.version_info[0] < 3:
            abin = ''.join(map(chr, list(range(32, 127)) + list(range(160, 255))))
            about = abin.replace('\\', '\\\\')
        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 255))).decode('latin1')
            about = abin.replace('\\', '\\\\').encode('latin1')

        curs = self.conn.cursor()
        curs.execute('insert into tcopy values (%s, %s)',
            (42, abin))

        import io
        f = io.BytesIO()
        curs.copy_to(f, 'tcopy', columns=('data',))
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)

    @skip_if_no_iobase
    def test_copy_expert_textiobase(self):
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn
        if sys.version_info[0] < 3:
            abin = ''.join(map(chr, list(range(32, 127)) + list(range(160, 256))))
            abin = abin.decode('latin1')
            about = abin.replace('\\', '\\\\')
        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
            about = abin.replace('\\', '\\\\')

        import io
        f = io.StringIO()
        f.write(about)
        f.seek(0)

        curs = self.conn.cursor()
        psycopg2.extensions.register_type(
            psycopg2.extensions.UNICODE, curs)

        curs.copy_expert('COPY tcopy (data) FROM STDIN', f)
        curs.execute("select data from tcopy;")
        self.assertEqual(curs.fetchone()[0], abin)

        f = io.StringIO()
        curs.copy_expert('COPY tcopy (data) TO STDOUT', f)
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)

        # same tests with setting size
        f = io.StringIO()
        f.write(about)
        f.seek(0)
        exp_size = 123
        # hack here to leave file as is, only check size when reading
        real_read = f.read

        def read(_size, f=f, exp_size=exp_size):
            self.assertEqual(_size, exp_size)
            return real_read(_size)

        f.read = read
        curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size)
        curs.execute("select data from tcopy;")
        self.assertEqual(curs.fetchone()[0], abin)

    def _copy_from(self, curs, nrecs, srec, copykw):
        # Load nrecs rows of srec-sized data and verify the round trip.
        f = StringIO()
        for i, c in zip(range(nrecs), cycle(string.ascii_letters)):
            l = c * srec
            f.write("%s\t%s\n" % (i,l))

        f.seek(0)
        curs.copy_from(MinimalRead(f), "tcopy", **copykw)

        curs.execute("select count(*) from tcopy")
        self.assertEqual(nrecs, curs.fetchone()[0])

        curs.execute("select data from tcopy where id < %s order by id",
            (len(string.ascii_letters),))
        for i, (l,) in enumerate(curs):
            self.assertEqual(l, string.ascii_letters[i] * srec)

    def _copy_to(self, curs, srec):
        # Dump the table and verify the first len(ascii_letters) rows.
        f = StringIO()
        curs.copy_to(MinimalWrite(f), "tcopy")

        f.seek(0)
        ntests = 0
        for line in f:
            n, s = line.split()
            if int(n) < len(string.ascii_letters):
                self.assertEqual(s, string.ascii_letters[int(n)] * srec)
                ntests += 1

        self.assertEqual(ntests, len(string.ascii_letters))

    def test_copy_expert_file_refcount(self):
        # an object without the file protocol must be rejected, not leaked
        class Whatever(object):
            pass

        f = Whatever()
        curs = self.conn.cursor()
        self.assertRaises(TypeError,
            curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f)

    def test_copy_no_column_limit(self):
        cols = [ "c%050d" % i for i in range(200) ]

        curs = self.conn.cursor()
        curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join(
            [ "%s int" % c for c in cols]))
        curs.execute("INSERT INTO manycols DEFAULT VALUES")

        f = StringIO()
        curs.copy_to(f, "manycols", columns = cols)
        f.seek(0)
        self.assertEqual(f.read().split(), ['\\N'] * len(cols))

        f.seek(0)
        curs.copy_from(f, "manycols", columns = cols)
        curs.execute("select count(*) from manycols;")
        self.assertEqual(curs.fetchone()[0], 2)

    @skip_before_postgres(8, 2)     # they don't send the count
    def test_copy_rowcount(self):
        curs = self.conn.cursor()

        curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data'])
        self.assertEqual(curs.rowcount, 3)

        curs.copy_expert(
            "copy tcopy (data) from stdin",
            StringIO('ddd\neee\n'))
        self.assertEqual(curs.rowcount, 2)

        curs.copy_to(StringIO(), "tcopy")
        self.assertEqual(curs.rowcount, 5)

        curs.execute("insert into tcopy (data) values ('fff')")
        curs.copy_expert("copy tcopy to stdout", StringIO())
        self.assertEqual(curs.rowcount, 6)

    def test_copy_rowcount_error(self):
        curs = self.conn.cursor()

        curs.execute("insert into tcopy (data) values ('fff')")
        self.assertEqual(curs.rowcount, 1)

        # two columns of data against a serial id column: COPY must fail
        # and reset rowcount to -1
        self.assertRaises(psycopg2.DataError,
            curs.copy_from, StringIO('aaa\nbbb\nccc\n'), 'tcopy')
        self.assertEqual(curs.rowcount, -1)

    def test_copy_from_segfault(self):
        # issue #219
        script = ("""\
import psycopg2
conn = psycopg2.connect(%(dsn)r)
curs = conn.cursor()
curs.execute("create table copy_segf (id int)")
try:
    curs.execute("copy copy_segf from stdin")
except psycopg2.ProgrammingError:
    pass
conn.close()
""" % { 'dsn': dsn,})

        proc = Popen([sys.executable, '-c', script_to_py3(script)])
        proc.communicate()
        self.assertEqual(0, proc.returncode)

    def test_copy_to_segfault(self):
        # issue #219
        script = ("""\
import psycopg2
conn = psycopg2.connect(%(dsn)r)
curs = conn.cursor()
curs.execute("create table copy_segf (id int)")
try:
    curs.execute("copy copy_segf to stdout")
except psycopg2.ProgrammingError:
    pass
conn.close()
""" % { 'dsn': dsn,})

        proc = Popen([sys.executable, '-c', script_to_py3(script)], stdout=PIPE)
        proc.communicate()
        self.assertEqual(0, proc.returncode)

    def test_copy_from_propagate_error(self):
        class BrokenRead(_base):
            def read(self, size):
                return 1/0

            def readline(self):
                return 1/0

        curs = self.conn.cursor()
        # It seems we cannot do this, but now at least we propagate the error
        # self.assertRaises(ZeroDivisionError,
        #     curs.copy_from, BrokenRead(), "tcopy")
        try:
            curs.copy_from(BrokenRead(), "tcopy")
        except Exception as e:
            self.assertTrue('ZeroDivisionError' in str(e))

    def test_copy_to_propagate_error(self):
        class BrokenWrite(_base):
            def write(self, data):
                return 1/0

        curs = self.conn.cursor()
        curs.execute("insert into tcopy values (10, 'hi')")
        self.assertRaises(ZeroDivisionError,
            curs.copy_to, BrokenWrite(), "tcopy")
# Skip every COPY test when running under a green (coroutine) environment.
decorate_all_tests(CopyTests, skip_copy_if_green)
def test_suite():
    """Build the unittest suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
# -*- coding: utf-8 -*-
from urllib.parse import quote_plus
from plexapi import utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, Unsupported
class PlayQueue(PlexObject):
    """Control a PlayQueue.
    Attributes:
        TAG (str): 'PlayQueue'
        TYPE (str): 'playqueue'
        identifier (str): com.plexapp.plugins.library
        items (list): List of :class:`~plexapi.media.Media` or :class:`~plexapi.playlist.Playlist`
        mediaTagPrefix (str): Fx /system/bundle/media/flags/
        mediaTagVersion (int): Fx 1485957738
        playQueueID (int): ID of the PlayQueue.
        playQueueLastAddedItemID (int):
            Defines where the "Up Next" region starts. Empty unless PlayQueue is modified after creation.
        playQueueSelectedItemID (int): The queue item ID of the currently selected item.
        playQueueSelectedItemOffset (int):
            The offset of the selected item in the PlayQueue, from the beginning of the queue.
        playQueueSelectedMetadataItemID (int): ID of the currently selected item, matches ratingKey.
        playQueueShuffled (bool): True if shuffled.
        playQueueSourceURI (str): Original URI used to create the PlayQueue.
        playQueueTotalCount (int): How many items in the PlayQueue.
        playQueueVersion (int): Version of the PlayQueue. Increments every time a change is made to the PlayQueue.
        selectedItem (:class:`~plexapi.media.Media`): Media object for the currently selected item.
        _server (:class:`~plexapi.server.PlexServer`): PlexServer associated with the PlayQueue.
        size (int): Alias for playQueueTotalCount.
    """
    TAG = "PlayQueue"
    TYPE = "playqueue"
    def _loadData(self, data):
        """Populate attributes from a PlayQueue XML element returned by the server."""
        self._data = data
        self.identifier = data.attrib.get("identifier")
        self.mediaTagPrefix = data.attrib.get("mediaTagPrefix")
        self.mediaTagVersion = utils.cast(int, data.attrib.get("mediaTagVersion"))
        self.playQueueID = utils.cast(int, data.attrib.get("playQueueID"))
        self.playQueueLastAddedItemID = utils.cast(
            int, data.attrib.get("playQueueLastAddedItemID")
        )
        self.playQueueSelectedItemID = utils.cast(
            int, data.attrib.get("playQueueSelectedItemID")
        )
        self.playQueueSelectedItemOffset = utils.cast(
            int, data.attrib.get("playQueueSelectedItemOffset")
        )
        self.playQueueSelectedMetadataItemID = utils.cast(
            int, data.attrib.get("playQueueSelectedMetadataItemID")
        )
        self.playQueueShuffled = utils.cast(
            bool, data.attrib.get("playQueueShuffled", 0)
        )
        self.playQueueSourceURI = data.attrib.get("playQueueSourceURI")
        self.playQueueTotalCount = utils.cast(
            int, data.attrib.get("playQueueTotalCount")
        )
        self.playQueueVersion = utils.cast(int, data.attrib.get("playQueueVersion"))
        self.size = utils.cast(int, data.attrib.get("size", 0))
        self.items = self.findItems(data)
        # NOTE(review): assumes the server reports an in-range offset whenever
        # items are present; __getitem__ returns None only for an empty queue.
        self.selectedItem = self[self.playQueueSelectedItemOffset]
    def __getitem__(self, key):
        # Returns None for an empty queue instead of raising IndexError.
        if not self.items:
            return None
        return self.items[key]
    def __len__(self):
        # Server-reported total; may differ from len(self.items) when the
        # response was windowed (see get(window=...)) — TODO confirm.
        return self.playQueueTotalCount
    def __iter__(self):
        yield from self.items
    def __contains__(self, media):
        """Returns True if the PlayQueue contains the provided media item."""
        return any(x.playQueueItemID == media.playQueueItemID for x in self.items)
    def getQueueItem(self, item):
        """
        Accepts a media item and returns a similar object from this PlayQueue.
        Useful for looking up playQueueItemIDs using items obtained from the Library.
        Raises:
            :exc:`~plexapi.exceptions.BadRequest`: If the item matches zero
                or more than one entry in this PlayQueue.
        """
        matches = [x for x in self.items if x == item]
        if len(matches) == 1:
            return matches[0]
        elif len(matches) > 1:
            raise BadRequest(
                "{item} occurs multiple times in this PlayQueue, provide exact item".format(item=item)
            )
        else:
            raise BadRequest("{item} not valid for this PlayQueue".format(item=item))
    @classmethod
    def get(
        cls,
        server,
        playQueueID,
        own=False,
        center=None,
        window=50,
        includeBefore=True,
        includeAfter=True,
    ):
        """Retrieve an existing :class:`~plexapi.playqueue.PlayQueue` by identifier.
        Parameters:
            server (:class:`~plexapi.server.PlexServer`): Server you are connected to.
            playQueueID (int): Identifier of an existing PlayQueue.
            own (bool, optional): If server should transfer ownership.
            center (int, optional): The playQueueItemID of the center of the window. Does not change selectedItem.
            window (int, optional): Number of items to return from each side of the center item.
            includeBefore (bool, optional):
                Include items before the center, defaults True. Does not include center if False.
            includeAfter (bool, optional):
                Include items after the center, defaults True. Does not include center if False.
        """
        # Booleans are sent to the API as 0/1 query parameters.
        args = {
            "own": utils.cast(int, own),
            "window": window,
            "includeBefore": utils.cast(int, includeBefore),
            "includeAfter": utils.cast(int, includeAfter),
        }
        if center:
            args["center"] = center
        path = "/playQueues/{playQueueID}{args}".format(playQueueID=playQueueID, args=utils.joinArgs(args))
        data = server.query(path, method=server._session.get)
        c = cls(server, data, initpath=path)
        c._server = server
        return c
    @classmethod
    def create(
        cls,
        server,
        items,
        startItem=None,
        shuffle=0,
        repeat=0,
        includeChapters=1,
        includeRelated=1,
        continuous=0,
    ):
        """Create and return a new :class:`~plexapi.playqueue.PlayQueue`.
        Parameters:
            server (:class:`~plexapi.server.PlexServer`): Server you are connected to.
            items (:class:`~plexapi.media.Media` or :class:`~plexapi.playlist.Playlist`):
                A media item, list of media items, or Playlist.
            startItem (:class:`~plexapi.media.Media`, optional):
                Media item in the PlayQueue where playback should begin.
            shuffle (int, optional): Start the playqueue shuffled.
            repeat (int, optional): Start the playqueue with repeat enabled.
            includeChapters (int, optional): include Chapters.
            includeRelated (int, optional): include Related.
            continuous (int, optional): include additional items after the initial item.
                For a show this would be the next episodes, for a movie it does nothing.
        """
        args = {
            "includeChapters": includeChapters,
            "includeRelated": includeRelated,
            "repeat": repeat,
            "shuffle": shuffle,
            "continuous": continuous,
        }
        # Three source shapes: a list of items, a single Playlist, or a
        # single media item; each produces a different server URI.
        if isinstance(items, list):
            item_keys = ",".join([str(x.ratingKey) for x in items])
            uri_args = quote_plus("/library/metadata/{item_keys}".format(item_keys=item_keys))
            args["uri"] = "library:///directory/{uri_args}".format(uri_args=uri_args)
            args["type"] = items[0].listType
        elif items.type == "playlist":
            args["playlistID"] = items.ratingKey
            args["type"] = items.playlistType
        else:
            uuid = items.section().uuid
            args["type"] = items.listType
            args["uri"] = "library://{uuid}/item/{key}".format(uuid=uuid, key=items.key)
        if startItem:
            args["key"] = startItem.key
        path = "/playQueues{args}".format(args=utils.joinArgs(args))
        data = server.query(path, method=server._session.post)
        c = cls(server, data, initpath=path)
        # NOTE(review): playQueueType is set only here, not in _loadData, so
        # queues retrieved via get() lack it; addItem() reads this attribute.
        c.playQueueType = args["type"]
        c._server = server
        return c
    def addItem(self, item, playNext=False, refresh=True):
        """
        Append the provided item to the "Up Next" section of the PlayQueue.
        Items can only be added to the section immediately following the current playing item.
        Parameters:
            item (:class:`~plexapi.media.Media` or :class:`~plexapi.playlist.Playlist`): Single media item or Playlist.
            playNext (bool, optional): If True, add this item to the front of the "Up Next" section.
                If False, the item will be appended to the end of the "Up Next" section.
                Only has an effect if an item has already been added to the "Up Next" section.
                See https://support.plex.tv/articles/202188298-play-queues/ for more details.
            refresh (bool, optional): Refresh the PlayQueue from the server before updating.
        Raises:
            :exc:`~plexapi.exceptions.Unsupported`: If the item type does not
                match this PlayQueue's type.
        """
        if refresh:
            self.refresh()
        args = {}
        if item.type == "playlist":
            args["playlistID"] = item.ratingKey
            itemType = item.playlistType
        else:
            uuid = item.section().uuid
            itemType = item.listType
            args["uri"] = "library://{uuid}/item{key}".format(uuid=uuid, key=item.key)
        # NOTE(review): self.playQueueType only exists on queues built by
        # create(); confirm before calling addItem on a queue from get().
        if itemType != self.playQueueType:
            raise Unsupported("Item type does not match PlayQueue type")
        if playNext:
            args["next"] = 1
        path = "/playQueues/{playQueueID}{args}".format(playQueueID=self.playQueueID, args=utils.joinArgs(args))
        data = self._server.query(path, method=self._server._session.put)
        self._loadData(data)
    def moveItem(self, item, after=None, refresh=True):
        """
        Moves an item to the beginning of the PlayQueue. If `after` is provided,
        the item will be placed immediately after the specified item.
        Parameters:
            item (:class:`~plexapi.base.Playable`): An existing item in the PlayQueue to move.
            after (:class:`~plexapi.base.Playable`, optional): A different item in the PlayQueue.
                If provided, `item` will be placed in the PlayQueue after this item.
            refresh (bool, optional): Refresh the PlayQueue from the server before updating.
        """
        args = {}
        if refresh:
            self.refresh()
        # Items from the Library lack playQueueItemID; resolve them against
        # this queue first.
        if item not in self:
            item = self.getQueueItem(item)
        if after:
            if after not in self:
                after = self.getQueueItem(after)
            args["after"] = after.playQueueItemID
        path = "/playQueues/{playQueueID}/items/{playQueueItemID}/move{args}".format(
            playQueueID=self.playQueueID, playQueueItemID=item.playQueueItemID, args=utils.joinArgs(args)
        )
        data = self._server.query(path, method=self._server._session.put)
        self._loadData(data)
    def removeItem(self, item, refresh=True):
        """Remove an item from the PlayQueue.
        Parameters:
            item (:class:`~plexapi.base.Playable`): An existing item in the PlayQueue to remove.
            refresh (bool, optional): Refresh the PlayQueue from the server before updating.
        """
        if refresh:
            self.refresh()
        if item not in self:
            item = self.getQueueItem(item)
        path = "/playQueues/{playQueueID}/items/{playQueueItemID}".format(
            playQueueID=self.playQueueID, playQueueItemID=item.playQueueItemID
        )
        data = self._server.query(path, method=self._server._session.delete)
        self._loadData(data)
    def clear(self):
        """Remove all items from the PlayQueue."""
        path = "/playQueues/{playQueueID}/items".format(playQueueID=self.playQueueID)
        data = self._server.query(path, method=self._server._session.delete)
        self._loadData(data)
    def refresh(self):
        """Refresh the PlayQueue from the Plex server."""
        path = "/playQueues/{playQueueID}".format(playQueueID=self.playQueueID)
        data = self._server.query(path, method=self._server._session.get)
        self._loadData(data)
| |
# test_parser.py
# By Ben Anderson
# December 2018
import unittest
from unittest import TestCase
from typing import cast
from parser import Parser
from parser import Tokens, Tk
from parser import StorageClass, TypeSpecifier, TypeQualifier, \
FunctionSpecifier, StructSpecifier, UnionSpecifier, EnumSpecifier, \
DeclaratorPointerPart, DeclaratorFunctionPart, DeclaratorArrayPart, \
DeclarationList, Declaration, InitializerList, StructDesignator, \
ArrayDesignator
from parser import ExpressionList, TernaryExpression, BinaryExpression, \
CastExpression, SizeofExpression, InitializerExpression, UnaryExpression, \
PostfixExpression, ArrayAccessExpression, FunctionCallExpression, \
FieldAccessExpression, SymbolExpression, ConstantExpression, \
BinaryOperator, UnaryOperator
from parser import CompoundStatement, ExpressionStatement, IfStatementChain, \
IfStatement, SwitchStatement, CaseStatement, DefaultStatement, \
WhileStatement, DoWhileStatement, ForStatement, ContinueStatement, \
BreakStatement, ReturnStatement, GotoStatement, LabelStatement
# ******************************************************************************
# Declarator Tests
# ******************************************************************************
class TestDeclarationSpecifiers(TestCase):
    """Tests for Parser.parse_declaration_specifiers.
    Each test parses several specifier sequences from one token stream,
    separated by semicolons; t.next() advances past each semicolon between
    parses, so the parser/token state is shared across assertions.
    """
    def test_storage_class(self):
        # Storage classes may appear before or after the type specifier
        # ("typedef int" and "int register" both parse).
        t = Tokens("", "int; typedef int; int register")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.storage_class.type, StorageClass.TYPEDEF)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.storage_class.type, StorageClass.REGISTER)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
    def test_type_qualifiers(self):
        # Qualifiers may appear on either side of the type specifier, may be
        # repeated in any mix, and are collected as a list (compared as sets).
        t = Tokens("", "const int; int const; const int restrict; "
                       "const volatile int restrict; int typedef volatile")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        q = set([x.type for x in s.type_qualifiers])
        self.assertEqual(q, {TypeQualifier.CONST})
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        q = set([x.type for x in s.type_qualifiers])
        self.assertEqual(q, {TypeQualifier.CONST})
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        q = set([x.type for x in s.type_qualifiers])
        self.assertEqual(q, {TypeQualifier.CONST, TypeQualifier.RESTRICT})
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        q = set([x.type for x in s.type_qualifiers])
        self.assertEqual(q, {TypeQualifier.CONST, TypeQualifier.VOLATILE,
                             TypeQualifier.RESTRICT})
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.storage_class.type, StorageClass.TYPEDEF)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        q = set([x.type for x in s.type_qualifiers])
        self.assertEqual(q, {TypeQualifier.VOLATILE})
        self.assertEqual(s.function_specifiers, [])
    def test_function_specifiers(self):
        # "inline" is accepted before or after the type specifier.
        t = Tokens("", "inline void; void inline")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.VOID)
        self.assertEqual(s.type_qualifiers, [])
        f = set([x.type for x in s.function_specifiers])
        self.assertEqual(f, {FunctionSpecifier.INLINE})
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.VOID)
        self.assertEqual(s.type_qualifiers, [])
        f = set([x.type for x in s.function_specifiers])
        self.assertEqual(f, {FunctionSpecifier.INLINE})
    def test_type_specifiers(self):
        # Multi-token specifiers ("unsigned short int", "long long int", ...)
        # must collapse to a single canonical TypeSpecifier regardless of the
        # keyword ordering, even with storage classes/qualifiers interleaved.
        t = Tokens("", "int; unsigned; signed int; short; short int; "
                       "unsigned short int; extern int unsigned; "
                       "static long int; long long int; long int long; "
                       "long static long int const")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.UINT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.SHORT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.SHORT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.USHORT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.storage_class.type, StorageClass.EXTERN)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.UINT)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.storage_class.type, StorageClass.STATIC)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.LONG)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.LLONG)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertTrue(s.storage_class is None)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.LLONG)
        self.assertEqual(s.type_qualifiers, [])
        self.assertEqual(s.function_specifiers, [])
        t.next()  # Skip the semicolon
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.storage_class.type, StorageClass.STATIC)
        self.assertEqual(s.type_specifier.type, TypeSpecifier.LLONG)
        q = set([x.type for x in s.type_qualifiers])
        self.assertEqual(q, {TypeQualifier.CONST})
        self.assertEqual(s.function_specifiers, [])
class TestPointerDeclarators(TestCase):
    """Declarators with pointer parts: ``*a``, ``**a``, ``*const a``, etc.
    Pointer parts are stored innermost-first in ``declarator.parts``.
    """
    def test_no_pointer(self):
        """A bare identifier yields a declarator with no parts."""
        parser = Parser(Tokens("", "a"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 0)
    def test_single_pointer(self):
        """``*a`` produces one unqualified pointer part."""
        parser = Parser(Tokens("", "*a"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self.assertIsInstance(decl.parts[0], DeclaratorPointerPart)
        self.assertEqual(decl.parts[0].type_qualifiers, [])
    def test_double_pointer(self):
        """``**a`` produces two unqualified pointer parts."""
        parser = Parser(Tokens("", "**a"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 2)
        for part in decl.parts:
            self.assertIsInstance(part, DeclaratorPointerPart)
            self.assertEqual(part.type_qualifiers, [])
    def test_single_pointer_with_qualifier(self):
        """``*const a``: the qualifier attaches to the pointer part."""
        parser = Parser(Tokens("", "*const a"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self.assertIsInstance(decl.parts[0], DeclaratorPointerPart)
        quals = {x.type for x in decl.parts[0].type_qualifiers}
        self.assertEqual(quals, {TypeQualifier.CONST})
    def test_qualifier_order(self):
        """Qualifiers bind to the correct pointer in a multi-pointer chain."""
        tokens = Tokens("", "*const *a; **const a")
        parser = Parser(tokens)
        # "*const *a": the inner (first-parsed) pointer is unqualified, the
        # outer one carries const.
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 2)
        self.assertIsInstance(decl.parts[0], DeclaratorPointerPart)
        self.assertEqual(decl.parts[0].type_qualifiers, [])
        self.assertIsInstance(decl.parts[1], DeclaratorPointerPart)
        quals = {x.type for x in decl.parts[1].type_qualifiers}
        self.assertEqual(quals, {TypeQualifier.CONST})
        tokens.next()  # Skip the semicolon
        # "**const a": the qualifier now belongs to the inner pointer.
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertIsInstance(decl.parts[0], DeclaratorPointerPart)
        quals = {x.type for x in decl.parts[0].type_qualifiers}
        self.assertEqual(quals, {TypeQualifier.CONST})
        self.assertIsInstance(decl.parts[1], DeclaratorPointerPart)
        self.assertEqual(decl.parts[1].type_qualifiers, [])
        self.assertEqual(len(decl.parts), 2)
    def test_double_pointer_with_qualifiers(self):
        """``*const *restrict a``: each pointer keeps its own qualifier."""
        parser = Parser(Tokens("", "*const *restrict a"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 2)
        self.assertIsInstance(decl.parts[0], DeclaratorPointerPart)
        quals = {x.type for x in decl.parts[0].type_qualifiers}
        self.assertEqual(quals, {TypeQualifier.RESTRICT})
        self.assertIsInstance(decl.parts[1], DeclaratorPointerPart)
        quals = {x.type for x in decl.parts[1].type_qualifiers}
        self.assertEqual(quals, {TypeQualifier.CONST})
    def test_multiple_qualifiers(self):
        """``*const restrict a``: one pointer part with both qualifiers."""
        parser = Parser(Tokens("", "*const restrict a"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self.assertIsInstance(decl.parts[0], DeclaratorPointerPart)
        quals = {x.type for x in decl.parts[0].type_qualifiers}
        self.assertEqual(quals, {TypeQualifier.CONST, TypeQualifier.RESTRICT})
class TestFunctionDeclarators(TestCase):
    """Declarators with function parts: ``a()``, ``a(int b)``, etc.
    Bug fix applied throughout: several checks used ``assertTrue(x, y)``,
    where the second argument is unittest's failure *message*, so those
    assertions always passed. They are now real ``assertEqual`` comparisons.
    """
    def test_no_args(self):
        """Both ``a()`` and ``a(void)`` yield a function part with no args."""
        t = Tokens("", "a(); a(void)")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 0)
        t.next()  # Skip the semicolon
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 0)
    def test_one_arg(self):
        """Single unnamed (``int``) and named (``int b``) parameters."""
        t = Tokens("", "a(int); a(int b)")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 1)
        arg = d.parts[0].args[0]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertIsNone(arg.declarator)
        t.next()  # Skip the semicolon
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 1)
        arg = d.parts[0].args[0]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.INT)
        # Was assertTrue(x, y) — a message, not a comparison; fixed.
        self.assertEqual(arg.declarator.name.type, Tk.IDENT)
        self.assertEqual(arg.declarator.name.contents, "b")
        self.assertEqual(len(arg.declarator.parts), 0)
    def test_redundant_parentheses(self):
        """Parentheses around declarators and parameters are transparent."""
        t = Tokens("", "((((a(int (b))))))")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 1)
        arg = d.parts[0].args[0]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.INT)
        # Was assertTrue(x, y) — a message, not a comparison; fixed.
        self.assertEqual(arg.declarator.name.type, Tk.IDENT)
        self.assertEqual(arg.declarator.name.contents, "b")
        self.assertEqual(len(arg.declarator.parts), 0)
    def test_two_args(self):
        """Two parameters, unnamed then named, preserve declaration order."""
        t = Tokens("", "a(int, char); a(int b, char c)")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 2)
        arg = d.parts[0].args[0]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertIsNone(arg.declarator)
        arg = d.parts[0].args[1]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.CHAR)
        self.assertIsNone(arg.declarator)
        t.next()  # Skip the semicolon
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 2)
        arg = d.parts[0].args[0]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.INT)
        # Was assertTrue(x, y) — a message, not a comparison; fixed.
        self.assertEqual(arg.declarator.name.type, Tk.IDENT)
        self.assertEqual(arg.declarator.name.contents, "b")
        self.assertEqual(len(arg.declarator.parts), 0)
        arg = d.parts[0].args[1]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.CHAR)
        # Was assertTrue(x, y) — a message, not a comparison; fixed.
        self.assertEqual(arg.declarator.name.type, Tk.IDENT)
        self.assertEqual(arg.declarator.name.contents, "c")
        self.assertEqual(len(arg.declarator.parts), 0)
class TestArrayDeclarators(TestCase):
    """Declarators with array parts: ``a[]``, ``a[static]``, ``a[const]``."""
    def _assert_plain_array_part(self, part):
        # Shared checks for an unqualified, non-static, non-VLA array part.
        self.assertIsInstance(part, DeclaratorArrayPart)
        self.assertEqual(part.type_qualifiers, [])
        self.assertIsNone(part.vla)
        self.assertIsNone(part.static)
    def test_single_array(self):
        """``a[]`` yields a single plain array part."""
        parser = Parser(Tokens("", "a[]"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self._assert_plain_array_part(decl.parts[0])
    def test_double_array(self):
        """``a[][]`` yields two plain array parts."""
        parser = Parser(Tokens("", "a[][]"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 2)
        self._assert_plain_array_part(decl.parts[0])
        self._assert_plain_array_part(decl.parts[1])
    def test_redundant_parentheses(self):
        """Nested parentheses do not change the parsed array structure."""
        parser = Parser(Tokens("", "((((((a[]))))[]))"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 2)
        self._assert_plain_array_part(decl.parts[0])
        self._assert_plain_array_part(decl.parts[1])
    def test_static_qualifier(self):
        """``a[static]`` records the static keyword on the array part."""
        parser = Parser(Tokens("", "a[static]"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self.assertIsInstance(decl.parts[0], DeclaratorArrayPart)
        self.assertEqual(decl.parts[0].type_qualifiers, [])
        self.assertIsNone(decl.parts[0].vla)
        self.assertIsNotNone(decl.parts[0].static)
    def test_pointer_qualifiers(self):
        """``a[const restrict]`` collects both qualifiers on the array part."""
        parser = Parser(Tokens("", "a[const restrict]"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self.assertIsInstance(decl.parts[0], DeclaratorArrayPart)
        self.assertEqual(len(decl.parts[0].type_qualifiers), 2)
        quals = {x.type for x in decl.parts[0].type_qualifiers}
        self.assertIn(TypeQualifier.CONST, quals)
        self.assertIn(TypeQualifier.RESTRICT, quals)
        self.assertIsNone(decl.parts[0].vla)
        self.assertIsNone(decl.parts[0].static)
    def test_pointer_with_static(self):
        """``a[const restrict static]`` records qualifiers and static."""
        parser = Parser(Tokens("", "a[const restrict static]"))
        decl = parser.parse_declarator()
        self.assertEqual(decl.name.type, Tk.IDENT)
        self.assertEqual(decl.name.contents, "a")
        self.assertEqual(len(decl.parts), 1)
        self.assertIsInstance(decl.parts[0], DeclaratorArrayPart)
        self.assertEqual(len(decl.parts[0].type_qualifiers), 2)
        quals = {x.type for x in decl.parts[0].type_qualifiers}
        self.assertIn(TypeQualifier.CONST, quals)
        self.assertIn(TypeQualifier.RESTRICT, quals)
        self.assertIsNone(decl.parts[0].vla)
        self.assertIsNotNone(decl.parts[0].static)
class TestMixedDeclarators(TestCase):
    """Declarators mixing pointer, function, and array parts.
    ``declarator.parts`` is ordered innermost binding first.
    Bug fix: test_pointer_to_function_with_args used ``assertTrue(x, y)``
    (the second argument is unittest's failure *message*, so the check
    always passed); it is now a real ``assertEqual``.
    """
    def test_pointer_to_function(self):
        """``(*a)()``: pointer binds tighter than the call part."""
        t = Tokens("", "(*a)()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 2)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        self.assertEqual(d.parts[0].type_qualifiers, [])
        self.assertIsInstance(d.parts[1], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[1].args), 0)
    def test_pointer_to_function_with_qualifiers(self):
        """``(*const a)()``: the qualifier stays on the pointer part."""
        t = Tokens("", "(*const a)()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 2)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        q = {x.type for x in d.parts[0].type_qualifiers}
        self.assertEqual(q, {TypeQualifier.CONST})
        self.assertIsInstance(d.parts[1], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[1].args), 0)
    def test_pointer_to_function_with_args(self):
        """``(*const a)(int b)``: the parameter is parsed with its name."""
        t = Tokens("", "(*const a)(int b)")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 2)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        q = {x.type for x in d.parts[0].type_qualifiers}
        self.assertEqual(q, {TypeQualifier.CONST})
        self.assertIsInstance(d.parts[1], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[1].args), 1)
        arg = d.parts[1].args[0]
        self.assertEqual(arg.specifiers.type_specifier.type, TypeSpecifier.INT)
        # Was assertTrue(x, y) — a message, not a comparison; fixed.
        self.assertEqual(arg.declarator.name.type, Tk.IDENT)
        self.assertEqual(arg.declarator.name.contents, "b")
        self.assertEqual(len(arg.declarator.parts), 0)
    def test_function_returning_pointer(self):
        """``*a()``: the call part binds tighter than the pointer."""
        t = Tokens("", "*a()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 2)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 0)
        self.assertIsInstance(d.parts[1], DeclaratorPointerPart)
        self.assertEqual(d.parts[1].type_qualifiers, [])
    def test_redundant_parentheses(self):
        """Extra parentheses do not alter part ordering."""
        t = Tokens("", "(((*((((a())))))))")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 2)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 0)
        self.assertIsInstance(d.parts[1], DeclaratorPointerPart)
        self.assertEqual(d.parts[1].type_qualifiers, [])
    def test_pointer_to_function_returning_pointer(self):
        """``*(*a)()``: pointer, then call, then result pointer."""
        t = Tokens("", "*(*a)()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 3)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        self.assertEqual(d.parts[0].type_qualifiers, [])
        self.assertIsInstance(d.parts[1], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[1].args), 0)
        self.assertIsInstance(d.parts[2], DeclaratorPointerPart)
        self.assertEqual(d.parts[2].type_qualifiers, [])
    def test_array_of_functions(self):
        """``(a[])()``: array part first, then the call part."""
        t = Tokens("", "(a[])()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 2)
        self.assertIsInstance(d.parts[0], DeclaratorArrayPart)
        self.assertEqual(d.parts[0].type_qualifiers, [])
        self.assertIsNone(d.parts[0].vla)
        self.assertIsNone(d.parts[0].static)
        self.assertIsInstance(d.parts[1], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[1].args), 0)
    def test_array_of_function_pointers(self):
        """``(*a[])()``: array of pointers to functions."""
        t = Tokens("", "(*a[])()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 3)
        self.assertIsInstance(d.parts[0], DeclaratorArrayPart)
        self.assertEqual(d.parts[0].type_qualifiers, [])
        self.assertIsNone(d.parts[0].vla)
        self.assertIsNone(d.parts[0].static)
        self.assertIsInstance(d.parts[1], DeclaratorPointerPart)
        self.assertEqual(d.parts[1].type_qualifiers, [])
        self.assertIsInstance(d.parts[2], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[2].args), 0)
    def test_pointer_to_array_of_function_pointers(self):
        """``(*(*a)[])()``: pointer, array, pointer, call — in that order."""
        t = Tokens("", "(*(*a)[])()")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 4)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        self.assertEqual(d.parts[0].type_qualifiers, [])
        self.assertIsInstance(d.parts[1], DeclaratorArrayPart)
        self.assertEqual(d.parts[1].type_qualifiers, [])
        self.assertIsNone(d.parts[1].vla)
        self.assertIsNone(d.parts[1].static)
        self.assertIsInstance(d.parts[2], DeclaratorPointerPart)
        self.assertEqual(d.parts[2].type_qualifiers, [])
        self.assertIsInstance(d.parts[3], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[3].args), 0)
    def test_function_returning_pointer_to_array(self):
        """``(*a())[]``: call, then pointer, then array."""
        t = Tokens("", "(*a())[]")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 3)
        self.assertIsInstance(d.parts[0], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[0].args), 0)
        self.assertIsInstance(d.parts[1], DeclaratorPointerPart)
        self.assertEqual(d.parts[1].type_qualifiers, [])
        self.assertIsInstance(d.parts[2], DeclaratorArrayPart)
        self.assertEqual(d.parts[2].type_qualifiers, [])
        self.assertIsNone(d.parts[2].vla)
        self.assertIsNone(d.parts[2].static)
    def test_pointer_to_function_returning_pointer_to_function(self):
        """``(*(*a)(int))(char)``: two call parts, each with one argument."""
        t = Tokens("", "(*(*a)(int))(char)")
        p = Parser(t)
        d = p.parse_declarator()
        self.assertEqual(d.name.type, Tk.IDENT)
        self.assertEqual(d.name.contents, "a")
        self.assertEqual(len(d.parts), 4)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        self.assertEqual(d.parts[0].type_qualifiers, [])
        self.assertIsInstance(d.parts[1], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[1].args), 1)
        self.assertIsInstance(d.parts[2], DeclaratorPointerPart)
        self.assertEqual(d.parts[2].type_qualifiers, [])
        self.assertIsInstance(d.parts[3], DeclaratorFunctionPart)
        self.assertEqual(len(d.parts[3].args), 1)
class TestTypedef(TestCase):
    """Tests that typedef'd names are registered and recognised in scope."""

    def test_basic(self):
        """A typedef name becomes usable as a type in a later declaration."""
        tokens = Tokens("", "typedef int a; a b;")
        parser = Parser(tokens)
        parser.push_scope()
        # First declaration list: the typedef itself.
        decls = parser.parse_declaration_list()
        self.assertEqual(len(decls.declarations), 1)
        typedef_decl = decls.declarations[0]
        self.assertEqual(typedef_decl.specifiers.storage_class.type,
                         StorageClass.TYPEDEF)
        self.assertEqual(typedef_decl.specifiers.type_specifier.type,
                         TypeSpecifier.INT)
        self.assertEqual(typedef_decl.declarator.name.contents, "a")
        # Second declaration list: uses the typedef name as a type.
        decls = parser.parse_declaration_list()
        self.assertEqual(len(decls.declarations), 1)
        use_decl = decls.declarations[0]
        spec = use_decl.specifiers.type_specifier
        self.assertEqual(spec.type, TypeSpecifier.TYPEDEF)
        self.assertEqual(spec.typedef_name, "a")
        self.assertEqual(use_decl.declarator.name.contents, "b")
        parser.pop_scope()
class TestStructSpecifier(TestCase):
    """Tests for parsing struct and union type specifiers."""

    def test_struct(self):
        """A named struct with one field."""
        t = Tokens("", "struct thing { int a; }")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.type_specifier.type, TypeSpecifier.STRUCT)
        s = cast(StructSpecifier, s.type_specifier.struct)
        self.assertEqual(s.name.contents, "thing")
        self.assertEqual(len(s.fields), 1)
        d = cast(Declaration, s.fields[0])
        self.assertEqual(d.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(d.declarator.name.contents, "a")

    def test_anonymous_struct(self):
        """A struct with no tag parses; its name is None."""
        t = Tokens("", "struct { int a; }")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.type_specifier.type, TypeSpecifier.STRUCT)
        s = cast(StructSpecifier, s.type_specifier.struct)
        self.assertIsNone(s.name)
        self.assertEqual(len(s.fields), 1)
        d = cast(Declaration, s.fields[0])
        self.assertEqual(d.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(d.declarator.name.contents, "a")

    def test_incomplete_struct(self):
        """A forward-declared struct has no field list (fields is None)."""
        t = Tokens("", "struct thing")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.type_specifier.type, TypeSpecifier.STRUCT)
        s = cast(StructSpecifier, s.type_specifier.struct)
        self.assertEqual(s.name.contents, "thing")
        self.assertIsNone(s.fields)

    def test_union(self):
        """A named union with one field."""
        t = Tokens("", "union thing { int a; }")
        p = Parser(t)
        s = p.parse_declaration_specifiers()
        self.assertEqual(s.type_specifier.type, TypeSpecifier.UNION)
        s = cast(UnionSpecifier, s.type_specifier.union)
        self.assertEqual(s.name.contents, "thing")
        self.assertEqual(len(s.fields), 1)
        d = cast(Declaration, s.fields[0])
        self.assertEqual(d.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(d.declarator.name.contents, "a")
class TestEnumSpecifier(TestCase):
    """Tests for parsing enum type specifiers."""

    def test_enum(self):
        """A named enum with two constants."""
        t = Tokens("", "enum thing { THING1, THING2 }")
        p = Parser(t)
        d = p.parse_declaration_specifiers()
        self.assertEqual(d.type_specifier.type, TypeSpecifier.ENUM)
        e = cast(EnumSpecifier, d.type_specifier.enum)
        self.assertEqual(e.name.contents, "thing")
        self.assertEqual(len(e.consts), 2)
        self.assertEqual(e.consts[0].name.contents, "THING1")
        self.assertEqual(e.consts[1].name.contents, "THING2")

    def test_anonymous_enum(self):
        """An enum with no tag parses; its name is None."""
        t = Tokens("", "enum { THING1, THING2 }")
        p = Parser(t)
        d = p.parse_declaration_specifiers()
        self.assertEqual(d.type_specifier.type, TypeSpecifier.ENUM)
        e = cast(EnumSpecifier, d.type_specifier.enum)
        self.assertIsNone(e.name)
        self.assertEqual(len(e.consts), 2)
        self.assertEqual(e.consts[0].name.contents, "THING1")
        self.assertEqual(e.consts[1].name.contents, "THING2")

    def test_incomplete_enum(self):
        """A forward-declared enum has no constant list (consts is None)."""
        t = Tokens("", "enum thing")
        p = Parser(t)
        d = p.parse_declaration_specifiers()
        self.assertEqual(d.type_specifier.type, TypeSpecifier.ENUM)
        e = cast(EnumSpecifier, d.type_specifier.enum)
        self.assertEqual(e.name.contents, "thing")
        self.assertIsNone(e.consts)
class TestInitializerList(TestCase):
    """Tests for brace-enclosed initializer lists and designators."""

    def test_single_declaration(self):
        """A flat initializer list of three constants, no designators."""
        t = Tokens("", "struct a b = {1, 2, 3};")
        p = Parser(t)
        d = p.parse_declaration_list()
        self.assertEqual(len(d.declarations), 1)
        d = d.declarations[0]
        self.assertEqual(d.specifiers.type_specifier.type, TypeSpecifier.STRUCT)
        s = cast(StructSpecifier, d.specifiers.type_specifier.struct)
        self.assertEqual(s.name.contents, "a")
        self.assertIsNone(s.fields)
        self.assertEqual(d.declarator.name.contents, "b")
        self.assertIsInstance(d.initializer, InitializerList)
        self.assertEqual(len(d.initializer.fields), 3)
        f = d.initializer.fields[0]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, ConstantExpression)
        f = d.initializer.fields[1]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, ConstantExpression)
        f = d.initializer.fields[2]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, ConstantExpression)

    def test_nested_initializers(self):
        """Initializer lists may nest: {{1}, {2}}."""
        t = Tokens("", "struct a b = {{1}, {2}};")
        p = Parser(t)
        d = p.parse_declaration_list()
        self.assertEqual(len(d.declarations), 1)
        d = d.declarations[0]
        self.assertIsInstance(d.initializer, InitializerList)
        self.assertEqual(len(d.initializer.fields), 2)
        f = d.initializer.fields[0]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, InitializerList)
        self.assertEqual(len(f.value.fields), 1)
        f = d.initializer.fields[1]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, InitializerList)
        self.assertEqual(len(f.value.fields), 1)

    def test_struct_designator(self):
        """Struct field designators, including a chained `.y .z = 4`."""
        t = Tokens("", "struct a b = {.x = 3, .y .z = 4};")
        p = Parser(t)
        d = p.parse_declaration_list()
        self.assertEqual(len(d.declarations), 1)
        d = d.declarations[0]
        self.assertIsInstance(d.initializer, InitializerList)
        self.assertEqual(len(d.initializer.fields), 2)
        f = d.initializer.fields[0]
        self.assertEqual(len(f.designators), 1)
        self.assertIsInstance(f.designators[0], StructDesignator)
        self.assertEqual(f.designators[0].field.contents, "x")
        self.assertIsInstance(f.value, ConstantExpression)
        f = d.initializer.fields[1]
        self.assertEqual(len(f.designators), 2)
        self.assertIsInstance(f.designators[0], StructDesignator)
        self.assertEqual(f.designators[0].field.contents, "y")
        self.assertIsInstance(f.designators[1], StructDesignator)
        self.assertEqual(f.designators[1].field.contents, "z")
        self.assertIsInstance(f.value, ConstantExpression)

    def test_array_designator(self):
        """Array index designators, including a chained `[11] [12] = 8`."""
        t = Tokens("", "int b[13] = {[10] = 1, [11] [12] = 8};")
        p = Parser(t)
        d = p.parse_declaration_list()
        self.assertEqual(len(d.declarations), 1)
        d = d.declarations[0]
        self.assertIsInstance(d.initializer, InitializerList)
        self.assertEqual(len(d.initializer.fields), 2)
        f = d.initializer.fields[0]
        self.assertEqual(len(f.designators), 1)
        self.assertIsInstance(f.designators[0], ArrayDesignator)
        self.assertIsInstance(f.designators[0].index, ConstantExpression)
        self.assertIsInstance(f.value, ConstantExpression)
        f = d.initializer.fields[1]
        self.assertEqual(len(f.designators), 2)
        self.assertIsInstance(f.designators[0], ArrayDesignator)
        self.assertIsInstance(f.designators[0].index, ConstantExpression)
        self.assertIsInstance(f.designators[1], ArrayDesignator)
        self.assertIsInstance(f.designators[1].index, ConstantExpression)
        self.assertIsInstance(f.value, ConstantExpression)
# ******************************************************************************
# Expression Tests
# ******************************************************************************
class TestPrimaryExpressions(TestCase):
    """Tests for primary expressions: symbols and constants."""

    def test_symbols(self):
        """A bare identifier parses to a SymbolExpression."""
        t = Tokens("", "hello")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, SymbolExpression)
        e = cast(SymbolExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "hello")

    def test_constants(self):
        """An integer literal parses to a ConstantExpression."""
        t = Tokens("", "123")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, ConstantExpression)
        e = cast(ConstantExpression, e)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 123)
class TestUnaryExpressions(TestCase):
    """Tests for prefix unary operators.

    Bug fix: several assertions used ``assertTrue(actual, expected)``, which
    treats the second argument as the failure *message* and therefore always
    passed for any truthy ``actual``. Those are now real ``assertEqual``
    checks.
    """

    def test_single_operators(self):
        """A single unary operator applied to a symbol."""
        t = Tokens("", "-a; !b")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.NEG)
        e = e.operand
        self.assertIsInstance(e, SymbolExpression)
        e = cast(SymbolExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "a")
        t.next()  # Skip the semicolon
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.LOGICAL_NOT)
        e = e.operand
        self.assertIsInstance(e, SymbolExpression)
        e = cast(SymbolExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "b")

    def test_multiple_operators(self):
        """Stacked unary operators nest right-to-left."""
        t = Tokens("", "- -3; - - - 3; *&*d")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.NEG)
        e = e.operand
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.NEG)
        e = e.operand
        self.assertIsInstance(e, ConstantExpression)
        e = cast(ConstantExpression, e)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)
        t.next()  # Skip the semicolon
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.NEG)
        e = e.operand
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.NEG)
        e = e.operand
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.NEG)
        e = e.operand
        self.assertIsInstance(e, ConstantExpression)
        e = cast(ConstantExpression, e)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)
        t.next()  # Skip the semicolon
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.DEREF)
        e = e.operand
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.ADDR)
        e = e.operand
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.DEREF)
        e = e.operand
        self.assertIsInstance(e, SymbolExpression)
        e = cast(SymbolExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "d")

    def test_prefix_operators(self):
        """`--` and `++` before an operand are prefix dec/inc."""
        t = Tokens("", "--3; ++a")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.DEC)
        e = e.operand
        self.assertIsInstance(e, ConstantExpression)
        e = cast(ConstantExpression, e)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)
        t.next()  # Skip the semicolon
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.INC)
        e = e.operand
        self.assertIsInstance(e, SymbolExpression)
        e = cast(SymbolExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "a")
class TestSizeofExpressions(TestCase):
    """Tests for sizeof applied to expressions and type names."""

    def test_sizeof_expression(self):
        """`sizeof a` parses as a unary SIZEOF on a symbol."""
        t = Tokens("", "sizeof a")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, UnaryExpression)
        e = cast(UnaryExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.SIZEOF)
        e = e.operand
        self.assertIsInstance(e, SymbolExpression)
        e = cast(SymbolExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "a")

    def test_sizeof_declaration_specifiers(self):
        """`sizeof(int)` parses as a SizeofExpression with no declarator."""
        t = Tokens("", "sizeof(int)")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, SizeofExpression)
        e = cast(SizeofExpression, e)
        self.assertEqual(e.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertIsNone(e.declarator)

    def test_sizeof_with_declarator(self):
        """`sizeof(int *)` carries an abstract declarator with a pointer part."""
        t = Tokens("", "sizeof(int *)")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, SizeofExpression)
        e = cast(SizeofExpression, e)
        self.assertEqual(e.specifiers.type_specifier.type, TypeSpecifier.INT)
        d = e.declarator
        self.assertIsNone(d.name)
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
class TestBinaryExpressions(TestCase):
    """Tests for binary operator precedence, associativity, and ternaries."""

    def test_basic_operation(self):
        """`3 + 4` parses to an ADD node with two constant operands."""
        t = Tokens("", "3 + 4")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, BinaryExpression)
        e = cast(BinaryExpression, e)
        self.assertEqual(e.operator.operator, BinaryOperator.ADD)
        self.assertIsInstance(e.left, ConstantExpression)
        self.assertIsInstance(e.right, ConstantExpression)
        l = cast(ConstantExpression, e.left)
        self.assertEqual(l.value.type, Tk.CONST_INT)
        self.assertEqual(l.value.number, 3)
        r = cast(ConstantExpression, e.right)
        self.assertEqual(r.value.type, Tk.CONST_INT)
        self.assertEqual(r.value.number, 4)

    def test_same_precedence(self):
        """Equal-precedence operators associate left: (3 + 4) + 5."""
        t = Tokens("", "3 + 4 + 5")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, BinaryExpression)
        e = cast(BinaryExpression, e)
        self.assertEqual(e.operator.operator, BinaryOperator.ADD)
        self.assertIsInstance(e.left, BinaryExpression)
        self.assertIsInstance(e.right, ConstantExpression)
        l = cast(BinaryExpression, e.left)
        self.assertEqual(l.operator.operator, BinaryOperator.ADD)
        self.assertIsInstance(l.left, ConstantExpression)
        self.assertIsInstance(l.right, ConstantExpression)
        ll = cast(ConstantExpression, l.left)
        self.assertEqual(ll.value.type, Tk.CONST_INT)
        self.assertEqual(ll.value.number, 3)
        lr = cast(ConstantExpression, l.right)
        self.assertEqual(lr.value.type, Tk.CONST_INT)
        self.assertEqual(lr.value.number, 4)
        r = cast(ConstantExpression, e.right)
        self.assertEqual(r.value.type, Tk.CONST_INT)
        self.assertEqual(r.value.number, 5)

    def test_different_precedences(self):
        """Higher precedence binds tighter: 3 + (4 * 5)."""
        t = Tokens("", "3 + 4 * 5")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, BinaryExpression)
        e = cast(BinaryExpression, e)
        self.assertEqual(e.operator.operator, BinaryOperator.ADD)
        self.assertIsInstance(e.left, ConstantExpression)
        self.assertIsInstance(e.right, BinaryExpression)
        l = cast(ConstantExpression, e.left)
        self.assertEqual(l.value.type, Tk.CONST_INT)
        self.assertEqual(l.value.number, 3)
        r = cast(BinaryExpression, e.right)
        self.assertEqual(r.operator.operator, BinaryOperator.MUL)
        self.assertIsInstance(r.left, ConstantExpression)
        self.assertIsInstance(r.right, ConstantExpression)
        rl = cast(ConstantExpression, r.left)
        self.assertEqual(rl.value.type, Tk.CONST_INT)
        self.assertEqual(rl.value.number, 4)
        rr = cast(ConstantExpression, r.right)
        self.assertEqual(rr.value.type, Tk.CONST_INT)
        self.assertEqual(rr.value.number, 5)

    def test_subexpressions(self):
        """Parentheses override precedence: (3 + 4) * 5."""
        t = Tokens("", "(3 + 4) * 5")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, BinaryExpression)
        e = cast(BinaryExpression, e)
        self.assertEqual(e.operator.operator, BinaryOperator.MUL)
        self.assertIsInstance(e.left, BinaryExpression)
        self.assertIsInstance(e.right, ConstantExpression)
        l = cast(BinaryExpression, e.left)
        self.assertEqual(l.operator.operator, BinaryOperator.ADD)
        self.assertIsInstance(l.left, ConstantExpression)
        self.assertIsInstance(l.right, ConstantExpression)
        ll = cast(ConstantExpression, l.left)
        self.assertEqual(ll.value.type, Tk.CONST_INT)
        self.assertEqual(ll.value.number, 3)
        lr = cast(ConstantExpression, l.right)
        self.assertEqual(lr.value.type, Tk.CONST_INT)
        self.assertEqual(lr.value.number, 4)
        r = cast(ConstantExpression, e.right)
        self.assertEqual(r.value.type, Tk.CONST_INT)
        self.assertEqual(r.value.number, 5)

    def test_ternary(self):
        """`3 ? 4 : 5` parses to a TernaryExpression with constant arms."""
        t = Tokens("", "3 ? 4 : 5")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, TernaryExpression)
        e = cast(TernaryExpression, e)
        self.assertIsInstance(e.condition, ConstantExpression)
        self.assertIsInstance(e.true, ConstantExpression)
        self.assertIsInstance(e.false, ConstantExpression)
        c = cast(ConstantExpression, e.condition)
        self.assertEqual(c.value.type, Tk.CONST_INT)
        self.assertEqual(c.value.number, 3)
        # `t` is reused here (shadowing the token stream, which is done with).
        t = cast(ConstantExpression, e.true)
        self.assertEqual(t.value.type, Tk.CONST_INT)
        self.assertEqual(t.value.number, 4)
        f = cast(ConstantExpression, e.false)
        self.assertEqual(f.value.type, Tk.CONST_INT)
        self.assertEqual(f.value.number, 5)
class TestInitializerExpressions(TestCase):
    """Tests for compound-literal (initializer) expressions."""

    def test_initializer(self):
        """`(struct a) {1, 2, 3}` parses to an InitializerExpression."""
        t = Tokens("", "(struct a) {1, 2, 3}")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, InitializerExpression)
        e = cast(InitializerExpression, e)
        self.assertEqual(e.type.specifiers.type_specifier.type, TypeSpecifier.STRUCT)
        s = e.type.specifiers.type_specifier.struct
        self.assertEqual(s.name.contents, "a")
        self.assertEqual(len(e.initializer_list.fields), 3)
        f = e.initializer_list.fields[0]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, ConstantExpression)
        f = e.initializer_list.fields[1]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, ConstantExpression)
        f = e.initializer_list.fields[2]
        self.assertEqual(len(f.designators), 0)
        self.assertIsInstance(f.value, ConstantExpression)
class TestCastExpressions(TestCase):
    """Tests for C cast expressions."""

    def test_just_declaration_specifiers(self):
        """`(int) 3`: a cast with specifiers only, no declarator."""
        t = Tokens("", "(int) 3")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, CastExpression)
        e = cast(CastExpression, e)
        self.assertEqual(e.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertIsNone(e.declarator)
        self.assertIsInstance(e.operand, ConstantExpression)
        e = cast(ConstantExpression, e.operand)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)

    def test_with_declarator(self):
        """`(int *) 3`: the cast carries an abstract pointer declarator."""
        t = Tokens("", "(int *) 3")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, CastExpression)
        e = cast(CastExpression, e)
        self.assertEqual(e.specifiers.type_specifier.type, TypeSpecifier.INT)
        d = e.declarator
        self.assertIsNone(d.name)
        self.assertEqual(len(d.parts), 1)
        self.assertIsInstance(d.parts[0], DeclaratorPointerPart)
        self.assertIsInstance(e.operand, ConstantExpression)
        e = cast(ConstantExpression, e.operand)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)
class TestPostfixExpressions(TestCase):
    """Tests for postfix operators, indexing, calls, and field access."""

    def test_postfix_operators(self):
        """`3--` parses to a PostfixExpression with DEC."""
        t = Tokens("", "3--")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, PostfixExpression)
        e = cast(PostfixExpression, e)
        self.assertEqual(e.operator.operator, UnaryOperator.DEC)
        e = e.operand
        self.assertIsInstance(e, ConstantExpression)
        e = cast(ConstantExpression, e)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)

    def test_array_access(self):
        """`a[3]` parses to an ArrayAccessExpression."""
        t = Tokens("", "a[3]")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, ArrayAccessExpression)
        e = cast(ArrayAccessExpression, e)
        self.assertIsInstance(e.array, SymbolExpression)
        self.assertIsInstance(e.index, ConstantExpression)
        a = cast(SymbolExpression, e.array)
        self.assertEqual(a.name.type, Tk.IDENT)
        self.assertEqual(a.name.contents, "a")
        i = cast(ConstantExpression, e.index)
        self.assertEqual(i.value.type, Tk.CONST_INT)
        self.assertEqual(i.value.number, 3)

    def test_function_call_no_args(self):
        """`a()` parses to a call with an empty argument list."""
        t = Tokens("", "a()")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, FunctionCallExpression)
        e = cast(FunctionCallExpression, e)
        self.assertIsInstance(e.function, SymbolExpression)
        f = cast(SymbolExpression, e.function)
        self.assertEqual(f.name.type, Tk.IDENT)
        self.assertEqual(f.name.contents, "a")
        self.assertEqual(len(e.args), 0)

    def test_function_call_one_arg(self):
        """`a(3)` parses to a call with one constant argument."""
        t = Tokens("", "a(3)")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, FunctionCallExpression)
        e = cast(FunctionCallExpression, e)
        self.assertIsInstance(e.function, SymbolExpression)
        f = cast(SymbolExpression, e.function)
        self.assertEqual(f.name.type, Tk.IDENT)
        self.assertEqual(f.name.contents, "a")
        self.assertEqual(len(e.args), 1)
        self.assertIsInstance(e.args[0], ConstantExpression)
        a = cast(ConstantExpression, e.args[0])
        self.assertEqual(a.value.type, Tk.CONST_INT)
        self.assertEqual(a.value.number, 3)

    def test_function_call_multiple_args(self):
        """`a(3, 4)` parses to a call with two constant arguments."""
        t = Tokens("", "a(3, 4)")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, FunctionCallExpression)
        e = cast(FunctionCallExpression, e)
        self.assertIsInstance(e.function, SymbolExpression)
        f = cast(SymbolExpression, e.function)
        self.assertEqual(f.name.type, Tk.IDENT)
        self.assertEqual(f.name.contents, "a")
        self.assertEqual(len(e.args), 2)
        self.assertIsInstance(e.args[0], ConstantExpression)
        a = cast(ConstantExpression, e.args[0])
        self.assertEqual(a.value.type, Tk.CONST_INT)
        self.assertEqual(a.value.number, 3)
        self.assertIsInstance(e.args[1], ConstantExpression)
        a = cast(ConstantExpression, e.args[1])
        self.assertEqual(a.value.type, Tk.CONST_INT)
        self.assertEqual(a.value.number, 4)

    def test_field_access(self):
        """`a.b` parses to a FieldAccessExpression on a symbol."""
        t = Tokens("", "a.b")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, FieldAccessExpression)
        e = cast(FieldAccessExpression, e)
        self.assertIsInstance(e.struct, SymbolExpression)
        s = cast(SymbolExpression, e.struct)
        self.assertEqual(s.name.type, Tk.IDENT)
        self.assertEqual(s.name.contents, "a")
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "b")

    def test_field_deref_access(self):
        """`a->b` desugars to a field access on a dereference of `a`."""
        t = Tokens("", "a->b")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, FieldAccessExpression)
        e = cast(FieldAccessExpression, e)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "b")
        self.assertIsInstance(e.struct, UnaryExpression)
        e = cast(UnaryExpression, e.struct)
        self.assertEqual(e.operator.operator, UnaryOperator.DEREF)
        self.assertIsInstance(e.operand, SymbolExpression)
        e = cast(SymbolExpression, e.operand)
        self.assertEqual(e.name.type, Tk.IDENT)
        self.assertEqual(e.name.contents, "a")
class TestExpressionList(TestCase):
    """Tests for comma-separated expression lists."""

    def test_two_expressions(self):
        """`a, b` parses to an ExpressionList with two symbol roots."""
        t = Tokens("", "a, b")
        p = Parser(t)
        e = p.parse_expression()
        self.assertIsInstance(e, ExpressionList)
        e = cast(ExpressionList, e)
        self.assertEqual(len(e.roots), 2)
        self.assertIsInstance(e.roots[0], SymbolExpression)
        self.assertIsInstance(e.roots[1], SymbolExpression)
        a = cast(SymbolExpression, e.roots[0])
        self.assertEqual(a.name.type, Tk.IDENT)
        self.assertEqual(a.name.contents, "a")
        b = cast(SymbolExpression, e.roots[1])
        self.assertEqual(b.name.type, Tk.IDENT)
        self.assertEqual(b.name.contents, "b")
# ******************************************************************************
# Statement Tests
# ******************************************************************************
class TestLabelledStatement(TestCase):
    """Tests for label, case, and default statements.

    Bug fix: ``test_case_statement`` used ``assertTrue(actual, expected)``,
    which treats the second argument as a failure message and always passes
    for truthy values; replaced with real ``assertEqual`` checks.
    """

    def test_labelled_statement(self):
        """`label:` parses to a LabelStatement."""
        t = Tokens("", "label:")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, LabelStatement)
        s = cast(LabelStatement, s)
        self.assertEqual(s.name.type, Tk.IDENT)
        self.assertEqual(s.name.contents, "label")

    def test_case_statement(self):
        """`case 3:` parses to a CaseStatement with a constant condition."""
        t = Tokens("", "case 3:")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, CaseStatement)
        s = cast(CaseStatement, s)
        self.assertIsInstance(s.condition, ConstantExpression)
        c = cast(ConstantExpression, s.condition)
        self.assertEqual(c.value.type, Tk.CONST_INT)
        self.assertEqual(c.value.number, 3)

    def test_default_statement(self):
        """`default:` parses to a DefaultStatement."""
        t = Tokens("", "default:")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, DefaultStatement)
class TestExpressionStatement(TestCase):
    """Tests for expression statements.

    Bug fix: ``test_empty_statement`` used ``assertTrue(actual, expected)``,
    which treats the second argument as a failure message and always passes
    for truthy values; replaced with real ``assertEqual`` checks.
    """

    def test_empty_statement(self):
        """For `; 3;` the returned statement wraps the constant 3.

        NOTE(review): only one parse_statement call is made, so the parser
        presumably skips the leading empty statement — confirm against the
        parser implementation.
        """
        t = Tokens("", "; 3;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ExpressionStatement)
        s = cast(ExpressionStatement, s)
        self.assertIsInstance(s.expression, ConstantExpression)
        e = cast(ConstantExpression, s.expression)
        self.assertEqual(e.value.type, Tk.CONST_INT)
        self.assertEqual(e.value.number, 3)

    def test_basic_expression(self):
        """`3 + 4;` parses to an ExpressionStatement over a binary add."""
        t = Tokens("", "3 + 4;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ExpressionStatement)
        s = cast(ExpressionStatement, s)
        self.assertIsInstance(s.expression, BinaryExpression)
        e = cast(BinaryExpression, s.expression)
        self.assertIsInstance(e.left, ConstantExpression)
        self.assertIsInstance(e.right, ConstantExpression)

    def test_function_call(self):
        """`a();` parses to an ExpressionStatement over a zero-arg call."""
        t = Tokens("", "a();")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ExpressionStatement)
        s = cast(ExpressionStatement, s)
        self.assertIsInstance(s.expression, FunctionCallExpression)
        e = cast(FunctionCallExpression, s.expression)
        self.assertIsInstance(e.function, SymbolExpression)
        self.assertEqual(len(e.args), 0)
class TestSelectionStatement(TestCase):
    """Tests for if/else-if/else chains and switch statements.

    An `else` branch appears in the chain as an IfStatement whose
    condition is None.
    """

    def test_single_if(self):
        """A lone `if` yields a chain of length 1."""
        t = Tokens("", "if (a) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, IfStatementChain)
        s = cast(IfStatementChain, s)
        self.assertEqual(len(s.chain), 1)
        self.assertIsInstance(s.chain[0], IfStatement)
        s = cast(IfStatement, s.chain[0])
        self.assertIsInstance(s.condition, SymbolExpression)
        self.assertIsInstance(s.body, CompoundStatement)
        s = cast(CompoundStatement, s.body)
        self.assertEqual(len(s.statements), 0)

    def test_else(self):
        """`if ... else` yields a chain of 2; the else has no condition."""
        t = Tokens("", "if (a) {} else {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, IfStatementChain)
        s = cast(IfStatementChain, s)
        self.assertEqual(len(s.chain), 2)
        self.assertIsInstance(s.chain[0], IfStatement)
        a = cast(IfStatement, s.chain[0])
        self.assertIsInstance(a.condition, SymbolExpression)
        self.assertIsInstance(a.body, CompoundStatement)
        a = cast(CompoundStatement, a.body)
        self.assertEqual(len(a.statements), 0)
        b = cast(IfStatement, s.chain[1])
        self.assertIsNone(b.condition)
        self.assertIsInstance(b.body, CompoundStatement)
        b = cast(CompoundStatement, b.body)
        self.assertEqual(len(b.statements), 0)

    def test_single_elseif(self):
        """`if ... else if` yields a chain of 2, both conditioned."""
        t = Tokens("", "if (a) {} else if (b) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, IfStatementChain)
        s = cast(IfStatementChain, s)
        self.assertEqual(len(s.chain), 2)
        self.assertIsInstance(s.chain[0], IfStatement)
        a = cast(IfStatement, s.chain[0])
        self.assertIsInstance(a.condition, SymbolExpression)
        self.assertIsInstance(a.body, CompoundStatement)
        a = cast(CompoundStatement, a.body)
        self.assertEqual(len(a.statements), 0)
        b = cast(IfStatement, s.chain[1])
        self.assertIsInstance(b.condition, SymbolExpression)
        self.assertIsInstance(b.body, CompoundStatement)
        b = cast(CompoundStatement, b.body)
        self.assertEqual(len(b.statements), 0)

    def test_multiple_elseifs(self):
        """Two `else if` branches yield a chain of 3, all conditioned."""
        t = Tokens("", "if (a) {} else if (b) {} else if (c) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, IfStatementChain)
        s = cast(IfStatementChain, s)
        self.assertEqual(len(s.chain), 3)
        self.assertIsInstance(s.chain[0], IfStatement)
        a = cast(IfStatement, s.chain[0])
        self.assertIsInstance(a.condition, SymbolExpression)
        self.assertIsInstance(a.body, CompoundStatement)
        a = cast(CompoundStatement, a.body)
        self.assertEqual(len(a.statements), 0)
        b = cast(IfStatement, s.chain[1])
        self.assertIsInstance(b.condition, SymbolExpression)
        self.assertIsInstance(b.body, CompoundStatement)
        b = cast(CompoundStatement, b.body)
        self.assertEqual(len(b.statements), 0)
        c = cast(IfStatement, s.chain[2])
        self.assertIsInstance(c.condition, SymbolExpression)
        self.assertIsInstance(c.body, CompoundStatement)
        c = cast(CompoundStatement, c.body)
        self.assertEqual(len(c.statements), 0)

    def test_elseif_else(self):
        """`else if` then `else`: chain of 3, last has no condition."""
        t = Tokens("", "if (a) {} else if (b) {} else {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, IfStatementChain)
        s = cast(IfStatementChain, s)
        self.assertEqual(len(s.chain), 3)
        self.assertIsInstance(s.chain[0], IfStatement)
        a = cast(IfStatement, s.chain[0])
        self.assertIsInstance(a.condition, SymbolExpression)
        self.assertIsInstance(a.body, CompoundStatement)
        a = cast(CompoundStatement, a.body)
        self.assertEqual(len(a.statements), 0)
        b = cast(IfStatement, s.chain[1])
        self.assertIsInstance(b.condition, SymbolExpression)
        self.assertIsInstance(b.body, CompoundStatement)
        b = cast(CompoundStatement, b.body)
        self.assertEqual(len(b.statements), 0)
        c = cast(IfStatement, s.chain[2])
        self.assertIsNone(c.condition)
        self.assertIsInstance(c.body, CompoundStatement)
        c = cast(CompoundStatement, c.body)
        self.assertEqual(len(c.statements), 0)

    def test_multiple_elseifs_else(self):
        """Two `else if`s plus `else`: chain of 4, last has no condition."""
        t = Tokens("", "if (a) {} else if (b) {} else if (c) {} else {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, IfStatementChain)
        s = cast(IfStatementChain, s)
        self.assertEqual(len(s.chain), 4)
        self.assertIsInstance(s.chain[0], IfStatement)
        a = cast(IfStatement, s.chain[0])
        self.assertIsInstance(a.condition, SymbolExpression)
        self.assertIsInstance(a.body, CompoundStatement)
        a = cast(CompoundStatement, a.body)
        self.assertEqual(len(a.statements), 0)
        b = cast(IfStatement, s.chain[1])
        self.assertIsInstance(b.condition, SymbolExpression)
        self.assertIsInstance(b.body, CompoundStatement)
        b = cast(CompoundStatement, b.body)
        self.assertEqual(len(b.statements), 0)
        c = cast(IfStatement, s.chain[2])
        self.assertIsInstance(c.condition, SymbolExpression)
        self.assertIsInstance(c.body, CompoundStatement)
        c = cast(CompoundStatement, c.body)
        self.assertEqual(len(c.statements), 0)
        d = cast(IfStatement, s.chain[3])
        self.assertIsNone(d.condition)
        self.assertIsInstance(d.body, CompoundStatement)
        d = cast(CompoundStatement, d.body)
        self.assertEqual(len(d.statements), 0)

    def test_switch(self):
        """`switch (a) {}` parses to a SwitchStatement with an empty body."""
        t = Tokens("", "switch (a) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, SwitchStatement)
        s = cast(SwitchStatement, s)
        self.assertIsInstance(s.condition, SymbolExpression)
        self.assertIsInstance(s.body, CompoundStatement)
        s = cast(CompoundStatement, s.body)
        self.assertEqual(len(s.statements), 0)
class TestIterationStatements(TestCase):
    """Parser tests for the iteration statements: while, do-while, for.

    Uses unittest's type-aware assertions (assertIsInstance, assertIsNone)
    instead of assertTrue(isinstance(...)) so failures report the actual
    type received.
    """

    def test_while(self):
        t = Tokens("", "while (a) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, WhileStatement)
        s = cast(WhileStatement, s)
        self.assertIsInstance(s.condition, SymbolExpression)
        self.assertIsInstance(s.body, CompoundStatement)
        s = cast(CompoundStatement, s.body)
        self.assertEqual(len(s.statements), 0)

    def test_do_while(self):
        t = Tokens("", "do {} while (a);")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, DoWhileStatement)
        s = cast(DoWhileStatement, s)
        self.assertIsInstance(s.condition, SymbolExpression)
        self.assertIsInstance(s.body, CompoundStatement)
        s = cast(CompoundStatement, s.body)
        self.assertEqual(len(s.statements), 0)

    def test_for_with_declaration(self):
        """A declaration initializer produces a DeclarationList."""
        t = Tokens("", "for (int i = 3; i < 100; i++) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ForStatement)
        s = cast(ForStatement, s)
        self.assertIsInstance(s.initializer, DeclarationList)
        self.assertIsInstance(s.condition, BinaryExpression)
        self.assertIsInstance(s.increment, PostfixExpression)
        i = cast(DeclarationList, s.initializer)
        self.assertEqual(len(i.declarations), 1)
        i = cast(Declaration, s.initializer.declarations[0])
        self.assertEqual(i.specifiers.type_specifier.type, TypeSpecifier.INT)
        self.assertEqual(i.declarator.name.contents, "i")
        self.assertEqual(len(i.declarator.parts), 0)
        self.assertIsInstance(i.initializer, ConstantExpression)
        c = cast(BinaryExpression, s.condition)
        self.assertEqual(c.operator.operator, BinaryOperator.LT)
        self.assertIsInstance(c.left, SymbolExpression)
        self.assertIsInstance(c.right, ConstantExpression)
        i = cast(PostfixExpression, s.increment)
        self.assertEqual(i.operator.operator, UnaryOperator.INC)
        self.assertIsInstance(i.operand, SymbolExpression)

    def test_for_with_expression(self):
        """An assignment initializer produces a BinaryExpression."""
        t = Tokens("", "for (i = 3; i < 100; i++) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ForStatement)
        s = cast(ForStatement, s)
        self.assertIsInstance(s.initializer, BinaryExpression)
        self.assertIsInstance(s.condition, BinaryExpression)
        self.assertIsInstance(s.increment, PostfixExpression)
        i = cast(BinaryExpression, s.initializer)
        self.assertEqual(i.operator.operator, BinaryOperator.ASSIGN)
        self.assertIsInstance(i.left, SymbolExpression)
        self.assertIsInstance(i.right, ConstantExpression)
        c = cast(BinaryExpression, s.condition)
        self.assertEqual(c.operator.operator, BinaryOperator.LT)
        self.assertIsInstance(c.left, SymbolExpression)
        self.assertIsInstance(c.right, ConstantExpression)
        i = cast(PostfixExpression, s.increment)
        self.assertEqual(i.operator.operator, UnaryOperator.INC)
        self.assertIsInstance(i.operand, SymbolExpression)

    def test_for_missing_initializer(self):
        t = Tokens("", "for (; i < 100; i++) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ForStatement)
        s = cast(ForStatement, s)
        self.assertIsNone(s.initializer)
        self.assertIsInstance(s.condition, BinaryExpression)
        self.assertIsInstance(s.increment, PostfixExpression)
        c = cast(BinaryExpression, s.condition)
        self.assertEqual(c.operator.operator, BinaryOperator.LT)
        self.assertIsInstance(c.left, SymbolExpression)
        self.assertIsInstance(c.right, ConstantExpression)
        i = cast(PostfixExpression, s.increment)
        self.assertEqual(i.operator.operator, UnaryOperator.INC)
        self.assertIsInstance(i.operand, SymbolExpression)

    def test_for_missing_condition(self):
        t = Tokens("", "for (i = 3;; i++) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ForStatement)
        s = cast(ForStatement, s)
        self.assertIsInstance(s.initializer, BinaryExpression)
        self.assertIsNone(s.condition)
        self.assertIsInstance(s.increment, PostfixExpression)
        i = cast(BinaryExpression, s.initializer)
        self.assertEqual(i.operator.operator, BinaryOperator.ASSIGN)
        self.assertIsInstance(i.left, SymbolExpression)
        self.assertIsInstance(i.right, ConstantExpression)
        i = cast(PostfixExpression, s.increment)
        self.assertEqual(i.operator.operator, UnaryOperator.INC)
        self.assertIsInstance(i.operand, SymbolExpression)

    def test_for_missing_increment(self):
        t = Tokens("", "for (i = 3; i < 100;) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ForStatement)
        s = cast(ForStatement, s)
        self.assertIsInstance(s.initializer, BinaryExpression)
        self.assertIsInstance(s.condition, BinaryExpression)
        self.assertIsNone(s.increment)
        i = cast(BinaryExpression, s.initializer)
        self.assertEqual(i.operator.operator, BinaryOperator.ASSIGN)
        self.assertIsInstance(i.left, SymbolExpression)
        self.assertIsInstance(i.right, ConstantExpression)
        c = cast(BinaryExpression, s.condition)
        self.assertEqual(c.operator.operator, BinaryOperator.LT)
        self.assertIsInstance(c.left, SymbolExpression)
        self.assertIsInstance(c.right, ConstantExpression)

    def test_for_missing_everything(self):
        t = Tokens("", "for (;;) {}")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ForStatement)
        s = cast(ForStatement, s)
        self.assertIsNone(s.initializer)
        self.assertIsNone(s.condition)
        self.assertIsNone(s.increment)
class TestJumpStatements(TestCase):
    """Parser tests for the jump statements: goto, continue, break, return.

    Uses assertIsInstance/assertIsNone rather than assertTrue(isinstance(...))
    so failures report the actual value/type received.
    """

    def test_goto(self):
        t = Tokens("", "goto label;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, GotoStatement)
        s = cast(GotoStatement, s)
        self.assertEqual(s.name.type, Tk.IDENT)
        self.assertEqual(s.name.contents, "label")

    def test_continue(self):
        t = Tokens("", "continue;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ContinueStatement)

    def test_break(self):
        t = Tokens("", "break;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, BreakStatement)

    def test_return_nothing(self):
        t = Tokens("", "return;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ReturnStatement)
        s = cast(ReturnStatement, s)
        # A bare `return` carries no result expression.
        self.assertIsNone(s.result)

    def test_return_expression(self):
        t = Tokens("", "return 3;")
        p = Parser(t)
        s = p.parse_statement()
        self.assertIsInstance(s, ReturnStatement)
        s = cast(ReturnStatement, s)
        self.assertIsInstance(s.result, ConstantExpression)
# Allow running this test module directly: discovers and runs all TestCases.
if __name__ == "__main__":
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common Policy Engine Implementation"""
import json
import urllib
import urllib2
class NotAuthorized(Exception):
    """Raised by enforce() when the credentials fail the match list."""
    pass
_BRAIN = None
def set_brain(brain):
    """Set the brain used by enforce().

    Defaults use Brain() if not set.

    :param brain: a Brain (or subclass) instance used by all subsequent
        enforce() calls.
    """
    global _BRAIN
    _BRAIN = brain
def reset():
    """Clear the brain used by enforce().

    The next enforce() call will lazily create a fresh default Brain().
    """
    global _BRAIN
    _BRAIN = None
def enforce(match_list, target_dict, credentials_dict):
    """Enforces authorization of some rules against credentials.

    :param match_list: nested tuples of data to match against

    The basic brain supports three types of match lists:

        1) rules

           looks like: ``('rule:compute:get_instance',)``

           Retrieves the named rule from the rules dict and recursively
           checks against the contents of the rule.

        2) roles

           looks like: ``('role:compute:admin',)``

           Matches if the specified role is in credentials_dict['roles'].

        3) generic

           ``('tenant_id:%(tenant_id)s',)``

           Substitutes values from the target dict into the match using
           the % operator and matches them against the creds dict.

    Combining rules:

        The brain returns True if any of the outer tuple of rules match
        and also True if all of the inner tuples match. You can use this to
        perform simple boolean logic. For example, the following rule would
        return True if the creds contain the role 'admin' OR if the
        tenant_id matches the target dict AND the creds contain the
        role 'compute_sysadmin'::

            {
                "rule:combined": (
                    'role:admin',
                    ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin')
                )
            }

    Note that rule and role are reserved words in the credentials match, so
    you can't match against properties with those names. Custom brains may
    also add new reserved words. For example, the HttpBrain adds http as a
    reserved word.

    :param target_dict: dict of object properties

    Target dicts contain as much information as we can about the object being
    operated on.

    :param credentials_dict: dict of actor properties

    Credentials dicts contain as much information as we can about the user
    performing the action.

    :raises NotAuthorized: if the check fails
    """
    global _BRAIN
    # Lazily create the default brain on first use (see set_brain/reset).
    if not _BRAIN:
        _BRAIN = Brain()
    if not _BRAIN.check(match_list, target_dict, credentials_dict):
        raise NotAuthorized()
class Brain(object):
    """Implements policy checking.

    Holds a dict of named rules; see enforce() for the match-list grammar.
    """

    @classmethod
    def load_json(cls, data, default_rule=None):
        """Init a brain using json instead of a rules dictionary."""
        rules_dict = json.loads(data)
        return cls(rules=rules_dict, default_rule=default_rule)

    def __init__(self, rules=None, default_rule=None):
        # rules: dict mapping rule name -> match list.
        # default_rule: rule name consulted when a named rule is missing
        # (see _check_rule).
        self.rules = rules or {}
        self.default_rule = default_rule

    def add_rule(self, key, match):
        # Register (or overwrite) a named rule.
        self.rules[key] = match

    def _check(self, match, target_dict, cred_dict):
        # Dispatch a single "kind:value" match string to a _check_<kind>
        # method; unknown kinds fall back to the generic substitution
        # matcher with the *full* match string.
        match_kind, match_value = match.split(':', 1)
        try:
            f = getattr(self, '_check_%s' % match_kind)
        except AttributeError:
            if not self._check_generic(match, target_dict, cred_dict):
                return False
        else:
            if not f(match_value, target_dict, cred_dict):
                return False
        return True

    def check(self, match_list, target_dict, cred_dict):
        """Checks authorization of some rules against credentials.

        Detailed description of the check with examples in policy.enforce().

        :param match_list: nested tuples of data to match against
        :param target_dict: dict of object properties
        :param credentials_dict: dict of actor properties
        :returns: True if the check passes
        """
        # An empty match list authorizes everything.
        if not match_list:
            return True
        # Outer sequence is OR-ed; each inner sequence is AND-ed.
        for and_list in match_list:
            if isinstance(and_list, basestring):  # NOTE: Python 2 API.
                # A bare string is treated as a single-element AND group.
                and_list = (and_list,)
            if all([self._check(item, target_dict, cred_dict)
                    for item in and_list]):
                return True
        return False

    def _check_rule(self, match, target_dict, cred_dict):
        """Recursively checks credentials based on the brains rules."""
        try:
            new_match_list = self.rules[match]
        except KeyError:
            # Fall back to the default rule, guarding against the default
            # rule recursing into itself; otherwise an unknown rule denies.
            if self.default_rule and match != self.default_rule:
                new_match_list = ('rule:%s' % self.default_rule,)
            else:
                return False

        return self.check(new_match_list, target_dict, cred_dict)

    def _check_role(self, match, target_dict, cred_dict):
        """Check that there is a matching role in the cred dict."""
        # Case-insensitive comparison against credentials' 'roles' list.
        return match.lower() in [x.lower() for x in cred_dict['roles']]

    def _check_generic(self, match, target_dict, cred_dict):
        """Check an individual match.

        Matches look like:

            tenant:%(tenant_id)s
            role:compute:admin

        """
        # TODO(termie): do dict inspection via dot syntax
        # Substitute target values, then compare "key:value" against the
        # corresponding credentials entry.
        match = match % target_dict
        key, value = match.split(':', 1)
        if key in cred_dict:
            return value == cred_dict[key]
        return False
class HttpBrain(Brain):
    """A brain that can check external urls for policy.

    Posts json blobs for target and credentials.
    """

    def _check_http(self, match, target_dict, cred_dict):
        """Check http: rules by calling to a remote server.

        This example implementation simply verifies that the response is
        exactly 'True'. A custom brain using response codes could easily
        be implemented.
        """
        # Substitute target values into the URL template, then POST the
        # target and credentials as JSON-encoded form fields.
        url = match % target_dict
        data = {'target': json.dumps(target_dict),
                'credentials': json.dumps(cred_dict)}
        post_data = urllib.urlencode(data)  # NOTE: Python 2 API.
        f = urllib2.urlopen(url, post_data)
        # Any response other than the literal string "True" denies.
        return f.read() == "True"
| |
import logging
import os
import signal
import sys
import threading
import time
import numpy as np
import pytest
import ray
from ray.experimental.internal_kv import _internal_kv_get
from ray.ray_constants import DEBUG_AUTOSCALING_ERROR
import ray._private.utils
import ray.ray_constants as ray_constants
from ray.cluster_utils import cluster_not_supported
import ray._private.gcs_pubsub as gcs_pubsub
from ray._private.test_utils import (
init_error_pubsub,
get_error_message,
get_log_batch,
Semaphore,
wait_for_condition,
run_string_as_driver_nonblocking,
)
def test_warning_for_too_many_actors(shutdown_only):
    """Creating far more blocked actors than CPUs should publish a
    WORKER_POOL_LARGE_ERROR warning, once per wave of new actors."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    p = init_error_pubsub()

    @ray.remote
    class Foo:
        def __init__(self):
            # Block forever so each actor keeps holding a worker process.
            time.sleep(1000)

    # NOTE: We should save actor, otherwise it will be out of scope.
    actor_group1 = [Foo.remote() for _ in range(num_cpus * 10)]
    assert len(actor_group1) == num_cpus * 10
    errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR

    # A second wave of actors should trigger the warning again.
    actor_group2 = [Foo.remote() for _ in range(num_cpus * 3)]
    assert len(actor_group2) == num_cpus * 3
    errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
    p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
    """Nested blocked tasks that exhaust the worker pool should publish a
    WORKER_POOL_LARGE_ERROR warning."""
    # Check that if we run a workload which requires too many workers to be
    # started that we will receive a warning.
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    p = init_error_pubsub()
    # Semaphores used to sequence the g -> h -> f submission order below.
    remote_wait = Semaphore.remote(value=0)
    nested_wait = Semaphore.remote(value=0)

    # Make sure both semaphore actors are alive before scheduling tasks.
    ray.get(
        [
            remote_wait.locked.remote(),
            nested_wait.locked.remote(),
        ]
    )

    @ray.remote(num_cpus=0.25)
    def f():
        time.sleep(1000)
        return 1

    @ray.remote(num_cpus=0.25)
    def h(nested_waits):
        nested_wait.release.remote()
        ray.get(nested_waits)
        ray.get(f.remote())

    @ray.remote(num_cpus=0.25)
    def g(remote_waits, nested_waits):
        # Sleep so that the f tasks all get submitted to the scheduler after
        # the g tasks.
        remote_wait.release.remote()
        # wait until every lock is released.
        ray.get(remote_waits)
        ray.get(h.remote(nested_waits))

    num_root_tasks = num_cpus * 4
    # Lock remote task until everything is scheduled.
    remote_waits = []
    nested_waits = []
    for _ in range(num_root_tasks):
        remote_waits.append(remote_wait.acquire.remote())
        nested_waits.append(nested_wait.acquire.remote())

    [g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]

    errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
    p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
    """Re-exporting the same remote function (and actor class) past the
    duplicate threshold should log a 'has been exported N times.' warning."""
    ray.init(num_cpus=1)

    @ray.remote
    def create_remote_function():
        @ray.remote
        def g():
            return 1

        return ray.get(g.remote())

    # One export below the threshold: no warning expected yet.
    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_remote_function.remote())

    import io

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): It's terrible to have to rely on this implementation detail,
    # the fact that the warning comes from ray._private.import_thread.logger.
    # However, I didn't find a good way to capture the output for all loggers
    # simultaneously.
    ray._private.import_thread.logger.addHandler(ch)
    # The threshold-th export should emit the warning.
    ray.get(create_remote_function.remote())

    # Poll up to 10s because the warning is logged asynchronously by the
    # import thread.
    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
    ray._private.import_thread.logger.removeHandler(ch)

    assert "remote function" in log_contents
    assert (
        "has been exported {} times.".format(
            ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD
        )
        in log_contents
    )

    # Now test the same thing but for actors.
    @ray.remote
    def create_actor_class():
        # Require a GPU so that the actor is never actually created and we
        # don't spawn an unreasonable number of processes.
        @ray.remote(num_gpus=1)
        class Foo:
            pass

        Foo.remote()

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_actor_class.remote())

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): As mentioned above, it's terrible to have to rely on this
    # implementation detail.
    ray._private.import_thread.logger.addHandler(ch)
    ray.get(create_actor_class.remote())

    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
    ray._private.import_thread.logger.removeHandler(ch)

    assert "actor" in log_contents
    assert (
        "has been exported {} times.".format(
            ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD
        )
        in log_contents
    )
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
    """Killing every raylet should publish a REMOVED_NODE_ERROR naming each
    dead node."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()
    p = error_pubsub

    node_ids = {item["NodeID"] for item in ray.nodes()}

    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)

    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()

    # Check that we get warning messages for both raylets.
    errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)

    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes.
    warning_node_ids = {error.error_message.split(" ")[5] for error in errors}

    assert node_ids == warning_node_ids
@pytest.mark.skipif(
    sys.platform == "win32", reason="Killing process on Windows does not raise a signal"
)
def test_warning_for_dead_autoscaler(ray_start_regular, error_pubsub):
    """Terminating the monitor (autoscaler) process should both publish a
    MONITOR_DIED_ERROR and persist it in the internal KV store."""
    # Terminate the autoscaler process.
    from ray.worker import _global_node

    autoscaler_process = _global_node.all_processes[ray_constants.PROCESS_TYPE_MONITOR][
        0
    ].process
    autoscaler_process.terminate()

    # Confirm that we receive an autoscaler failure error.
    errors = get_error_message(
        error_pubsub, 1, ray_constants.MONITOR_DIED_ERROR, timeout=5
    )
    assert len(errors) == 1

    # Confirm that the autoscaler failure error is stored.
    error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
    assert error is not None
def test_raylet_crash_when_get(ray_start_regular):
    """ray.get on a freed object must raise ReferenceCountingAssertionError
    even if the raylet is killed while the get is in flight."""

    def sleep_to_kill_raylet():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
    ray.internal.free(object_ref)

    # Kill the raylet from a background thread while the main thread blocks
    # in ray.get below.
    thread = threading.Thread(target=sleep_to_kill_raylet)
    thread.start()
    with pytest.raises(ray.exceptions.ReferenceCountingAssertionError):
        ray.get(object_ref)
    thread.join()
@pytest.mark.parametrize(
    "ray_start_cluster",
    [
        {
            "num_nodes": 1,
            "num_cpus": 2,
        },
        {
            "num_nodes": 2,
            "num_cpus": 1,
        },
    ],
    indirect=True,
)
def test_eviction(ray_start_cluster):
    """After ray.internal.free, both a direct ray.get and a task taking the
    ref as an argument must raise rather than recreate the object."""

    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    obj = large_object.remote()
    assert isinstance(ray.get(obj), np.ndarray)
    # Evict the object.
    ray.internal.free([obj])
    # ray.get throws an exception.
    with pytest.raises(ray.exceptions.ReferenceCountingAssertionError):
        ray.get(obj)

    @ray.remote
    def dependent_task(x):
        return

    # If the object is passed by reference, the task throws an
    # exception.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
    "ray_start_cluster",
    [
        {
            "num_nodes": 2,
            "num_cpus": 1,
        },
        {
            "num_nodes": 1,
            "num_cpus": 2,
        },
    ],
    indirect=True,
)
def test_serialized_id(ray_start_cluster):
    """ObjectRefs nested inside a task argument (here, in a list) must
    resolve on the receiving worker, both via ray.get and when re-passed
    to another task."""

    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_refs, test_dependent_task):
        print("get", obj_refs)
        obj_ref = obj_refs[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_ref)) == 1
        else:
            assert ray.get(obj_ref) == 1

    # Exercise all four combinations: task-produced vs ray.put refs,
    # direct get vs forwarding through a dependent task.
    obj = small_object.remote()
    ray.get(get.remote([obj], False))

    obj = small_object.remote()
    ray.get(get.remote([obj], True))

    obj = ray.put(1)
    ray.get(get.remote([obj], False))

    obj = ray.put(1)
    ray.get(get.remote([obj], True))
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
@pytest.mark.parametrize(
    "use_actors,node_failure",
    [(False, False), (False, True), (True, False), (True, True)],
)
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
    """Children (tasks or actors) are fate-shared with their parent actor:
    killing the parent's process or node must free the child's resources.

    Freedom is detected by probing whether the custom "child" resource
    becomes schedulable again.
    """
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_period_milliseconds": 100,
    }
    cluster = ray_start_cluster
    # Head node with no resources.
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    # Node to place the parent actor.
    node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
    # Node to place the child actor.
    cluster.add_node(num_cpus=1, resources={"child": 1})
    cluster.wait_for_nodes()

    @ray.remote
    def sleep():
        time.sleep(1000)

    @ray.remote(resources={"child": 1})
    def probe():
        return

    # TODO(swang): This test does not pass if max_restarts > 0 for the
    # raylet codepath. Add this parameter once the GCS actor service is enabled
    # by default.
    @ray.remote
    class Actor(object):
        def __init__(self):
            return

        def start_child(self, use_actors):
            # Blocks forever holding the "child" resource, either as a
            # nested actor or as a nested task.
            if use_actors:
                child = Actor.options(resources={"child": 1}).remote()
                ray.get(child.sleep.remote())
            else:
                ray.get(sleep.options(resources={"child": 1}).remote())

        def sleep(self):
            time.sleep(1000)

        def get_pid(self):
            return os.getpid()

    # Returns whether the "child" resource is available.
    def child_resource_available():
        p = probe.remote()
        ready, _ = ray.wait([p], timeout=1)
        return len(ready) > 0

    # Test fate sharing if the parent process dies.
    def test_process_failure(use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        pid = ray.get(a.get_pid.remote())
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        os.kill(pid, 9)
        wait_for_condition(child_resource_available)

    # Test fate sharing if the parent node dies.
    def test_node_failure(node_to_kill, use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        cluster.remove_node(node_to_kill, allow_graceful=False)
        node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
        wait_for_condition(child_resource_available)
        return node_to_kill

    if node_failure:
        test_node_failure(node_to_kill, use_actors)
    else:
        test_process_failure(use_actors)
@pytest.mark.parametrize(
    "ray_start_regular",
    [{"_system_config": {"gcs_rpc_server_reconnect_timeout_s": 100}}],
    indirect=True,
)
@pytest.mark.skipif(
    gcs_pubsub.gcs_pubsub_enabled(),
    reason="Logs are streamed via GCS pubsub when it is enabled, so logs "
    "cannot be delivered after GCS is killed.",
)
def test_gcs_server_failiure_report(ray_start_regular, log_pubsub):
    """Crashing the GCS server with a signal should surface exactly one
    gcs_server log batch (on non-Windows platforms).

    NOTE(review): "failiure" in the name is a typo for "failure"; renaming
    would change the collected test id, so it is preserved here.
    """
    # Get gcs server pid to send a signal.
    all_processes = ray.worker._global_node.all_processes
    gcs_server_process = all_processes["gcs_server"][0].process
    gcs_server_pid = gcs_server_process.pid
    # TODO(mwtian): make sure logs are delivered after GCS is restarted.
    if sys.platform == "win32":
        sig = 9
    else:
        sig = signal.SIGBUS
    os.kill(gcs_server_pid, sig)

    # wait for 30 seconds, for the 1st batch of logs.
    batches = get_log_batch(log_pubsub, 1, timeout=30)
    assert gcs_server_process.poll() is not None
    if sys.platform != "win32":
        # Windows signal handler does not run when process is terminated
        assert len(batches) == 1
        assert batches[0]["pid"] == "gcs_server", batches
def test_list_named_actors_timeout(monkeypatch, shutdown_only):
    """list_named_actors must raise GetTimeoutError when the GCS RPC is
    artificially delayed past the configured request timeout."""
    with monkeypatch.context() as m:
        # defer for 3s
        m.setenv(
            "RAY_testing_asio_delay_us",
            "ActorInfoGcsService.grpc_server.ListNamedActors" "=3000000:3000000",
        )
        ray.init(_system_config={"gcs_server_request_timeout_seconds": 1})

        @ray.remote
        class A:
            pass

        a = A.options(name="hi").remote()
        print(a)
        # 1s timeout < 3s injected delay -> must time out.
        with pytest.raises(ray.exceptions.GetTimeoutError):
            ray.util.list_named_actors()
def test_raylet_node_manager_server_failure(ray_start_cluster_head, log_pubsub):
    """A raylet whose node-manager grpc port is already taken must fail to
    start and log 'Failed to start the grpc server.'."""
    cluster = ray_start_cluster_head
    redis_port = int(cluster.address.split(":")[1])
    # Reuse redis port to make node manager grpc server fail to start.
    with pytest.raises(Exception):
        cluster.add_node(wait=False, node_manager_port=redis_port)

    # wait for max 10 seconds.
    def matcher(log_batch):
        return log_batch["pid"] == "raylet" and any(
            "Failed to start the grpc server." in line for line in log_batch["lines"]
        )

    match = get_log_batch(log_pubsub, 1, timeout=10, matcher=matcher)
    assert len(match) > 0
def test_gcs_server_crash_cluster(ray_start_cluster):
    """Killing the GCS server should crash a connected driver within the
    configured reconnect timeout instead of letting it hang."""
    # Test the GCS server failures will crash the driver.
    cluster = ray_start_cluster
    GCS_RECONNECTION_TIMEOUT = 5
    node = cluster.add_node(
        num_cpus=0,
        _system_config={"gcs_rpc_server_reconnect_timeout_s": GCS_RECONNECTION_TIMEOUT},
    )

    script = """
import ray
import time
ray.init(address="auto")
time.sleep(60)
"""

    # Get gcs server pid to send a signal.
    all_processes = node.all_processes
    gcs_server_process = all_processes["gcs_server"][0].process
    gcs_server_pid = gcs_server_process.pid

    proc = run_string_as_driver_nonblocking(script)
    # Wait long enough to start the driver.
    time.sleep(5)
    start = time.time()
    print(gcs_server_pid)
    os.kill(gcs_server_pid, signal.SIGKILL)
    # BUGFIX: wait for the driver to EXIT (poll() returns its exit code once
    # the process is dead).  The previous condition `proc.poll() is None`
    # (process still alive) was satisfied immediately and made the wait a
    # no-op, so a hung driver would not have been detected.
    wait_for_condition(lambda: proc.poll() is not None, timeout=10)
    # Make sure the driver was exited within the timeout instead of hanging.
    # * 2 for avoiding flakiness.
    assert time.time() - start < GCS_RECONNECTION_TIMEOUT * 2
    # Make sure all processes are cleaned up after GCS is crashed.
    # Currently, not every process is fate shared with GCS.
    # It seems like log monitor, ray client server, and Redis
    # are not fate shared.
    # TODO(sang): Fix it.
    # wait_for_condition(lambda: not node.any_processes_alive())
# Allow running this pytest module directly: `python this_file.py`.
if __name__ == "__main__":
    import pytest

    sys.exit(pytest.main(["-v", __file__]))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
  """Return the raw 16-bit IEEE half-precision bit pattern of x as an int.

  Args:
    x: A value convertible to np.float16.

  Returns:
    A Python int holding the uint16 bit pattern (e.g. 1.0 -> 0x3C00).
  """
  # np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
  # ndarray.item() is the documented, version-portable equivalent.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Append float16 values to tensor_proto.half_val as raw uint16 bit patterns."""
  bit_patterns = [ExtractBitsFromFloat16(value) for value in proto_values]
  tensor_proto.half_val.extend(bit_patterns)
# Dispatch table: numpy dtype -> function that appends values of that dtype
# to the corresponding repeated field of a TensorProto.  When the Cython
# fast_tensor_util extension imported above is available, its C appenders
# are used; otherwise the pure-Python Slow* fallbacks defined below are.
if _FAST_TENSOR_UTIL_AVAILABLE:
  _NP_TO_APPEND_FN = {
      # TODO(sesse): We should have a
      # fast_tensor_util.AppendFloat16ArrayToTensorProto,
      # but it seems np.float16_t doesn't exist?
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64: fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32: fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64: fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8: fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.uint16: fast_tensor_util.AppendUInt16ArrayToTensorProto,
      np.int8: fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.int16: fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.complex64: fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128: fast_tensor_util.AppendComplex128ArrayToTensorProto,
      # NOTE(review): np.object / np.bool are deprecated aliases removed in
      # NumPy 1.24 — confirm the pinned NumPy version before upgrading.
      np.object: fast_tensor_util.AppendObjectArrayToTensorProto,
      np.bool: fast_tensor_util.AppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      # NOTE(review): qint16/quint16 map to the *8-bit* appenders below —
      # this looks like a copy-paste from the qint8/quint8 entries (the slow
      # path uses one generic QInt appender for all quantized types).
      # Confirm before relying on the fast path for 16-bit quantized dtypes.
      dtypes.qint16.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:

  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([np.asscalar(x) for x in proto_values])

  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])

  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asscalar(x) for x in proto_values])

  def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    # Quantized numpy scalars are structured (1-field) values; x[0] extracts
    # the underlying integer.
    tensor_proto.int_val.extend([np.asscalar(x[0]) for x in proto_values])

  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])

  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    # Complex values are flattened as interleaved (real, imag) pairs.
    tensor_proto.scomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.dcomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])

  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([np.asscalar(x) for x in proto_values])

  _NP_TO_APPEND_FN = {
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.uint16: SlowAppendIntArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplex64ArrayToTensorProto,
      np.complex128: SlowAppendComplex128ArrayToTensorProto,
      np.object: SlowAppendObjectArrayToTensorProto,
      np.bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Linearly search dtype_dict for a key that compares equal to dtype.

  NOTE: dict hashing cannot be used here: np.dtype objects and numpy scalar
  types compare equal (==) without hashing equal, so dtype_dict.get(dtype)
  always returns None.  A linear scan with == is required.

  Args:
    dtype_dict: Dict keyed by numpy scalar types / dtypes.
    dtype: The dtype to look up.

  Returns:
    The matching value, or None if no key compares equal.
  """
  # dict.items() works on both Python 2 and 3, so the six.iteritems()
  # compat shim is unnecessary; this dict is tiny, so materializing the
  # items on Python 2 is irrelevant.
  for key, val in dtype_dict.items():
    if key == dtype:
      return val
  return None
def GetNumpyAppendFn(dtype):
  """Return the appender function for values of the given numpy dtype.

  numpy string dtypes are variable length, so they cannot be matched
  against a single constant (np.string does not exist); dtype.type is
  compared instead to detect string types.
  """
  if dtype.type in (np.string_, np.unicode_):
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    return SlowAppendObjectArrayToTensorProto
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
  """Convert a TensorShape to a list.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers representing the dimensions of the tensor.
  """
  sizes = []
  for d in shape.dim:
    sizes.append(d.size)
  return sizes
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
# Dtypes whose values may be serialized wholesale into the raw
# `tensor_content` bytes field of a TensorProto instead of a typed
# repeated field (see make_tensor_proto).
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
    dtypes.quint16, dtypes.qint32,
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
  """Returns the first non-None element of `l`, or None if all are None.

  Tensor elements are replaced with a placeholder message object so that
  the resulting error text stays readable.
  """
  for candidate in l:
    if candidate is None:
      continue
    if isinstance(candidate, ops.Tensor):
      return _Message("list containing Tensors")
    return candidate
  return None
def _NotNone(v):
  """Maps None to a printable _Message placeholder; passes others through."""
  return _Message("None") if v is None else v
def _FilterTuple(v):
  # Filter for quantized dtypes, which must be fed as tuples of scalars.
  # Returns a mismatching element (or a _Message) when `v` is unacceptable,
  # or None when it looks fine.
  #
  # A bare scalar is itself the mismatch (quantized values need wrapping).
  if not isinstance(v, (list, tuple)):
    return v
  # A flat tuple of scalars is the accepted form.
  if isinstance(v, tuple):
    if not any(isinstance(x, (list, tuple)) for x in v):
      return None
  # A flat list of scalars is a mismatch; report its first element.
  # NOTE(review): the inner condition guarantees no element is a
  # list/tuple, so the comprehension below reduces to _FirstNotNone(v).
  if isinstance(v, list):
    if not any(isinstance(x, (list, tuple)) for x in v):
      return _FirstNotNone([None if isinstance(x, (list, tuple)) else x for x in v])
  # Otherwise recurse into the nested structure.
  return _FirstNotNone([_FilterTuple(x) for x in v])
def _FilterInt(v):
  """Returns the first element of `v` that is not an integer, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterInt(x) for x in v])
  if isinstance(v, compat.integral_types):
    return None
  return _NotNone(v)
def _FilterFloat(v):
  """Returns the first element of `v` that is not a real number, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(x) for x in v])
  if isinstance(v, compat.real_types):
    return None
  return _NotNone(v)
def _FilterComplex(v):
  """Returns the first element of `v` that is not complex-typed, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(x) for x in v])
  if isinstance(v, compat.complex_types):
    return None
  return _NotNone(v)
def _FilterStr(v):
  """Returns the first element of `v` that is not bytes/text, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(x) for x in v])
  return None if isinstance(v, compat.bytes_or_text_types) else _NotNone(v)
def _FilterBool(v):
  """Returns the first element of `v` that is not a bool, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterBool(x) for x in v])
  if isinstance(v, bool):
    return None
  return _NotNone(v)
def _FilterNotTensor(v):
  """Returns str(x) for the first Tensor element found in `v`, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(x) for x in v])
  if isinstance(v, ops.Tensor):
    return str(v)
  return None
# Maps each TensorFlow dtype to the list of filter functions used by
# _AssertCompatible to detect incompatible Python values. Quantized
# types additionally require tuple packing, hence their extra
# _FilterTuple entry.
_TF_TO_IS_OK = {
    dtypes.bool: [_FilterBool],
    dtypes.complex128: [_FilterComplex],
    dtypes.complex64: [_FilterComplex],
    dtypes.float32: [_FilterFloat],
    dtypes.float64: [_FilterFloat],
    dtypes.int16: [_FilterInt],
    dtypes.int32: [_FilterInt],
    dtypes.int64: [_FilterInt],
    dtypes.int8: [_FilterInt],
    dtypes.qint16: [_FilterInt, _FilterTuple],
    dtypes.qint32: [_FilterInt, _FilterTuple],
    dtypes.qint8: [_FilterInt, _FilterTuple],
    dtypes.quint16: [_FilterInt, _FilterTuple],
    dtypes.quint8: [_FilterInt, _FilterTuple],
    dtypes.string: [_FilterStr],
    dtypes.uint16: [_FilterInt],
    dtypes.uint8: [_FilterInt],
}
def _AssertCompatible(values, dtype):
  """Raises TypeError if `values` holds elements incompatible with `dtype`.

  When `dtype` has no registered filters, values are only required not to
  be Tensors.
  """
  filter_fns = _TF_TO_IS_OK.get(dtype, [_FilterNotTensor])
  mismatch = _FirstNotNone([fn(values) for fn in filter_fns])
  if mismatch is None:
    return
  if dtype is None:
    raise TypeError("List of Tensors when single Tensor expected")
  raise TypeError("Expected %s, got %s of type '%s' instead." %
                  (dtype.name, repr(mismatch), type(mismatch).__name__))
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False):
  """Create a TensorProto.

  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.
    verify_shape: Boolean that enables verification of a shape of values.

  Returns:
    A TensorProto. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with tensor_util.MakeNdarray(proto).

  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values or if verify_shape is
      True and shape of values is not equals to a shape from the argument.

  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.

  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.

  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.

  make_tensor_proto then converts the numpy array to a tensor proto.

  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.

  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.
  """
  if dtype:
    dtype = dtypes.as_dtype(dtype)
  # Quantized values are fed as tuples; several checks below are relaxed
  # for them.
  is_quantized = (dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint16,
                            dtypes.quint16, dtypes.qint32])
  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    np_dt = dtype.as_numpy_dtype if dtype else None
    # A zero-element shape needs no value/compatibility checks.
    if np.prod(shape) == 0:
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" % (
                             values, list(nparray.shape),
                             _GetDenseDimensions(values)))
    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array
  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)
  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype
  if dtype is not None and (not hasattr(dtype, "base_dtype") or
                            dtype.base_dtype != numpy_dtype.base_dtype):
    raise TypeError("Incompatible types: %s vs. %s" % (dtype, nparray.dtype))
  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape)
    is_same_size = shape_size == nparray.size
    if verify_shape:
      if not nparray.shape == tuple(shape):
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))
    # Fewer elements than the shape is allowed (the tensor proto is then a
    # broadcastable fill); more is not.
    if nparray.size > shape_size:
      raise ValueError(
          "Too many elements provided. Needed at most %d, but received %d" %
          (shape_size, nparray.size))
  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())
  # Large dense tensors of eligible dtypes are serialized once into the raw
  # tensor_content bytes instead of a typed repeated field.
  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    tensor_proto.tensor_content = nparray.tostring()
    return tensor_proto
  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
    return tensor_proto
  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()
  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError("Element type not supported in TensorProto: %s" %
                    numpy_dtype.name)
  append_fn(tensor_proto, proto_values)
  return tensor_proto
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  # Dense serialization: the raw bytes hold the entire array in C order.
  if tensor.tensor_content:
    return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float16:
    # the half_val field of the TensorProto stores the binary representation
    # of the fp16: we need to reinterpret this as a proper float16
    if len(tensor.half_val) == 1:
      tmp = np.array(tensor.half_val[0], dtype=np.uint16)
      tmp.dtype = np.float16
      return np.repeat(tmp, num_elements).reshape(shape)
    else:
      tmp = np.fromiter(tensor.half_val, dtype=np.uint16)
      tmp.dtype = np.float16
      return tmp.reshape(shape)
  elif tensor_dtype == dtypes.float32:
    # In every branch below, a single stored value means the tensor is
    # constant-filled with that value (hence np.repeat).
    if len(tensor.float_val) == 1:
      return np.repeat(np.array(tensor.float_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(np.array(tensor.double_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16,
                        dtypes.int8, dtypes.qint32, dtypes.quint8, dtypes.qint8,
                        dtypes.qint16, dtypes.quint16, dtypes.bfloat16]:
    # All small integer-like dtypes share the int_val field.
    if len(tensor.int_val) == 1:
      return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(np.array(tensor.int64_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(np.array(tensor.string_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([x for x in tensor.string_val],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    # Complex values are stored as interleaved (real, imag) scalar pairs.
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.scomplex_val[0],
                                        tensor.scomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
                                        tensor.dcomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  proto_dims = [d.size for d in tensor_proto.tensor_shape.dim]
  # NOTE(review): zip stops at the shorter sequence, so shapes of unequal
  # rank compare True on their common prefix — presumably intentional here;
  # confirm before adding a rank check.
  return all(a == b for a, b in zip(proto_dims, shape))
def _ConstantValue(tensor):
  """Attempts to statically evaluate `tensor`; returns an ndarray or None.

  Dispatches on the producing op's type. Every branch returns None as soon
  as any required input cannot itself be evaluated.
  """
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
    # Constants carry their value directly in the "value" attr.
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    # Shape is known statically iff the input's shape is fully defined.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array([dim.value for dim in input_shape.dims],
                      dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    # Rank only needs the number of dimensions, not their sizes.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return np.ndarray(shape=(), buffer=np.array([input_shape.ndims]),
                        dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Range":
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # Concat: inputs[0] is the axis, the rest are the values.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "ConcatV2":
    # ConcatV2: the axis moved to the last input.
    dim = constant_value(tensor.op.inputs[-1])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[:-1]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "Pack":
    values = []
    for x in tensor.op.inputs:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.array(values)
  else:
    return None
def constant_value(tensor):
  """Returns the constant value of the given tensor, if efficiently calculable.

  This function attempts to partially evaluate the given tensor, and
  returns its value as a numpy ndarray if this succeeds.

  TODO(mrry): Consider whether this function should use a registration
  mechanism like gradients and ShapeFunctions, so that it is easily
  extensible.

  NOTE: If `constant_value(tensor)` returns a non-`None` result, it will no
  longer be possible to feed a different value for `tensor`. This allows the
  result of this function to influence the graph that is constructed, and
  permits static shape optimizations.

  Args:
    tensor: The Tensor to be evaluated.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  ret = _ConstantValue(tensor)
  if ret is None:
    return None
  # The caller may now depend on the constant value of `tensor`, so we
  # conservatively prevent it from being fed.
  tensor.graph.prevent_feeding(tensor)
  return ret
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  # NOTE(review): with_rank(1) presumably raises for tensors of other
  # rank — confirm against tensor_shape.TensorShape.
  shape = tensor.get_shape().with_rank(1)
  if tensor.get_shape() == [0]:
    # An empty shape vector denotes a scalar.
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        # Negative values (e.g. -1 in reshape) stand for unknown dims.
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  else:
    # Fall back to full evaluation; -1 entries map to unknown dimensions.
    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
      ret = ret.merge_with(tensor_shape.TensorShape(
          [d if d != -1 else None for d in value]))
    return ret
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shutil
import tempfile
from catapult_base import cloud_storage # pylint: disable=import-error
def AssertValidCloudStorageBucket(bucket):
  """Raises ValueError unless `bucket` is None or a known privacy bucket."""
  valid_buckets = (None,
                   cloud_storage.PUBLIC_BUCKET,
                   cloud_storage.PARTNER_BUCKET,
                   cloud_storage.INTERNAL_BUCKET)
  if bucket not in valid_buckets:
    raise ValueError("Cloud storage privacy bucket %s is invalid" % bucket)
class ArchiveError(Exception):
  """Raised for errors while manipulating WPR archive metadata."""
  pass
class WprArchiveInfo(object):
  """Maps story names to Web Page Replay (.wpr) archive files.

  The mapping is backed by a JSON metadata file whose 'archives' field maps
  each .wpr file name to the list of story names it supports. Archives can
  be downloaded from and uploaded to cloud storage.
  """

  def __init__(self, file_path, data, bucket):
    AssertValidCloudStorageBucket(bucket)
    self._file_path = file_path
    self._base_dir = os.path.dirname(file_path)
    self._data = data
    self._bucket = bucket
    # Ensure the directory for the metadata file exists.
    if not os.path.exists(self._base_dir):
      os.makedirs(self._base_dir)
    # Map from the relative path (as it appears in the metadata file) of the
    # .wpr file to a list of story names it supports.
    self._wpr_file_to_story_names = data['archives']
    # Map from the story name to a relative path (as it appears
    # in the metadata file) of the .wpr file.
    self._story_name_to_wpr_file = dict()
    # Find out the wpr file names for each story.
    for wpr_file in data['archives']:
      story_names = data['archives'][wpr_file]
      for story_name in story_names:
        self._story_name_to_wpr_file[story_name] = wpr_file
    # Path of an in-progress recording, if any (see AddNewTemporaryRecording).
    self.temp_target_wpr_file_path = None

  @classmethod
  def FromFile(cls, file_path, bucket):
    """Creates a WprArchiveInfo from a metadata file, or empty if missing."""
    if os.path.exists(file_path):
      with open(file_path, 'r') as f:
        data = json.load(f)
        return cls(file_path, data, bucket)
    return cls(file_path, {'archives': {}}, bucket)

  def DownloadArchivesIfNeeded(self):
    """Downloads archives iff the Archive has a bucket parameter and the user
    has permission to access the bucket.

    Raises cloud storage Permissions or Credentials error when there is no
    local copy of the archive and the user doesn't have permission to access
    the archive's bucket.

    Warns when a bucket is not specified or when the user doesn't have
    permission to access the archive's bucket but a local copy of the archive
    exists.
    """
    # Download all .wpr files.
    if not self._bucket:
      # BUG FIX: the format string contains '%s' but no argument was being
      # passed, so the file path was never logged.
      logging.warning('Story set in %s has no bucket specified, and '
                      'cannot be downloaded from cloud_storage.',
                      self._file_path)
      return
    assert 'archives' in self._data, 'Invalid data format in %s. \'archives\'' \
                                     ' field is needed' % self._file_path
    for archive_path in self._data['archives']:
      archive_path = self._WprFileNameToPath(archive_path)
      try:
        cloud_storage.GetIfChanged(archive_path, self._bucket)
      except (cloud_storage.CredentialsError, cloud_storage.PermissionError):
        if os.path.exists(archive_path):
          # If the archive exists, assume the user recorded their own and
          # simply warn.
          logging.warning('Need credentials to update WPR archive: %s',
                          archive_path)
        else:
          logging.error("You either aren't authenticated or don't have "
                        "permission to use the archives for this page set."
                        "\nYou may need to run gsutil config."
                        "\nYou can find instructions for gsutil config at: "
                        "http://www.chromium.org/developers/telemetry/"
                        "upload_to_cloud_storage")
          raise

  def WprFilePathForStory(self, story):
    """Returns the absolute archive path for `story`, or None if unknown."""
    if self.temp_target_wpr_file_path:
      return self.temp_target_wpr_file_path
    wpr_file = self._story_name_to_wpr_file.get(story.display_name, None)
    if wpr_file is None and hasattr(story, 'url'):
      # Some old pages always use the URL to identify a page rather than the
      # display_name, so try to look for that.
      wpr_file = self._story_name_to_wpr_file.get(story.url, None)
    if wpr_file:
      return self._WprFileNameToPath(wpr_file)
    return None

  def AddNewTemporaryRecording(self, temp_wpr_file_path=None):
    """Registers a temporary .wpr file to record into (created if omitted)."""
    if temp_wpr_file_path is None:
      temp_wpr_file_handle, temp_wpr_file_path = tempfile.mkstemp()
      os.close(temp_wpr_file_handle)
    self.temp_target_wpr_file_path = temp_wpr_file_path

  def AddRecordedStories(self, stories, upload_to_cloud_storage=False):
    """Moves the temporary recording into place and updates the metadata.

    Args:
      stories: Stories recorded into the temporary archive. If empty, the
        temporary archive is discarded.
      upload_to_cloud_storage: If True, also uploads the new archive to the
        configured cloud storage bucket.
    """
    if not stories:
      os.remove(self.temp_target_wpr_file_path)
      return
    (target_wpr_file, target_wpr_file_path) = self._NextWprFileName()
    for story in stories:
      self._SetWprFileForStory(story.display_name, target_wpr_file)
    shutil.move(self.temp_target_wpr_file_path, target_wpr_file_path)
    # Update the hash file.
    target_wpr_file_hash = cloud_storage.CalculateHash(target_wpr_file_path)
    with open(target_wpr_file_path + '.sha1', 'wb') as f:
      f.write(target_wpr_file_hash)
      f.flush()
    self._WriteToFile()
    self._DeleteAbandonedWprFiles()
    # Upload to cloud storage
    if upload_to_cloud_storage:
      if not self._bucket:
        logging.warning('StorySet must have bucket specified to upload '
                        'stories to cloud storage.')
        return
      try:
        cloud_storage.Insert(self._bucket, target_wpr_file_hash,
                             target_wpr_file_path)
      # BUG FIX: 'except X, e' is Python-2-only syntax; 'as' works everywhere.
      except cloud_storage.CloudStorageError as e:
        # BUG FIX: the old code did "'...%s...%s' % target_wpr_file_path",
        # which raises TypeError (one argument for two placeholders) at log
        # time; pass both values as lazy logging arguments instead.
        logging.warning('Failed to upload wpr file %s to cloud storage. '
                        'Error:%s', target_wpr_file_path, e)

  def _DeleteAbandonedWprFiles(self):
    """Drops metadata entries for unused wpr files and deletes the files."""
    # Update the metadata so that the abandoned wpr files don't have
    # empty story name arrays.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del self._wpr_file_to_story_names[wpr_file]
      # Don't fail if we're unable to delete some of the files.
      wpr_file_path = self._WprFileNameToPath(wpr_file)
      try:
        os.remove(wpr_file_path)
      except Exception:
        logging.warning('Failed to delete file: %s' % wpr_file_path)

  def _AbandonedWprFiles(self):
    """Returns wpr files whose supported story name lists are empty."""
    abandoned_wpr_files = []
    # .items() works on both Python 2 and 3, unlike .iteritems().
    for wpr_file, story_names in (
        self._wpr_file_to_story_names.items()):
      if not story_names:
        abandoned_wpr_files.append(wpr_file)
    return abandoned_wpr_files

  def _WriteToFile(self):
    """Writes the metadata into the file passed as constructor parameter."""
    metadata = dict()
    metadata['description'] = (
        'Describes the Web Page Replay archives for a story set. '
        'Don\'t edit by hand! Use record_wpr for updating.')
    metadata['archives'] = self._wpr_file_to_story_names.copy()
    # Don't write data for abandoned archives.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del metadata['archives'][wpr_file]
    with open(self._file_path, 'w') as f:
      json.dump(metadata, f, indent=4)
      f.flush()

  def _WprFileNameToPath(self, wpr_file):
    """Returns the absolute path for a wpr file name from the metadata."""
    return os.path.abspath(os.path.join(self._base_dir, wpr_file))

  def _NextWprFileName(self):
    """Creates a new file name for a wpr archive file."""
    # The names are of the format "some_thing_number.wpr". Read the numbers.
    highest_number = -1
    base = None
    for wpr_file in self._wpr_file_to_story_names:
      match = re.match(r'(?P<BASE>.*)_(?P<NUMBER>[0-9]+)\.wpr', wpr_file)
      if not match:
        raise Exception('Illegal wpr file name ' + wpr_file)
      highest_number = max(int(match.groupdict()['NUMBER']), highest_number)
      if base and match.groupdict()['BASE'] != base:
        raise Exception('Illegal wpr file name ' + wpr_file +
                        ', doesn\'t begin with ' + base)
      base = match.groupdict()['BASE']
    if not base:
      # If we're creating a completely new info file, use the base name of the
      # story set file.
      base = os.path.splitext(os.path.basename(self._file_path))[0]
    new_filename = '%s_%03d.wpr' % (base, highest_number + 1)
    return new_filename, self._WprFileNameToPath(new_filename)

  def _SetWprFileForStory(self, story_name, wpr_file):
    """For modifying the metadata when we're going to record a new archive."""
    old_wpr_file = self._story_name_to_wpr_file.get(story_name, None)
    if old_wpr_file:
      self._wpr_file_to_story_names[old_wpr_file].remove(story_name)
    self._story_name_to_wpr_file[story_name] = wpr_file
    if wpr_file not in self._wpr_file_to_story_names:
      self._wpr_file_to_story_names[wpr_file] = []
    self._wpr_file_to_story_names[wpr_file].append(story_name)
| |
#
#
# Copyright (C) 2007, 2008 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""HTTP authentication module.
"""
import logging
import re
import base64
import binascii
from cStringIO import StringIO
from ganeti import compat
from ganeti import http
# Digest types from RFC2617
HTTP_BASIC_AUTH = "Basic"
HTTP_DIGEST_AUTH = "Digest"
# Not exactly as described in RFC2616, section 2.2, but good enough
# (matches simple tokens that may be emitted unquoted in auth headers).
_NOQUOTE = re.compile(r"^[-_a-z0-9]+$", re.I)
def _FormatAuthHeader(scheme, params):
  """Formats WWW-Authentication header value as per RFC2617, section 1.2

  @type scheme: str
  @param scheme: Authentication scheme
  @type params: dict
  @param params: Additional parameters
  @rtype: str
  @return: Formatted header value

  """
  # Build the pieces in a list and join once. This avoids the Python-2-only
  # cStringIO/iteritems combination used previously; .items() behaves the
  # same on Python 2 and 3 and the output is byte-identical.
  parts = [scheme]
  for name, value in params.items():
    if _NOQUOTE.match(value):
      formatted = value
    else:
      # TODO: Better quoting
      formatted = "\"%s\"" % value.replace("\"", "\\\"")
    parts.append(" %s=%s" % (name, formatted))
  return "".join(parts)
class HttpServerRequestAuthentication(object):
  """Base class adding HTTP authentication support to a request handler.

  Subclasses override L{Authenticate} (and usually
  L{AuthenticationRequired}) to enforce credential checking.

  """
  # Default authentication realm
  AUTH_REALM = "Unspecified"

  # Schemes for passwords
  _CLEARTEXT_SCHEME = "{CLEARTEXT}"
  _HA1_SCHEME = "{HA1}"
def GetAuthRealm(self, req):
"""Returns the authentication realm for a request.
May be overridden by a subclass, which then can return different realms for
different paths.
@type req: L{http.server._HttpServerRequest}
@param req: HTTP request context
@rtype: string
@return: Authentication realm
"""
# today we don't have per-request filtering, but we might want to
# add it in the future
# pylint: disable=W0613
return self.AUTH_REALM
def AuthenticationRequired(self, req):
"""Determines whether authentication is required for a request.
To enable authentication, override this function in a subclass and return
C{True}. L{AUTH_REALM} must be set.
@type req: L{http.server._HttpServerRequest}
@param req: HTTP request context
"""
# Unused argument, method could be a function
# pylint: disable=W0613,R0201
return False
def PreHandleRequest(self, req):
"""Called before a request is handled.
@type req: L{http.server._HttpServerRequest}
@param req: HTTP request context
"""
# Authentication not required, and no credentials given?
if not (self.AuthenticationRequired(req) or
(req.request_headers and
http.HTTP_AUTHORIZATION in req.request_headers)):
return
realm = self.GetAuthRealm(req)
if not realm:
raise AssertionError("No authentication realm")
# Check Authentication
if self.Authenticate(req):
# User successfully authenticated
return
# Send 401 Unauthorized response
params = {
"realm": realm,
}
# TODO: Support for Digest authentication (RFC2617, section 3).
# TODO: Support for more than one WWW-Authenticate header with the same
# response (RFC2617, section 4.6).
headers = {
http.HTTP_WWW_AUTHENTICATE: _FormatAuthHeader(HTTP_BASIC_AUTH, params),
}
raise http.HttpUnauthorized(headers=headers)
@staticmethod
def ExtractUserPassword(req):
"""Extracts a user and a password from the http authorization header.
@type req: L{http.server._HttpServerRequest}
@param req: HTTP request
@rtype: (str, str)
@return: A tuple containing a user and a password. One or both values
might be None if they are not presented
"""
credentials = req.request_headers.get(http.HTTP_AUTHORIZATION, None)
if not credentials:
return None, None
# Extract scheme
parts = credentials.strip().split(None, 2)
if len(parts) < 1:
# Missing scheme
return None, None
# RFC2617, section 1.2: "[...] It uses an extensible, case-insensitive
# token to identify the authentication scheme [...]"
scheme = parts[0].lower()
if scheme == HTTP_BASIC_AUTH.lower():
# Do basic authentication
if len(parts) < 2:
raise http.HttpBadRequest(message=("Basic authentication requires"
" credentials"))
return HttpServerRequestAuthentication._ExtractBasicUserPassword(parts[1])
elif scheme == HTTP_DIGEST_AUTH.lower():
# TODO: Implement digest authentication
# RFC2617, section 3.3: "Note that the HTTP server does not actually need
# to know the user's cleartext password. As long as H(A1) is available to
# the server, the validity of an Authorization header may be verified."
pass
# Unsupported authentication scheme
return None, None
@staticmethod
def _ExtractBasicUserPassword(in_data):
"""Extracts user and password from the contents of an authorization header.
@type in_data: str
@param in_data: Username and password encoded as Base64
@rtype: (str, str)
@return: A tuple containing user and password. One or both values might be
None if they are not presented
"""
try:
creds = base64.b64decode(in_data.encode("ascii")).decode("ascii")
except (TypeError, binascii.Error, UnicodeError):
logging.exception("Error when decoding Basic authentication credentials")
raise http.HttpBadRequest(message=("Invalid basic authorization header"))
if ":" not in creds:
# We have just a username without password
return creds, None
# return (user, password) tuple
return creds.split(":", 1)
def Authenticate(self, req):
    """Checks the credentials of an incoming request.

    This function MUST be overridden by a subclass.

    """
    raise NotImplementedError()
@staticmethod
def ExtractSchemePassword(expected_password):
"""Extracts a scheme and a password from the expected_password.
@type expected_password: str
@param expected_password: Username and password encoded as Base64
@rtype: (str, str)
@return: A tuple containing a scheme and a password. Both values will be
None when an invalid scheme or password encoded
"""
if expected_password is None:
return None, None
# Backwards compatibility for old-style passwords without a scheme
if not expected_password.startswith("{"):
expected_password = (HttpServerRequestAuthentication._CLEARTEXT_SCHEME +
expected_password)
# Check again, just to be sure
if not expected_password.startswith("{"):
raise AssertionError("Invalid scheme")
scheme_end_idx = expected_password.find("}", 1)
# Ensure scheme has a length of at least one character
if scheme_end_idx <= 1:
logging.warning("Invalid scheme in password")
return None, None
scheme = expected_password[:scheme_end_idx + 1].upper()
password = expected_password[scheme_end_idx + 1:]
return scheme, password
@staticmethod
def VerifyBasicAuthPassword(username, password, expected, realm):
    """Checks the password for basic authentication.

    As long as they don't start with an opening brace ("E{lb}"), old passwords
    are supported. A new scheme uses H(A1) from RFC2617, where H is MD5 and A1
    consists of the username, the authentication realm and the actual password.

    @type username: string
    @param username: Username from HTTP headers
    @type password: string
    @param password: Password from HTTP headers
    @type expected: string
    @param expected: Expected password with optional scheme prefix (e.g. from
      users file)
    @type realm: string
    @param realm: Authentication realm

    """
    auth_cls = HttpServerRequestAuthentication
    scheme, expected_password = auth_cls.ExtractSchemePassword(expected)
    if scheme is None or password is None:
        return False

    if scheme == auth_cls._CLEARTEXT_SCHEME:
        # Good old plain text password
        return password == expected_password

    if scheme == auth_cls._HA1_SCHEME:
        # H(A1) as described in RFC2617
        if not realm:
            # There can not be a valid password for this case
            raise AssertionError("No authentication realm")
        digest = compat.md5_hash()
        digest.update("%s:%s:%s" % (username, realm, password))
        return expected_password.lower() == digest.hexdigest().lower()

    logging.warning("Unknown scheme '%s' in password for user '%s'",
                    scheme, username)
    return False
| |
"""
Test case for iperf example.
This test case might have problems running on Windows:
1. direct use of `make`
2. use of `sudo killall iperf` to force-kill iperf; no Windows version is implemented
The test env Example_ShieldBox does need the following config::
Example_ShieldBox:
ap_list:
- ssid: "ssid"
password: "password"
outlet: 1
apc_ip: "192.168.1.88"
attenuator_port: "/dev/ttyUSB0"
iperf: "/dev/ttyUSB1"
apc_ip: "192.168.1.88"
pc_nic: "eth0"
"""
from __future__ import division, unicode_literals
import os
import re
import subprocess
import time
from builtins import range, str
import ttfw_idf
from idf_iperf_test_util import Attenuator, IperfUtility, PowerControl, TestReport
from idf_iperf_test_util.IperfUtility import SCAN_RETRY_COUNT, SCAN_TIMEOUT, TEST_TIME
from tiny_test_fw import DUT, TinyFW, Utility
# configurations
RETRY_COUNT_FOR_BEST_PERFORMANCE = 2  # repeat each run this many times; best result wins
ATTEN_VALUE_LIST = range(0, 60, 2)  # attenuator sweep values
CONFIG_NAME_PATTERN = re.compile(r'sdkconfig\.ci\.(.+)')  # matches per-config sdkconfig files
# We need to auto compare the difference between adjacent configs (01 -> 00, 02 -> 01, ...) and put them to reports.
# Using numbers for config will make this easy.
# Use default value `99` for config with best performance.
BEST_PERFORMANCE_CONFIG = '99'
class IperfTestUtilitySoftap(IperfUtility.IperfTestUtility):
    """ iperf test implementation for DUT-to-DUT (station <-> softap) runs. """

    def __init__(self, dut, softap_dut, config_name, test_result=None):
        # The softap DUT replaces the external router: fixed SSID "softap",
        # password "1234567890", and no PC NIC / PC-side iperf log.
        IperfUtility.IperfTestUtility.__init__(self, dut, config_name, 'softap', '1234567890', None, None, test_result)
        self.softap_dut = softap_dut
        # Default IP of the ESP softap network interface
        self.softap_ip = '192.168.4.1'

    def setup(self):
        """
        setup iperf test:

        1. kill current iperf process
        2. reboot DUT (currently iperf is not very robust, need to reboot DUT)
        3. scan to get AP RSSI
        4. connect to AP
        """
        # Restart both sides to get iperf into a clean state, then bring up
        # the softap before the station scans for it.
        self.softap_dut.write('restart')
        self.softap_dut.expect_any('iperf>', 'esp32>', timeout=30)
        self.softap_dut.write('ap {} {}'.format(self.ap_ssid, self.ap_password))
        self.dut.write('restart')
        self.dut.expect_any('iperf>', 'esp32>', timeout=30)
        self.dut.write('scan {}'.format(self.ap_ssid))
        for _ in range(SCAN_RETRY_COUNT):
            try:
                rssi = int(self.dut.expect(re.compile(r'\[{}]\[rssi=(-\d+)]'.format(self.ap_ssid)),
                                           timeout=SCAN_TIMEOUT)[0])
                break
            except DUT.ExpectTimeout:
                continue
        else:
            # for/else: reached only when every scan retry timed out
            raise AssertionError('Failed to scan AP')
        self.dut.write('sta {} {}'.format(self.ap_ssid, self.ap_password))
        dut_ip = self.dut.expect(re.compile(r'sta ip: ([\d.]+), mask: ([\d.]+), gw: ([\d.]+)'))[0]
        return dut_ip, rssi

    def _test_once(self, proto, direction):
        """ do measure once for one type """
        # connect and scan to get RSSI
        dut_ip, rssi = self.setup()
        assert direction in ['rx', 'tx']
        assert proto in ['tcp', 'udp']
        # run iperf test: one side is started as server (-s), the other as
        # client (-c) pointed at the server's IP
        if direction == 'tx':
            if proto == 'tcp':
                self.softap_dut.write('iperf -s -i 1 -t {}'.format(TEST_TIME))
                # wait until DUT TCP server created
                try:
                    self.softap_dut.expect('iperf tcp server create successfully', timeout=1)
                except DUT.ExpectTimeout:
                    # compatible with old iperf example binary
                    pass
                self.dut.write('iperf -c {} -i 1 -t {}'.format(self.softap_ip, TEST_TIME))
            else:
                self.softap_dut.write('iperf -s -u -i 1 -t {}'.format(TEST_TIME))
                self.dut.write('iperf -c {} -u -i 1 -t {}'.format(self.softap_ip, TEST_TIME))
        else:
            if proto == 'tcp':
                self.dut.write('iperf -s -i 1 -t {}'.format(TEST_TIME))
                # wait until DUT TCP server created
                try:
                    self.dut.expect('iperf tcp server create successfully', timeout=1)
                except DUT.ExpectTimeout:
                    # compatible with old iperf example binary
                    pass
                self.softap_dut.write('iperf -c {} -i 1 -t {}'.format(dut_ip, TEST_TIME))
            else:
                self.dut.write('iperf -s -u -i 1 -t {}'.format(TEST_TIME))
                self.softap_dut.write('iperf -c {} -u -i 1 -t {}'.format(dut_ip, TEST_TIME))
        # let the test run to completion before collecting console output
        time.sleep(60)
        # NOTE(review): for 'tx' the summary is read from self.dut even though
        # the softap side is the iperf server -- confirm which console carries
        # the throughput report.
        if direction == 'tx':
            server_raw_data = self.dut.read()
        else:
            server_raw_data = self.softap_dut.read()
        # stop iperf on both sides and sample the minimum heap size
        self.dut.write('iperf -a')
        self.softap_dut.write('iperf -a')
        self.dut.write('heap')
        heap_size = self.dut.expect(re.compile(r'min heap size: (\d+)\D'))[0]
        # return server raw data (for parsing test results) and RSSI
        return server_raw_data, rssi, heap_size
@ttfw_idf.idf_example_test(env_tag='Example_ShieldBox_Basic', target=['ESP32', 'ESP32S2', 'ESP32C3', 'ESP32S3'], category='stress')
def test_wifi_throughput_with_different_configs(env, extra_data):
    """
    steps: |
      1. build iperf with specified configs
      2. test throughput for all routers
    """
    pc_nic_ip = env.get_pc_nic_info('pc_nic', 'ipv4')['addr']
    pc_iperf_log_file = os.path.join(env.log_path, 'pc_iperf_log.md')
    ap_info = {
        'ssid': env.get_variable('ap_ssid'),
        'password': env.get_variable('ap_password'),
    }

    # Discover the sdkconfig.ci.* configs next to this script.
    # BUG FIX: the previous code shelled out to `ls` via
    # subprocess.check_output, which returns bytes on Python 3, so matching
    # with a str pattern raised TypeError (and `ls` is not portable anyway).
    example_dir = os.path.dirname(os.path.abspath(__file__))
    config_names = []
    for file_name in os.listdir(example_dir):
        match = CONFIG_NAME_PATTERN.search(file_name)
        if match:
            config_names.append(match.group(1))
    if not config_names:
        raise ValueError('no configs found in {}'.format(os.path.dirname(__file__)))

    test_result = dict()
    sdkconfig_files = dict()

    for config_name in config_names:
        # 1. get the config
        sdkconfig_files[config_name] = os.path.join(os.path.dirname(__file__),
                                                    'sdkconfig.ci.{}'.format(config_name))
        # 2. get DUT and download
        dut = env.get_dut('iperf', 'examples/wifi/iperf', app_config_name=config_name)
        dut.start_app()
        dut.expect_any('iperf>', 'esp32>')
        # 3. run test for each required att value
        test_result[config_name] = {
            'tcp_tx': IperfUtility.TestResult('tcp', 'tx', config_name),
            'tcp_rx': IperfUtility.TestResult('tcp', 'rx', config_name),
            'udp_tx': IperfUtility.TestResult('udp', 'tx', config_name),
            'udp_rx': IperfUtility.TestResult('udp', 'rx', config_name),
        }
        test_utility = IperfUtility.IperfTestUtility(dut, config_name, ap_info['ssid'], ap_info['password'], pc_nic_ip,
                                                     pc_iperf_log_file, test_result[config_name])
        for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
            test_utility.run_all_cases(0)
        for result_type in test_result[config_name]:
            summary = str(test_result[config_name][result_type])
            if summary:
                Utility.console_log(summary, color='orange')
        # 4. check test results
        env.close_dut('iperf')

    # 5. generate report
    report = TestReport.ThroughputForConfigsReport(os.path.join(env.log_path, 'Performance',
                                                                'ThroughputForConfigsReport'),
                                                   ap_info['ssid'], test_result, sdkconfig_files)
    report.generate_report()
@ttfw_idf.idf_example_test(env_tag='Example_ShieldBox', target=['ESP32', 'ESP32S2', 'ESP32C3', 'ESP32S3'], category='stress')
def test_wifi_throughput_vs_rssi(env, extra_data):
    """
    steps: |
      1. build with best performance config
      2. switch on one router
      3. set attenuator value from 0-60 for each router
      4. test TCP tx rx and UDP tx rx throughput
    """
    att_port = env.get_variable('attenuator_port')
    ap_list = env.get_variable('ap_list')
    pc_nic_ip = env.get_pc_nic_info('pc_nic', 'ipv4')['addr']
    apc_ip = env.get_variable('apc_ip')
    pc_iperf_log_file = os.path.join(env.log_path, 'pc_iperf_log.md')
    # Shared result collectors; every AP contributes to the same report
    test_result = {
        'tcp_tx': IperfUtility.TestResult('tcp', 'tx', BEST_PERFORMANCE_CONFIG),
        'tcp_rx': IperfUtility.TestResult('tcp', 'rx', BEST_PERFORMANCE_CONFIG),
        'udp_tx': IperfUtility.TestResult('udp', 'tx', BEST_PERFORMANCE_CONFIG),
        'udp_rx': IperfUtility.TestResult('udp', 'rx', BEST_PERFORMANCE_CONFIG),
    }
    # 1. get DUT and download
    dut = env.get_dut('iperf', 'examples/wifi/iperf', app_config_name=BEST_PERFORMANCE_CONFIG)
    dut.start_app()
    dut.expect_any('iperf>', 'esp32>')
    # 2. run test for each required att value
    for ap_info in ap_list:
        test_utility = IperfUtility.IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info['ssid'],
                                                     ap_info['password'], pc_nic_ip, pc_iperf_log_file, test_result)
        # Power-cycle the AP via the APC outlet so only one router is on,
        # and reset the attenuator to zero before waiting for it to boot
        PowerControl.Control.control_rest(apc_ip, ap_info['outlet'], 'OFF')
        PowerControl.Control.control(apc_ip, {ap_info['outlet']: 'ON'})
        Attenuator.set_att(att_port, 0)
        if not test_utility.wait_ap_power_on():
            Utility.console_log('[{}] failed to power on, skip testing this AP'
                                .format(ap_info['ssid']), color='red')
            continue
        # Sweep attenuation; stop this AP once the link fails entirely
        for atten_val in ATTEN_VALUE_LIST:
            assert Attenuator.set_att(att_port, atten_val) is True
            try:
                test_utility.run_all_cases(atten_val)
            except AssertionError:
                break
    # 3. check test results
    env.close_dut('iperf')
    # 4. generate report
    report = TestReport.ThroughputVsRssiReport(os.path.join(env.log_path, 'Performance', 'STAThroughputVsRssiReport'),
                                               test_result)
    report.generate_report()
@ttfw_idf.idf_example_test(env_tag='Example_ShieldBox_Basic',
                           target=['ESP32', 'ESP32S2', 'ESP32C3', 'ESP32S3'], ci_target=['ESP32'])
def test_wifi_throughput_basic(env, extra_data):
    """
    steps: |
      1. test TCP tx rx and UDP tx rx throughput
      2. compare with the pre-defined pass standard
    """
    pc_nic_ip = env.get_pc_nic_info('pc_nic', 'ipv4')['addr']
    pc_iperf_log_file = os.path.join(env.log_path, 'pc_iperf_log.md')
    ap_info = {
        'ssid': env.get_variable('ap_ssid'),
        'password': env.get_variable('ap_password'),
    }

    # 1. get DUT running the best-performance configuration
    dut = env.get_dut('iperf', 'examples/wifi/iperf', app_config_name=BEST_PERFORMANCE_CONFIG)
    dut.start_app()
    dut.expect_any('iperf>', 'esp32>')

    # 2. preparing: one TestResult collector per protocol/direction pair
    test_result = {}
    for proto in ('tcp', 'udp'):
        for direction in ('tx', 'rx'):
            test_result['{}_{}'.format(proto, direction)] = \
                IperfUtility.TestResult(proto, direction, BEST_PERFORMANCE_CONFIG)
    test_utility = IperfUtility.IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info['ssid'], ap_info['password'],
                                                 pc_nic_ip, pc_iperf_log_file, test_result)

    # 3. run test for TCP Tx, Rx and UDP Tx, Rx; keep the best of several runs
    for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
        test_utility.run_all_cases(0)

    # 4. log performance and collect the entries for the junit report
    performance_items = []
    for throughput_type in test_result:
        label = '{}_throughput'.format(throughput_type)
        ttfw_idf.log_performance(label,
                                 '{:.02f} Mbps'.format(test_result[throughput_type].get_best_throughput()))
        performance_items.append([label,
                                  '{:.02f} Mbps'.format(test_result[throughput_type].get_best_throughput())])

    # 5. save to report
    TinyFW.JunitReport.update_performance(performance_items)

    # do check after logging, otherwise test will exit immediately if check fail, some performance can't be logged.
    for throughput_type in test_result:
        ttfw_idf.check_performance('{}_throughput'.format(throughput_type),
                                   test_result[throughput_type].get_best_throughput(), dut.TARGET)

    env.close_dut('iperf')
@ttfw_idf.idf_example_test(env_tag='Example_ShieldBox2', target=['ESP32', 'ESP32S2', 'ESP32C3', 'ESP32S3'], category='stress')
def test_softap_throughput_vs_rssi(env, extra_data):
    """
    steps: |
      1. build with best performance config
      2. switch on one router
      3. set attenuator value from 0-60 for each router
      4. test TCP tx rx and UDP tx rx throughput
    """
    att_port = env.get_variable('attenuator_port')
    test_result = {
        'tcp_tx': IperfUtility.TestResult('tcp', 'tx', BEST_PERFORMANCE_CONFIG),
        'tcp_rx': IperfUtility.TestResult('tcp', 'rx', BEST_PERFORMANCE_CONFIG),
        'udp_tx': IperfUtility.TestResult('udp', 'tx', BEST_PERFORMANCE_CONFIG),
        'udp_rx': IperfUtility.TestResult('udp', 'rx', BEST_PERFORMANCE_CONFIG),
    }
    # 1. get DUT and download: one DUT acts as softap, the other as station
    softap_dut = env.get_dut('softap_iperf', 'examples/wifi/iperf')
    softap_dut.start_app()
    softap_dut.expect_any('iperf>', 'esp32>')
    sta_dut = env.get_dut('sta_iperf', 'examples/wifi/iperf', app_config_name=BEST_PERFORMANCE_CONFIG)
    sta_dut.start_app()
    sta_dut.expect_any('iperf>', 'esp32>')
    # 2. run test for each required att value; abort the sweep once the
    # link fails entirely
    test_utility = IperfTestUtilitySoftap(sta_dut, softap_dut, BEST_PERFORMANCE_CONFIG, test_result)

    Attenuator.set_att(att_port, 0)
    for atten_val in ATTEN_VALUE_LIST:
        assert Attenuator.set_att(att_port, atten_val) is True
        try:
            test_utility.run_all_cases(atten_val)
        except AssertionError:
            break
    env.close_dut('softap_iperf')
    env.close_dut('sta_iperf')
    # 3. generate report
    report = TestReport.ThroughputVsRssiReport(os.path.join(env.log_path, 'Performance',
                                                            'SoftAPThroughputVsRssiReport'), test_result)
    report.generate_report()
if __name__ == '__main__':
    # Manual entry point for running selected cases locally; uncomment the
    # ones you need. Normally these are launched by the CI runner.
    # test_wifi_throughput_basic(env_config_file='EnvConfig.yml')
    # test_wifi_throughput_with_different_configs(env_config_file='EnvConfig.yml')
    test_wifi_throughput_vs_rssi(env_config_file='EnvConfig.yml', target='ESP32C3')
    test_softap_throughput_vs_rssi(env_config_file='EnvConfig.yml')
| |
# flake8: noqa pylint: skip-file
"""Tests for the TelldusLive config flow."""
import asyncio
from unittest.mock import Mock, patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tellduslive import (
APPLICATION_NAME, DOMAIN, KEY_HOST, KEY_SCAN_INTERVAL, SCAN_INTERVAL,
config_flow)
from tests.common import MockConfigEntry, MockDependency, mock_coro
def init_config_flow(hass, side_effect=None):
    """Return a TelldusLive FlowHandler wired to *hass*.

    When *side_effect* is given, the flow's auth-URL generator is replaced
    with a mock producing that side effect.
    """
    handler = config_flow.FlowHandler()
    handler.hass = hass
    if side_effect:
        handler._get_auth_url = Mock(side_effect=side_effect)
    return handler
@pytest.fixture
def supports_local_api():
    """Set TelldusLive supports_local_api."""
    # Overridden per-test via @pytest.mark.parametrize('supports_local_api', ...)
    return True
@pytest.fixture
def authorize():
    """Set TelldusLive authorize."""
    # Overridden per-test via @pytest.mark.parametrize('authorize', ...)
    return True
@pytest.fixture
def mock_tellduslive(supports_local_api, authorize):
    """Mock tellduslive."""
    with MockDependency('tellduslive') as mock_tellduslive_:
        # Session() is a Mock, so each call returns the same child mock;
        # configuring it repeatedly configures one shared session object.
        mock_tellduslive_.supports_local_api.return_value = supports_local_api
        mock_tellduslive_.Session().authorize.return_value = authorize
        mock_tellduslive_.Session().access_token = 'token'
        mock_tellduslive_.Session().access_token_secret = 'token_secret'
        mock_tellduslive_.Session().authorize_url = 'https://example.com'
        yield mock_tellduslive_
async def test_abort_if_already_setup(hass):
    """Test we abort if TelldusLive is already setup."""
    flow = init_config_flow(hass)

    # An existing config entry makes the user step abort...
    with patch.object(hass.config_entries, 'async_entries', return_value=[{}]):
        result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'already_setup'

    # ...and the import step as well.
    with patch.object(hass.config_entries, 'async_entries', return_value=[{}]):
        result = await flow.async_step_import(None)
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'already_setup'
async def test_full_flow_implementation(hass, mock_tellduslive):
    """Test registering an implementation and finishing flow works."""
    flow = init_config_flow(hass)

    # Discovery reports two hosts, so the user step must offer a choice.
    result = await flow.async_step_discovery(['localhost', 'tellstick'])
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'
    assert len(flow._hosts) == 2

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'

    # Picking a host moves the flow on to the auth step.
    result = await flow.async_step_user({'host': 'localhost'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
    assert result['description_placeholders'] == {
        'auth_url': 'https://example.com',
        'app_name': APPLICATION_NAME,
    }

    # Completing auth creates the entry for the selected local host.
    result = await flow.async_step_auth('')
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'localhost'
    assert result['data']['host'] == 'localhost'
    assert result['data']['scan_interval'] == 60
    assert result['data']['session'] == {'token': 'token', 'host': 'localhost'}
async def test_step_import(hass, mock_tellduslive):
    """Test that we trigger auth when configuring from import."""
    flow = init_config_flow(hass)

    # Importing the cloud host (DOMAIN) should go straight to auth.
    conf = {KEY_HOST: DOMAIN, KEY_SCAN_INTERVAL: 0}
    result = await flow.async_step_import(conf)

    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
async def test_step_import_add_host(hass, mock_tellduslive):
    """Test that we add host and trigger user when configuring from import."""
    flow = init_config_flow(hass)

    # A concrete local host with no saved session sends the user back to
    # the host-selection (user) step.
    result = await flow.async_step_import({
        KEY_HOST: 'localhost',
        KEY_SCAN_INTERVAL: 0,
    })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'
async def test_step_import_no_config_file(hass, mock_tellduslive):
    """Test that we trigger user with no config_file configuring from import."""
    flow = init_config_flow(hass)

    # Without a stored config file there is no session to reuse.
    result = await flow.async_step_import({ KEY_HOST: 'localhost', KEY_SCAN_INTERVAL: 0, })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'
async def test_step_import_load_json_matching_host(hass, mock_tellduslive):
    """Test that we add host and trigger user when configuring from import."""
    flow = init_config_flow(hass)

    # The stored session keys don't match the imported host, so the flow
    # falls back to the user step.
    with patch('homeassistant.components.tellduslive.config_flow.load_json',
               return_value={'tellduslive': {}}), \
            patch('os.path.isfile'):
        result = await flow.async_step_import({ KEY_HOST: 'Cloud API', KEY_SCAN_INTERVAL: 0, })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'
async def test_step_import_load_json(hass, mock_tellduslive):
    """Test that we create entry when configuring from import."""
    flow = init_config_flow(hass)

    # A stored session matching the imported host creates the entry directly.
    with patch('homeassistant.components.tellduslive.config_flow.load_json',
               return_value={'localhost': {}}), \
            patch('os.path.isfile'):
        result = await flow.async_step_import({ KEY_HOST: 'localhost', KEY_SCAN_INTERVAL: SCAN_INTERVAL, })
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'localhost'
    assert result['data']['host'] == 'localhost'
    assert result['data']['scan_interval'] == 60
    assert result['data']['session'] == {}
@pytest.mark.parametrize('supports_local_api', [False])
async def test_step_disco_no_local_api(hass, mock_tellduslive):
    """Test that we trigger when configuring from discovery, not supporting local api."""
    flow = init_config_flow(hass)

    # Without local API support only the cloud host remains, so discovery
    # skips host selection and goes straight to auth.
    result = await flow.async_step_discovery(['localhost', 'tellstick'])
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
    assert len(flow._hosts) == 1
async def test_step_auth(hass, mock_tellduslive):
    """Test that create cloud entity from auth."""
    flow = init_config_flow(hass)

    # First call shows the auth form; second call (with any truthy input)
    # completes authorization against the mocked session.
    await flow.async_step_auth()
    result = await flow.async_step_auth(['localhost', 'tellstick'])
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'Cloud API'
    assert result['data']['host'] == 'Cloud API'
    assert result['data']['scan_interval'] == 60
    assert result['data']['session'] == {
        'token': 'token',
        'token_secret': 'token_secret',
    }
@pytest.mark.parametrize('authorize', [False])
async def test_wrong_auth_flow_implementation(hass, mock_tellduslive):
    """Test wrong auth."""
    flow = init_config_flow(hass)

    # With the mocked session refusing authorization, the auth form is
    # redisplayed with an error.
    await flow.async_step_auth()
    result = await flow.async_step_auth('')
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
    assert result['errors']['base'] == 'auth_error'
async def test_not_pick_host_if_only_one(hass, mock_tellduslive):
    """Test not picking host if we have just one."""
    flow = init_config_flow(hass)

    # With a single known host there is nothing to choose; skip to auth.
    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
async def test_abort_if_timeout_generating_auth_url(hass, mock_tellduslive):
    """Test abort if generating authorize url timeout."""
    # The auth-URL mock raises asyncio.TimeoutError.
    flow = init_config_flow(hass, side_effect=asyncio.TimeoutError)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'authorize_url_timeout'
async def test_abort_no_auth_url(hass, mock_tellduslive):
    """Test abort if generating authorize url returns none."""
    flow = init_config_flow(hass)
    # A falsy auth URL means the flow cannot continue.
    flow._get_auth_url = Mock(return_value=False)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'authorize_url_fail'
async def test_abort_if_exception_generating_auth_url(hass, mock_tellduslive):
    """Test we abort if generating authorize url blows up."""
    # Any unexpected exception from the auth-URL mock aborts the flow.
    flow = init_config_flow(hass, side_effect=ValueError)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'authorize_url_fail'
async def test_discovery_already_configured(hass, mock_tellduslive):
    """Test abort if already configured fires from discovery."""
    # Seed an existing entry for the same host the discovery will report.
    MockConfigEntry(
        domain='tellduslive',
        data={'host': 'some-host'}
    ).add_to_hass(hass)
    flow = init_config_flow(hass)

    result = await flow.async_step_discovery(['some-host', ''])
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'already_setup'
| |
import simplejson
import types
class NoneSupportedTypeError(Exception):
    """Raised when a value's type cannot be parsed from or dumped to JSON."""
    pass
class JsonObject(object):
    """A dynamic attribute bag mirroring one JSON object.

    Fields are plain instance attributes. Accessing ``obj.name_`` (with a
    trailing underscore) is a safe getter that yields None instead of
    raising when the field is missing.
    """

    def __init__(self):
        pass

    def put(self, name, val):
        # Dynamically attach a field to this object
        setattr(self, name, val)

    def dump(self):
        # Serialize every instance attribute as a JSON object
        return simplejson.dumps(self.__dict__, ensure_ascii=True)

    def hasattr(self, name):
        # BUG FIX: the previous implementation returned the attribute's
        # truthiness, so fields holding 0, '' or False were reported as
        # absent. __getattr__ below returns None for missing names, so
        # "is not None" is the correct existence test here.
        return getattr(self, name) is not None

    def __getitem__(self, name):
        # Allow dict-style access: obj['field']
        return getattr(self, name)

    def __getattr__(self, name):
        # Only called for names not found normally. Trailing-underscore
        # names ("field_") are safe getters for "field"; anything else
        # resolves to None rather than raising AttributeError.
        if name.endswith('_'):
            n = name[:-1]
            if hasattr(self, n):
                return getattr(self, n)
            return None
        return None
# covers long as well
def _is_int(val):
    """Return True if *val* converts cleanly with int()."""
    try:
        int(val)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string, non-numeric inputs (e.g. None),
        # which the original ValueError-only handler let propagate.
        return False
def _is_float(val):
    """Return True if *val* converts cleanly with float()."""
    try:
        float(val)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string, non-numeric inputs (e.g. None),
        # which the original ValueError-only handler let propagate.
        return False
def _is_bool(val):
    """Return True if *val* is one of the four textual boolean literals."""
    return val in ('True', 'true', 'False', 'false')
def _to_proper_type(val):
    """Coerce a textual value to bool/float/int, falling back to str."""
    if _is_bool(val):
        # BUG FIX: the original returned bool(val), which is True for ANY
        # non-empty string -- including 'false' and 'False'. Map the
        # literal text instead.
        return val in ('True', 'true')
    elif _is_float(val):
        # NOTE(review): every integer literal also parses as float, so the
        # int branch below is effectively unreachable for strings; order is
        # kept as-is because callers of this helper are not visible here.
        return float(val)
    elif _is_int(val):
        return int(val)
    else:
        return str(val)
def _parse_list(lst):
    """Recursively convert a decoded JSON list into primitives/JsonObjects."""
    out = []
    for item in lst:
        if _is_unsupported_type(item):
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, list dump: %s" % (item, type(item), lst))
        if _is_primitive_types(item):
            out.append(item)
        elif isinstance(item, types.DictType):
            # Nested object
            out.append(_parse_dict(item))
        elif isinstance(item, types.ListType):
            # Nested list
            out.append(_parse_list(item))
        else:
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, list dump: %s" % (item, type(item), lst))
    return out
def _parse_dict(d):
    """Recursively convert a decoded JSON dict into a JsonObject."""
    obj = JsonObject()
    for key, val in d.items():
        if _is_unsupported_type(val):
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, dict dump: %s" % (val, type(val), d))
        if _is_primitive_types(val):
            setattr(obj, key, val)
        elif isinstance(val, types.ListType):
            # Nested list
            setattr(obj, key, _parse_list(val))
        elif isinstance(val, types.DictType):
            # Nested object
            setattr(obj, key, _parse_dict(val))
        else:
            raise NoneSupportedTypeError("Cannot parse object: %s, type: %s, dict dump: %s" % (val, type(val), d))
    return obj
def loads(jstr):
    """Parse a JSON string into a JsonObject, list, or primitive."""
    try:
        parsed = simplejson.loads(jstr)
    except Exception:
        raise NoneSupportedTypeError("Cannot compile string: %s to a jsonobject" % jstr)

    if isinstance(parsed, types.DictType):
        return _parse_dict(parsed)
    elif isinstance(parsed, types.ListType):
        return _parse_list(parsed)
    # Top-level primitive: return it unchanged
    return parsed
def _new_json_object():
    """Factory for an empty JsonObject."""
    return JsonObject()
def nj():
    """Short public alias for creating a new, empty JsonObject."""
    return _new_json_object()
def _is_unsupported_type(obj):
    """Return True for values that cannot be represented in JSON.

    NOTE: several of these aliases (ComplexType, UnboundMethodType,
    FileType, XRangeType, DictProxyType, ...) exist only in the Python 2
    ``types`` module; this check is Python-2-specific.
    """
    return isinstance(obj, (types.ComplexType, types.TupleType, types.FunctionType, types.LambdaType,
                            types.GeneratorType, types.MethodType, types.UnboundMethodType, types.BuiltinFunctionType,
                            types.BuiltinMethodType, types.FileType,
                            types.XRangeType, types.TracebackType, types.FrameType, types.DictProxyType,
                            types.NotImplementedType, types.GetSetDescriptorType,
                            types.MemberDescriptorType))
def _is_primitive_types(obj):
    """Return True for values JSON can encode directly (Python 2 aliases)."""
    return isinstance(obj, (types.BooleanType, types.LongType, types.IntType, types.FloatType, types.StringType, types.UnicodeType))
def _dump_list(lst):
    """Convert a list into a JSON-serializable list.

    Primitives are kept, nested lists are dumped recursively, None entries
    are dropped, and objects are dumped via _dump().
    """
    nlst = []
    for val in lst:
        if _is_unsupported_type(val):
            raise NoneSupportedTypeError('Cannot dump val: %s, type: %s, list dump: %s' % (val, type(val), lst))
        if _is_primitive_types(val):
            nlst.append(val)
        elif isinstance(val, types.DictType):
            # NOTE(review): dicts are appended as-is here, while _dump()
            # recurses into non-empty dicts -- confirm dicts inside lists
            # never hold non-serializable objects.
            nlst.append(val)
        elif isinstance(val, types.ListType):
            tlst = _dump_list(val)
            nlst.append(tlst)
        elif isinstance(val, types.NoneType):
            # None entries are silently dropped
            pass
        else:
            nmap = _dump(val)
            nlst.append(nmap)
    return nlst
def _dump(obj):
    """Recursively convert *obj* (dict or object) into a plain dict.

    NOTE(review): for a primitive input this returns a JSON *string*
    (simplejson.dumps result) rather than the primitive itself -- confirm
    callers expect that asymmetry.
    """
    if _is_primitive_types(obj): return simplejson.dumps(obj, ensure_ascii=True)

    ret = {}
    items = obj.iteritems() if isinstance(obj, types.DictionaryType) else obj.__dict__.iteritems()
    for key, val in items:
        # Private attributes are not serialized
        if key.startswith('_'): continue
        # BUG FIX: the original tested _is_unsupported_type(obj) -- the
        # container itself, repeatedly -- instead of the value being
        # dumped (the error message even formats `val`), so unsupported
        # values were never rejected here.
        if _is_unsupported_type(val):
            raise NoneSupportedTypeError('cannot dump %s, type:%s, object dict: %s' % (val, type(val), obj.__dict__))
        if _is_primitive_types(val):
            ret[key] = val
        elif isinstance(val, types.DictType):
            if len(val) == 0:
                # Keep empty dicts as-is
                ret[key] = val
                continue
            nmap = _dump(val)
            ret[key] = nmap
        elif isinstance(val, types.ListType):
            nlst = _dump_list(val)
            ret[key] = nlst
        elif isinstance(val, types.NoneType):
            # None values are dropped from the output
            pass
        else:
            # Nested object: recurse through its __dict__
            nmap = _dump(val)
            ret[key] = nmap
    return ret
def dumps(obj, pretty=False):
    """Serialize *obj* to a JSON string, optionally pretty-printed."""
    jsonmap = _dump(obj)
    kwargs = {'ensure_ascii': True}
    if pretty:
        # Human-readable output: stable key order, 4-space indent
        kwargs.update(sort_keys=True, indent=4)
    return simplejson.dumps(jsonmap, **kwargs)
| |
import datetime
import os
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
import basket
import happyforms
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
import mkt
from mkt.comm.utils import create_comm_note
from mkt.constants import APP_FEATURES, comm, FREE_PLATFORMS, PAID_PLATFORMS
from mkt.developers.forms import AppSupportFormMixin, verify_app_domain
from mkt.files.models import FileUpload
from mkt.files.utils import parse_addon
from mkt.reviewers.models import RereviewQueue
from mkt.site.utils import slug_validator
from mkt.tags.models import Tag
from mkt.tags.utils import clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.widgets import TransInput, TransTextarea
from mkt.users.models import UserNotification
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AppFeatures, BlockedSlug, Webapp
def mark_for_rereview(addon, added_devices, removed_devices):
    """Flag *addon* for re-review after its device types changed."""
    changes = [_(u'Added {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
               for d in added_devices]
    changes += [_(u'Removed {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
                for d in removed_devices]
    msg = _(u'Device(s) changed: {0}').format(', '.join(changes))
    RereviewQueue.flag(addon, mkt.LOG.REREVIEW_DEVICES_ADDED, msg)
def mark_for_rereview_features_change(addon, added_features, removed_features):
    """Flag *addon* for re-review after its required features changed."""
    changes = [_(u'Added {0}').format(f) for f in added_features]
    changes += [_(u'Removed {0}').format(f) for f in removed_features]
    # L10n: {0} is the list of requirements changes.
    msg = _(u'Requirements changed: {0}').format(', '.join(changes))
    RereviewQueue.flag(addon, mkt.LOG.REREVIEW_FEATURES_CHANGED, msg)
class DeviceTypeForm(happyforms.Form):
    """Form selecting the device platforms an app targets.

    Exactly one of the two platform groups (free or paid) must be chosen.
    """

    ERRORS = {
        'both': _lazy(u'Cannot be free and paid.'),
        'none': _lazy(u'Please select a device.'),
    }

    free_platforms = forms.MultipleChoiceField(
        choices=FREE_PLATFORMS(), required=False)
    paid_platforms = forms.MultipleChoiceField(
        choices=PAID_PLATFORMS(), required=False)

    def save(self, addon, is_paid):
        """Sync the addon's device types with the submitted platforms."""
        data = self.cleaned_data[
            'paid_platforms' if is_paid else 'free_platforms']
        # Platform values look like "<kind>-<device>"; keep the device part.
        submitted_data = self.get_devices(t.split('-', 1)[1] for t in data)

        new_types = set(dev.id for dev in submitted_data)
        old_types = set(mkt.DEVICE_TYPES[x.id].id for x in addon.device_types)

        added_devices = new_types - old_types
        removed_devices = old_types - new_types

        for d in added_devices:
            addon.addondevicetype_set.create(device_type=d)
        for d in removed_devices:
            addon.addondevicetype_set.filter(device_type=d).delete()

        # Send app to re-review queue if public and new devices are added.
        if added_devices and addon.status in mkt.WEBAPPS_APPROVED_STATUSES:
            mark_for_rereview(addon, added_devices, removed_devices)

    def _add_error(self, msg):
        # Attach the same error to both platform fields.
        self._errors['free_platforms'] = self._errors['paid_platforms'] = (
            self.ERRORS[msg])

    def _get_combined(self):
        # All selected platforms with the "<kind>-" prefix stripped.
        devices = (self.cleaned_data.get('free_platforms', []) +
                   self.cleaned_data.get('paid_platforms', []))
        return set(d.split('-', 1)[1] for d in devices)

    def clean(self):
        data = self.cleaned_data
        paid = data.get('paid_platforms', [])
        free = data.get('free_platforms', [])
        # Check that they didn't select both.
        if free and paid:
            self._add_error('both')
            return data
        # Check that they selected one.
        if not free and not paid:
            self._add_error('none')
            return data
        return super(DeviceTypeForm, self).clean()

    def get_devices(self, source=None):
        """Returns a device based on the requested free or paid."""
        if source is None:
            source = self._get_combined()
        return map(mkt.DEVICE_LOOKUP.get, source)

    def is_paid(self):
        # True when any paid platform was selected.
        return bool(self.cleaned_data.get('paid_platforms', False))

    def get_paid(self):
        """Returns the premium type. Should not be used if the form is used to
        modify an existing app.
        """
        return mkt.ADDON_PREMIUM if self.is_paid() else mkt.ADDON_FREE
class DevAgreementForm(happyforms.Form):
    """Developer agreement acceptance form with optional newsletter opt-in."""

    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree and Continue'),
                                            widget=forms.HiddenInput)
    newsletter = forms.BooleanField(required=False, label=app_surveys.label,
                                    widget=forms.CheckboxInput)

    def __init__(self, *args, **kw):
        # instance: the user accepting the agreement; request: supplies the
        # region/language used when subscribing to the newsletter.
        self.instance = kw.pop('instance')
        self.request = kw.pop('request')
        super(DevAgreementForm, self).__init__(*args, **kw)

    def save(self):
        """Record acceptance time and optionally subscribe the user."""
        self.instance.read_dev_agreement = datetime.datetime.now()
        self.instance.save()
        if self.cleaned_data.get('newsletter'):
            # Enable the notification locally and subscribe via basket.
            UserNotification.update_or_create(
                user=self.instance,
                notification_id=app_surveys.id, update={'enabled': True})
            basket.subscribe(self.instance.email,
                             'app-dev',
                             format='H',
                             country=self.request.REGION.slug,
                             lang=self.request.LANG,
                             source_url=os.path.join(settings.SITE_URL,
                                                     'developers/submit'))
class NewWebappVersionForm(happyforms.Form):
    """Validates an uploaded app version (hosted manifest or packaged app)."""

    upload_error = _lazy(u'There was an error with your upload. '
                         u'Please try again.')
    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects.filter(valid=True),
        error_messages={'invalid_choice': upload_error})

    def __init__(self, *args, **kw):
        kw.pop('request', None)
        # addon: existing app when adding a version; None for new apps.
        self.addon = kw.pop('addon', None)
        self._is_packaged = kw.pop('is_packaged', False)
        self.is_homescreen = False
        super(NewWebappVersionForm, self).__init__(*args, **kw)

    def clean(self):
        data = self.cleaned_data
        if 'upload' not in self.cleaned_data:
            self._errors['upload'] = self.upload_error
            return

        if self.is_packaged():
            # Now run the packaged app check, done in clean, because
            # clean_packaged needs to be processed first.
            try:
                pkg = parse_addon(data['upload'], self.addon)
            except forms.ValidationError, e:
                self._errors['upload'] = self.error_class(e.messages)
                return

            # Collect validation errors so we can display them at once.
            errors = []

            # The version must not collide with an existing one.
            ver = pkg.get('version')
            if (ver and self.addon and
                    self.addon.versions.filter(version=ver).exists()):
                errors.append(_(u'Version %s already exists.') % ver)

            # The origin must validate and must not change between versions.
            origin = pkg.get('origin')
            if origin:
                try:
                    verify_app_domain(origin, packaged=True,
                                      exclude=self.addon)
                except forms.ValidationError, e:
                    errors.append(e.message)
                if self.addon and origin != self.addon.app_domain:
                    errors.append(_('Changes to "origin" are not allowed.'))

            self.is_homescreen = pkg.get('role') == 'homescreen'

            if errors:
                self._errors['upload'] = self.error_class(errors)
                return
        else:
            # Throw an error if this is a dupe.
            # (JS sets manifest as `upload.name`.)
            try:
                verify_app_domain(data['upload'].name)
            except forms.ValidationError, e:
                self._errors['upload'] = self.error_class(e.messages)
                return

        return data

    def is_packaged(self):
        return self._is_packaged
class NewWebappForm(DeviceTypeForm, NewWebappVersionForm):
    """Combined submission form for a brand-new webapp: device/platform
    selection plus the initial version upload.
    """
    ERRORS = DeviceTypeForm.ERRORS.copy()
    ERRORS['user'] = _lazy('User submitting validation does not match.')
    ERRORS['homescreen'] = _lazy('Homescreens can only be submitted for '
                                 'Firefox OS.')
    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects.filter(valid=True),
        error_messages={'invalid_choice': _lazy(
            u'There was an error with your upload. Please try again.')})
    packaged = forms.BooleanField(required=False)
    def __init__(self, *args, **kwargs):
        # `request` is used to validate the uploading user and to build the
        # region-aware list of paid platform choices.
        self.request = kwargs.pop('request', None)
        super(NewWebappForm, self).__init__(*args, **kwargs)
        if 'paid_platforms' in self.fields:
            self.fields['paid_platforms'].choices = PAID_PLATFORMS(
                self.request)
    def _add_error(self, msg):
        # Mirror the error onto both platform fields so it is shown whichever
        # group the user interacted with.
        self._errors['free_platforms'] = self._errors['paid_platforms'] = (
            self.ERRORS[msg])
    def clean(self):
        """Run parent validation, then verify upload ownership and that a
        homescreen targets only the Firefox OS (Gaia) device.
        """
        data = super(NewWebappForm, self).clean()
        if not data:
            return
        upload = data.get('upload')
        if self.request and upload:
            if not (upload.user and upload.user.pk == self.request.user.pk):
                self._add_error('user')
        # NOTE(review): this comparison relies on get_devices() returning a
        # list (true for Python 2's map()); a lazy iterator would break it.
        if self.is_homescreen and self.get_devices() != [mkt.DEVICE_GAIA]:
            self._add_error('homescreen')
        return data
    def is_packaged(self):
        # Packaged either via the constructor flag or the submitted checkbox.
        return self._is_packaged or self.cleaned_data.get('packaged', False)
class AppDetailsBasicForm(AppSupportFormMixin, TranslationFormMixin,
                          happyforms.ModelForm):
    """Form for "Details" submission step."""
    # MDN article interpolated into the privacy-policy help text in __init__.
    PRIVACY_MDN_URL = (
        'https://developer.mozilla.org/Marketplace/'
        'Publishing/Policies_and_Guidelines/Privacy_policies')
    # Radio choices controlling visibility once the app is approved.
    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         _lazy(u'Publish my app and make it visible to everyone in the '
               u'Marketplace and include it in search results.')),
        (mkt.PUBLISH_PRIVATE,
         _lazy(u'Do not publish my app. Notify me and I will adjust app '
               u'visibility after it is approved.')),
    )
    app_slug = forms.CharField(max_length=30,
                               widget=forms.TextInput(attrs={'class': 'm'}))
    description = TransField(
        label=_lazy(u'Description:'),
        help_text=_lazy(u'The app description is one of the fields used to '
                        u'return search results in the Firefox Marketplace. '
                        u'The app description also appears on the app\'s '
                        u'detail page. Be sure to include a description that '
                        u'accurately represents your app.'),
        widget=TransTextarea(attrs={'rows': 4}))
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))
    privacy_policy = TransField(
        label=_lazy(u'Privacy Policy:'),
        widget=TransTextarea(attrs={'rows': 6}),
        help_text=_lazy(
            u'A privacy policy explains how you handle data received '
            u'through your app. For example: what data do you receive? '
            u'How do you use it? Who do you share it with? Do you '
            u'receive personal information? Do you take steps to make '
            u'it anonymous? What choices do users have to control what '
            u'data you and others receive? Enter your privacy policy '
            u'link or text above. If you don\'t have a privacy '
            u'policy, <a href="{url}" target="_blank">learn more on how to '
            u'write one.</a>'))
    homepage = TransField.adapt(forms.URLField)(
        label=_lazy(u'Homepage:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'If your app has another homepage, enter its address here.'))
    support_url = TransField.adapt(forms.URLField)(
        label=_lazy(u'Website:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'If your app has a support website or forum, enter its address '
            u'here.'))
    support_email = TransField.adapt(forms.EmailField)(
        label=_lazy(u'Email:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'This email address will be listed publicly on the Marketplace '
            u'and used by end users to contact you with support issues. This '
            u'email address will be listed publicly on your app details page.'
        ))
    notes = forms.CharField(
        label=_lazy(u'Your comments for reviewers:'), required=False,
        widget=forms.Textarea(attrs={'rows': 2}),
        help_text=_lazy(
            u'Your app will be reviewed by Mozilla before it becomes publicly '
            u'listed on the Marketplace. Enter any special instructions for '
            u'the app reviewers here.'))
    publish_type = forms.TypedChoiceField(
        label=_lazy(u'Once your app is approved, choose a publishing option:'),
        choices=PUBLISH_CHOICES, initial=mkt.PUBLISH_IMMEDIATE,
        widget=forms.RadioSelect())
    is_offline = forms.BooleanField(
        label=_lazy(u'My app works without an Internet connection.'),
        required=False)
    class Meta:
        model = Webapp
        fields = ('app_slug', 'description', 'privacy_policy', 'homepage',
                  'support_url', 'support_email', 'publish_type', 'is_offline')
    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request')
        # TODO: remove this and put it in the field definition above.
        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1072513
        privacy_field = self.base_fields['privacy_policy']
        privacy_field.help_text = mark_safe(privacy_field.help_text.format(
            url=self.PRIVACY_MDN_URL))
        if 'instance' in kwargs:
            # Pre-populate the offline checkbox from the app's manifest.
            instance = kwargs['instance']
            instance.is_offline = instance.guess_is_offline()
        super(AppDetailsBasicForm, self).__init__(*args, **kwargs)
    def clean_app_slug(self):
        """Validate uniqueness / blocklist and normalize the slug to
        lowercase.
        """
        slug = self.cleaned_data['app_slug']
        slug_validator(slug, lower=False)
        if slug != self.instance.app_slug:
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))
            # NOTE(review): the slug is interpolated before the gettext
            # lookup here, so the translated string will never match.
            if BlockedSlug.blocked(slug):
                raise forms.ValidationError(
                    _('The slug cannot be "%s". Please choose another.'
                      % slug))
        return slug.lower()
    def clean_tags(self):
        return clean_tags(self.request, self.cleaned_data['tags'])
    def save(self, *args, **kw):
        """Save the app, record reviewer notes as a communication note and
        attach the cleaned tags.
        """
        # NOTE(review): reads raw self.data['notes'] rather than cleaned_data
        # — confirm this is intentional.
        if self.data['notes']:
            create_comm_note(self.instance, self.instance.versions.latest(),
                             self.request.user, self.data['notes'],
                             note_type=comm.SUBMISSION)
        self.instance = super(AppDetailsBasicForm, self).save(commit=True)
        for tag_text in self.cleaned_data['tags']:
            Tag(tag_text=tag_text).save_tag(self.instance)
        return self.instance
class AppFeaturesForm(happyforms.ModelForm):
    """Form for editing the device/API features required by an app version,
    triggering a re-review when features change on an approved app.
    """
    class Meta:
        exclude = ['version']
        model = AppFeatures
    def __init__(self, *args, **kwargs):
        super(AppFeaturesForm, self).__init__(*args, **kwargs)
        # Snapshot the initial feature keys so get_changed_features() can
        # diff them after save.
        if self.instance:
            self.initial_feature_keys = sorted(self.instance.to_keys())
        else:
            self.initial_feature_keys = None
    def all_fields(self):
        """
        Degeneratorizes self.__iter__(), the list of fields on the form. This
        allows further manipulation of fields: to display a subset of fields or
        order them in a specific way.
        """
        return [f for f in self.__iter__()]
    def required_api_fields(self):
        """
        All fields on the form, alphabetically sorted by help text.
        """
        return sorted(self.all_fields(), key=lambda x: x.help_text)
    def get_tooltip(self, field):
        """Return the description for a feature field, or None if the field
        has no matching APP_FEATURES entry.
        """
        # Field names look like "has_<feature>"; strip the prefix to get the
        # APP_FEATURES key.
        field_id = field.name.split('_', 1)[1].upper()
        return (unicode(APP_FEATURES[field_id].get('description') or '') if
                field_id in APP_FEATURES else None)
    def get_changed_features(self):
        """Return (added, removed) feature-name sets versus the snapshot
        taken in __init__.
        """
        # NOTE(review): assumes initial_feature_keys is not None (i.e. the
        # form was built with an instance) — confirm for unbound usage.
        old_features = dict.fromkeys(self.initial_feature_keys, True)
        old_features = set(AppFeatures(**old_features).to_names())
        new_features = set(self.instance.to_names())
        added_features = new_features - old_features
        removed_features = old_features - new_features
        return added_features, removed_features
    def save(self, *args, **kwargs):
        """Save the features; bump the addon's modified date and, for
        approved apps with changed data, flag a re-review.
        """
        mark_for_rereview = kwargs.pop('mark_for_rereview', True)
        addon = self.instance.version.addon
        rval = super(AppFeaturesForm, self).save(*args, **kwargs)
        # Also save the addon to update modified date and trigger a reindex.
        addon.save(update_fields=['modified'])
        # Trigger a re-review if necessary.
        if (self.instance and mark_for_rereview and
                addon.status in mkt.WEBAPPS_APPROVED_STATUSES and
                self.changed_data):
            added_features, removed_features = self.get_changed_features()
            mark_for_rereview_features_change(addon,
                                              added_features,
                                              removed_features)
        return rval
| |
"""
This module implements a particle system for complex animation effects. For more details, see
http://asciimatics.readthedocs.io/en/latest/animation.html
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from builtins import object
from builtins import range
from copy import copy
from math import pi, sin, cos, sqrt
from random import uniform, randint
from future.utils import with_metaclass
from asciimatics.effects import Effect
from asciimatics.screen import Screen
class Particle(object):
    """
    A single particle in a Particle Effect.
    """
    def __init__(self, chars, x, y, dx, dy, colours, life_time, move,
                 next_colour=None, next_char=None, parm=None,
                 on_create=None, on_each=None, on_destroy=None):
        """
        :param chars: String of characters to use for the particle.
        :param x: The initial horizontal position of the particle.
        :param y: The initial vertical position of the particle.
        :param dx: The initial horizontal velocity of the particle.
        :param dy: The initial vertical velocity of the particle.
        :param colours: A list of colour tuples to use for the particle.
        :param life_time: The life time of the particle.
        :param move: A function which returns the next location of the particle.
        :param next_colour: An optional function to return the next colour for
            the particle. Defaults to a linear progression of `colours`.
        :param next_char: An optional function to return the next character for
            the particle. Defaults to a linear progression of `chars`.
        :param parm: An optional parameter for use within any of the
            callback functions (e.g. extra state for `move` or `on_each`).
        :param on_create: An optional function to spawn new particles when this
            particle first is created.
        :param on_each: An optional function to spawn new particles for every
            frame of this particle (other than creation/destruction).
        :param on_destroy: An optional function to spawn new particles when this
            particle is destroyed.
        """
        self.chars = chars
        self.x = x
        self.y = y
        self.dx = dx
        self.dy = dy
        self.colours = colours
        # Frames elapsed since creation; drives char/colour progression.
        self.time = 0
        self.life_time = life_time
        self._move = move
        self._next_colour = (
            self._default_next_colour if next_colour is None else next_colour)
        self._next_char = (
            self._default_next_char if next_char is None else next_char)
        # Last rendered attributes - see last()/next().
        self._last = None
        self.parm = parm
        self._on_create = on_create
        self._on_each = on_each
        self._on_destroy = on_destroy
    @staticmethod
    def _default_next_char(particle):
        """
        Default next character implementation - linear progression through
        each character.
        """
        return particle.chars[
            (len(particle.chars) - 1) * particle.time // particle.life_time]
    @staticmethod
    def _default_next_colour(particle):
        """
        Default next colour implementation - linear progression through
        each colour tuple.
        """
        return particle.colours[
            (len(particle.colours) - 1) * particle.time // particle.life_time]
    def last(self):
        """
        The last attributes returned for this particle - typically used for
        clearing out the particle on the next frame. See :py:meth:`.next` for
        details of the returned results.
        """
        return self._last
    def next(self):
        """
        The set of attributes for this particle for the next frame to be
        rendered.
        :returns: A tuple of (character, x, y, fg, attribute, bg)
        """
        # Get next particle details
        x, y = self._move(self)
        colour = self._next_colour(self)
        char = self._next_char(self)
        self._last = char, x, y, colour[0], colour[1], colour[2]
        self.time += 1
        # Trigger any configured events.  Note these are mutually exclusive:
        # the first frame fires on_create, the last fires on_destroy, and
        # every other frame fires on_each.
        if self.time == 1 and self._on_create is not None:
            self._on_create(self)
        elif self.life_time == self.time and self._on_destroy is not None:
            self._on_destroy(self)
        elif self._on_each is not None:
            self._on_each(self)
        return self._last
class ParticleEmitter(object):
    """
    An emitter for a particle system to create a set of :py:obj:`.Particle`
    objects for a :py:obj:`.ParticleEffect`. After initialization, the
    emitter will be called once per frame to be displayed on the Screen.
    """
    def __init__(self, screen, x, y, count, new_particle, spawn, life_time,
                 blend=False):
        """
        :param screen: The screen to which the particle system will be rendered.
        :param x: The x location of origin of the particle system.
        :param y: The y location of origin of the particle system.
        :param count: The count of new particles to spawn on each frame.
        :param new_particle: The function to call to spawn a new particle.
        :param spawn: The number of frames for which to spawn particles.
        :param life_time: The life time of the whole particle system.
        :param blend: Whether to blend particles or not. A blended system
            picks the colour based on the number of overlapping particles,
            while an unblended one picks the colour based on the state of
            each particle individually as they are drawn.
            Defaults to False.
        """
        super(ParticleEmitter, self).__init__()
        self._screen = screen
        self._x = x
        self._y = y
        self._count = count
        self._new_particle = new_particle
        self._life_time = life_time
        self.particles = []
        # Count of frames for which we still spawn new particles.
        self.time_left = spawn
        self._blend = blend
    @staticmethod
    def _find_colour(particle, start_index, screen_data):
        """
        Helper function to find an existing colour in the particle palette.

        Returns the palette index matching the colour currently on screen,
        or `start_index` when there is no match.
        """
        _, fg2, attr2, bg2 = screen_data
        index = start_index
        for i, colours in enumerate(particle.colours):
            if (fg2, attr2, bg2) == colours:
                index = i
                break
        return index
    def update(self):
        """
        The function to draw a new frame for the particle system.
        """
        # Spawn new particles if required
        if self.time_left > 0:
            self.time_left -= 1
            for _ in range(self._count):
                new_particle = self._new_particle()
                if new_particle is not None:
                    self.particles.append(new_particle)
        # Draw all the particles, collecting survivors into a fresh list.
        # BUG FIX: the previous code removed expired particles from
        # self.particles while iterating over it, which skips the element
        # immediately after each removal and so delays its expiry/redraw by
        # a frame.
        survivors = []
        for particle in self.particles:
            # Clear out the old position of the particle.
            last = particle.last()
            if last is not None:
                char, x, y, fg, attr, bg = last
                screen_data = self._screen.get_from(x, y)
                if self._blend and screen_data:
                    # Step back down the palette as an overlap disappears.
                    index = self._find_colour(particle, 0, screen_data) - 1
                    fg, attr, bg = particle.colours[max(index, 0)]
                self._screen.print_at(" ", x, y, fg, attr, bg)
            if particle.time < particle.life_time:
                # Draw the new one
                char, x, y, fg, attr, bg = particle.next()
                screen_data = self._screen.get_from(x, y)
                if self._blend and screen_data:
                    # Step up the palette as overlapping particles arrive.
                    index = self._find_colour(particle, -1, screen_data) + 1
                    fg, attr, bg = \
                        particle.colours[min(index, len(particle.colours) - 1)]
                self._screen.print_at(char, x, y, fg, attr, bg)
                survivors.append(particle)
        self.particles = survivors
class ParticleEffect(with_metaclass(ABCMeta, Effect)):
    """
    An Effect that uses a :py:obj:`.ParticleEmitter` to create the animation.
    To define a new ParticleEffect, you must implement the reset() method to
    construct a chain of ParticleEmitter objects and append them to the internal
    _active_systems list.
    """
    def __init__(self, screen, x, y, life_time, **kwargs):
        """
        :param screen: The Screen being used for the Scene.
        :param x: The column (x coordinate) for the origin of the effect.
        :param y: The line (y coordinate) for the origin of the effect.
        :param life_time: The life time of the effect.
        Also see the common keyword arguments in :py:obj:`.Effect`.
        """
        super(ParticleEffect, self).__init__(screen, **kwargs)
        self._x = x
        self._y = y
        self._life_time = life_time
        self._active_systems = []
        # Subclasses populate _active_systems in reset().
        self.reset()
    @abstractmethod
    def reset(self):
        """
        Reset the particle effect back to its initial state. This must be
        implemented by the child classes.
        """
    def _update(self, frame_no):
        # Take a copy in case a new system is added to the list this iteration.
        # Iterating the copy also makes the remove() below safe.
        for system in copy(self._active_systems):
            if len(system.particles) > 0 or system.time_left > 0:
                system.update()
            else:
                # System has no live particles and will spawn no more: drop it.
                self._active_systems.remove(system)
    @property
    def stop_frame(self):
        # NOTE(review): _stop_frame is presumably set by the Effect base
        # class constructor - confirm against asciimatics.effects.Effect.
        return self._stop_frame
class Rocket(ParticleEmitter):
    """
    A rocket being launched from the ground.
    """

    def __init__(self, screen, x, y, life_time, on_destroy=None):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of the rocket.
        :param y: The line (y coordinate) for the target height of the rocket.
        :param life_time: The life time of the rocket.
        :param on_destroy: The function to call when the rocket explodes.
        """
        # A rocket is one particle, spawned once, from the bottom of screen.
        super(Rocket, self).__init__(
            screen, x, screen.height - 1, 1, self._next_particle, 1, life_time)
        self._end_y = y
        # Constant vertical speed needed to reach _end_y within life_time.
        self._acceleration = (self._end_y - self._y) // life_time
        self._on_destroy = on_destroy

    def _next_particle(self):
        palette = [(Screen.COLOUR_YELLOW, Screen.A_BOLD, 0)]
        return Particle("|", self._x, self._y, 0, self._acceleration,
                        palette, self._life_time, self._move,
                        on_destroy=self._on_destroy)

    def _move(self, particle):
        particle.x += particle.dx
        particle.y += particle.dy
        if particle.y <= self._end_y:
            # Rounding errors may mean we need to end slightly early.
            particle.y = self._end_y
            particle.time = self._life_time - 1
        return int(particle.x), int(particle.y)
class RingExplosion(ParticleEmitter):
    """
    A classic firework explosion in a simple ring.
    """

    def __init__(self, screen, x, y, life_time):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of this explosion.
        :param y: The line (y coordinate) for the origin of this explosion.
        :param life_time: The life time of this explosion.
        """
        super(RingExplosion, self).__init__(
            screen, x, y, 30, self._next_particle, 1, life_time)
        self._colour = randint(1, 7)
        # Per-frame damping factor so the ring slows as it expands.
        self._acceleration = 1.0 - (1.0 / life_time)

    def _next_particle(self):
        # Sparks fly in uniformly random directions; the x speed is larger
        # than the y speed to compensate for the character cell aspect ratio.
        angle = uniform(0, 2 * pi)
        palette = [(self._colour, Screen.A_BOLD, 0),
                   (self._colour, 0, 0),
                   (0, 0, 0)]
        return Particle("***:. ", self._x, self._y,
                        sin(angle) * 3 * 8 / self._life_time,
                        cos(angle) * 1.5 * 8 / self._life_time,
                        palette, self._life_time, self._explode)

    def _explode(self, particle):
        # Simulate some gravity and slowdown in explosion
        particle.dy = particle.dy * self._acceleration + 0.03
        particle.dx *= self._acceleration
        particle.x += particle.dx
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
class SerpentExplosion(ParticleEmitter):
    """
    A firework explosion where each trail changes direction.
    """

    def __init__(self, screen, x, y, life_time):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of this explosion.
        :param y: The line (y coordinate) for the origin of this explosion.
        :param life_time: The life time of this explosion.
        """
        super(SerpentExplosion, self).__init__(
            screen, x, y, 8, self._next_particle, 2, life_time)
        self._colour = randint(1, 7)

    def _next_particle(self):
        # Each trail starts in a random direction with a random wobble seed
        # (stored in `parm`) that _explode re-randomizes as it goes.
        direction = uniform(0, 2 * pi)
        acceleration = uniform(0, 2 * pi)
        palette = [(self._colour, Screen.A_BOLD, 0), (0, 0, 0)]
        return Particle("++++- ", self._x, self._y,
                        cos(direction), sin(direction) / 2,
                        palette, self._life_time, self._explode,
                        parm=acceleration)

    @staticmethod
    def _explode(particle):
        # Change direction like a serpent firework.
        if particle.time % 3 == 0:
            particle.parm = uniform(0, 2 * pi)
        particle.dx = (particle.dx + cos(particle.parm) / 2) * 0.8
        particle.dy = (particle.dy + sin(particle.parm) / 4) * 0.8
        particle.x += particle.dx
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
class StarExplosion(ParticleEmitter):
    """
    A classic firework explosion to a Peony shape with trails.
    """

    def __init__(self, screen, x, y, life_time, points, on_each):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of this explosion.
        :param y: The line (y coordinate) for the origin of this explosion.
        :param life_time: The life time of this explosion.
        :param points: Number of points the explosion should have.
        :param on_each: The function to call to spawn a trail.
        """
        super(StarExplosion, self).__init__(
            screen, x, y, points, self._next_particle, 1, life_time)
        self._colour = randint(1, 7)
        # Per-frame damping factor so the star slows as it expands.
        self._acceleration = 1.0 - (1.0 / life_time)
        self._on_each = on_each
        self._points = points
        self._point_count = 0

    def _next_particle(self):
        # Unlike the ring, points are spread evenly around the circle.
        direction = self._point_count * 2 * pi / self._points
        self._point_count += 1
        return Particle("+", self._x, self._y,
                        sin(direction) * 3 * 8 / self._life_time,
                        cos(direction) * 1.5 * 8 / self._life_time,
                        [(self._colour, Screen.A_BOLD, 0), (0, 0, 0)],
                        self._life_time, self._explode,
                        on_each=self._on_each)

    def _explode(self, particle):
        # Simulate some gravity and slowdown in explosion
        particle.dy = particle.dy * self._acceleration + 0.03
        particle.dx *= self._acceleration
        particle.x += particle.dx
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
class StarTrail(ParticleEmitter):
    """
    A trail for a :py:obj:`.StarExplosion`.
    """

    def __init__(self, screen, x, y, life_time, colour):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of this trail.
        :param y: The line (y coordinate) for the origin of this trail.
        :param life_time: The life time of this trail.
        :param colour: The colour of this trail.
        """
        super(StarTrail, self).__init__(
            screen, x, y, 1, self._next_particle, 1, life_time)
        self._colour = colour

    def _next_particle(self):
        # One static spark that fades through the palette as it drops.
        fade = [(self._colour, Screen.A_BOLD, 0),
                (self._colour, 0, 0),
                (0, 0, 0)]
        return Particle("+:,. ", self._x, self._y, 0, 0, fade,
                        self._life_time, self._twinkle)

    @staticmethod
    def _twinkle(particle):
        # Simulate a little gravity on the otherwise static spark.
        particle.dy += 0.03
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
class PalmExplosion(ParticleEmitter):
    """
    A classic firework explosion into a palm shape.
    """

    def __init__(self, screen, x, y, life_time, on_each=None):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of this explosion.
        :param y: The line (y coordinate) for the origin of this explosion.
        :param life_time: The life time of this explosion.
        :param on_each: The function to call to spawn a trail.
        """
        super(PalmExplosion, self).__init__(
            screen, x, y, 6, self._next_particle, 2, life_time)
        self._colour = randint(1, 7)
        self._on_each = on_each
        # Pick a random arc (in radians) for the palm fronds to cover.
        self._arc_start = uniform(pi / 6, pi / 3)
        self._arc_end = self._arc_start + uniform(pi / 6, pi / 2)

    def _next_particle(self):
        direction = uniform(self._arc_start, self._arc_end)
        return Particle("* ", self._x, self._y,
                        cos(direction) * 1.5, -sin(direction),
                        [(self._colour, Screen.A_BOLD, 0), (0, 0, 0)],
                        self._life_time, self._explode,
                        on_each=self._on_each)

    @staticmethod
    def _explode(particle):
        # Gravity pulls each frond back down; x velocity stays constant.
        particle.dy += 0.2
        particle.x += particle.dx
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
class ExplosionFlames(ParticleEmitter):
    """
    An explosion of flame and smoke.
    """
    def __init__(self, screen, x, y, life_time):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The column (x coordinate) for the origin of this explosion.
        :param y: The line (y coordinate) for the origin of this explosion.
        :param life_time: The life time of this explosion.
        """
        # Spawn for all but the final 10 frames; each flame particle then
        # lives for 10 frames of its own (see _next_particle).
        # NOTE(review): assumes life_time > 10 - confirm with callers.
        super(ExplosionFlames, self).__init__(
            screen, x, y, 30, self._next_particle, life_time - 10, life_time,
            blend=True)
    def _next_particle(self):
        # Spawn within a disc whose radius swells over the spawn window
        # (time_left counts down from the spawn duration d).  The x offset
        # is doubled to compensate for the character cell aspect ratio.
        direction = uniform(0, 2 * pi)
        d = self._life_time - 10
        r = uniform(0, sin(pi * (d - self.time_left) / (d * 2))) * 3.0
        return Particle("#",
                        self._x + sin(direction) * r * 2.0,
                        self._y + cos(direction) * r,
                        sin(direction) / 2.0,
                        cos(direction) / 4.0,
                        [
                            (Screen.COLOUR_BLACK, 0, 0),
                            (Screen.COLOUR_RED, 0, 0),
                            (Screen.COLOUR_RED, Screen.A_BOLD, 0),
                            (Screen.COLOUR_YELLOW, Screen.A_BOLD, 0),
                            (Screen.COLOUR_WHITE, Screen.A_BOLD, 0),
                        ],
                        10,
                        self._burn,
                        next_colour=self._colour)
    @staticmethod
    def _burn(particle):
        particle.x += particle.dx
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
    @staticmethod
    def _colour(particle):
        # Pin each particle to the darkest palette entry; the emitter's
        # blend mode then brightens cells where particles overlap.
        return particle.colours[0]
class DropEmitter(ParticleEmitter):
    """
    Replicate the whole screen with Particles and then drop them a cell at a
    time.
    """
    def __init__(self, screen, life_time):
        """
        :param screen: The Screen being used for this particle system.
        :param life_time: The life time of this particle system.
        """
        super(DropEmitter, self).__init__(
            screen, 0, 0, 20, self._next_particle, life_time, life_time)
        # Lazily-built, randomly-ordered snapshot of non-blank screen cells.
        self._particles = None
        self._full_count = 0
    def _next_particle(self):
        # Find all particles on the Screen when we create our first particle.
        if self._particles is None:
            self._particles = []
            for x in range(self._screen.width):
                for y in range(self._screen.height):
                    ch, fg, attr, bg = self._screen.get_from(x, y)
                    if ch != 32:
                        # Insert at a random index to shuffle the drop order.
                        self._particles.insert(
                            randint(0, len(self._particles)),
                            (x, y, ch, fg, attr, bg))
                        self._full_count += 1
        # Stop now if there were no more particles to move.
        if len(self._particles) == 0:
            return None
        # We got here, so there must still be some screen estate to move.
        # Release cells gradually: the probability of releasing one shrinks
        # as the remaining pool grows relative to 10% of the original count.
        if randint(0, len(self._particles)) < self._full_count * 0.1:
            x, y, ch, fg, attr, bg = self._particles.pop()
            return Particle(chr(ch), x, y, 0.0, 0.0, [(fg, attr, bg)], self._life_time, self._move)
    @staticmethod
    def _move(particle):
        # Report the pre-move position so the glyph appears to start in place,
        # then accelerate downwards under gravity.
        result = int(particle.x), int(particle.y)
        particle.x += particle.dx
        particle.y += particle.dy
        particle.dy += 0.3
        return result
class ShotEmitter(ParticleEmitter):
    """
    Replicate the whole screen with Particles and then explode the screen from
    a given location.
    """
    def __init__(self, screen, x, y, diameter, life_time):
        """
        :param screen: The Screen being used for this particle system.
        :param x: The x position of the origin of the explosion.
        :param y: The y position of the origin of the explosion.
        :param diameter: The diameter of the explosion.
        :param life_time: The life time of this particle system.
        """
        super(ShotEmitter, self).__init__(
            screen, x, y, 50, self._next_particle, life_time, life_time)
        # Lazily-built snapshot of non-blank screen cells (see below).
        self._particles = None
        self._diameter = diameter
    def _next_particle(self):
        # Find all particles on the Screen when we create our first particle
        # and sort by distance from the origin.
        if self._particles is None:
            self._particles = []
            for x in range(self._screen.width):
                for y in range(self._screen.height):
                    ch, fg, attr, bg = self._screen.get_from(x, y)
                    if ch != 32:
                        self._particles.append((x, y, ch, fg, attr, bg))
            if self._diameter:
                # filter() is lazy on Python 3, but sorted() below
                # immediately materializes the result back into a list.
                self._particles = filter(self._filter, self._particles)
            self._particles = sorted(self._particles, key=self._sort, reverse=True)
        # Stop now if there were no more particles to move.
        if len(self._particles) == 0:
            return None
        # We got here, so there must still be some screen estate to move.
        # Closest cells pop first (list is sorted farthest-first); velocity
        # falls off with the square of the distance from the origin.
        x, y, ch, fg, attr, bg = self._particles.pop()
        r = min(10, max(0.001, sqrt(((x - self._x) ** 2) + ((y - self._y) ** 2))))
        return Particle(chr(ch), x, y,
                        (x - self._x) * 40.0 / r ** 2,
                        (y - self._y) * 20.0 / r ** 2,
                        [(fg, attr, bg)],
                        self._life_time,
                        self._move)
    def _sort(self, data):
        # Distance metric squashed in x to allow for cell aspect ratio.
        dx = data[0] - self._x
        dy = data[1] - self._y
        return (dx * dx / 4.0) + (dy * dy)
    def _filter(self, data):
        # Keep only cells inside the (aspect-corrected) explosion ellipse.
        dx = data[0] - self._x
        dy = data[1] - self._y
        return dx ** 2 / 4.0 + dy ** 2 < self._diameter ** 2 / 4.0
    @staticmethod
    def _move(particle):
        # Report the pre-move position so the glyph appears to start in place.
        result = int(particle.x), int(particle.y)
        if (particle.dx, particle.dy) == (0, 0):
            # The cell at the exact origin has no direction: fling it hard
            # off-screen rather than leaving it in place.
            particle.dx, particle.dy = 100, 100
        particle.x += particle.dx
        particle.y += particle.dy
        return result
class RainSource(ParticleEmitter):
    """
    Source of the raindrops for a rain storm effect. This emits rain drops
    from a single line at the top of the screen (starting sufficiently off-
    screen to ensure that it can cover all the screen due to horizontal motion).
    """

    def __init__(self, screen, life_time, on_each):
        """
        :param screen: The Screen being used for this particle system.
        :param life_time: The life time of this particle system.
        :param on_each: Function to call on each iteration of the particle.
        """
        super(RainSource, self).__init__(
            screen, 0, 0, 4, self._next_particle, life_time, life_time)
        self._particles = None
        self._on_each = on_each

    def _next_particle(self):
        # Faster drops get a steeper glyph.  Drops start anywhere along the
        # top edge, including off-screen left so diagonal motion covers all.
        speed = randint(1, 3)
        glyph = " ``\\"[speed]
        start_x = randint(-self._screen.height, self._screen.width)
        velocity = (speed + 1) / 2.0
        return Particle(glyph, start_x, 0, velocity, velocity,
                        [(Screen.COLOUR_CYAN, 0, 0)],
                        self._life_time, self._move,
                        on_each=self._on_each)

    @staticmethod
    def _move(particle):
        particle.x += particle.dx
        particle.y += particle.dy
        return int(particle.x), int(particle.y)
class Splash(ParticleEmitter):
    """
    Splash effect for falling rain.
    """

    def __init__(self, screen, x, y):
        """
        :param screen: The Screen being used for this particle system.
        """
        # A splash is one short-lived (3 frame), stationary particle.
        super(Splash, self).__init__(
            screen, x, y, 1, self._next_particle, 1, 3)

    def _next_particle(self):
        return Particle("v", self._x, self._y, 0, 0,
                        [(Screen.COLOUR_CYAN, 0, 0)],
                        self._life_time, self._splash)

    @staticmethod
    def _splash(particle):
        # The splash never moves; just report its current location.
        return int(particle.x), int(particle.y)
class StarFirework(ParticleEffect):
    """
    Classic rocket with star explosion.
    """

    def reset(self):
        # Start with just the rocket; the explosion is chained on destroy.
        self._active_systems = [
            Rocket(self._screen, self._x, self._y, 10, on_destroy=self._next)]

    def _next(self, parent):
        # Rocket burnt out: explode into a star at its final position.
        self._active_systems.append(
            StarExplosion(self._screen, parent.x, parent.y,
                          self._life_time - 10, randint(6, 20),
                          on_each=self._trail))

    def _trail(self, parent):
        # Randomly spawn trails, capped to keep the frame rate sane.
        if len(self._active_systems) < 150 and randint(0, 100) < 50:
            trail = StarTrail(self._screen, parent.x, parent.y, 10,
                              parent.colours[0][0])
            self._active_systems.insert(0, trail)
class RingFirework(ParticleEffect):
    """
    Classic rocket with ring explosion.
    """

    def reset(self):
        # Start with just the rocket; the explosion is chained on destroy.
        self._active_systems = [
            Rocket(self._screen, self._x, self._y, 10, on_destroy=self._next)]

    def _next(self, parent):
        # Rocket burnt out: explode into a ring at its final position.
        explosion = RingExplosion(
            self._screen, parent.x, parent.y, self._life_time - 10)
        self._active_systems.append(explosion)
class SerpentFirework(ParticleEffect):
    """
    A firework where each trail changes direction.
    """

    def reset(self):
        # Start with just the rocket; the explosion is chained on destroy.
        self._active_systems = [
            Rocket(self._screen, self._x, self._y, 10, on_destroy=self._next)]

    def _next(self, parent):
        # Rocket burnt out: explode into serpents at its final position.
        explosion = SerpentExplosion(
            self._screen, parent.x, parent.y, self._life_time - 10)
        self._active_systems.append(explosion)
class PalmFirework(ParticleEffect):
    """
    Classic palm shaped firework.
    """

    def reset(self):
        # Start with just the rocket; the explosion is chained on destroy.
        self._active_systems = [
            Rocket(self._screen, self._x, self._y, 10, on_destroy=self._next)]

    def _next(self, parent):
        # Rocket burnt out: explode into a palm at its final position.
        self._active_systems.append(
            PalmExplosion(self._screen, parent.x, parent.y,
                          self._life_time - 10, on_each=self._trail))

    def _trail(self, parent):
        # Randomly spawn trails, capped to keep the frame rate sane.
        if len(self._active_systems) < 100 and randint(0, 100) < 80:
            trail = StarTrail(self._screen, parent.x, parent.y, 10,
                              parent.colours[0][0])
            self._active_systems.insert(0, trail)
class Explosion(ParticleEffect):
    """
    An explosion effect.
    """

    def reset(self):
        flames = ExplosionFlames(
            self._screen, self._x, self._y, self._life_time)
        self._active_systems = [flames]
class DropScreen(ParticleEffect):
    """
    Drop all the text on the screen as if it was subject to gravity.
    """

    def __init__(self, screen, life_time, **kwargs):
        """
        See :py:obj:`.ParticleEffect` for details of the parameters.
        """
        # The whole screen is the source, so the origin is simply (0, 0).
        super(DropScreen, self).__init__(screen, 0, 0, life_time, **kwargs)

    def reset(self):
        self._active_systems = [DropEmitter(self._screen, self._life_time)]
class ShootScreen(ParticleEffect):
    """
    Shoot the screen out like a massive gunshot.
    """

    def __init__(self, screen, x, y, life_time, diameter=None, **kwargs):
        """
        See :py:obj:`.ParticleEffect` for details of the parameters.

        In addition, it is possible to set the diameter of this effect
        using the extra keyword parameter.
        """
        # The field must be set before calling the base constructor
        # because the base constructor invokes reset(), which reads it.
        self._diameter = diameter
        super(ShootScreen, self).__init__(screen, x, y, life_time, **kwargs)

    def reset(self):
        shot = ShotEmitter(self._screen, self._x, self._y, self._diameter,
                           self._life_time)
        self._active_systems = [shot]
class Rain(ParticleEffect):
    """
    Rain storm effect.

    Spawns a single RainSource whose particles fall diagonally; when a
    drop hits non-blank screen content (or the bottom of the screen) it is
    killed and replaced with a Splash.
    """
    def __init__(self, screen, life_time, **kwargs):
        """
        See :py:obj:`.ParticleEffect` for details of the parameters.
        """
        # No need for an origin as this uses the whole screen.
        super(Rain, self).__init__(screen, 0, 0, life_time, **kwargs)
    def reset(self):
        self._active_systems = []
        self._active_systems.append(
            RainSource(self._screen, self._life_time, self._collision))
    def _collision(self, particle):
        """Collision callback for rain drops.

        Checks the screen content along the drop's path and, on impact,
        expires the particle and spawns a Splash at the contact point.
        """
        # Already calculated new position, so go back in history
        _, x, y, _, _, _ = particle.last()
        # Note that dx = dy, so simply calculation of next point to check.
        current_char = None
        dx = 0
        # NOTE(review): range(min(1, ...)) means at most one step (dx=0)
        # is ever checked regardless of particle speed; max(1, ...) may
        # have been intended here -- confirm against upstream.
        for dx in range(min(1, int(particle.dx))):
            next_point = self._screen.get_from(int(x + dx), int(y + dx))
            if next_point is None:
                current_char = None
                break
            current_char = next_point[0]
            if current_char != 32:
                break
        # If there's a collision, kill this drop and make a splash.
        # Characters drawn by the rain itself (backtick, backslash, "v")
        # do not count as obstacles.
        if (current_char not in [32, None, ord("`"), ord("\\"), ord("v")] or
                particle.y + dx >= self._screen.height):
            particle.time = particle.life_time
            self._active_systems.append(
                Splash(self._screen, x + dx - 1, y + dx - 1))
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.api import utils
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import skip_because
CONF = config.CONF
class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
    """Exercise the server-list filter parameters (image, flavor, name,
    status, limit and ip) over the JSON API.

    setUpClass builds three servers so every filter has both matching and
    non-matching instances:
      s1 - default image, default flavor
      s2 - alternate image (falls back to the default image when no
           distinct alternate exists)
      s3 - alternate flavor
    """
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        super(ListServerFiltersTestJSON, cls).setUpClass()
        cls.client = cls.servers_client

        # Check to see if the alternate image ref actually exists...
        images_client = cls.images_client
        resp, images = images_client.list_images()

        if cls.image_ref != cls.image_ref_alt and \
            any([image for image in images
                 if image['id'] == cls.image_ref_alt]):
            cls.multiple_images = True
        else:
            # No usable alternate image: collapse to a single image so the
            # image-filter tests are skipped via skip_unless_attr.
            cls.image_ref_alt = cls.image_ref

        # Do some sanity checks here. If one of the images does
        # not exist, fail early since the tests won't work...
        try:
            cls.images_client.get_image(cls.image_ref)
        except exceptions.NotFound:
            raise RuntimeError("Image %s (image_ref) was not found!" %
                               cls.image_ref)

        try:
            cls.images_client.get_image(cls.image_ref_alt)
        except exceptions.NotFound:
            raise RuntimeError("Image %s (image_ref_alt) was not found!" %
                               cls.image_ref_alt)

        cls.s1_name = data_utils.rand_name(cls.__name__ + '-instance')
        resp, cls.s1 = cls.create_test_server(name=cls.s1_name,
                                              wait_until='ACTIVE')

        cls.s2_name = data_utils.rand_name(cls.__name__ + '-instance')
        resp, cls.s2 = cls.create_test_server(name=cls.s2_name,
                                              image_id=cls.image_ref_alt,
                                              wait_until='ACTIVE')

        cls.s3_name = data_utils.rand_name(cls.__name__ + '-instance')
        resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
                                              flavor=cls.flavor_ref_alt,
                                              wait_until='ACTIVE')

        # NOTE(review): the module defines CONF = config.CONF but this
        # reads cls.config (presumably provided by the base class) --
        # confirm whether CONF.compute.fixed_network_name was intended.
        cls.fixed_network_name = cls.config.compute.fixed_network_name

    @utils.skip_unless_attr('multiple_images', 'Only one image found')
    @attr(type='gate')
    def test_list_servers_filter_by_image(self):
        # Filter the list of servers by image
        params = {'image': self.image_ref}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))

    @attr(type='gate')
    def test_list_servers_filter_by_flavor(self):
        # Filter the list of servers by flavor
        params = {'flavor': self.flavor_ref_alt}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))

    @attr(type='gate')
    def test_list_servers_filter_by_server_name(self):
        # Filter the list of servers by server name
        params = {'name': self.s1_name}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))

    @attr(type='gate')
    def test_list_servers_filter_by_server_status(self):
        # Filter the list of servers by server status
        params = {'status': 'active'}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))

    @attr(type='gate')
    def test_list_servers_filter_by_shutoff_status(self):
        # Filter the list of servers by server shutoff status.
        # s1 is stopped for the check and restarted afterwards; a failure
        # between stop and start leaves s1 SHUTOFF for later tests.
        params = {'status': 'shutoff'}
        self.client.stop(self.s1['id'])
        self.client.wait_for_server_status(self.s1['id'],
                                           'SHUTOFF')
        resp, body = self.client.list_servers(params)
        self.client.start(self.s1['id'])
        self.client.wait_for_server_status(self.s1['id'],
                                           'ACTIVE')
        servers = body['servers']

        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertNotIn(self.s3['id'], map(lambda x: x['id'], servers))

    @attr(type='gate')
    def test_list_servers_filter_by_limit(self):
        # Verify only the expected number of servers are returned
        params = {'limit': 1}
        resp, servers = self.client.list_servers(params)
        # when _interface='xml', one element for servers_links in servers
        self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))

    @utils.skip_unless_attr('multiple_images', 'Only one image found')
    @attr(type='gate')
    def test_list_servers_detailed_filter_by_image(self):
        # Filter the detailed list of servers by image
        params = {'image': self.image_ref}
        resp, body = self.client.list_servers_with_detail(params)
        servers = body['servers']

        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))

    @attr(type='gate')
    def test_list_servers_detailed_filter_by_flavor(self):
        # Filter the detailed list of servers by flavor
        params = {'flavor': self.flavor_ref_alt}
        resp, body = self.client.list_servers_with_detail(params)
        servers = body['servers']

        self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))

    @attr(type='gate')
    def test_list_servers_detailed_filter_by_server_name(self):
        # Filter the detailed list of servers by server name
        params = {'name': self.s1_name}
        resp, body = self.client.list_servers_with_detail(params)
        servers = body['servers']

        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))

    @attr(type='gate')
    def test_list_servers_detailed_filter_by_server_status(self):
        # Filter the detailed list of servers by server status
        params = {'status': 'active'}
        resp, body = self.client.list_servers_with_detail(params)
        servers = body['servers']

        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
        self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers])

    @attr(type='gate')
    def test_list_servers_filtered_by_name_wildcard(self):
        # List all servers that contains '-instance' in name
        params = {'name': '-instance'}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
        self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
        self.assertIn(self.s3_name, map(lambda x: x['name'], servers))

        # Let's take random part of name and try to search it
        part_name = self.s1_name[6:-1]
        params = {'name': part_name}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))

    @skip_because(bug="1170718")
    @attr(type='gate')
    def test_list_servers_filtered_by_ip(self):
        # Filter servers by ip
        # Here should be listed 1 server
        resp, self.s1 = self.client.get_server(self.s1['id'])
        ip = self.s1['addresses'][self.fixed_network_name][0]['addr']
        params = {'ip': ip}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))

    @skip_because(bug="1182883",
                  condition=CONF.service_available.neutron)
    @attr(type='gate')
    def test_list_servers_filtered_by_ip_regex(self):
        # Filter servers by regex ip
        # List all servers filtered by part of ip address.
        # Here should be listed all servers
        resp, self.s1 = self.client.get_server(self.s1['id'])
        ip = self.s1['addresses'][self.fixed_network_name][0]['addr'][0:-3]
        params = {'ip': ip}
        resp, body = self.client.list_servers(params)
        servers = body['servers']

        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
        self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
        self.assertIn(self.s3_name, map(lambda x: x['name'], servers))

    @attr(type='gate')
    def test_list_servers_detailed_limit_results(self):
        # Verify only the expected number of detailed results are returned
        params = {'limit': 1}
        resp, servers = self.client.list_servers_with_detail(params)
        self.assertEqual(1, len(servers['servers']))
class ListServerFiltersTestXML(ListServerFiltersTestJSON):
    """Re-run the whole JSON list-server filter suite over the XML API."""
    _interface = 'xml'
| |
"""\
DMP.py - Implementation of density matrix purification methods, including
Niklasson, Tymczak and Challacombe's density matrix minimizer
JCP 118, 8611 (2003)
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
# Status:
# PM,TCP,TRS work for h2/* and h2o/*. Appears noisy for
# h2o/631gss. Also, when lots of iterations are done in the
# dm convergence, the program gives unreliable results. This
# normally kicks in around 70 or 80 iterations.
# MCW works, provided we have a guess for efermi
import logging
from math import sqrt
from PyQuante.Ints import getbasis,getints,get2JmK
from PyQuante.Molecule import Molecule
from PyQuante.LA2 import mkdens,SymOrth,simx
from PyQuante.hartree_fock import get_energy
from PyQuante.NumWrap import matrixmultiply,identity,trace,zeros,eigh,solve
class AbstractDMP:
"AbstractDMP - Functions common to all density matrix purifiers"
method = "Abstract"
def __init__(self,F,Ne,S=None,**opts):
self.tol = opts.get('tol',1e-7)
self.maxit = opts.get('maxit',100)
self.do_orth = S is not None
self.N = F.shape[0]
self.I = identity(self.N,'d')
self.Ne = Ne
if self.do_orth:
self.X = SymOrth(S)
self.F = simx(F,self.X)
self.emin, self.emax = gershgorin_minmax(self.F)
self.initialize()
self.print_init_info()
return
def iterate(self):
for self.iter in range(self.maxit):
if self.converged(): break
self.update()
self.print_iter_info()
self.print_iter_end_info()
if self.do_orth:
self.D = simx(self.D,self.X,'T')
return
def reinitialize(self,F):
"Used for restarting in a later SCF iteration"
if self.do_orth:
self.F = SymOrth(F,self.X)
else:
self.F = F
self.emin,self.emax = gershgorin_minmax(self.F)
self.initialize()
self.print_init_info()
return
def print_iter_end_info(self):
if self.iter == self.maxit-1:
logging.warning("Too many iterations taken in %s: %d" %
(self.method,self.iter))
else:
logging.debug("%s converged in %d iterations" %
(self.method,self.iter))
return
def print_iter_info(self): return
def print_init_info(self): return
# Functions that must be overloaded
def initialize(self): print "AbstractDMP.initialize()"
def update(self): print "AbstractDMP.update()"
def converged(self): print "AbstractDMP.converged()"
class NOTCP:
    "Nonorthogonal version of Niklasson Trace Correcting Purification"
    method = "NOTCP"

    def __init__(self,F,Ne,S,**opts):
        """F: Fock matrix; Ne: target electron count; S: overlap matrix,
        used directly (no orthogonalization, unlike AbstractDMP).
        Options: tol (default 1e-7), maxit (default 50)."""
        self.tol = opts.get('tol',1e-7)
        self.maxit = opts.get('maxit',50)
        self.S = S
        self.N = F.shape[0]
        self.I = identity(self.N,'d')
        self.Ne = Ne
        self.F = F
        # Lanczos bounds for the generalized problem F x = e S x.
        self.emin, self.emax = lanczos_minmax(self.F,self.S)
        self.initialize()
        self.print_init_info()
        return

    def iterate(self):
        # NOTE: converged() must run before update() each sweep -- it
        # caches self.DS and self.Ne_curr, which update() relies on.
        for self.iter in range(self.maxit):
            if self.converged(): break
            self.update()
            self.print_iter_info()
        self.print_iter_end_info()
        return

    def reinitialize(self,F):
        "Used for restarting in a later SCF iteration"
        self.F = F
        self.emin,self.emax = lanczos_minmax(self.F,self.S)
        self.initialize()
        self.print_init_info()
        return

    def print_iter_end_info(self):
        # Warn when the loop exhausted maxit without converging.
        if self.iter == self.maxit-1:
            logging.warning("Too many iterations taken in %s: %d" %
                            (self.method,self.iter))
        else:
            logging.debug("%s converged in %d iterations" %
                          (self.method,self.iter))
        return

    def print_iter_info(self):
        #print self.iter,self.Ne,self.Ne_curr
        return

    def print_init_info(self): return

    def initialize(self):
        # Initial guess: inverse of the level-shifted Fock matrix; the
        # shift (emin - 1) lies below the spectrum.
        from PyQuante.NumWrap import inv
        self.D = inv(self.F-(self.emin-1)*self.S)
        return

    def update(self):
        # Trace-correcting step in the nonorthogonal metric. self.DS and
        # self.Ne_curr were set by the preceding converged() call.
        D2 = matrixmultiply(self.DS,self.D)
        if self.Ne_curr < self.Ne:
            self.D = 2*self.D-D2
        else:
            self.D = D2
        return

    def converged(self):
        # Cache D*S and the current electron count for update().
        self.DS = matrixmultiply(self.D,self.S)
        self.Ne_curr = trace(self.DS)
        return abs(self.Ne_curr - self.Ne) < self.tol
class TCP(AbstractDMP):
    "Niklasson Trace Correcting Purification"
    method = "TCP"

    def initialize(self):
        """Map F linearly so the initial density has spectrum in [0, 1]."""
        self.D = (self.emax * self.I - self.F) / (self.emax - self.emin)
        return

    def update(self):
        """Apply whichever polynomial drives tr(D) towards Ne."""
        current = trace(self.D)
        D2 = matrixmultiply(self.D, self.D)
        Ne2 = trace(D2)  # kept for the alternative comparison below
        self.Ne_curr = current
        # Anders claims comparing |2*tr(D)-tr(D^2)-Ne| with |tr(D^2)-Ne|
        # works better; no difference was observed in practice.
        if current < self.Ne:
            self.D = 2 * self.D - D2
        else:
            self.D = D2
        return

    def converged(self):
        return abs(trace(self.D) - self.Ne) < self.tol

    def print_iter_info(self):
        return
class TRP(TCP):
    "Niklasson/Tymczak/Challacombe Trace Resetting purification"
    method = "TRP"

    def update(self):
        """One trace-resetting purification step."""
        D2 = matrixmultiply(self.D, self.D)
        # f(D) = D^2 (4D - 3D^2) pushes occupations towards 0 and 1.
        Df = matrixmultiply(D2, 4 * self.D - 3 * D2)
        # g(D) = D^2 (I - D)^2 is the trace-resetting correction term.
        complement = self.I - self.D
        Dg = matrixmultiply(D2, matrixmultiply(complement, complement))
        gamma = (self.Ne - trace(Df)) / trace(Dg)
        # An out-of-range gamma falls back to plain trace-correcting steps.
        if gamma > 2:
            self.D = 2 * self.D - D2
        elif gamma < 0:
            self.D = D2
        else:
            self.D = Df - gamma * Dg
        return
class CP(AbstractDMP):
    "Palser/Manolopolous Canonical Purification"
    method = "CP"

    def initialize(self):
        """Build an initial density with the correct trace Ne."""
        efermi = trace(self.F) / self.N
        beta = self.Ne / float(self.N)
        alpha = min(self.Ne / (self.emax - efermi),
                    (self.N - self.Ne) / (efermi - self.emin)) / float(self.N)
        self.D = alpha * (efermi * self.I - self.F) + beta * self.I
        # Running total used by converged() to detect a fixed point.
        self.Dsumold = 0
        return

    def update(self):
        """Trace-conserving cubic purification step."""
        D2 = matrixmultiply(self.D, self.D)
        D3 = matrixmultiply(self.D, D2)
        cn = trace(D2 - D3) / trace(self.D - D2)
        if cn < 0.5:
            self.D = ((1.0 - 2.0 * cn) * self.D + (1.0 + cn) * D2 - D3) / (1.0 - cn)
        else:
            self.D = ((1 + cn) * D2 - D3) / cn
        return

    def converged(self):
        """Converged when the matrix-element sum stops changing."""
        Dsum = sum(sum(self.D))
        settled = abs(Dsum - self.Dsumold) < self.tol
        self.Dsumold = Dsum
        return settled
class McWeeny(AbstractDMP):
    """McWeeny purification, with a bisection search for the Fermi level."""
    method = "MCW"

    def initialize(self):
        "Set efermi and create the initial D matrix"
        beta = 0.5
        elow = self.emin
        ehigh = self.emax+20
        de = ehigh-elow
        alpha = beta/de
        # Bisect efermi until the trial density holds Ne electrons.
        #nelow = trace(alpha*(elow*I-self.F) + beta*I)
        #nehigh = trace(alpha*(ehigh*I-self.F) + beta*I)
        nelow = self.get_nel(elow,alpha,beta)
        nehigh = self.get_nel(ehigh,alpha,beta)
        for i in range(100):
            efermi = 0.5*(elow+ehigh)
            #nefermi = trace(alpha*(efermi*I-F)+ beta*I)
            nefermi = self.get_nel(efermi,alpha,beta)
            if abs(self.Ne-nefermi) < self.tol: break
            if nefermi < self.Ne:
                elow = efermi
                nelow = nefermi
            else:
                ehigh = efermi
                nehigh = nefermi
        # Rescale alpha so the purification polynomial stays in bounds.
        alpha = min(beta/(self.emax-efermi),(1-beta)/(efermi-self.emin))
        self.D = alpha*(efermi*self.I-self.F)+beta*self.I
        return

    def get_nel(self,efermi,alpha,beta):
        """Return tr(D) for the trial density alpha*(efermi*I - F) + beta*I."""
        # Bug fix: the trial density adds beta*I (see the commented-out
        # formulas in initialize and the final expression for self.D); the
        # previous code multiplied by beta*self.I instead, breaking the
        # electron-count bisection.
        return trace(alpha*(efermi*self.I-self.F)+beta*self.I)

    def update(self):
        # McWeeny purification step: D <- 3 D^2 - 2 D^3.
        D2 = matrixmultiply(self.D,self.D)
        self.D = 3*D2-2*matrixmultiply(self.D,D2)
        return

    def converged(self): return abs(trace(self.D) - self.Ne) < self.tol
def init_dmat_solver(Method, **opts):
    "Wrapper around Dmat classes to make them work like simple solvers"
    # Note: the (F, S, Ne) argument order of the returned solver differs
    # from the (F, Ne, S) order of the purifier constructors.
    def solver(F, S, Ne):
        purifier = Method(F, Ne, S)
        purifier.iterate()
        return purifier.D
    return solver
def gershgorin_minmax(A):
    """Return Gershgorin (lower, upper) bounds on the eigenvalues of A."""
    n, m = A.shape
    mins = []
    maxs = []
    for i in range(n):
        # Every eigenvalue lies in a disc centred on A[i, i] whose radius
        # is the sum of the off-diagonal magnitudes in row i.
        radius = sum(abs(A[i, :])) - abs(A[i, i])
        mins.append(A[i, i] - radius)
        maxs.append(A[i, i] + radius)
    return min(mins), max(maxs)
def tridiagmat(alpha, beta):
    """Assemble a symmetric tridiagonal matrix from the diagonal entries
    alpha and off-diagonal entries beta (beta[i] couples rows i, i+1)."""
    N = len(alpha)
    A = zeros((N, N), 'd')
    for i in range(N):
        A[i, i] = alpha[i]
    for i in range(N - 1):
        A[i, i + 1] = A[i + 1, i] = beta[i]
    return A
def lanczos_minmax(F,S=None,**kwargs):
    "Estimate the min/max evals of F using a few iters of Lanczos"
    # Solves the generalized problem F x = e S x when S is given, the
    # standard problem otherwise.  Options: niter (default 8).
    doS = S is not None
    niter = kwargs.get('niter',8)
    N = F.shape[0]
    # Never take more Lanczos steps than the dimension of F.
    niter = min(N,niter)
    # Start vector: first unit vector.
    x = zeros(N,'d')
    x[0] = 1
    q = x
    avals = []  # diagonal (alpha) entries of the Lanczos tridiagonal
    bvals = []  # off-diagonal (beta) entries
    if doS:
        r = matrixmultiply(S,q)
    else:
        r = q
    beta = sqrt(matrixmultiply(q,r))
    wold = zeros(N,'d')
    for i in range(niter):
        w = r/beta
        v = q/beta
        r = matrixmultiply(F,v)
        r = r - wold*beta
        alpha = matrixmultiply(v,r)
        avals.append(alpha)
        r = r-w*alpha
        if doS:
            # Solve S q = r instead of forming S^-1 explicitly.
            q = solve(S,r)
        else:
            q = r
        beta = sqrt(matrixmultiply(q,r))
        bvals.append(beta)
        wold = w
    # Extremal eigenvalues of the small tridiagonal matrix approximate
    # those of F.  (tridiagmat uses only the first len(alpha)-1 betas, so
    # the final beta computed above is ignored.)
    E,V = eigh(tridiagmat(avals,bvals))
    return min(E),max(E)
def test():
    """Smoke test: HF energy of H2 via the density-matrix solver, compared
    against the quoted target energy."""
    # Imported lazily so the module can load without PyQuante2 installed.
    from PyQuante.PyQuante2 import SCF,DmatSolver
    print "Target energy: ",-1.130501
    h2 = Molecule('H2',atomlist=[(1,(0.35,0,0)),(1,(-0.35,0,0))],
                  units='Angs')
    h2_hf = SCF(h2,method='HF',SolverConstructor=DmatSolver)
    h2_hf.iterate()
    print "Energy: ",h2_hf.energy

if __name__ == '__main__': test()
| |
import os
from unittest import TestCase
from nose.tools import eq_
import tempfile
import shutil
import gzip
from leisure import shuffle, disco
from leisure.path import makedirs
def cat(fname, content):
    """Write *content* to *fname*, truncating any existing file."""
    # Use a context manager so the handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(fname, 'w') as f:
        f.write(content)
class TestShuffle(TestCase):
  """Tests for leisure.shuffle using a scratch disco data root on disk."""

  def setUp(self):
    # Scratch data root plus a fake job layout for host "localhost".
    self.data_root = tempfile.mkdtemp()
    self.job_name = "Job@123"
    self.host = "localhost"
    self.job_home = disco.job_home(self.job_name, os.path.join(self.data_root, self.host))
    self.job_url = disco.job_url(self.host, self.job_name)
    makedirs(self.job_home)
    self.part_info = self.make_part_info(self.job_home)

  def tearDown(self):
    shutil.rmtree(self.data_root)

  def make_part_info(self, job_home):
    """Create a timestamped partition directory; return (path, url)."""
    part_dir = "partitions-{}".format(disco.timestamp())
    part_path = os.path.join(
      job_home,
      part_dir
    )
    makedirs(part_path)
    part_url = os.path.join("disco://localhost", self.job_url, part_dir)
    return (
      part_path,
      part_url
    )

  def mk_output_file(self, name, content, job_home=None):
    """Write *content* into job_home/name and return the path."""
    if job_home is None:
      job_home = self.job_home
    path = os.path.join(job_home, name)
    cat(path, content)
    return path

  def mk_task_results(self, task_name, mode='map', host="localhost"):
    """
    Creates a file suitable for using as task results and returns its url.
    """
    job_home = disco.job_home(self.job_name, os.path.join(self.data_root, host))
    # Bug fix: the job home must exist before the output files are written
    # into it; previously makedirs ran after the writes, which only worked
    # for hosts whose directory setUp had already created (localhost).
    makedirs(job_home)
    self.mk_output_file('{}-0'.format(mode),
      'line1\n'
      'line2\n',
      job_home=job_home
    )
    self.mk_output_file('{}-1'.format(mode),
      'line1\n'
      'line2\n',
      job_home=job_home
    )
    self.mk_output_file('{}-2'.format(mode),
      'line1\n'
      'line2\n',
      job_home=job_home
    )
    job_url = disco.job_url(host, self.job_name)
    task_result_path = os.path.join(job_home, task_name)
    cat(task_result_path,
      (
        "0 part://{host}/{job_url}/{mode}-0\n"
        "1 part://{host}/{job_url}/{mode}-1\n"
        "0 part://{host}/{job_url}/{mode}-2\n"
      ).format(job_url = job_url, host=host, mode=mode)
    )
    return os.path.join("disco://", host, job_url, task_name)

  def test_write_index(self):
    index = [
      "line1\n",
      "line2\n"
    ]
    filename = os.path.join(self.data_root, "blah")
    shuffle.write_index(filename, index)
    read_lines = gzip.GzipFile(filename).readlines()
    self.assertSequenceEqual(index, read_lines)

  def test_process_url_non_local(self):
    # Non part:// urls pass through untouched.
    eq_(
      '0 tag://blah\n',
      shuffle.process_url(
        ("0", "tag://blah"),
        self.data_root,
        self.part_info
      )
    )

  def test_process_url_local(self):
    self.mk_output_file('map-0',
      'line1\n'
      'line2\n'
    )
    self.mk_output_file('map-1',
      'line3\n'
      'line4\n'
    )
    part_path, part_url = self.part_info
    part_dir = os.path.basename(part_path)
    eq_(
      '0 disco://localhost/{}/{}/part-0\n'.format(self.job_url, part_dir),
      shuffle.process_url(
        ("0", "part://localhost/{}/map-0".format(self.job_url)),
        self.data_root,
        self.part_info
      )
    )
    eq_(
      open(os.path.join(part_path, "part-0")).read(),
      'line1\n'
      'line2\n'
    )
    # A second url with the same label appends to the same partition file.
    eq_(
      '0 disco://localhost/{}/{}/part-0\n'.format(self.job_url, part_dir),
      shuffle.process_url(
        ("0", "part://localhost/{}/map-1".format(self.job_url)),
        self.data_root,
        self.part_info
      )
    )
    eq_(
      open(os.path.join(part_path, "part-0")).read(),
      'line1\n'
      'line2\n'
      'line3\n'
      'line4\n'
    )

  def test_process_task(self):
    task_result_url = self.mk_task_results('task-1')
    part_files = list(shuffle.process_task(
      task_result_url,
      self.data_root, self.part_info
    ))
    part_url = self.part_info[1]
    expected = [
      s.format(part_url=part_url) for s in [
        "0 {part_url}/part-0\n",
        "1 {part_url}/part-1\n",
        "0 {part_url}/part-0\n"
      ]
    ]
    self.assertSequenceEqual(
      expected,
      part_files
    )

  def test_merged_index(self):
    dir_urls = [self.mk_task_results('task-1')]
    m_index = shuffle.merged_index(dir_urls, self.data_root, self.part_info)
    part_url = self.part_info[1]
    expected = [
      s.format(part_url=part_url) for s in [
        "0 {part_url}/part-0\n",
        "1 {part_url}/part-1\n",
      ]
    ]
    self.assertSequenceEqual(
      set(expected),
      m_index
    )

  def test_combine_tasks(self):
    # Bug fix: the node names must be passed as host= -- they were
    # previously passed positionally into the mode parameter of
    # mk_task_results, which wrote files named 'node1-*' on localhost
    # instead of creating map results on node1/node2.
    task_results = [
      ["node1", self.mk_task_results('task-1', host="node1")],
      ["node2", self.mk_task_results('task-1', host="node2")],
      ["node1", self.mk_task_results('task-2', host="node1")]
    ]
    indexes = list(shuffle.combine_tasks(
      data_root=self.data_root,
      job=self.job_name,
      mode="map",
      task_results=task_results
    ))
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=g-complex-comprehension
# pylint: disable=missing-docstring
import tensorflow as tf
from tensorflow.keras import layers
from muzero import network
LARGE_NUM = 1e9
class MLPandLSTM(network.AbstractEncoderandLSTM):
  """Conv+LSTM network for use with MuZero.

  Args:
    trivial_encoding: if True, skip the conv encoder and encode an
      observation as the difference of its last two frames.
    observation_space: shape of a single observation, e.g. (H, W, C).
    encoder_size: 0 selects a plain strided-conv encoder; values 1-3 select
      a residual encoder with progressively more ResidualBlocks.
    pretrain_temperature: softmax temperature for the contrastive
      (SimCLR-style) pretraining loss.
  """

  def __init__(self,
               trivial_encoding,
               observation_space,
               *args,
               encoder_size=3,
               pretrain_temperature=1.,
               **kwargs):
    super().__init__(*args, **kwargs)
    self.trivial_encoding = trivial_encoding
    # Bug fix: this was hard-coded to 1., silently ignoring the
    # pretrain_temperature constructor argument.
    self.pretrain_temperature = pretrain_temperature
    if encoder_size == 0:
      # Plain strided-conv encoder (DQN-style stack with LayerNorm).
      encoding_layers = [
          layers.Conv2D(
              filters=32,
              kernel_size=8,
              strides=(4, 4),
              padding='valid',
              activation='relu',
              batch_input_shape=(None, *observation_space)),
          layers.Conv2D(
              filters=64,
              kernel_size=4,
              strides=(2, 2),
              padding='valid',
              activation=None,
              use_bias=False,
          ),
          tf.keras.layers.LayerNormalization(),
          tf.keras.layers.ReLU(),
          layers.Conv2D(
              filters=128,
              kernel_size=4,
              strides=(2, 2),
              padding='valid',
              activation='relu',
          ),
          layers.Conv2D(
              filters=256,
              kernel_size=3,
              strides=(1, 1),
              padding='valid',
              activation=None,
              use_bias=False,
          ),
          tf.keras.layers.LayerNormalization(),
          tf.keras.layers.ReLU(),
      ]
    else:
      # Residual encoder; encoder_size controls how many ResidualBlocks
      # are inserted at each resolution.
      encoding_layers = [
          layers.Conv2D(
              filters=64,
              kernel_size=3,
              strides=(2, 2),
              padding='same',
              activation='relu',
              batch_input_shape=(None, *observation_space)),  # add activation?
      ]
      if encoder_size > 0:
        encoding_layers.append(ResidualBlock(64),)
      if encoder_size > 1:
        encoding_layers.append(ResidualBlock(64),)
      encoding_layers.append(
          layers.Conv2D(
              filters=128,
              kernel_size=3,
              strides=(2, 2),
              activation='relu',
              padding='same'),  # add activation?
      )
      if encoder_size > 0:
        encoding_layers.append(ResidualBlock(128),)
      if encoder_size > 1:
        encoding_layers.append(ResidualBlock(128),)
      if encoder_size > 2:
        encoding_layers.append(ResidualBlock(128),)
      encoding_layers.append(
          layers.AveragePooling2D(
              pool_size=(3, 3), strides=(2, 2), padding='same'),)
      if encoder_size > 0:
        encoding_layers.append(ResidualBlock(128),)
      if encoder_size > 1:
        encoding_layers.append(ResidualBlock(128),)
      if encoder_size > 2:
        encoding_layers.append(ResidualBlock(128),)
      encoding_layers.append(
          layers.AveragePooling2D(
              pool_size=(3, 3), strides=(2, 2), padding='same'))
    self._observation_encoder = tf.keras.Sequential(
        encoding_layers, name='observation_encoder')

    # Projection head + predictor used only by the contrastive pretraining.
    pretrain_hidden_layers = self._head_hidden_layers()
    pretrain_output_size = self.head_hidden_sizes[
        -1] if self.head_hidden_sizes else self.hidden_state_size
    self._pretrain_head = tf.keras.Sequential(
        pretrain_hidden_layers + [
            layers.Dense(pretrain_output_size, name='pretrain_output'),
        ],
        name='pretrain_head')
    self._pretrain_predictor = tf.keras.Sequential([
        tf.keras.layers.Dense(pretrain_output_size // 4, use_bias=False),
        tf.keras.layers.LayerNormalization(),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dense(pretrain_output_size),
    ],
                                                   name='pretrain_predictor')

  def _encode_observation(self, observation, training=True):
    # Rescale [0, 1] observations to [-1, 1].
    observation = observation * 2 - 1.
    if self.trivial_encoding:
      # use the trivial observation encoding from
      # https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
      # Simply take the difference between the last two observations.
      return observation[:, :, :, -1] - observation[:, :, :, -2]
    return self._observation_encoder(observation, training=training)

  # The loss is according to SimCLR(https://arxiv.org/abs/2002.05709).
  def pretraining_loss(self, sample, training=True):
    """Symmetrized contrastive loss between two augmented views."""
    obs1, obs2 = sample
    out1 = self._pretrain_head(
        self.initial_inference(obs1, training=training).hidden_state)
    out2 = self._pretrain_head(
        self.initial_inference(obs2, training=training).hidden_state)
    pred1 = self._pretrain_predictor(out1)
    pred2 = self._pretrain_predictor(out2)
    loss = self.add_contrastive_loss(
        pred1, out2) / 2. + self.add_contrastive_loss(pred2, out1) / 2.

    return loss, None

  def add_contrastive_loss(self,
                           hidden1,
                           hidden2,
                           hidden_norm=True,
                           weights=1.0):
    """NT-Xent loss between two batches of hidden vectors.

    Args:
      hidden1, hidden2: (batch, dim) representations of the two views.
      hidden_norm: L2-normalize the representations first.
      weights: currently unused; kept for API compatibility.
    """
    # Get (normalized) hidden1 and hidden2.
    if hidden_norm:
      hidden1 = tf.math.l2_normalize(hidden1, -1)
      hidden2 = tf.math.l2_normalize(hidden2, -1)
    batch_size = tf.shape(hidden1)[0]
    labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
    masks = tf.one_hot(tf.range(batch_size), batch_size)

    # Mask out each example's similarity with itself with a large negative
    # logit so it cannot be its own positive.
    logits_aa = tf.matmul(
        hidden1, hidden1, transpose_b=True) / self.pretrain_temperature
    logits_aa = logits_aa - masks * LARGE_NUM
    logits_bb = tf.matmul(
        hidden2, hidden2, transpose_b=True) / self.pretrain_temperature
    logits_bb = logits_bb - masks * LARGE_NUM
    logits_ab = tf.matmul(
        hidden1, hidden2, transpose_b=True) / self.pretrain_temperature
    logits_ba = tf.matmul(
        hidden2, hidden1, transpose_b=True) / self.pretrain_temperature

    logits_a = tf.concat([logits_ab, logits_aa], 1)
    logits_b = tf.concat([logits_ba, logits_bb], 1)
    loss_a = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels, logits=logits_a)
    loss_b = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels, logits=logits_b)
    loss = loss_a + loss_b

    return loss

  def get_pretraining_trainable_variables(self):
    return (self._observation_encoder.trainable_variables +
            self._to_hidden.trainable_variables +
            self._pretrain_head.trainable_variables +
            self._pretrain_predictor.trainable_variables)
class ResidualBlock(layers.Layer):
  """Residualblock.

  Two 3x3 convolutions with LayerNormalization and a skip connection.

  Implementation adapted from:
  https://towardsdatascience.com/from-scratch-implementation-of-alphazero-for-connect4-f73d4554002a
  .
  """

  def __init__(self, planes):
    # planes: number of conv filters; must match the input's channel count
    # for the residual addition in __call__ to be shape-compatible.
    super(ResidualBlock, self).__init__(name='')
    self.planes = planes
    self.conv2a = layers.Conv2D(
        filters=self.planes,
        kernel_size=3,
        strides=(1, 1),
        padding='same',
        use_bias=False)
    # NOTE: despite the bn* names, these are LayerNormalization layers,
    # not BatchNormalization.
    self.bn2a = layers.LayerNormalization()

    self.conv2b = layers.Conv2D(
        filters=self.planes,
        kernel_size=3,
        strides=(1, 1),
        padding='same',
        use_bias=False)
    self.bn2b = layers.LayerNormalization()
    self.relu = layers.ReLU()

  def __call__(self, input_tensor, training=True, **kwargs):
    # NOTE(review): this overrides __call__ directly instead of the usual
    # Keras call(), bypassing Layer.__call__ bookkeeping -- presumably
    # intentional; confirm before refactoring.
    x = self.conv2a(input_tensor, training=training)
    x = self.bn2a(x, training=training)
    x = self.relu(x)

    x = self.conv2b(x, training=training)
    x = self.bn2b(x, training=training)

    # Skip connection followed by the final activation.
    x += input_tensor
    return self.relu(x)
| |
from __future__ import division
import numpy as np
import pandas as pd
import random
from enum import Enum
from joint_dependency.recorder import Record
def get_state(q, states):
    """Return the index of the interval of ascending bounds *states* that
    contains *q* (len(states) when q is beyond the last bound)."""
    for idx, upper in enumerate(states):
        if q < upper:
            return idx
    # Past every bound.  An empty bound list yields 1, matching the
    # historical behaviour of this function.
    return len(states) if states else 1
def sgn(x):
    """Return -1 for negative x and +1 otherwise (note: sgn(0) == 1)."""
    return -1 if x < 0 else 1
class Joint(object):
    """A 1-DOF joint with state-dependent damping, limits, locking and
    noisy sensor readings."""

    def __init__(self, states, dampings, limits, noise, index=None, position=None):
        # Only set the attribute when given so callers that rely on
        # hasattr(joint, 'index') keep working.
        if index is not None:
            self.index = index
        self.max_vel = np.inf
        self.vel = 0
        self.q = 0
        self.states = states      # ascending state boundaries for get_state()
        self.dampings = dampings  # one damping value per state interval
        self.min_limit = limits[0]
        self.max_limit = limits[1]
        self.direction = 1
        self.locked = False
        self.noise = noise        # dict with 'q' and 'vel' noise std-devs
        self.position = position

    def add_force(self, f):
        """Accelerate the joint by f (no-op while locked, capped at max_vel)."""
        # (An unused get_state() lookup was removed here.)
        if not self.locked:
            self.vel += f
            self.vel = min(self.vel, self.max_vel)

    def lock(self):
        """Freeze the joint in place."""
        self.vel = 0
        self.locked = True

    def unlock(self):
        self.locked = False

    def is_locked(self):
        return self.locked

    @Record(["q", "v", "locked", "direction"])
    def step(self, dt):
        """Integrate the joint over dt seconds.

        Returns [q, vel, locked, direction] for the recorder.
        """
        if self.locked:
            return [self.q, self.vel, self.locked, 0]
        self.q += self.vel * dt
        # (An unused local `vel = self.vel * dt` was removed here.)
        change_direction = -1
        if (self.max_limit is not None) and (self.q > self.max_limit):
            self.q = self.max_limit
            change_direction = 1
        if (self.min_limit is not None) and (self.q < self.min_limit):
            self.q = self.min_limit
            change_direction = 1
        state = get_state(self.q, self.states)
        damping = self.dampings[state]
        # Reverse the motion on limit contact, otherwise keep the sign of
        # the current velocity.
        direction = -change_direction * sgn(self.vel)
        # Drain kinetic energy according to the state-dependent damping.
        tmp_sqr_vel = max(self.vel ** 2 - abs(damping * self.vel * dt), 0)
        self.vel = direction * np.sqrt(tmp_sqr_vel)
        return [self.q, self.vel, self.locked, direction]

    def get_vel(self):
        """Velocity reading with Gaussian sensor noise."""
        return random.gauss(self.vel, self.noise['vel'])

    def get_q(self):
        """Position reading with Gaussian sensor noise."""
        return random.gauss(self.q, self.noise['q'])
class World(object):
    """Container that advances all joints in lockstep and notifies
    registered listeners after each step."""

    def __init__(self, joints):
        self.joints = joints
        self.listeners = []
        self.time = 0
        # Give every joint a back-reference to its world.
        for joint in joints:
            joint.world = self

    def step(self, dt):
        """Advance the simulation by dt seconds, then inform listeners."""
        self.time += dt
        for joint in self.joints:
            joint.step(dt)
        self._inform_listeners(dt)

    def get_index(self):
        # Timestamp index (seconds since start) for pandas-based records.
        return [pd.to_datetime(self.time, unit="s")]

    def register(self, listener):
        """Subscribe *listener* (anything with a step(dt) method)."""
        self.listeners.append(listener)

    def get_joint(self, num):
        return self.joints[num]

    def _inform_listeners(self, dt):
        for observer in self.listeners:
            observer.step(dt)

    def add_joint(self, joint):
        """Append *joint*, wiring up its world back-reference and index."""
        self.joints.append(joint)
        joint.world = self
        joint.index = len(self.joints) - 1
class Controller(object):
    """Per-joint controller that multiplexes force and position control."""

    def __init__(self, world, joint_idx):
        self.joint = world.joints[joint_idx]
        self.world = world
        self.world.register(self)
        self.index = joint_idx
        self.force_control = False
        self.force_controller = ForceController(world, joint_idx)
        self.position_control = False
        self.position_controller = PositionController(world, joint_idx)
        self.max_force = 15

    @Record(["applied_force", "desired_force"])
    def step(self, dt):
        """Query the active sub-controller, clamp its force and apply it.

        Returns:
            list: ``[applied_force, desired_force]`` for the recorder.
        """
        desired = 0
        if self.force_control:
            if self.force_controller.is_done():
                self.force_control = False
            else:
                desired = self.force_controller.step(dt)
        elif self.position_control:
            if self.position_controller.is_done():
                self.position_control = False
            else:
                desired = self.position_controller.step(dt)
        # Saturate at +/- max_force while preserving the requested sign.
        applied = sgn(desired) * min(abs(desired), self.max_force)
        self.joint.add_force(applied)
        return [applied, desired]

    def move_to(self, goal):
        """Activate position control towards ``goal``."""
        self.position_controller.move_to(goal)
        self.position_control = True

    def apply_force(self, time, force):
        """Activate force control: apply ``force`` for ``time`` seconds."""
        self.force_controller.apply_force(time, force)
        self.force_control = True

    def is_done(self):
        """True when neither control mode is currently active."""
        return not (self.position_control or self.force_control)
class PositionController(object):
    """PID position controller for a single joint."""

    def __init__(self, world, joint_idx):
        self.goal_pos = None
        self.joint = world.joints[joint_idx]
        self.q_eps = .5       # position tolerance for "done"
        self.v_eps = 10e-3    # velocity tolerance for "done"
        self.q = 0
        self.v = 0
        self.i = 0            # integral-term accumulator
        self.kp = 2
        self.kd = 1
        self.ki = 0
        self.max_force = 30

    def move_to(self, pos):
        """Set a new goal position."""
        self.goal_pos = pos

    def step(self, dt):
        """Sample the (noisy) joint state and return the PID force command."""
        self.q = self.joint.get_q()
        self.v = self.joint.get_vel()
        return self._pid_control()

    def _pid_control(self):
        """Classic PID law on the position error; zero when idle or done."""
        if self.goal_pos is None or self.is_done():
            return 0
        error = self.goal_pos - self.q
        self.i += error
        return self.kp * error - self.kd * self.v + self.ki * self.i

    def is_done(self):
        """Done when there is no goal, the joint converged, or it is locked."""
        if self.goal_pos is None:
            return True
        converged = (abs(self.q - self.goal_pos) < self.q_eps
                     and abs(self.v) < self.v_eps)
        return converged or self.joint.is_locked()
class ForceController(object):
    """Applies a constant force to a joint for a fixed duration."""

    def __init__(self, world, joint_idx):
        self.joint = world.joints[joint_idx]
        self.time = 0    # remaining application time [s]
        self.force = 0

    def apply_force(self, time, force):
        """Schedule ``force`` to be applied for ``time`` seconds."""
        self.force = force
        self.time = time

    def is_done(self):
        """True once the scheduled duration has fully elapsed."""
        return self.time <= 0

    def step(self, dt):
        """Tick the remaining time down; return the force while still active."""
        self.time = max(0, self.time - dt)
        return self.force if self.time > 0 else 0
class Locker(object):
    """Keeps the ``locked`` joint locked while ``locker`` is inside a band.

    Whenever ``lower < locker.q < upper`` the ``locked`` joint is locked;
    outside that band it is unlocked again.
    """

    def __init__(self, world, locker, locked, lower, upper):
        self.world = world
        self.world.register(self)
        self.locker = locker
        self.locked = locked
        self.lower = lower
        self.upper = upper

    def step(self, dt):
        """Re-evaluate the locking condition (called once per world step)."""
        inside = self.lower < self.locker.q < self.upper
        currently_locked = self.locked.is_locked()
        if inside and not currently_locked:
            self.locked.lock()
        elif not inside and currently_locked:
            self.locked.unlock()
class MultiLocker(object):
    """Locks a slave joint while a master joint sits in any locking band."""

    def __init__(self, world, master, slave, locks):
        """
        :param world: The world
        :param master: The master joint of the locking
        :param slave: The slave joint of the locking
        :param locks: Iterable of ``(low, high)`` master positions at which
            the slave must be locked
        """
        self.world = world
        self.world.register(self)
        self.master = master
        self.slave = slave
        self.locks = locks

    def step(self, dt):
        """Synchronize the slave's lock state with the master's position."""
        should_lock = any(low <= self.master.q <= high
                          for (low, high) in self.locks)
        if self.slave.is_locked():
            if not should_lock:
                self.slave.unlock()
        elif should_lock:
            self.slave.lock()
class ActionMachine(object):
    """Executes high-level actions (move, lock-probe) on a simulated world.

    :param world: The world to act on.
    :param controller: Sequence of per-joint ``Controller`` objects,
        indexed by joint number.
    :param tau: Simulation time step used while executing an action.
    """

    def __init__(self, world, controller, tau=0.1):
        self.world = world
        self.controllers = controller
        self.tau = tau

    def run_action(self, pos, joint=None):
        """Drive ``joint`` towards ``pos[joint]`` and report success.

        Steps the world until the joint's controller reports done, then
        returns True iff the joint ended within 0.5 units of the target.
        NOTE(review): the ``joint=None`` default would index ``pos`` and
        ``self.controllers`` with None — callers appear to always pass an
        explicit joint index; confirm before relying on the default.
        """
        self.controllers[joint].move_to(pos[joint])
        while not self.controllers[joint].is_done():
            self.world.step(self.tau)
        if abs(self.world.joints[joint].q - pos[joint]) < 0.5:
            return True
        else:
            return False

    # todo: return True if joint moved, and false if not
    def check_state(self, joint):
        """Probe whether ``joint`` is locked by poking it with a force.

        Applies a 10-unit force for 1 s, simulates 10 steps of ``tau`` and
        returns 1 (locked) if the joint did not move noticeably, else 0.
        """
        old_pos = self.world.joints[joint].q
        self.controllers[joint].apply_force(1, 10)
        for i in range(10):
            self.world.step(self.tau)
        new_pos = self.world.joints[joint].q
        # Movement below 10e-3 is treated as measurement noise / no motion.
        if abs(old_pos - new_pos) > 10e-3:
            locked_state = 0
        else:
            locked_state = 1
        return locked_state
class Furniture(Enum):
    """Furniture types that ``create_furniture`` knows how to build."""
    drawer_key = 0        # drawer gated by a key joint
    drawer_handle = 1     # drawer gated by a handle joint
    cupboard_key = 2      # cupboard gated by a key joint
    cupboard_handle = 3   # cupboard gated by a handle joint
    # window = 4          # currently disabled (see create_window)
def create_furniture(furniture, *args, **kwargs):
    """Build the given ``Furniture`` type, forwarding all extra arguments.

    :param furniture: A ``Furniture`` enum member selecting the builder.
    :raises TypeError: If ``furniture`` is not a supported furniture type.
    """
    if furniture == Furniture.drawer_key:
        builder = create_drawer_with_key
    elif furniture == Furniture.drawer_handle:
        builder = create_drawer_with_handle
    elif furniture == Furniture.cupboard_key:
        builder = create_cupboard_with_key
    elif furniture == Furniture.cupboard_handle:
        builder = create_cupboard_with_handle
    # elif furniture == Furniture.window:
    #     builder = create_window
    else:
        raise TypeError("{} is not a valid furniture.".format(furniture))
    return builder(*args, **kwargs)
def create_drawer_with_key(world, noise, limits):
    """Add a key-locked drawer (two joints) to ``world``.

    The key joint unlocks the drawer joint only inside a randomly placed
    20-unit-wide band around ``open_at``.
    """
    open_at = np.random.randint(limits[0][0] + 20, limits[0][1] - 20)
    unlock_band = (open_at - 10, open_at + 10)
    # The 'handle' (key): stiffly damped inside the middle regime.
    world.add_joint(Joint([unlock_band[0], unlock_band[1]], [15, 200, 15],
                          limits[0], noise))
    # The 'window' (drawer): uniform light damping.
    world.add_joint(Joint([limits[1][1]], [15, 15], limits[1], noise))
    # Drawer is locked whenever the key sits outside the unlock band.
    MultiLocker(world, master=world.joints[-2], slave=world.joints[-1],
                locks=[(limits[0][0], unlock_band[0]),
                       (unlock_band[1], limits[0][1])])
def create_drawer_with_handle(world, noise, limits):
    """Add a handle-locked drawer (two joints) to ``world``.

    A 20-unit unlock band sits at a randomly chosen end of the handle's
    range; everywhere else the drawer stays locked.
    """
    at_upper_end = np.random.uniform() > .5
    lo, hi = limits[0]
    if at_upper_end:
        open_d = (hi - 20, hi)
        locked_d = (lo, hi - 20)
    else:
        open_d = (lo, lo + 20)
        locked_d = (lo + 20, hi)
    # The 'handle'
    world.add_joint(Joint([open_d[0], open_d[1]], [15, 200, 15],
                          limits[0], noise))
    # The 'window' (drawer)
    world.add_joint(Joint([limits[1][1]], [15, 15], limits[1], noise))
    MultiLocker(world, master=world.joints[-2], slave=world.joints[-1],
                locks=[locked_d])
def create_cupboard_with_key(world, noise, limits):
    """Add a key-locked cupboard (two joints) to ``world``.

    Same mechanics as the key-locked drawer: the cupboard joint is locked
    unless the key joint sits inside a random 20-unit band.
    """
    open_at = np.random.randint(limits[0][0] + 20, limits[0][1] - 20)
    unlock_band = (open_at - 10, open_at + 10)
    # The 'handle' (key)
    world.add_joint(Joint([unlock_band[0], unlock_band[1]], [15, 200, 15],
                          limits[0], noise))
    # The 'window' (cupboard door)
    world.add_joint(Joint([limits[1][1]], [15, 15], limits[1], noise))
    MultiLocker(world, master=world.joints[-2], slave=world.joints[-1],
                locks=[(limits[0][0], unlock_band[0]),
                       (unlock_band[1], limits[0][1])])
def create_cupboard_with_handle(world, noise, limits):
    """Add a handle-locked cupboard (two joints) to ``world``.

    A 20-unit unlock band sits at a randomly chosen end of the handle's
    range; everywhere else the cupboard stays locked.
    """
    at_upper_end = np.random.uniform() > .5
    lo, hi = limits[0]
    if at_upper_end:
        open_d = (hi - 20, hi)
        locked_d = (lo, hi - 20)
    else:
        open_d = (lo, lo + 20)
        locked_d = (lo + 20, hi)
    # The 'handle'
    world.add_joint(Joint([open_d[0], open_d[1]], [15, 200, 15],
                          limits[0], noise))
    # The 'window' (cupboard door)
    world.add_joint(Joint([limits[1][1]], [15, 15], limits[1], noise))
    MultiLocker(world, master=world.joints[-2], slave=world.joints[-1],
                locks=[locked_d])
def create_window(world, noise, limits):
    """Add a tiltable window (three joints) to ``world``.

    Joint layout: handle, tilted-window, open-window.  The handle locks the
    tilted window while inside ``tilt_d`` bands and locks the open window
    outside a randomly chosen 20-unit band at one end of its range.
    """
    tilt_at = (limits[0][0] + limits[0][1]) / 2
    # NOTE(review): the second band is (upper_limit, tilt_at + 10), i.e.
    # low > high, which can never match in MultiLocker — possibly intended
    # as (tilt_at + 10, upper_limit).  Preserved as-is pending confirmation.
    tilt_d = [(limits[0][0], tilt_at - 10), (limits[0][1], tilt_at + 10)]
    open_upper = np.random.uniform() > .5
    if open_upper:
        locked_d = (limits[0][0], limits[0][1] - 20)
    else:
        locked_d = (limits[0][0] + 20, limits[0][1])
    # The 'handle'
    states = [limits[0][0], tilt_d[0][1], tilt_d[1][0], limits[0][1]]
    dampings = [15, 200, 15, 200, 15]
    world.add_joint(Joint(states, dampings, limits[0], noise))
    # The 'tilted window'
    world.add_joint(Joint([limits[1][1]], [15, 15], limits[1], noise))
    # The 'open window'
    world.add_joint(Joint([limits[2][1]], [15, 15], limits[2], noise))
    MultiLocker(world, master=world.joints[-3], slave=world.joints[-2],
                locks=tilt_d)
    # BUG FIX: MultiLocker expects a *list* of (low, high) bands.  The bare
    # tuple previously passed here made step() iterate over scalars and
    # crash on lock[0]; all sibling creators already wrap it in a list.
    MultiLocker(world, master=world.joints[-3], slave=world.joints[-1],
                locks=[locked_d])
def create_world(n=3):
    """Build a world containing ``n`` randomly chosen furniture items."""
    noise = {'q': 10e-6, 'vel': 10e-6}
    world = World([])
    for _ in range(n):
        chosen = random.choice(list(Furniture))
        create_furniture(chosen, world, noise, [[0, 180], [0, 120]])
    return world
# FIXME find better location
# Spatial positions of the five lockbox joints.  Materialized as a list:
# the original used ``map``, which on Python 3 returns a one-shot iterator
# and breaks the ``len()`` and indexing performed in create_lockbox.
lockbox_joint_positions = [np.array(p) for p in [
    [6, 1.2, 0],
    [6.8, 4, 0],
    [6.8, 6.5, 0],
    [4, 6.5, 0],
    [2.2, 7, 0],
]]
def create_lockbox(num_of_joints=5, noise=None, use_joint_positions=False,
                   use_simple_locking_state=False):
    """Create the standard lockbox world of sequentially locking joints.

    Each joint ``i`` (for ``i > 0``) is locked unless joint ``i-1`` sits
    inside its 50-unit opening band ``(m - 10, m + 41)``; ``m`` is fixed at
    170 when ``use_simple_locking_state`` is set and random otherwise.

    Args:
        num_of_joints: Number of chained joints to create.
        noise: ``{'q': ..., 'vel': ...}`` measurement-noise std-devs;
            defaults to tiny noise.
        use_joint_positions: Attach spatial positions from
            ``lockbox_joint_positions`` to the created joints.
        use_simple_locking_state: Use the deterministic band at ``m = 170``.

    Returns:
        World: The fully wired world, advanced by one 0.1 s step.
    """
    if noise is None:
        noise = {'q': 10e-6, 'vel': 10e-6}
    if use_joint_positions:
        assert len(lockbox_joint_positions) >= num_of_joints
    world = World([])
    limits = (0, 180)
    for i in range(num_of_joints):
        dampings = [15, 200, 15]
        if use_simple_locking_state:
            m = 170.
        else:
            m = random.randint(10, 170)
        lower = (-1, m - 10)
        upper = (m + 41, 281)
        if i > 0:
            locks = [lower, upper]
        # BUG FIX: positions were previously attached unconditionally; the
        # ``use_joint_positions`` flag only guarded the length assert.  The
        # print below already handles ``jpos is None``, confirming that a
        # conditional assignment was intended.
        jpos = lockbox_joint_positions[i] if use_joint_positions else None
        world.add_joint(Joint([lower[1], upper[0]], dampings,
                              limits=limits, noise=noise,
                              position=jpos))
        if i > 0:
            MultiLocker(world, master=world.joints[i - 1],
                        slave=world.joints[i], locks=locks)
        print("Joint {}{} opens at {} - {}".format(
            i,
            (" [%.1f, %.1f, %.1f]" % tuple(jpos.tolist()))
            if jpos is not None else "",
            lower[1], upper[0]))
    world.step(.1)
    return world
| |
import re
import StringIO
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.db import transaction
from rest_framework import permissions
from rest_framework import status
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.authentication import (
BasicAuthentication,
TokenAuthentication,
SessionAuthentication,)
from rest_framework.response import Response
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from onadata.apps.logger.models import Instance
from onadata.apps.main.models.user_profile import UserProfile
from onadata.apps.viewer.models.parsed_instance import update_mongo_instance
from onadata.libs import filters
from onadata.libs.authentication import DigestAuthentication
from onadata.libs.mixins.openrosa_headers_mixin import OpenRosaHeadersMixin
from onadata.libs.renderers.renderers import TemplateXMLRenderer
from onadata.libs.serializers.data_serializer import SubmissionSerializer
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
# Maximum accepted submission size: 10,000,000 bytes unless overridden by
# Django settings.
DEFAULT_CONTENT_LENGTH = getattr(settings, 'DEFAULT_CONTENT_LENGTH', 10000000)
# Extracts the text between the first '>' and the last '<' of an XML error
# payload (raw string per regex convention; pattern itself is unchanged).
xml_error_re = re.compile(r'>(.*)<')
def is_json(request):
    """True if the request's Content-Type declares a JSON payload."""
    content_type = request.content_type.lower()
    return 'application/json' in content_type
def dict_lists2strings(d):
    """Convert lists in a dict to joined strings.

    Lists whose elements are all strings are replaced by their
    space-joined concatenation; nested dicts are converted recursively.
    The dict is modified in place and also returned.

    :param d: The dict to convert.
    :returns: The converted dict."""
    for key, value in d.items():
        if isinstance(value, dict):
            d[key] = dict_lists2strings(value)
        elif isinstance(value, list) and \
                all(isinstance(item, basestring) for item in value):
            d[key] = ' '.join(value)
    return d
def create_instance_from_xml(username, request):
    """Create an Instance from a multipart XML submission request."""
    xml_files = request.FILES.pop('xml_submission_file', [])
    xml_file = xml_files[0] if xml_files else None
    media_files = request.FILES.values()
    return safe_create_instance(username, xml_file, media_files, None, request)
def create_instance_from_json(username, request):
    """Create an Instance from a JSON submission request.

    Returns the ``(error, instance)`` pair produced by
    ``safe_create_instance``, or an error message when no submission key
    is present in the payload.
    """
    request.accepted_renderer = JSONRenderer()
    request.accepted_media_type = JSONRenderer.media_type
    payload = request.data
    submission = payload.get('submission')
    if submission is None:
        # return an error
        return [_(u"No submission key provided."), None]
    # Lists inside the submission dict become space-joined strings so the
    # generated XML contains flat text values.
    xml_string = dict2xform(dict_lists2strings(submission),
                            payload.get('id'))
    xml_file = StringIO.StringIO(xml_string)
    return safe_create_instance(username, xml_file, [], None, request)
def update_mongo(i):
    """Best-effort push of an instance's parsed data into Mongo.

    Augments the mongo dict with fieldsight-specific keys taken from
    ``i.fieldsight_instance`` before syncing.  All failures are swallowed
    and printed so that submission handling never breaks on a sync error.

    :param i: An ``Instance`` with ``parsed_instance`` and (possibly)
        ``fieldsight_instance`` relations.
    """
    d = i.parsed_instance.to_dict_for_mongo()
    try:
        x = i.fieldsight_instance
        d.update(
            {'fs_project_uuid': str(x.project_fxf_id), 'fs_project': x.project_id, 'fs_status': 0, 'fs_site': str(x.site_id),
             'fs_uuid': str(x.site_fxf_id)})
        try:
            # Sync failure is logged and ignored — the submission itself
            # has already been stored by the caller.
            synced = update_mongo_instance(d, i.id)
            print(synced, "updated in mongo success")
        except Exception as e:
            print(str(e))
    except Exception as e:
        # Accessing ``fieldsight_instance`` failed (e.g. relation missing).
        print(str(e))
class XFormSubmissionApi(OpenRosaHeadersMixin,
                         mixins.CreateModelMixin, viewsets.GenericViewSet):
    """
    Implements OpenRosa Api [FormSubmissionAPI](\
    https://bitbucket.org/javarosa/javarosa/wiki/FormSubmissionAPI)

    ## Submit an XML XForm submission

    <pre class="prettyprint">
    <b>POST</b> /api/v1/submissions</pre>
    > Example
    >
    >       curl -X POST -F xml_submission_file=@/path/to/submission.xml \
    https://example.com/api/v1/submissions

    ## Submit an JSON XForm submission

    <pre class="prettyprint">
    <b>POST</b> /api/v1/submissions</pre>
    > Example
    >
    >       curl -X POST -d '{"id": "[form ID]", "submission": [the JSON]} \
    http://localhost:8000/api/v1/submissions -u user:pass -H "Content-Type: \
    application/json"

    Here is some example JSON, it would replace `[the JSON]` above:
    >       {
    >           "transport": {
    >               "available_transportation_types_to_referral_facility": \
    ["ambulance", "bicycle"],
    >               "loop_over_transport_types_frequency": {
    >                   "ambulance": {
    >                       "frequency_to_referral_facility": "daily"
    >                   },
    >                   "bicycle": {
    >                       "frequency_to_referral_facility": "weekly"
    >                   },
    >                   "boat_canoe": null,
    >                   "bus": null,
    >                   "donkey_mule_cart": null,
    >                   "keke_pepe": null,
    >                   "lorry": null,
    >                   "motorbike": null,
    >                   "taxi": null,
    >                   "other": null
    >               }
    >           }
    >           "meta": {
    >               "instanceID": "uuid:f3d8dc65-91a6-4d0f-9e97-802128083390"
    >           }
    >       }
    """
    filter_backends = (filters.AnonDjangoObjectPermissionFilter,)
    model = Instance
    # Anonymous submissions are allowed; object-level filtering and the
    # authentication classes configured in __init__ gate actual access.
    permission_classes = (permissions.AllowAny,)
    renderer_classes = (TemplateXMLRenderer,
                        JSONRenderer,
                        BrowsableAPIRenderer)
    serializer_class = SubmissionSerializer
    template_name = 'submission.xml'

    def __init__(self, *args, **kwargs):
        """Build the effective authentication class list for this view."""
        super(XFormSubmissionApi, self).__init__(*args, **kwargs)

        # Respect DEFAULT_AUTHENTICATION_CLASSES, but also ensure that the
        # previously hard-coded authentication classes are included first.
        # We include BasicAuthentication here to allow submissions using basic
        # authentication over unencrypted HTTP. REST framework stops after the
        # first class that successfully authenticates, so
        # HttpsOnlyBasicAuthentication will be ignored even if included by
        # DEFAULT_AUTHENTICATION_CLASSES.
        authentication_classes = [
            DigestAuthentication,
            BasicAuthentication,
            TokenAuthentication
        ]
        # Do not use `SessionAuthentication`, which implicitly requires CSRF prevention
        # (which in turn requires that the CSRF token be submitted as a cookie and in the
        # body of any "unsafe" requests).
        self.authentication_classes = authentication_classes + [
            auth_class for auth_class in self.authentication_classes
            if not auth_class in authentication_classes and \
            not issubclass(auth_class, SessionAuthentication)
        ]

    def create(self, request, *args, **kwargs):
        """Handle an OpenRosa submission POST (XML or JSON) or HEAD probe."""
        username = self.kwargs.get('username')
        # if self.request.user.is_anonymous():
        #     if username is None:
        #         # raises a permission denied exception, forces authentication
        #         self.permission_denied(self.request)
        #     else:
        #         user = get_object_or_404(User, username=username.lower())
        #
        #         profile, created = UserProfile.objects.get_or_create(user=user)
        #
        #         if profile.require_auth:
        #             # raises a permission denied exception,
        #             # forces authentication
        #             self.permission_denied(self.request)
        # elif not username:
        #     # get the username from the user if not set
        #     username = (request.user and request.user.username)

        # OpenRosa clients probe with HEAD before POSTing the submission.
        if request.method.upper() == 'HEAD':
            return Response(status=status.HTTP_204_NO_CONTENT,
                            headers=self.get_openrosa_headers(request),
                            template_name=self.template_name)

        # Dispatch to the JSON or XML instance factory by content type.
        is_json_request = is_json(request)

        error, instance = (create_instance_from_json if is_json_request else
                           create_instance_from_xml)(username, request)

        if error or not instance:
            return self.error_response(error, is_json_request, request)

        # Best-effort mongo sync; failures are swallowed inside update_mongo.
        update_mongo(instance)
        context = self.get_serializer_context()
        serializer = SubmissionSerializer(instance, context=context)
        return Response(serializer.data,
                        headers=self.get_openrosa_headers(request),
                        status=status.HTTP_201_CREATED,
                        template_name=self.template_name)

    def error_response(self, error, is_json_request, request):
        """Build an error Response from None, a message string, or a
        pre-built error response object (XML requests return it verbatim)."""
        if not error:
            error_msg = _(u"Unable to create submission.")
            status_code = status.HTTP_400_BAD_REQUEST
        elif isinstance(error, basestring):
            error_msg = error
            status_code = status.HTTP_400_BAD_REQUEST
        elif not is_json_request:
            return error
        else:
            # ``error`` is a response object; pull the message out of its
            # XML body for the JSON client.
            error_msg = xml_error_re.search(error.content).groups()[0]
            status_code = error.status_code

        return Response({'error': error_msg},
                        headers=self.get_openrosa_headers(request),
                        status=status_code)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import timeit
import unittest
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import tracing_backend
from telemetry.internal.backends.chrome_inspector.tracing_backend import _DevToolsStreamReader
from telemetry.testing import fakes
from telemetry.testing import simple_mock
from telemetry.testing import tab_test_case
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_config
class TracingBackendTest(tab_test_case.TabTestCase):
  """Browser-level integration tests for tracing with memory dumps."""

  # Number of consecutively requested memory dumps.
  _REQUESTED_DUMP_COUNT = 3

  @classmethod
  def CustomizeBrowserOptions(cls, options):
    """Add browser flags needed for deterministic memory-dump behavior."""
    options.AppendExtraBrowserArgs([
        # Memory maps currently cannot be retrieved on sandboxed processes.
        # See crbug.com/461788.
        '--no-sandbox',

        # Workaround to disable periodic memory dumps. See crbug.com/513692.
        '--enable-memory-benchmarking'
    ])

  def setUp(self):
    super(TracingBackendTest, self).setUp()
    self._tracing_controller = self._browser.platform.tracing_controller
    # Skip (rather than fail) on browsers lacking the required support.
    if not self._tracing_controller.IsChromeTracingSupported():
      self.skipTest('Browser does not support tracing, skipping test.')
    if not self._browser.supports_memory_dumping:
      self.skipTest('Browser does not support memory dumping, skipping test.')

  @decorators.Disabled('win')  # crbug.com/570955
  def testDumpMemorySuccess(self):
    """Dumps requested while tracing succeed and appear in the trace."""
    # Check that dumping memory before tracing starts raises an exception.
    self.assertRaises(Exception, self._browser.DumpMemory)

    # Start tracing with memory dumps enabled.
    config = tracing_config.TracingConfig()
    config.tracing_category_filter.AddDisabledByDefault(
        'disabled-by-default-memory-infra')
    config.enable_chrome_trace = True
    self._tracing_controller.StartTracing(config)

    # Request several memory dumps in a row and test that they were all
    # successfully created with unique IDs.
    expected_dump_ids = []
    for _ in xrange(self._REQUESTED_DUMP_COUNT):
      dump_id = self._browser.DumpMemory()
      self.assertIsNotNone(dump_id)
      self.assertNotIn(dump_id, expected_dump_ids)
      expected_dump_ids.append(dump_id)

    trace_data = self._tracing_controller.StopTracing()

    # Check that dumping memory after tracing stopped raises an exception.
    self.assertRaises(Exception, self._browser.DumpMemory)

    # Test that trace data is parsable.
    model = model_module.TimelineModel(trace_data)
    self.assertGreater(len(model.processes), 0)

    # Test that the resulting model contains the requested memory dumps in the
    # correct order (and nothing more).
    actual_dump_ids = [d.dump_id for d in model.IterGlobalMemoryDumps()]
    self.assertEqual(actual_dump_ids, expected_dump_ids)

  @decorators.Disabled('win')  # crbug.com/570955
  def testDumpMemoryFailure(self):
    """Dumps requested without memory-infra enabled must return None."""
    # Check that dumping memory before tracing starts raises an exception.
    self.assertRaises(Exception, self._browser.DumpMemory)

    # Start tracing with memory dumps disabled.
    config = tracing_config.TracingConfig()
    config.enable_chrome_trace = True
    self._tracing_controller.StartTracing(config)

    # Check that the method returns None if the dump was not successful.
    self.assertIsNone(self._browser.DumpMemory())

    trace_data = self._tracing_controller.StopTracing()

    # Check that dumping memory after tracing stopped raises an exception.
    self.assertRaises(Exception, self._browser.DumpMemory)

    # Test that trace data is parsable.
    model = model_module.TimelineModel(trace_data)
    self.assertGreater(len(model.processes), 0)

    # Test that the resulting model contains no memory dumps.
    self.assertEqual(len(list(model.IterGlobalMemoryDumps())), 0)
class TracingBackendUnitTest(unittest.TestCase):
  """Unit tests for TracingBackend against a fake inspector websocket."""

  def setUp(self):
    # Replace the real clock inside tracing_backend with a mock so that
    # event delivery times (third argument of AddEvent) are deterministic.
    self._mock_timer = simple_mock.MockTimer(tracing_backend)
    self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)

  def tearDown(self):
    self._mock_timer.Restore()

  def testCollectTracingDataTimeout(self):
    """A gap between responses longer than the timeout must raise."""
    self._inspector_socket.AddEvent(
        'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
    self._inspector_socket.AddEvent(
        'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 19)
    self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 35)
    backend = tracing_backend.TracingBackend(self._inspector_socket)

    # The third response is 16 seconds after the second response, so we expect
    # a TracingTimeoutException.
    with self.assertRaises(tracing_backend.TracingTimeoutException):
      backend._CollectTracingData(10)
    # Both data events arrived before the timeout, but completion never did.
    self.assertEqual(2, len(backend._trace_events))
    self.assertFalse(backend._has_received_all_tracing_data)

  def testCollectTracingDataNoTimeout(self):
    """Responses spaced within the timeout complete collection normally."""
    self._inspector_socket.AddEvent(
        'Tracing.dataCollected', {'value': [{'ph': 'B'}]}, 9)
    self._inspector_socket.AddEvent(
        'Tracing.dataCollected', {'value': [{'ph': 'E'}]}, 14)
    self._inspector_socket.AddEvent('Tracing.tracingComplete', {}, 19)
    backend = tracing_backend.TracingBackend(self._inspector_socket)
    backend._CollectTracingData(10)
    self.assertEqual(2, len(backend._trace_events))
    self.assertTrue(backend._has_received_all_tracing_data)

  def testCollectTracingDataFromStream(self):
    """Trace data handed over as an IO stream is read and JSON-parsed."""
    self._inspector_socket.AddEvent(
        'Tracing.tracingComplete', {'stream': '42'}, 1)
    # Two chunks that only form valid JSON once concatenated.
    self._inspector_socket.AddAsyncResponse(
        'IO.read', {'data': '[{},{},{'}, 2)
    self._inspector_socket.AddAsyncResponse(
        'IO.read', {'data': '},{},{}]', 'eof': True}, 3)
    backend = tracing_backend.TracingBackend(self._inspector_socket)
    backend._CollectTracingData(10)
    self.assertEqual(5, len(backend._trace_events))
    self.assertTrue(backend._has_received_all_tracing_data)

  def testDumpMemorySuccess(self):
    """DumpMemory returns the dump GUID when the browser reports success."""
    self._inspector_socket.AddResponseHandler(
        'Tracing.requestMemoryDump',
        lambda req: {'result': {'success': True, 'dumpGuid': '42abc'}})
    backend = tracing_backend.TracingBackend(self._inspector_socket)
    self.assertEqual(backend.DumpMemory(), '42abc')

  def testDumpMemoryFailure(self):
    """DumpMemory returns None when the browser reports failure."""
    self._inspector_socket.AddResponseHandler(
        'Tracing.requestMemoryDump',
        lambda req: {'result': {'success': False, 'dumpGuid': '42abc'}})
    backend = tracing_backend.TracingBackend(self._inspector_socket)
    self.assertIsNone(backend.DumpMemory())
class DevToolsStreamPerformanceTest(unittest.TestCase):
  """Checks that DevTools stream reading scales roughly linearly."""

  def setUp(self):
    self._mock_timer = simple_mock.MockTimer(tracing_backend)
    self._inspector_socket = fakes.FakeInspectorWebsocket(self._mock_timer)

  def _MeasureReadTime(self, count):
    """Return wall-clock seconds spent reading ~``count`` payload chunks.

    Feeds chunks of 5000 empty JSON objects through the fake websocket
    (terminated by an EOF chunk closing the JSON array) and times
    _DevToolsStreamReader consuming them.
    """
    mock_time = self._mock_timer.time() + 1
    payload = ','.join(['{}'] * 5000)
    self._inspector_socket.AddAsyncResponse('IO.read', {'data': '[' + payload},
                                            mock_time)
    startClock = timeit.default_timer()

    # Mutable cell so the nested callback can signal completion.
    done = {'done': False}
    def mark_done(data):
      del data  # unused
      done['done'] = True

    reader = _DevToolsStreamReader(self._inspector_socket, 'dummy')
    reader.Read(mark_done)
    while not done['done']:
      mock_time += 1
      if count > 0:
        self._inspector_socket.AddAsyncResponse('IO.read', {'data': payload},
                                                mock_time)
      elif count == 0:
        # Final chunk closes the JSON array and marks end-of-stream.
        self._inspector_socket.AddAsyncResponse('IO.read',
            {'data': payload + ']', 'eof': True}, mock_time)
      count -= 1
      self._inspector_socket.DispatchNotifications(10)
    return timeit.default_timer() - startClock

  def testReadTime(self):
    """Reading 10x the data should take no more than ~10x the time."""
    n1 = 1000
    # Grow the workload until a measurable baseline time is reached.
    while True:
      t1 = self._MeasureReadTime(n1)
      if t1 > 0.01:
        break
      n1 *= 5
    t2 = self._MeasureReadTime(n1 * 10)
    # Time is an illusion, CPU time is doubly so, allow great deal of tolerance.
    toleranceFactor = 5
    self.assertLess(t2, t1 * 10 * toleranceFactor)
| |
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import logging
import uuid
from .SharedAttribute import SharedAttribute
class RefinableSearchState (object):
    """
    Object encapsulating the state of an IRQ session, including:
        - user feedback on video event classifications

    Pickle-able
    """

    # Classifier configuration shared across state instances (descriptor).
    _classifier_config = SharedAttribute()

    # TODO: Make thread safe without hindering on transportability or ability
    # to hash. i.e. can't use Locks.

    # Schedule of result-pool sizes (its consumer is not visible in this
    # module chunk — presumably the pool grows with each refinement round).
    pool_size_schedule = (10, 30, 75, 200, 1000)
    def __init__(self, event_type_or_prev_state, search_query=None,
                 distance_kernel=None, classifier_config=None, mdb_info=None,
                 result_model_id=None, state_uuid=None):
        """
        Initialize an IQR session state as a base state (given the UUID of the
        search) or as a new state in the chain by providing the previous
        state object,

        If this state is constructed from a parent state, we also inherit that
        parent's refinement state.

        When constructing the first state in a chain, the first state of a chain
        must be initialized with all parameters, not just the event ID.

        >>> RefinableSearchState(None)
        Traceback (most recent call last):
            ...
        ValueError: Initial state must be given the search query!

        Subsequent state along a chain should be constructed with the parent
        state as the sole parameter. The new state will inherit the parent
        state's values, except for the state UUID, which is globally unique
        across all states.:

        :param event_type_or_prev_state: The event type goal of the search or a
            previous search state to start from.
        :type event_type_or_prev_state: None or int or RefinableSearchState
        :param search_query: If given, the search UUID (this is the first state
            along a chain), the search query must be provided for historical
            purposes. If a previous state, this parameter will be ignored.
        :type search_query: None or *
        :param distance_kernel: The distance kernel interface instance to use
            for this search session.
        :type distance_kernel: DistanceKernel
        :param classifier_config: Only required for the first search state of a
            chain, and is the configuration dictionary for ECD classifiers.
        :type classifier_config: dict
        :param mdb_info: Database connection information. This will be copied
            because we will overwrite the collection parameter of our internal
            copy to a relevant value.
        :type mdb_info: DatabaseInfo
        """
        self._log = logging.getLogger('.'.join((self.__module__,
                                                self.__class__.__name__)))
        # Globally unique per state (not per search chain).
        self._state_uuid = state_uuid or uuid.uuid4()

        # Maps defining user input positive and negative classifications
        # If a user declares that a video ID is positive for an event type, it
        # cannot be negative for that event type at the same time.
        #
        # Expected format for both maps:
        # {
        #   <eventID>: set(<video_IDs>),
        #   ...
        # }
        #
        #: :type: set of int
        self._positives = set()
        #: :type: set of int
        self._negatives = set()

        # slot to record "child" states when this state becomes the parent of
        # another state.
        self._child_state = None

        # ``long`` keeps this Python 2 compatible (int event IDs may be long).
        if isinstance(event_type_or_prev_state, (int, long, type(None))):
            self._log.info("Creating an initial state")
            # The first state of a chain requires every session parameter.
            if search_query is None:
                raise ValueError("Initial state must be given the search "
                                 "query!")
            if distance_kernel is None:
                raise ValueError("Initial state must be given a distance "
                                 "kernel interface instance!")
            if classifier_config is None:
                raise ValueError("Initial state must be given a classifier "
                                 "configuration dictionary!")
            if mdb_info is None:
                raise ValueError("Initial state must be given a MongoDB info "
                                 "object!")
            if result_model_id is None:
                raise ValueError("Initial state must be given a result storage "
                                 "model ID string!")

            self._parent_state = None
            self._search_uuid = uuid.uuid4()
            self._search_event_type = event_type_or_prev_state
            self._search_query = search_query
            self._distance_kernel = distance_kernel
            self._classifier_config = classifier_config
            # Copy so that overwriting the collection does not mutate the
            # caller's DatabaseInfo.
            self._mdb_info = mdb_info.copy()
            self._mdb_info.collection = str(self._state_uuid)
            self._result_mID = result_model_id

        elif isinstance(event_type_or_prev_state, RefinableSearchState):
            self._log.info("Extending previous state %s",
                           event_type_or_prev_state)
            # Link into the chain in both directions.
            self._parent_state = event_type_or_prev_state
            event_type_or_prev_state._child_state = self
            self._search_uuid = event_type_or_prev_state.search_uuid
            self._search_event_type = event_type_or_prev_state.search_event_type
            self._search_query = event_type_or_prev_state.search_query
            self._distance_kernel = event_type_or_prev_state.distance_kernel

            # Inherit refinement state
            # single layer container, so we don't need deepcopy
            #: :type: set of int
            self._positives = set(event_type_or_prev_state._positives)
            #: :type: set of int
            self._negatives = set(event_type_or_prev_state._negatives)

            # Inherit previous state's classifier config for now, but it will
            # need to be overwritten when new modes are trained.
            self._classifier_config = event_type_or_prev_state.classifier_config
            self._mdb_info = event_type_or_prev_state.mdb_info.copy()
            self._mdb_info.collection = str(self._state_uuid)
            self._result_mID = event_type_or_prev_state.result_mID
        else:
            raise ValueError("Invalid parameter given to constructor (%s: %s)."
                             % (type(event_type_or_prev_state),
                                event_type_or_prev_state))
    def __hash__(self):
        # States hash by their globally unique state UUID (see ``uuid``).
        return hash(self.uuid)
def __eq__(self, other):
if isinstance(other, RefinableSearchState) and self.uuid == other.uuid:
return True
return False
    def __ne__(self, other):
        # Python 2 does not derive ``!=`` from ``__eq__``; delegate explicitly.
        return not (self == other)
    @property
    def search_uuid(self):
        """
        The search UUID this state is associated with.

        States inheriting from one another will share the same search UUID.
        States not on the same inheritance chain will not have the same search
        UUID.

        :return: The UUID of the search this state is associated with.
        :rtype: uuid.UUID
        """
        return self._search_uuid
    @property
    def search_event_type(self):
        """
        The event type context of this search state.

        :return: The event type of the parent search.
        :rtype: int or None
        """
        return self._search_event_type
    @property
    def search_query(self):
        """
        :return: The search query this chain was initialized with (kept for
            historical purposes).
        """
        return self._search_query
    @property
    def distance_kernel(self):
        """
        :return: The distance kernel interface used by this search session.
        :rtype: DistanceKernel
        """
        return self._distance_kernel
    @property
    def uuid(self):
        """
        The unique UUID of this search state. No two states will share the same
        UUID.

        :return: the UUID of this state
        :rtype: uuid.UUID
        """
        return self._state_uuid
    @property
    def parent_state(self):
        """
        Return the parent state of this search state. If there is no parent,
        meaning that this is the first state of the chain, then None is
        returned.

        :return: The parent search state of this state. It may be None if this
            state doesn't have a parent.
        :rtype: RefinableSearchState or None
        """
        return self._parent_state
@property
def num_parents(self):
"""
:return: The number of parent states of this state along the sequence.
:rtype: int
"""
p_states = 0
state = self.parent_state
while state is not None:
p_states += 1
state = state.parent_state
return p_states
    @property
    def child_state(self):
        """
        Return the child of this state. This state will not have a child until
        it has become the parent of a state, aka passed to the constructor when
        creating a new RefinableSearchState.

        :return: The child search state of this state. It may be None.
        :rtype: RefinableSearchState or None
        """
        return self._child_state
@property
def num_children(self):
"""
:return: The number of child states of this state along the sequence.
:rtype: int
"""
c_states = 0
state = self.child_state
while state is not None:
c_states += 1
state = state.child_state
return c_states
@property
def positives(self):
"""
:return: Positive user feedback of videos for events.
:rtype: set of int
"""
return frozenset(self._positives)
@property
def negatives(self):
"""
:return: Negative user feedback of videos for events.
:rtype: set of int
"""
return frozenset(self._negatives)
@property
def classifier_config(self):
return self._classifier_config
@classifier_config.setter
def classifier_config(self, value):
self._classifier_config = value
@property
def mdb_info(self):
"""
:rtype: DatabaseInfo
"""
return self._mdb_info
@property
def result_mID(self):
"""
:rtype: str
"""
return self._result_mID
def register_positive_feedback(self, vID_or_IDs):
"""
Register the given video ID ``vID`` as a positive match to the search.
:param vID_or_IDs: Video integer ID key or keys.
:type vID_or_IDs: int or Iterable of int
"""
# Convert to an iterable if a single int
if not hasattr(vID_or_IDs, '__iter__'):
# make sure its int-able
assert isinstance(int(vID_or_IDs), int)
vID_or_IDs = (vID_or_IDs,)
self._positives.update(vID_or_IDs)
# If this video was previously classified negatively for this event
# type, remove that negative entry. Can't be both positive and
# negative for the same event type at the same time.
self.remove_negative_feedback(vID_or_IDs)
def register_negative_feedback(self, vID_or_IDs):
"""
Register the given video ID ``vID`` as negatively match to the search
:param vID_or_IDs: Video integer ID key.
:type vID_or_IDs: int or Iterable of int
"""
# Convert to an iterable if a single int
if not hasattr(vID_or_IDs, '__iter__'):
# make sure its int-able
assert isinstance(int(vID_or_IDs), int)
vID_or_IDs = (vID_or_IDs,)
self._negatives.update(vID_or_IDs)
# If this video was previously classified positively for this event
# type, remove that positive entry. Can't be both positive and
# negative for the same event type at the same time.
self.remove_positive_feedback(vID_or_IDs)
def remove_positive_feedback(self, vID_or_IDs):
"""
Remove the given video ID as a positive match to the given event ID. If
the pairing doesn't exist in the positives registry, this does nothing.
:param vID_or_IDs: Video integer ID key
:type vID_or_IDs: int or Iterable of int
"""
# Convert to an iterable if a single int
if not hasattr(vID_or_IDs, '__iter__'):
# make sure its int-able
assert isinstance(int(vID_or_IDs), int)
vID_or_IDs = (vID_or_IDs,)
self._positives.difference_update(vID_or_IDs)
def remove_negative_feedback(self, vID_or_IDs):
"""
Remove the given video ID as a negative match to the given event ID. If
the pairing doesn't exist in the negatives registry, this does nothing.
:param vID_or_IDs: Video integer ID key
:type vID_or_IDs: int
"""
# Convert to an iterable if a single int
if not hasattr(vID_or_IDs, '__iter__'):
# make sure its int-able
assert isinstance(int(vID_or_IDs), int)
vID_or_IDs = (vID_or_IDs,)
self._negatives.difference_update(vID_or_IDs)
if __name__ == '__main__':
    # Run this module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| |
import os
import sys
import shutil
import logging
import json
import types
import threading
import happybase
import subprocess as sub
from flask import Flask, render_template, Response
from flask import request, abort, redirect, url_for
from flask_cors import CORS, cross_origin
from flask_restful import Resource, Api
from config import config
from locker import Locker
import rest
#from oozie_job_manager import build_images_workflow_payload_v2, build_images_index_workflow_payload, build_images_index_qpr_workflow_payload, submit_worfklow, get_job_info, rerun_job
from oozie_job_manager import build_images_index_qpr_loadesdump_workflow_payload, submit_worfklow, get_job_info, rerun_job
import pymongo
from pymongo import MongoClient
# logger
# File handler configured from the shared ``config``. The handler is attached
# before its formatter is set; that is fine because the same handler object is
# mutated afterwards.
logger = logging.getLogger('api-manager.log')
log_file = logging.FileHandler(config['logging']['file_path'])
logger.addHandler(log_file)
log_file.setFormatter(logging.Formatter(config['logging']['format']))
logger.setLevel(config['logging']['level'])
# flask app
# CORS is fully open: any origin on any route.
app = Flask(__name__)
cors = CORS(app, resources={r"*": {"origins": "*"}})
api = Api(app)
# Largest 13-digit value; presumably an open-ended upper bound for
# millisecond epoch timestamps -- confirm against callers.
max_ts = 9999999999999
def api_route(self, *args, **kwargs):
    """Decorator factory enabling ``@api.route(...)`` on Resource classes.

    Bound onto the Api instance below as its ``route`` method. The decorated
    class is registered via ``add_resource`` with the given args/kwargs and
    returned unchanged.
    """
    def register(resource_cls):
        self.add_resource(resource_cls, *args, **kwargs)
        return resource_cls
    return register
# Bind api_route (defined above) as the ``route`` method of the Api instance,
# enabling the ``@api.route(...)`` class decorators used below.
api.route = types.MethodType(api_route, api)
# mongoDB
# Default connection (localhost); persistent store mirroring the in-memory
# ``data`` dict below.
client = MongoClient()
db = client.api_manager_db
db_domains = db.domains
db_projects = db.projects
# in-memory data
# TODO: make it persistent i.e. deal with restart using mongoDB
data = {}
data['domains'] = {}
data['projects'] = {}
# ports ?
# what we really care about is knowing for each domain:
# - what is the address for the image similarity service for one domain (actually one project)
# - what is the status of indexing (not indexed, indexing, indexed)
# - what is the time range that we have indexed (if not everything)
# what are the ports used on the host.
# use before_first_request to try to load data from disk? Build docker image?
# use after_request for all functions that modify data to save data to disk? ~ http://flask.pocoo.org/snippets/53/
def initialize_data_fromdb():
    """Reload the in-memory ``data`` dict (projects, domains, ports) from
    MongoDB, regenerate the Apache conf, restart Apache, and relaunch the
    per-domain docker containers.

    Intended to be called once at startup to recover state from a previous
    session.
    """
    # try to read data stored in db from a previous session
    # fill projects, domains and ports
    for project in db_projects.find():
        logger.info('loading project from mongodb: {}'.format(project))
        data['projects'][project['project_name']] = dict()
        for key in project:
            # id is an object that is not JSON serializable
            if str(key).strip() != '_id':
                data['projects'][project['project_name']][key] = project[key]
        logger.info('Loaded project %s, dict keys are %s' % (project['project_name'], data['projects'][project['project_name']].keys()))
    for domain in db_domains.find():
        logger.info('loading domain from mongodb: {}'.format(domain))
        data['domains'][domain['domain_name']] = dict()
        for key in domain:
            # id is an object that is not JSON serializable
            if str(key).strip() != '_id':
                data['domains'][domain['domain_name']][key] = domain[key]
            # rebuild the list of ports in use across all domains
            if key == 'port':
                if 'ports' not in data:
                    data['ports'] = [domain[key]]
                else:
                    data['ports'].append(domain[key])
        logger.info('Loaded domain %s, dict keys are %s' % (domain['domain_name'], data['domains'][domain['domain_name']].keys()))
    # reset apache conf
    reset_apache_conf()
    restart_apache()
    # restart dockers
    for domain_name in data['domains']:
        start_docker(data['domains'][domain_name]['port'], domain_name)
@app.after_request
def after_request(response):
    # Attach permissive CORS headers to every outgoing response.
    response.headers.add('Access-Control-Allow-Headers', 'Keep-Alive,User-Agent,If-Modified-Since,Cache-Control,x-requested-with,Content-Type,origin,authorization,accept,client-security-token')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return response
# One locker for project and domain: serializes concurrent mutations of a
# given project/domain by name.
project_lock = Locker()
domain_lock = Locker()
def _copy_from_hdfs(hdfs_path, local_path):
    """Copy ``hdfs_path`` to ``local_path`` by shelling out to the hadoop CLI.

    Failures are not raised: the hadoop exit code is ignored.
    """
    import subprocess
    copy_cmd = ['hadoop', 'fs', '-copyToLocal', hdfs_path, local_path]
    subprocess.call(copy_cmd)
def _get_project_dir_path(project_name):
    """Local directory holding this project's files, under the repo path."""
    return os.path.join(config['repo']['local_path'], project_name)
def _get_domain_dir_path(domain_name):
    """Local directory holding this domain's service data and config."""
    return os.path.join(config['image']['base_domain_dir_path'], domain_name)
# deprecated
# def _submit_worfklow(start_ts, end_ts, table_sha1, table_update, domain):
# payload = build_images_workflow_payload_v2(start_ts, end_ts, table_sha1, table_update, domain)
# json_submit = submit_worfklow(payload)
# job_id = json_submit['id']
# logger.info('[submit_worfklow: log] submitted workflow %s for domain %s.' % (job_id, domain))
# # can use job_id to check status with: get_job_info(job_id)
# return job_id
# deprecated
# def _submit_buildindex_worfklow(ingestion_id, table_sha1infos, pingback_url):
# payload = build_images_index_workflow_payload(ingestion_id, table_sha1infos, pingback_url)
# logger.info('[submit_worfklow: log] submitted payload for ingestion_id: {}'.format(payload))
# json_submit = submit_worfklow(payload)
# job_id = json_submit['id']
# logger.info('[submit_worfklow: log] submitted workflow for ingestion_id: %s.' % (ingestion_id))
# return job_id
# def _submit_buildindex_worfklow_qpr(ingestion_id, table_sha1infos, pingback_url):
# payload = build_images_index_qpr_workflow_payload(ingestion_id, table_sha1infos, pingback_url)
# logger.info('[submit_worfklow: log] submitted payload for ingestion_id: {}'.format(payload))
# json_submit = submit_worfklow(payload)
# job_id = json_submit['id']
# logger.info('[submit_worfklow: log] submitted workflow for ingestion_id: %s.' % (ingestion_id))
# return job_id
#HG ES instances are slow. Amandeep reads once the index and dumps to HDFS. Load from there
# TODO: reference here the workflow from Amandeep that dumps the data?
def _submit_buildindex_worfklow_qpr_loadesdump(ingestion_id, table_sha1infos, pingback_url):
    """Submit the Oozie workflow that builds the image index from the ES dump.

    :param ingestion_id: identifier of the ingestion to index
    :param table_sha1infos: HBase sha1-infos table name for the domain
    :param pingback_url: URL the workflow notifies on completion
    :return: the job id of the submitted workflow
    """
    wf_payload = build_images_index_qpr_loadesdump_workflow_payload(ingestion_id, table_sha1infos, pingback_url)
    logger.info('[submit_worfklow: log] submitted payload for ingestion_id: {}'.format(wf_payload))
    submit_json = submit_worfklow(wf_payload)
    wf_job_id = submit_json['id']
    logger.info('[submit_worfklow: log] submitted workflow for ingestion_id: %s.' % (ingestion_id))
    return wf_job_id
def parse_isodate_to_ts(input_date):
    """Convert an ISO-format date string to a UTC epoch timestamp in ms.

    :param input_date: date string parseable by ``dateutil.parser.parse``
    :return: epoch timestamp in milliseconds
    :raises ValueError: if ``input_date`` cannot be parsed
    """
    import dateutil.parser
    import calendar
    parsed_date = dateutil.parser.parse(input_date)
    # Parenthesized print behaves identically on Python 2 (single expression)
    # and Python 3; the bare print statement made the module unparseable on 3.
    print("[parsed_date: {} from {}]".format(parsed_date, input_date))
    return calendar.timegm(parsed_date.utctimetuple())*1000
def reset_apache_conf():
    """Rebuild the Apache conf from the pristine input conf plus one
    ProxyPass block per known domain, then write it out.
    """
    # for each domain create proxypass and add it to initial conf file
    # read initial apache conf file up to '</VirtualHost>'
    inconf_file = config['image']['in_apache_conf_file']
    outconf_str = ""
    with open(inconf_file, 'rt') as inconf:
        for line in inconf:
            outconf_str += line
    for domain_name in data['domains']:
        port = data['domains'][domain_name]['port']
        proxypass_filled, service_url = fill_proxypass(domain_name, port)
        logger.info("[setup_service_url: log] updating Apache conf with: {}".format(proxypass_filled))
        # NOTE(review): split('\n') drops the newlines and add_proxypass_to_conf
        # re-appends one per line; a trailing '' element gains a newline on each
        # iteration -- harmless to Apache but worth confirming.
        outconf_str = add_proxypass_to_conf(outconf_str.split('\n'), proxypass_filled)
    write_out_apache_conf(outconf_str)
def write_out_apache_conf(outconf_str):
    """Overwrite the generated Apache conf file with ``outconf_str``.

    :raises IOError: if the file cannot be written (original error is logged)
    """
    try:
        with open(config['image']['out_apache_conf_file'], 'wt') as outconf:
            outconf.write(outconf_str)
    except Exception as inst:
        logger.info("[setup_service_url: log] Could not overwrite Apache conf file. {}".format(inst))
        raise IOError("Could not overwrite Apache conf file")
def fill_proxypass(domain_name, port):
    """Build the Apache ProxyPass/ProxyPassReverse snippet for one domain.

    :param domain_name: domain identifier, embedded in the endpoint path
    :param port: localhost port the domain's docker service listens on
    :return: (proxypass_filled, service_url) -- the conf snippet and the
        public URL of the service
    """
    endpoint = "/cuimgsearch_{}".format(domain_name)
    public_url = config['image']['base_service_url'] + endpoint
    local_url = "http://localhost:{}/".format(port)
    template = "\nProxyPass {}/ {}\nProxyPassReverse {}/ {}\n<Location {}>\n\tRequire all granted\n</Location>\n"
    return template.format(endpoint, local_url, endpoint, local_url, endpoint), public_url
def add_proxypass_to_conf(inconf, proxypass_filled):
    """Insert ProxyPass rules just before the closing ``</VirtualHost>`` tag.

    :param inconf: iterable of conf lines (no trailing newlines expected;
        one is appended to every line)
    :param proxypass_filled: snippet to insert before each closing tag
    :return: the resulting conf as a single string
    """
    out_parts = []
    for conf_line in inconf:
        if conf_line.strip() == '</VirtualHost>':
            out_parts.append(proxypass_filled)
        out_parts.append(conf_line + '\n')
    return ''.join(out_parts)
def setup_service_url(domain_name):
    """Allocate a port for ``domain_name``, append its ProxyPass rule to the
    Apache conf, and return ``(port, service_url)``.

    NOTE(review): the port is only unique among our own domains; it is
    assumed (not checked) to be free on the host.
    """
    # attribute a port (how to make sure it is free? for now just assume it is)
    if 'ports' not in data or len(data['ports']) == 0:
        port = config['image']['first_port']
        data['ports'] = []
    else:
        port = max(data['ports'])+1
    data['ports'].append(port)
    # build the proxypass rule for Apache
    # TODO: should have all this predefined for domain1-4
    proxypass_filled, service_url = fill_proxypass(domain_name, port)
    logger.info("[setup_service_url: log] updating Apache conf with: {}".format(proxypass_filled))
    # read apache conf file up to '</VirtualHost>'
    inconf_file = config['image']['in_apache_conf_file']
    # check if we already setup one domain...
    if os.path.isfile(config['image']['out_apache_conf_file']):
        # start from there
        inconf_file = config['image']['out_apache_conf_file']
    with open(inconf_file, 'rt') as inconf:
        outconf_str = add_proxypass_to_conf(inconf, proxypass_filled)
    # overwrite conf file
    write_out_apache_conf(outconf_str)
    return port, service_url
def restart_apache():
    """Schedule an Apache restart after a short delay, without blocking.

    Requires sudo privileges for the calling user.
    """
    # v2. dirty but seems to work
    logger.info("[setup_service_url: log] restarting Apache in 3 seconds...")
    sub.Popen('sleep 3; sudo service apache2 restart', shell=True)
def get_start_end_ts(one_source):
    '''Parse start and end timestamp from `start_date` and `end_date` in the provided source'''
    def _parse_field(field_name):
        # Parse one ISO date field, normalizing any failure (missing key or
        # unparseable value) to a logged ValueError.
        try:
            return parse_isodate_to_ts(one_source[field_name])
        except Exception as inst:
            err_msg = "Could not parse '{}' (error was: {}).".format(field_name, inst)
            logger.error("[get_start_end_ts: log] "+err_msg)
            raise ValueError(err_msg)
    # Tuple elements evaluate left-to-right: start_date first, as before.
    return _parse_field('start_date'), _parse_field('end_date')
def check_project_indexing_finished(project_name):
    """Check if we can find lopq_model and lopd_codes.

    For a project in 'indexing' or 'rerunning' status, look up its ingestion
    row in the HBase updates table. If both model and codes columns are
    present, copy them from HDFS to the domain directory and set status to
    'ready' (or 'failed' if the local copy did not appear). If the row is
    incomplete, inspect the Oozie job: still RUNNING means wait; otherwise
    rerun it once ('rerunning'), and mark 'failed' after a second failure.
    Any exception is caught and logged, leaving the status unchanged.
    """
    if data['projects'][project_name]['status'] == 'indexing' or data['projects'][project_name]['status'] == 'rerunning':
        # look for columns lopq_model and lopd_codes in hbase update table row of this ingestion
        ingestion_id = data['projects'][project_name]['ingestion_id']
        logger.info('[check_project_indexing_finished: log] checking if ingestion %s has completed.' % (ingestion_id))
        try:
            from happybase.connection import Connection
            conn = Connection(config['image']['hbase_host'])
            table = conn.table(config['image']['hbase_table_updates'])
            columns=[config['image']['lopq_model_column'], config['image']['lopq_codes_column']]
            row = table.row(ingestion_id, columns=columns)
            # if found, copy to domain data folder
            if len(row)==len(columns):
                logger.info('[check_project_indexing_finished: log] ingestion %s looks completed' % (ingestion_id))
                # copy codes first
                local_codes_path = os.path.join(_get_domain_dir_path(data['projects'][project_name]['domain']), config['image']['lopq_codes_local_suffix'])
                _copy_from_hdfs(row[config['image']['lopq_codes_column']], local_codes_path)
                local_model_path = os.path.join(_get_domain_dir_path(data['projects'][project_name]['domain']), config['image']['lopq_model_local_suffix'])
                _copy_from_hdfs(row[config['image']['lopq_model_column']], local_model_path)
                if os.path.exists(local_codes_path) and os.path.exists(local_model_path):
                    logger.info('[check_project_indexing_finished: log] ingestion %s has completed and should be ready now...' % (ingestion_id))
                    data['projects'][project_name]['status'] = 'ready'
                else:
                    data['projects'][project_name]['status'] = 'failed'
                    logger.info('[check_project_indexing_finished: log] ingestion %s has completed but local copy failed...' % (ingestion_id))
                # for debugging store infos: row[config['image']['lopq_codes_column']], row[config['image']['lopq_model_column']]
            else: # else,
                # check the job is still running
                job_id = data['projects'][project_name]['job_id']
                output = get_job_info(job_id)
                if output['status'] == 'RUNNING':
                    pass # we just have to wait for the job to end
                else:
                    # if it is not, the job failed... what should we do?
                    # mark project as failed?
                    if data['projects'][project_name]['status'] == 'indexing':
                        # TODO: check why resubmission does not work.
                        # try to rerun once
                        logger.info('[check_project_indexing_finished: log] rerunning ingestion %s which has failed once...' % (ingestion_id))
                        endpt = "/cu_imgsearch_manager/projects/{}".format(project_name)
                        #pingback_url = config['image']['base_service_url']+endpt
                        pingback_url = config['image']['base_service_url_vpn']+endpt
                        domain_name = data['projects'][project_name]['domain']
                        rerun_output = rerun_job(job_id, data['projects'][project_name]['ingestion_id'], data['domains'][domain_name]['table_sha1infos'], pingback_url)
                        data['projects'][project_name]['status'] = 'rerunning'
                        logger.info('[check_project_indexing_finished: log] resubmission output was: {}'.format(rerun_output))
                    elif data['projects'][project_name]['status'] == 'rerunning':
                        logger.info('[check_project_indexing_finished: log] ingestion %s has failed twice...' % (ingestion_id))
                        logger.info('[check_project_indexing_finished: log] job info output was: {}'.format(output))
                        data['projects'][project_name]['status'] = 'failed'
            # for debugging store info: output
        except Exception as inst:
            logger.error('[check_project_indexing_finished: error] {}'.format(inst))
def start_docker(port, domain_name):
    """Launch the per-domain image-search docker via the setup script.

    Non-blocking: output is piped and ignored. The first launch can take a
    while if the docker image has not been built yet.
    """
    setup_script = config['image']['host_repo_path'] + config['image']['setup_docker_path']
    command = '{} -p {} -d {}'.format(setup_script, port, domain_name)
    logger.info("[start_docker: log] Starting docker for domain {} with command: {}".format(domain_name, command))
    sub.Popen(command.split(' '), stdout=sub.PIPE, stderr=sub.PIPE)
def check_domain_service(project_sources, project_name):
    """Create (or verify) the per-domain image search service for a project.

    Only the first entry of ``project_sources`` is used. Returns a 5-tuple
    ``(ret, domain_name, ingestion_id, job_id, err)``:
    - ret == 0: service created and indexing workflow submitted;
    - ret == 1: domain already exists (updates not allowed), ids are None;
    - ret == -1: invalid domain name, ``err`` holds the message.
    The caller is expected to restart Apache after a 0 return.
    """
    #logger.info('[check_domain_service: log] project_sources: %s' % (project_sources))
    # why is project_sources a list actually? Assume we want the first entry? Or loop?
    one_source = project_sources[0]
    domain_name = one_source['type']
    if domain_name not in ['domain1', 'domain2', 'domain3', 'domain4']:
        err_msg = 'Invalid domain {} for Summer QPR 2017.'.format(domain_name)
        logger.info('[check_domain_service: error] '+err_msg)
        return -1, domain_name, None, None, err_msg
    start_ts, end_ts = get_start_end_ts(one_source)
    logger.info('[check_domain_service: log] domain_name: %s, start_ts: %s, end_ts: %s' % (domain_name, start_ts, end_ts))
    # should we check domain_name is valid e.g. exists in CDR?
    domain_dir_path = _get_domain_dir_path(domain_name)
    # get domain lock
    if os.path.isdir(domain_dir_path): # or test 'domain_name' in data['domains']?
        msg = 'service exists for domain_name: %s. updating is not allowed.' % (domain_name)
        logger.info('[check_domain_service: log] '+msg)
        return 1, domain_name, None, None, msg
    # # deprecated
    # logger.info('[check_domain_service: log] service exists for domain_name: %s, check if we need to update.' % (domain_name))
    # # Check conf to see if we need to update
    # # Do we actually want to allow that?
    # config_file = os.path.join(domain_dir_path, config['image']['config_filepath'])
    # config_json = json.load(open(config_file,'rt'))
    # if 'start_ts' not in config_json or 'end_ts' not in config_json:
    #     err_msg = 'service exists for domain: %s, but creation seems incomplete.' % (domain_name)
    #     logger.error('[check_domain_service: error] '+err_msg)
    #     domain_lock.release(domain_name)
    #     return -1, domain_name, None, None, err_msg
    # new_start_ts = min(start_ts, config_json['start_ts'])
    # new_end_ts = max(end_ts, config_json['end_ts'])
    # if new_start_ts < config_json['start_ts'] or new_end_ts > config_json['end_ts']:
    #     domain_lock.acquire(domain_name)
    #     # save update infos in domain data
    #     ingestion_id = '-'.join([domain_name, str(start_ts), str(end_ts)])
    #     data['domains'][domain_name]['ingestion_id'].append(ingestion_id)
    #     # submit workflow with min(start_ts, stored_start_ts) and max(end_ts, stored_end_ts)
    #     endpt = "/cu_imgsearch_manager/projects/{}".format(project_name)
    #     pingback_url = config['image']['base_service_url']+endpt
    #     job_id = _submit_buildindex_worfklow(ingestion_id, data['domains'][domain_name]['table_sha1infos'], pingback_url)
    #     # add job_id to job_ids and save config
    #     config_json['job_ids'].append(job_id)
    #     data['domains'][domain_name]['job_ids'].append(job_id)
    #     # write out new config file
    #     with open(config_file, 'wt') as conf_out:
    #         conf_out.write(json.dumps(config_json))
    #     msg = 'updating domain %s' % (domain_name)
    #     logger.info('[check_domain_service: log] '+msg)
    #     # update in mongodb too
    #     db_domains.find_one_and_replace({'domain_name':domain_name}, data['domains'][domain_name])
    #     domain_lock.release(domain_name)
    #     return 1, domain_name, ingestion_id, job_id, msg
    # else:
    #     msg = 'no need to update for domain %s' % (domain_name)
    #     logger.info('[check_domain_service: log] '+msg)
    #     return 1, domain_name, None, None, msg
    else:
        domain_lock.acquire(domain_name)
        # if folder is empty copy data from config['image']['sample_dir_path']
        # copy the whole folder
        logger.info('[check_domain_service: log] copying from %s to %s' % (config['image']['sample_dir_path'], domain_dir_path))
        try:
            if not os.path.isdir(config['image']['base_domain_dir_path']):
                os.makedirs(config['image']['base_domain_dir_path'])
            shutil.copytree(config['image']['sample_dir_path'], domain_dir_path)
        except shutil.Error as inst:
            raise ValueError('Could not copy from template directory {} to {}. {}'.format(config['image']['sample_dir_path'], domain_dir_path, inst))
        # then copy sample config file
        source_conf = os.path.join(config['image']['host_repo_path'],config['image']['config_sample_filepath'])
        config_file = os.path.join(domain_dir_path,config['image']['config_filepath'])
        logger.info('[check_domain_service: log] copying config file from %s to %s' % (source_conf, config_file))
        shutil.copy(source_conf, config_file)
        # edit config_file by replacing DOMAIN by the actual domain in :
        # "ist_els_doc_type", "HBI_table_sha1infos", and "HBI_table_updatesinfos" (and "HBI_table_sim" ?)
        logger.info('[check_domain_service: log] loading config_file from %s' % (config_file))
        config_json = json.load(open(config_file,'rt'))
        # - HBI_table_sha1infos
        config_json['HBI_table_sha1infos'] = config_json['HBI_table_sha1infos'].replace('DOMAIN', domain_name)
        ingestion_id = '-'.join([domain_name, str(start_ts), str(end_ts)])
        # save that in project and domain infos too
        config_json['ingestion_id'] = ingestion_id
        # setup service
        port, service_url = setup_service_url(domain_name)
        endpt = "/cu_imgsearch_manager/projects/{}".format(project_name)
        #pingback_url = config['image']['base_service_url']+endpt
        pingback_url = config['image']['base_service_url_vpn']+endpt
        # submit workflow to get images data
        logger.info('[check_domain_service: log] submitting workflow with parameters: %s, %s, %s' % (ingestion_id, config_json['HBI_table_sha1infos'], pingback_url))
        #job_id = _submit_buildindex_worfklow(ingestion_id, config_json['HBI_table_sha1infos'], pingback_url)
        job_id = _submit_buildindex_worfklow_qpr_loadesdump(ingestion_id, config_json['HBI_table_sha1infos'], pingback_url)
        # save job id to be able to check status?
        config_json['job_ids'] = job_id
        # write out new config file
        logger.info('[check_domain_service: log] updating config_file: %s' % config_file)
        with open(config_file, 'wt') as conf_out:
            conf_out.write(json.dumps(config_json))
        logger.info('[check_domain_service: log] wrote config_file: %s' % config_file)
        start_docker(port, domain_name)
        # store all infos of that domain
        data['domains'][domain_name] = {}
        data['domains'][domain_name]['domain_name'] = domain_name
        data['domains'][domain_name]['port'] = port
        data['domains'][domain_name]['table_sha1infos'] = config_json['HBI_table_sha1infos']
        data['domains'][domain_name]['service_url'] = service_url
        data['domains'][domain_name]['ingestion_id'] = [ingestion_id] # to allow for updates?
        data['domains'][domain_name]['job_ids'] = [job_id]
        data['domains'][domain_name]['docker_name'] = 'columbia_university_search_similar_images_'+domain_name
        # insert in mongoDB
        # NOTE: insert_one mutates the passed dict, adding an '_id' ObjectId;
        # it is stripped right after to keep the dict JSON-serializable.
        db_domains.insert_one(data['domains'][domain_name])
        if '_id' in data['domains'][domain_name]:
            del data['domains'][domain_name]['_id']
        domain_lock.release(domain_name)
        # we will restart apache AFTER returning
        return 0, domain_name, ingestion_id, job_id, None
def json_encode(obj):
    """``json.dumps`` ``default`` hook: serialize sets as lists.

    Any other unserializable type falls through to TypeError, as the json
    module expects.
    """
    if not isinstance(obj, set):
        raise TypeError
    return list(obj)
@api.route('/debug')
class Debug(Resource):
    """Expose the in-memory ``data`` dict; 404 unless config['debug'] is set."""
    def get(self):
        try:
            if not config['debug']:
                return abort(404)
            # Round-trip through json to coerce sets (via json_encode) into
            # JSON-serializable structures.
            debug_info = {
                'data': json.loads(json.dumps(data, default=json_encode))
            }
            return debug_info
        except Exception as e:
            # NOTE(review): on error this logs and implicitly returns None.
            logger.error('debug: {}. {}'.format(e, sys.exc_info()[0]))
@api.route('/')
class Home(Resource):
    """Root endpoint."""
    def get(self):
        # NOTE(review): returns this Resource instance's attribute dict;
        # presumably a placeholder/debug response -- confirm intent.
        return self.__dict__
@api.route('/projects')
class AllProjects(Resource):
    """Collection endpoint: create a project (and its domain service) or list projects."""
    def post(self):
        """Create a new project from JSON {'project_name', 'sources': [...]}.

        Side effects: project dir + config file on disk, domain service
        creation via check_domain_service, MongoDB insert, Apache restart.
        """
        # NOTE(review): ``input`` shadows the builtin of the same name.
        input = request.get_json(force=True)
        logger.info('/projects received: %s' % (input))
        project_name = input.get('project_name', '')
        if len(project_name) == 0 or len(project_name) >= 256:
            return rest.bad_request('Invalid project name.')
        if project_name in data['projects']:
            #msg = 'You cannot post an existing project to the /projects endpoint. For updates, post to projects/{your_project_name}'
            msg = 'You cannot post an existing project to the /projects endpoint.'
            return rest.bad_request(msg)
        project_sources = input.get('sources', [])
        if len(project_sources) == 0:
            return rest.bad_request('Invalid sources.')
        logger.info('/projects project_name: %s' % (project_name))
        logger.info('/projects project_sources: %s' % (project_sources))
        try:
            # create project data structure, folders & files
            project_dir_path = _get_project_dir_path(project_name)
            project_lock.acquire(project_name)
            logger.info('/projects creating directory: %s' % (project_dir_path))
            os.makedirs(project_dir_path)
            data['projects'][project_name] = {'sources': {}}
            data['projects'][project_name]['project_name'] = project_name
            data['projects'][project_name]['sources'] = project_sources
            with open(os.path.join(project_dir_path, 'project_config.json'), 'w') as f:
                f.write(json.dumps(data['projects'][project_name], indent=4, default=json_encode))
            # we should try to create a service for domain "sources:type"
            # (or update it if timerange defined by "sources:start_date" and "sources:end_date" is bigger than existing)
            ret, domain_name, ingestion_id, job_id, err = check_domain_service(project_sources, project_name)
            data['projects'][project_name]['domain'] = domain_name
            if ret==0:
                msg = 'project %s created.' % project_name
                logger.info(msg)
                # store job infos
                data['projects'][project_name]['ingestion_id'] = ingestion_id
                data['projects'][project_name]['job_id'] = job_id
                data['projects'][project_name]['status'] = 'indexing'
                # insert into mongoDB
                logger.info('Project %s (before mongodb insertion) dict keys are %s' % (project_name, data['projects'][project_name].keys()))
                db_projects.insert_one(data['projects'][project_name])
                logger.info('Project %s (after mongodb insertion) dict keys are %s' % (project_name, data['projects'][project_name].keys()))
                # insert_one mutates the passed dict, adding an '_id' ObjectId;
                # strip it so the dict stays JSON-serializable.
                if '_id' in data['projects'][project_name]:
                    del data['projects'][project_name]['_id']
                logger.info('Project %s (after mongodb insertion and cleaning) dict keys are %s' % (project_name, data['projects'][project_name].keys()))
                # restart Apache only after the response value is computed
                try:
                    return rest.created(msg)
                finally:
                    restart_apache()
            elif ret==1:
                msg = 'domain for project %s was already previously created. %s' % (project_name, err)
                logger.info(msg)
                # what should we return in this case
                return rest.ok(msg)
            else:
                # we should remove project_name
                del data['projects'][project_name]
                msg = 'project %s creation failed while creating search service: %s' % (project_name, err)
                logger.info(msg)
                return rest.internal_error(msg)
        except Exception as e:
            # best-effort rollback of in-memory and on-disk project state
            # try to remove project_name
            try:
                del data['projects'][project_name]
            except:
                pass
            # try to remove data files too
            try:
                shutil.rmtree(os.path.join(_get_project_dir_path(project_name)))
            except:
                pass
            msg = 'project {} creation failed: {} {}'.format(project_name, e, sys.exc_info()[0])
            logger.error(msg)
            return rest.internal_error(msg)
        finally:
            project_lock.release(project_name)
    def get(self):
        """List known project names."""
        return data['projects'].keys()
    def delete(self):
        """Deletion must target a specific project endpoint."""
        # redundant with projects/project_name/delete
        msg = 'cannot delete from projects endpoint. you should call projects/{your_project_name}'
        return rest.bad_request(msg)
@api.route('/projects/<project_name>')
class Project(Resource):
    """Single-project endpoint: read status or delete (updates disabled)."""
    def post(self, project_name):
        """Reject updates to an existing project."""
        return rest.bad_request('A project update is not allowed.')
    # # for updates?
    # if project_name not in data['projects']:
    #     return rest.not_found()
    # input = request.get_json(force=True)
    # project_sources = input.get('sources', [])
    # if len(project_sources) == 0:
    #     return rest.bad_request('Invalid sources.')
    # try:
    #     project_lock.acquire(project_name)
    #     data['projects'][project_name]['master_config'] = project_sources
    #     # This would mean an update, we need to update the corresponding domain image similarity service
    #     ret, domain_name, ingestion_id, job_id, err = check_domain_service(project_sources, project_name)
    #     logger.info('Updated project %s, dict keys are %s' % (project_name, data['projects'][project_name].keys()))
    #     return rest.created()
    # except Exception as e:
    #     logger.error('Updating project %s: %s' % (project_name, e.message))
    #     return rest.internal_error('Updating project %s error, halted.' % project_name)
    # finally:
    #     project_lock.release(project_name)
    def get(self, project_name):
        """Return project info, refreshing its indexing status first."""
        if project_name not in data['projects']:
            return rest.not_found()
        check_project_indexing_finished(project_name)
        logger.info('Getting project %s, dict keys are %s' % (project_name, data['projects'][project_name].keys()))
        return data['projects'][project_name]
    def delete(self, project_name):
        """Delete the project and (assuming one project per domain) its
        domain: HBase columns, local files, MongoDB docs, docker container,
        port, and Apache conf entry.
        """
        if project_name not in data['projects']:
            return rest.not_found()
        try:
            project_lock.acquire(project_name)
            # - get corresponding domain
            domain_name = data['projects'][project_name]['domain']
            # - delete ingestion_id row from hbase updates table
            # should we delete corresponding files on HDFS?
            # delete hbase table sha1_infos?
            ingestion_id = data['projects'][project_name]['ingestion_id']
            from happybase.connection import Connection
            conn = Connection(config['image']['hbase_host'])
            table = conn.table(config['image']['hbase_table_updates'])
            table.delete(ingestion_id, columns=['info:lopq_codes_path', 'info:lopq_model_pkl'])
            # remove project:
            # - from current data dict
            del data['projects'][project_name]
            # - files associated with project
            shutil.rmtree(os.path.join(_get_project_dir_path(project_name)))
            # - from mongodb
            db_projects.delete_one({'project_name':project_name})
            msg = 'project {} has been deleted'.format(project_name)
            logger.info(msg)
            # if it's the last project from a domain, should we remove the domain?
            # for now assume one project per domain and delete too
            # stop and remove docker container
            docker_name = data['domains'][domain_name]['docker_name']
            subproc = sub.Popen("sudo docker stop {}; sudo docker rm {}".format(docker_name, docker_name), shell=True)
            # cleanup ports list
            data['ports'].remove(data['domains'][domain_name]['port'])
            # remove domain:
            # - from current data dict
            del data['domains'][domain_name]
            # - files associated with project
            shutil.rmtree(os.path.join(_get_domain_dir_path(domain_name)))
            # - from mongodb
            db_domains.delete_one({'domain_name':domain_name})
            # should we also clean up things in HDFS?...
            msg2 = 'domain {} has been deleted'.format(domain_name)
            logger.info(msg2)
            # regenerate apache conf from scratch for domains that are still active.
            reset_apache_conf()
            return rest.deleted(msg+' '+msg2)
        except Exception as e:
            # NOTE(review): e.message is Python-2-only; use str(e) if porting.
            logger.error('deleting project %s: %s' % (project_name, e.message))
            return rest.internal_error('deleting project %s error, halted.' % project_name)
        finally:
            # NOTE(review): other handlers call project_lock.release(); confirm
            # that Locker.remove() also releases the per-name lock.
            project_lock.remove(project_name)
@api.route('/domains')
class AllDomains(Resource):
    """List domains; domains are created implicitly by posting projects."""
    def post(self):
        return rest.bad_request('You cannot post to this endpoint. Domains are created from projects.')
    def get(self):
        """List known domain names."""
        return data['domains'].keys()
@api.route('/domains/<domain_name>')
class Domain(Resource):
    """Read-only access to one domain; create/delete happen via projects."""
    def post(self, domain_name):
        return rest.bad_request('You cannot post a domain, you should post a project using a domain.')
    def put(self, domain_name):
        # PUT is treated exactly like POST: not allowed.
        return self.post(domain_name)
    def get(self, domain_name):
        """Return the stored info dict for ``domain_name``."""
        if domain_name not in data['domains']:
            return rest.not_found()
        return data['domains'][domain_name]
    def delete(self, domain_name):
        # Should we allow it?
        return rest.bad_request('Deleting a domain is not allowed.')
if __name__ == '__main__':
    # Rebuild the in-memory project/domain state from MongoDB before serving.
    initialize_data_fromdb()
    # we should also check if dockers are running?
    # api services within each docker?
    # NOTE(review): gevent.wsgi was renamed gevent.pywsgi in later gevent
    # releases -- confirm the pinned gevent version still exposes this path.
    from gevent.wsgi import WSGIServer
    http_server = WSGIServer(('', config['server']['port']), app)
    # Blocks forever; the Flask app is served by gevent's WSGI server.
    http_server.serve_forever()
# | |  (boundary marker between concatenated source files)
#!/usr/bin/env python -u
# -*- coding: utf-8 -*-
"""
Copyright 2013 Jacek Markowski, jacek87markowski@gmail.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sqlite3
from csv import reader
import os
import locale
import codecs
import unicodedata
locale.setlocale(locale.LC_ALL, "C")
import platform
system = platform.system()
if system == 'Windows':
from pyfann_win import libfann
elif system == 'Linux':
from pyfann import libfann
else:
from pyfann import libfann
from bb_shared import Shared
from PySide import QtGui
class Database(Shared):
''' SQL base'''
def __init__(self, parent=None):
'''Creates all nessesary tables in sql'''
Shared.__init__(self)
self.mybase = sqlite3.connect(':memory:')
self.relations_base = self.mybase.cursor()
self.stop_action = 0 # when 1 stops simulation , export
#This function creates sql database and tables#
self.relations_base.execute('''CREATE TABLE results
(id INTEGER PRIMARY KEY,
date_txt txt,
date_num FLOAT,
home TEXT,
away TEXT,
gHomeEnd INTEGER,
gAwayEnd INTEGER,
odd_home FLOAT,
odd_draw FLOAT,
odd_away FLOAT
fake TEXT NOT NULL DEFAULT "-")''')
self.relations_base.execute('''CREATE TABLE league
(id INTEGER PRIMARY KEY,
team TEXT,
matches INTEGER DEFAULT 000.0,
matchesHome INTEGER DEFAULT 000.0,
matchesAway INTEGER DEFAULT 000.0,
points INTEGER DEFAULT 0,
pointsHome INTEGER DEFAULT 0,
pointsAway INTEGER DEFAULT 0,
form INTEGER DEFAULT 000.0,
formHome INTEGER DEFAULT 000.0,
formAway INTEGER DEFAULT 000.0,
pointsBB FLOAT DEFAULT 0.000,
pointsBBHome FLOAT DEFAULT 000.0,
pointsBBAway FLOAT DEFAULT 000.0,
formBB FLOAT DEFAULT 000.0,
formBBHome FLOAT DEFAULT 000.0,
formBBAway FLOAT DEFAULT 000.0,
wins INTEGER DEFAULT 000.0,
draws INTEGER DEFAULT 000.0,
loses INTEGER DEFAULT 000.0,
winhome INTEGER DEFAULT 000.0,
drawhome INTEGER DEFAULT 000.0,
losehome INTEGER DEFAULT 000.0,
winaway INTEGER DEFAULT 000.0,
drawaway INTEGER VARCHAR(2) NOT NULL DEFAULT 0,
loseaway INTEGER VARCHAR(2) NOT NULL DEFAULT 0,
goalsscored INTEGER NOT NULL DEFAULT 0,
goalslost INTEGER NOT NULL DEFAULT 0,
goalsscoredhome INTEGER NOT NULL DEFAULT 0,
goalslosthome INTEGER NOT NULL DEFAULT 0,
goalsscoredaway INTEGER NOT NULL DEFAULT 0,
goalslostaway INTEGER NOT NULL DEFAULT 0,
mowins FLOAT DEFAULT 0.0,
moloses FLOAT DEFAULT 0.0,
diffgwins FLOAT DEFAULT 0.0,
diffgloses FLOAT DEFAULT 0.0,
mowinsHome FLOAT DEFAULT 0.0,
molosesHome FLOAT DEFAULT 0.0,
diffgwinsHome FLOAT DEFAULT 0.0,
diffglosesHome FLOAT DEFAULT 0.0,
mowinsAway FLOAT DEFAULT 0.0,
molosesAway FLOAT DEFAULT 0.0,
diffgwinsAway FLOAT DEFAULT 0.0,
diffglosesAway FLOAT DEFAULT 0.0,
f1 INTEGER DEFAULT 000.0,
f2 INTEGER DEFAULT 000.0,
f3 INTEGER DEFAULT 000.0,
f4 INTEGER DEFAULT 000.0,
f1Home INTEGER DEFAULT 000.0,
f2Home INTEGER DEFAULT 000.0,
f1Away INTEGER DEFAULT 000.0,
f2Away INTEGER DEFAULT 000.0,
f1BB FLOAT DEFAULT 000.0,
f2BB FLOAT DEFAULT 000.0,
f3BB FLOAT DEFAULT 000.0,
f4BB FLOAT DEFAULT 000.0,
f1BBHome FLOAT DEFAULT 000.0,
f2BBHome FLOAT DEFAULT 000.0,
f1BBAway FLOAT DEFAULT 000.0,
f2BBAway FLOAT DEFAULT 000.0,
f1op TEXT,
f2op TEXT,
f3op TEXT,
f4op TEXT,
f1opHome TEXT,
f2opHome TEXT,
f1opAway TEXT,
f2opAway TEXT,
bts INTEGER DEFAULT 000.0,
btsHome INTEGER DEFAULT 000.0,
btsAway INTEGER DEFAULT 000.0,
over25 INTEGER DEFAULT 000.0,
over25Home INTEGER DEFAULT 000.0,
over25Away INTEGER DEFAULT 000.0,
under25 INTEGER DEFAULT 000.0,
under25Home INTEGER DEFAULT 000.0,
under25Away INTEGER DEFAULT 000.0,
fake TEXT NOT NULL DEFAULT "-")''')
self.relations_base.execute('''CREATE TABLE series
(id INTEGER PRIMARY KEY,
team TEXT,
series_wins INTEGER NOT NULL DEFAULT 0,
series_draws INTEGER NOT NULL DEFAULT 0,
series_loses INTEGER NOT NULL DEFAULT 0,
series_winshome INTEGER NOT NULL DEFAULT 0,
series_drawshome INTEGER NOT NULL DEFAULT 0,
series_loseshome INTEGER NOT NULL DEFAULT 0,
series_winsaway INTEGER NOT NULL DEFAULT 0,
series_drawsaway INTEGER NOT NULL DEFAULT 0,
series_losesaway INTEGER NOT NULL DEFAULT 0,
series_noloses INTEGER NOT NULL DEFAULT 0,
series_noloseshome INTEGER NOT NULL DEFAULT 0,
series_nolosesaway INTEGER NOT NULL DEFAULT 0,
series_nowins INTEGER NOT NULL DEFAULT 0,
series_nowinshome INTEGER NOT NULL DEFAULT 0,
series_nowinsaway INTEGER NOT NULL DEFAULT 0,
series_nodraws INTEGER NOT NULL DEFAULT 0,
series_nodrawshome INTEGER NOT NULL DEFAULT 0,
series_nodrawsaway INTEGER NOT NULL DEFAULT 0,
series_bts INTEGER DEFAULT 000.0,
series_btsHome INTEGER DEFAULT 000.0,
series_btsAway INTEGER DEFAULT 000.0,
series_over25 INTEGER DEFAULT 000.0,
series_over25Home INTEGER DEFAULT 000.0,
series_over25Away INTEGER DEFAULT 000.0,
series_under25 INTEGER DEFAULT 000.0,
series_under25Home INTEGER DEFAULT 000.0,
series_under25Away INTEGER DEFAULT 000.0)''')
self.relations_base.execute('''CREATE TABLE scaled
(id INTEGER PRIMARY KEY,
team TEXT,
matches FLOAT DEFAULT 000.0,
points FLOAT DEFAULT 000.0,
pointsHome FLOAT DEFAULT 000.0,
pointsAway FLOAT DEFAULT 000.0,
pointsBB FLOAT DEFAULT 0.000,
pointsBBHome FLOAT DEFAULT 000.0,
pointsBBAway FLOAT DEFAULT 000.0,
form FLOAT DEFAULT 000.0,
formHome FLOAT DEFAULT 000.0,
formAway FLOAT DEFAULT 000.0,
formBB FLOAT DEFAULT 000.0,
formBBHome FLOAT DEFAULT 000.0,
formBBAway FLOAT DEFAULT 000.0,
points_b FLOAT DEFAULT 000.0,
pointsHome_b FLOAT DEFAULT 000.0,
pointsAway_b FLOAT DEFAULT 000.0,
pointsBB_b FLOAT DEFAULT 0.000,
pointsBBHome_b FLOAT DEFAULT 000.0,
pointsBBAway_b FLOAT DEFAULT 000.0,
form_b FLOAT DEFAULT 000.0,
formHome_b FLOAT DEFAULT 000.0,
formAway_b FLOAT DEFAULT 000.0,
formBB_b FLOAT DEFAULT 000.0,
formBBHome_b FLOAT DEFAULT 000.0,
formBBAway_b FLOAT DEFAULT 000.0,
winhome FLOAT DEFAULT 000.0,
drawhome FLOAT DEFAULT 000.0,
losehome FLOAT DEFAULT 000.0,
winaway FLOAT DEFAULT 000.0,
drawaway FLOAT DEFAULT 000.0,
loseaway FLOAT DEFAULT 000.0,
winhome_b FLOAT DEFAULT 000.0,
drawhome_b FLOAT DEFAULT 000.0,
losehome_b FLOAT DEFAULT 000.0,
winaway_b FLOAT DEFAULT 000.0,
drawaway_b FLOAT DEFAULT 000.0,
loseaway_b FLOAT DEFAULT 000.0,
goalsscored FLOAT NOT NULL DEFAULT 0,
goalslost FLOAT NOT NULL DEFAULT 0,
goalsscoredhome FLOAT NOT NULL DEFAULT 0,
goalslosthome FLOAT NOT NULL DEFAULT 0,
goalsscoredaway FLOAT NOT NULL DEFAULT 0,
goalslostaway FLOAT NOT NULL DEFAULT 0,
goalsscored_b FLOAT NOT NULL DEFAULT 0,
goalslost_b FLOAT NOT NULL DEFAULT 0,
goalsscoredhome_b FLOAT NOT NULL DEFAULT 0,
goalslosthome_b FLOAT NOT NULL DEFAULT 0,
goalsscoredaway_b FLOAT NOT NULL DEFAULT 0,
goalslostaway_b FLOAT NOT NULL DEFAULT 0,
mowins FLOAT DEFAULT 0.0,
moloses FLOAT DEFAULT 0.0,
mowinsHome FLOAT DEFAULT 0.0,
molosesHome FLOAT DEFAULT 0.0,
mowinsAway FLOAT DEFAULT 0.0,
molosesAway FLOAT DEFAULT 0.0,
f1 FLOAT DEFAULT 000.0,
f2 FLOAT DEFAULT 000.0,
f3 FLOAT DEFAULT 000.0,
f4 FLOAT DEFAULT 000.0,
f1Home FLOAT DEFAULT 000.0,
f2Home FLOAT DEFAULT 000.0,
f1Away FLOAT DEFAULT 000.0,
f2Away FLOAT DEFAULT 000.0,
f1BB FLOAT DEFAULT 000.0,
f2BB FLOAT DEFAULT 000.0,
f3BB FLOAT DEFAULT 000.0,
f4BB FLOAT DEFAULT 000.0,
f1BBHome FLOAT DEFAULT 000.0,
f2BBHome FLOAT DEFAULT 000.0,
f1BBAway FLOAT DEFAULT 000.0,
f2BBAway FLOAT DEFAULT 000.0,
bts INTEGER DEFAULT 000.0,
btsHome INTEGER DEFAULT 000.0,
btsAway INTEGER DEFAULT 000.0,
over25 INTEGER DEFAULT 000.0,
over25Home INTEGER DEFAULT 000.0,
over25Away INTEGER DEFAULT 000.0,
under25 INTEGER DEFAULT 000.0,
under25Home INTEGER DEFAULT 000.0,
under25Away INTEGER DEFAULT 000.0,
series_wins FLOAT DEFAULT 000.0,
series_draws FLOAT DEFAULT 000.0,
series_loses FLOAT DEFAULT 000.0,
series_winshome FLOAT DEFAULT 000.0,
series_drawshome FLOAT DEFAULT 000.0,
series_loseshome FLOAT DEFAULT 000.0,
series_winsaway FLOAT DEFAULT 000.0,
series_drawsaway FLOAT DEFAULT 000.0,
series_losesaway FLOAT DEFAULT 000.0,
series_noloses FLOAT DEFAULT 000.0,
series_noloseshome FLOAT DEFAULT 000.0,
series_nolosesaway FLOAT DEFAULT 000.0,
series_nowins FLOAT DEFAULT 000.0,
series_nowinshome FLOAT DEFAULT 000.0,
series_nowinsaway FLOAT DEFAULT 000.0,
series_nodraws FLOAT DEFAULT 000.0,
series_nodrawshome FLOAT DEFAULT 000.0,
series_nodrawsaway FLOAT DEFAULT 000.0,
series_bts INTEGER DEFAULT 000.0,
series_btsHome INTEGER DEFAULT 000.0,
series_btsAway INTEGER DEFAULT 000.0,
series_over25 INTEGER DEFAULT 000.0,
series_over25Home INTEGER DEFAULT 000.0,
series_over25Away INTEGER DEFAULT 000.0,
series_under25 INTEGER DEFAULT 000.0,
series_under25Home INTEGER DEFAULT 000.0,
series_under25Away INTEGER DEFAULT 000.0,
fake TEXT NOT NULL DEFAULT "-")''')
self.relations_base.execute('''CREATE TABLE odds
(id INTEGER PRIMARY KEY,
name TETX,
odd_home FLOAT DEFAULT 000.0,
odd_draw FLOAT DEFAULT 000.0,
odd_away FLOAT DEFAULT 000.0)''')
    def load_csv(self, folder, name, expt_name = None, r_min = 5,
                 r_max = 50, mode=0,net = None):
        '''Replays every finished match of a league, oldest first.

        mode:
        0-normal     (accumulate stats only; rescale once at the end)
        1-export     (additionally dump a scaled training sample per match)
        2-simulation (additionally predict each match and collect stats)

        folder/name locate the league file; r_min/r_max bound the "rounds"
        (max matches played by any team) for export and simulation; net is
        the network object used in simulation mode.
        '''
        print net
#        with open(os.path.join('tmp','')+'comm','w') as comm:
#            # communicates with export manager
#            comm.write('')
        self.clear_tables()
        teams = self.return_teams(folder, name)
        # One row per team in league/series/scaled; odds gets a single row
        # that scale_odds overwrites for each processed match.
        for team in teams:
            item = team[0]
            self.relations_base.execute('''INSERT INTO league(team)
                        VALUES(?)''', [(item)])
            self.relations_base.execute('''INSERT INTO series(team)
                        VALUES(?)''', [(item)])
            self.relations_base.execute('''INSERT INTO scaled(team)
                        VALUES(?)''', [(item)])
        self.relations_base.execute('''INSERT INTO odds(name,odd_home,odd_draw,odd_away)
                    VALUES("odds",0.0,0.0,0.0)''')
        # Selecting all matches from database to process
        results = self.relations_base.execute('''SELECT
                                    date_txt,
                                    home,
                                    away,
                                    gHomeEnd,
                                    gAwayEnd,
                                    odd_home,
                                    odd_draw,
                                    odd_away
                                    FROM results WHERE NOT gHomeEnd='NULL'
                                    ORDER BY date_num ASC
                                    ''')
        results = results.fetchall()
        # Processing selected matches
        teams_num = len(teams)
        self.match_group = 0
        self.match_group_date = 0
        index = 0
        for i in results:
            day, home, away, fth, fta,odd_home,odd_draw,odd_away = i[:]
            # "rounds" = most matches played by any team so far, used as a
            # proxy for the current round number.
            rounds_m = self.relations_base.execute('''SELECT
                        max(matches) FROM league''')
            rounds = rounds_m.fetchone()
            rounds = rounds[0]
            if mode == 1: #export
                #with open(os.path.join('tmp','')+'comm','r') as comm:
                    # communicates with export manager
                #    comm_var = comm.readline()
                #    if comm_var != '':
                #        break
                self.scale_odds(home,away,day)
                if r_min <= rounds <= r_max:
                    index += 1
                    # Rescale once per match day, before the first match.
                    self.scale_group_check(day)
                    if self.match_group == 1:
                        self.scale_group(teams_num)
#                    with open(os.path.join('tmp','')+'print','w') as export_print_file:
#                    print '==== Scaling====', day
#                        export_print_file.write('Process data :'+day+' Round %d'%rounds)
                    line = '==== Scaling====' + day + ' Round:' + str(rounds)
                    # Keep the Qt GUI responsive during the long replay loop.
                    QtGui.QApplication.processEvents()
                    self.gui.text_export.append(line)
                    self.export('tmp', home, away, rounds, fth, fta)
                if rounds <= r_max:
                    self.process_csv(i)
                if self.stop_action == 1:
                    break
            if mode == 2: # simulation
                if r_min <= rounds <= r_max:
                    self.scale_group_check(day)
                    if self.match_group == 1:
                        self.scale_group(teams_num)
                    self.simulation_prediction(home, away, net,mode=0)
                    self.simulation_filters(home, away)
                    ### used in simulation module
                    self.date = day
                    self.home = home
                    self.away = away
                    self.fth = fth
                    self.fta = fta
                    self.batch_print() # simulation module
                if rounds <= r_max:
                    self.process_csv(i)
            if mode == 0:
                self.process_csv(i)
        # final scale for predicting in stats window
        if mode==0:
            self.scale_group(teams_num)
def scale_odds(self, home,away,date,min_value=1,max_value=15):
''' Scales data to range(-1,1)'''
odds = self.relations_base.execute('''SELECT odd_home,odd_draw,odd_away
FROM results WHERE home="%s" AND away="%s" AND date_txt="%s"'''%(home,away,date))
odd_home,odd_draw,odd_away = odds.fetchone()
scaled_1 = 2.0*(float(odd_home)-min_value)/(max_value-min_value)-1
scaled_x = 2.0*(float(odd_draw)-min_value)/(max_value-min_value)-1
scaled_2 = 2.0*(float(odd_away)-min_value)/(max_value-min_value)-1
val = [scaled_1,scaled_x,scaled_2]
for i in range(0,len(val)):
if val[i] > 1:
val[i]=1
elif val[i] < 1:
val[i]=-1
self.relations_base.execute('''UPDATE odds SET odd_home=?,odd_draw=?,odd_away=?''',(scaled_1,scaled_x,scaled_2))
    def batch_print(self):
        ''' Used in simulator app'''
        # Deliberate no-op hook called from load_csv (mode 2); presumably
        # overridden by the simulator module to report each processed
        # fixture -- confirm against the simulation subclass.
        pass
def export(self, expt_name, home, away, rounds, fth, fta):
''' Exports data for learning'''
with open(os.path.join('tmp', '')+'export', 'a') as save:
scaled_h = self.relations_base.execute('''SELECT
matches,
points,
pointsHome,
pointsBB,
pointsBBHome,
form,
formHome,
formBB,
formBBHome,
points_b,
pointsHome_b,
pointsBB_b,
pointsBBHome_b,
form_b,
formHome_b,
formBB_b,
formBBHome_b,
winhome,
drawhome,
losehome,
winhome_b,
drawhome_b,
losehome_b,
goalsscored,
goalslost,
goalsscoredhome,
goalslosthome,
goalsscored_b,
goalslost_b,
goalsscoredhome_b,
goalslosthome_b,
mowins,
moloses,
mowinsHome,
molosesHome,
f1,
f2,
f3,
f4,
f1Home,
f2Home,
f1BB,
f2BB,
f3BB,
f4BB,
f1BBHome,
f2BBHome,
bts,
btsHome,
over25,
over25Home,
under25,
under25Home,
series_wins,
series_draws,
series_loses,
series_winshome,
series_drawshome,
series_loseshome,
series_noloses,
series_noloseshome,
series_nowins,
series_nowinshome,
series_nodraws,
series_nodrawshome,
series_bts,
series_btsHome,
series_over25,
series_over25Home,
series_under25,
series_under25Home
FROM scaled
WHERE team="%s"'''%home)
scaled_h = scaled_h.fetchone()
scaled_h = str(scaled_h[:])
scaled_a = self.relations_base.execute('''SELECT
matches,
points,
pointsAway,
pointsBB,
pointsBBAway,
form,
formAway,
formBB,
formBBAway,
points_b,
pointsAway_b,
pointsBB_b,
pointsBBAway_b,
form_b,
formAway_b,
formBB_b,
formBBAway_b,
winaway,
drawaway,
loseaway,
winaway_b,
drawaway_b,
loseaway_b,
goalsscored,
goalslost,
goalsscoredaway,
goalslostaway,
goalsscored_b,
goalslost_b,
goalsscoredaway_b,
goalslostaway_b,
mowins,
moloses,
mowinsAway,
molosesAway,
f1,
f2,
f3,
f4,
f1Away,
f2Away,
f1BB,
f2BB,
f3BB,
f4BB,
f1BBAway,
f2BBAway,
bts,
btsAway,
over25,
over25Away,
under25,
under25Away,
series_wins,
series_draws,
series_loses,
series_winsaway,
series_drawsaway,
series_losesaway,
series_noloses,
series_nolosesaway,
series_nowins,
series_nowinsaway,
series_nodraws,
series_nodrawsaway,
series_bts,
series_btsAway,
series_over25,
series_over25Away,
series_under25,
series_under25
FROM scaled
WHERE team="%s"'''%away)
scaled_a = scaled_a.fetchone()
scaled_a = str(scaled_a[:])
line = scaled_h[1:-1]+','+scaled_a[1:-1]+self.nl
odds= self.relations_base.execute('''SELECT odd_home,odd_draw,odd_away FROM odds''')
odd_home,odd_draw,odd_away = odds.fetchone()
line_odds = str(odd_home)+'_'+str(odd_draw)+'_'+str(odd_away)+self.nl
# save.write(home+' '+away)
save.write(line)
save.write(line_odds)
save.close()
def export_fix(self, expt_name):
''' Count lines,inputs and outputs and write in title'''
print '=============fix'
path = os.path.join('export','')
with open(os.path.join('tmp','')+'export','r') as f:
tmp = reader(f)
tmp = list(tmp)
with open(path+expt_name,'w') as fix_file:
inputs = str(len(tmp[0]))
outputs = '3'
sets = str(len(tmp)/2)
title = sets+' ' +inputs+' '+outputs+self.nl
fix_file.write(title)
for i in tmp:
line = str(i)
line = line.replace('[','')
line = line.replace(']','')
line = line.replace(' ','')
line = line.replace('_',' ')
line = line.replace(',',' ')
line = line.replace("'",'')
fix_file.write(line+self.nl)
with open(path+expt_name,'r') as f:
fix_file = f.readline()
print fix_file
def scale_group_check(self, day):
''' Checks is scaling has been done for current round'''
if day != self.match_group_date:
self.match_group_date = day
self.match_group = 1
    def scale_group(self, teams):
        '''Rescales every league/series statistic into the scaled table.

        Runs once per match day (triggered via scale_group_check).  *teams*
        is the number of teams in the league; theoretical season maxima are
        derived from it so absolute statistics are scaled against the best
        achievable value, while the "_b" columns are scaled relative to the
        current min/max across all teams.
        '''
        self.match_group = 0
        ############## scale variables
        max_matches = (teams-1)*2.0 # when each team plays 2 matches
        #(at home,at away)
        max_points = max_matches*3
        max_points_h = max_matches*1.5
        max_points_a = max_matches*1.5
        max_form = 12
        max_form_h = 6
        max_form_a = 6
        # BB rating grants up to 4 points per match (3 + opponent bonus).
        max_points_bb = max_matches*4
        max_points_bb_h = max_matches*2
        max_points_bb_a = max_matches*2
        max_form_bb = 16
        max_form_bb_h = 8
        max_form_bb_a = 8
        max_goals = max_matches * 3
        max_goals_ha = max_goals/2
        ############## in comparision to max to achieve in season
        self.scale('matches', 'matches', 0, max_matches)
        self.scale('points', 'points', 0, max_points)
        self.scale('pointsHome', 'pointsHome', 0, max_points_h)
        self.scale('pointsAway', 'pointsAway', 0, max_points_a)
        self.scale('pointsBB', 'pointsBB', 0, max_points_bb)
        self.scale('pointsBBHome', 'pointsBBHome', 0, max_points_bb_h)
        self.scale('pointsBBAway', 'pointsBBAway', 0, max_points_bb_a)
        self.scale('form', 'form', 0, max_form)
        self.scale('f1', 'f1', 0, 3)
        self.scale('f2', 'f2', 0, 3)
        self.scale('f3', 'f3', 0, 3)
        self.scale('f4', 'f4', 0, 3)
        self.scale('formHome', 'formHome', 0, max_form_h)
        self.scale('f1Home', 'f1Home', 0, 3)
        self.scale('f2Home', 'f2Home', 0, 3)
        self.scale('formAway', 'formAway', 0, max_form_a)
        self.scale('f1Away', 'f1Away', 0, 3)
        self.scale('f2Away', 'f2Away', 0, 3)
        self.scale('formBB', 'formBB', 0, max_form_bb)
        self.scale('f1BB', 'f1BB', 0, 4)
        self.scale('f2BB', 'f2BB', 0, 4)
        self.scale('f3BB', 'f3BB', 0, 4)
        self.scale('f4BB', 'f4BB', 0, 4)
        self.scale('formBBHome', 'formBBHome', 0, max_form_bb_h)
        self.scale('f1BBHome', 'f1BBHome', 0, 4)
        self.scale('f2BBHome', 'f2BBHome', 0, 4)
        self.scale('formBBAway', 'formBBAway', 0, max_form_bb_a)
        self.scale('f1BBAway', 'f1BBAway', 0, 4)
        self.scale('f2BBAway', 'f2BBAway', 0, 4)
        self.scale('goalsscored', 'goalsscored', 0, max_goals)
        self.scale('goalsscoredhome', 'goalsscoredhome', 0, max_goals_ha)
        self.scale('goalsscoredaway', 'goalsscoredaway', 0, max_goals_ha)
        self.scale('goalslost', 'goalslost', 0, max_goals)
        self.scale('goalslosthome', 'goalslosthome', 0, max_goals_ha)
        self.scale('goalslostaway', 'goalslostaway', 0, max_goals_ha)
        self.scale('bts', 'bts', 0, max_matches)
        self.scale('btsHome','btsHome', 0, max_matches/2)
        self.scale('btsAway','btsAway', 0, max_matches/2)
        self.scale('over25', 'over25', 0, max_matches)
        self.scale('over25Home','over25Home', 0, max_matches/2)
        self.scale('over25Away','over25Away', 0, max_matches/2)
        self.scale('under25', 'under25', 0, max_matches)
        self.scale('under25Home','under25Home', 0, max_matches/2)
        self.scale('under25Away','under25Away', 0, max_matches/2)
        ############## in comparision to others
        # No fixed bounds here: scale() uses the current min/max across all
        # teams, feeding the "_b" (relative) columns.
        self.scale('winhome', 'winhome_b')
        self.scale('drawhome', 'drawhome_b')
        self.scale('losehome', 'losehome_b')
        self.scale('winaway', 'winaway_b')
        self.scale('drawaway', 'drawaway_b')
        self.scale('loseaway', 'loseaway_b')
        self.scale('points', 'points_b')
        self.scale('pointsHome', 'pointsHome_b')
        self.scale('pointsAway', 'pointsAway_b')
        self.scale('pointsBB', 'pointsBB_b')
        self.scale('pointsBBHome', 'pointsBBHome_b')
        self.scale('pointsBBAway', 'pointsBBAway_b')
        self.scale('form', 'form_b')
        self.scale('formHome', 'formHome_b')
        self.scale('formAway', 'formAway_b')
        self.scale('formBB', 'formBB_b')
        self.scale('formBBHome', 'formBBHome_b')
        self.scale('formBBAway', 'formBBAway_b')
        self.scale('goalsscored', 'goalsscored_b')
        self.scale('goalsscoredhome', 'goalsscoredhome_b')
        self.scale('goalsscoredaway', 'goalsscoredaway_b')
        self.scale('goalslost', 'goalslost_b')
        self.scale('goalslosthome', 'goalslosthome_b')
        self.scale('goalslostaway', 'goalslostaway_b')
        ############## mov,mol
        # Margin-of-victory / margin-of-loss averages, capped at 3 goals.
        self.scale('mowins', 'mowins', 0, 3)
        self.scale('mowinsHome', 'mowinsHome', 0, 3)
        self.scale('mowinsAway', 'mowinsAway', 0, 3)
        self.scale('moloses', 'moloses', 0, 3)
        self.scale('molosesHome', 'molosesHome', 0, 3)
        self.scale('molosesAway', 'molosesAway', 0, 3)
        ################ series
        # Streaks are scaled against a fixed cap of 10 consecutive matches.
        self.scale('series_wins', 'series_wins',
                   min_value=0, max_value=10, series=1)
        self.scale('series_draws', 'series_draws',
                   min_value=0, max_value=10, series=1)
        self.scale('series_loses', 'series_loses',
                   min_value=0, max_value=10, series=1)
        self.scale('series_winshome', 'series_winshome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_drawshome', 'series_drawshome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_loseshome', 'series_loseshome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_winsaway', 'series_winsaway',
                   min_value=0, max_value=10, series=1)
        self.scale('series_drawsaway', 'series_drawsaway',
                   min_value=0, max_value=10, series=1)
        self.scale('series_losesaway', 'series_losesaway'
                   , min_value=0, max_value=10, series=1)
        self.scale('series_noloses', 'series_noloses',
                   min_value=0, max_value=10, series=1)
        self.scale('series_noloseshome', 'series_noloseshome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nolosesaway', 'series_nolosesaway',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nowins', 'series_nowins',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nowinshome', 'series_nowinshome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nowinsaway', 'series_nowinsaway',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nodraws', 'series_nodraws',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nodrawshome', 'series_nodrawshome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_nodrawsaway', 'series_nodrawsaway',
                   min_value=0, max_value=10, series=1)
        self.scale('series_bts', 'series_bts',
                   min_value=0, max_value=10, series=1)
        self.scale('series_btsHome', 'series_btsHome',
                   min_value=0, max_value=10, series=1)
        self.scale('series_btsAway', 'series_btsAway',
                   min_value=0, max_value=10, series=1)
        self.scale('series_over25', 'series_over25',
                   min_value=0, max_value=10, series=1)
        self.scale('series_over25Home', 'series_over25Home',
                   min_value=0, max_value=10, series=1)
        self.scale('series_over25Away', 'series_over25Away',
                   min_value=0, max_value=10, series=1)
        self.scale('series_under25', 'series_under25',
                   min_value=0, max_value=10, series=1)
        self.scale('series_under25Home', 'series_under25Home',
                   min_value=0, max_value=10, series=1)
        self.scale('series_under25Away', 'series_under25Away',
                   min_value=0, max_value=10, series=1)
def scale(self, record_in, record_out, min_value=None, max_value=None,
series=0):
''' Scales data to range(-1,1), Need some tweaks to speed up'''
if max_value == None:
if series == 0:
max_value = self.relations_base.execute('''SELECT max(%s)
FROM league''' %record_in)
else:
max_value = self.relations_base.execute('''SELECT max(%s)
FROM series''' %record_in)
max_value, = max_value.fetchone()
if min_value == None:
if series == 0:
min_value = self.relations_base.execute('''SELECT min(%s)
FROM league''' %record_in)
else:
min_value = self.relations_base.execute('''SELECT min(%s)
FROM series''' %record_in)
min_value, = min_value.fetchone()
if series == 0:
teams = self.relations_base.execute('''SELECT %s,team
FROM league''' %record_in)
else:
teams = self.relations_base.execute('''SELECT %s,team
FROM series''' %record_in)
teams = tuple(teams)
try:
for i in teams:
scaled = 2.0*(i[0]-min_value)/(max_value-min_value)-1
if scaled < -1:
scaled = -1
elif scaled > 1:
scaled = 1
self.relations_base.execute('''UPDATE scaled
SET %s=? WHERE team=? ''' %record_out, (scaled, i[1]))
except:
print 'Scale: error'
    def simulation_filters(self, home, away):
        ''' Loads into variables actual team stats to compare with filters'''
        # Copies the current (unscaled) league points/form and the streak
        # counters for both teams into t1_*/t2_* attributes, then derives
        # decimal odds from the network prediction.  The simulator reads
        # these attributes when applying its bet filters.
        # filters variables
        t1_stats = self.relations_base.execute('''SELECT points,pointsHome,
                    form,formHome FROM league
                    WHERE team="%s"'''%home)
        t1 = tuple(t1_stats)
        # Single-row result: the loop just unpacks the one tuple.
        for i in t1:
            self.t1_points = i[0]
            self.t1_points_h = i[1]
            self.t1_form = i[2]
            self.t1_form_h = i[3]
        t1_series = self.relations_base.execute('''SELECT series_wins,
                    series_winshome,series_draws,series_drawshome,series_loses,
                    series_loseshome,series_nowins,series_nowinshome,
                    series_nodraws,series_nodrawshome,series_noloses,
                    series_noloseshome,series_bts,series_btsHome,series_over25,
                    series_over25Home,series_under25,series_under25Home
                    FROM series
                    WHERE team="%s"'''%home)
        t1 = tuple(t1_series)
        # Index order mirrors the SELECT column order above.
        for i in t1:
            self.t1_wins = i[0]
            self.t1_winshome = i[1]
            self.t1_draws = i[2]
            self.t1_drawshome = i[3]
            self.t1_loses = i[4]
            self.t1_loseshome = i[5]
            self.t1_nowins = i[6]
            self.t1_nowinshome = i[7]
            self.t1_nodraws = i[8]
            self.t1_nodrawshome = i[9]
            self.t1_noloses = i[10]
            self.t1_noloseshome = i[11]
            self.t1_bts = i[12]
            self.t1_btshome = i[13]
            self.t1_over = i[14]
            self.t1_overhome = i[15]
            self.t1_under = i[16]
            self.t1_underhome = i[17]
        # NOTE(review): the away team reuses the Home league columns
        # (pointsHome/formHome) into *_a attributes -- confirm intended.
        t2_stats = self.relations_base.execute('''SELECT points,pointsHome,
                    form,formHome FROM league
                    WHERE team="%s"'''%away)
        t2 = tuple(t2_stats)
        for i in t2:
            self.t2_points = i[0]
            self.t2_points_a = i[1]
            self.t2_form = i[2]
            self.t2_form_a = i[3]
        t2_series = self.relations_base.execute('''SELECT series_wins,
                    series_winsaway,series_draws,series_drawsaway,
                    series_loses,series_losesaway,series_nowins,
                    series_nowinsaway,series_nodraws,series_nodrawsaway,
                    series_noloses,series_nolosesaway,series_bts,
                    series_btsAway,series_over25,series_over25Away,
                    series_under25,series_under25Away
                    FROM series
                    WHERE team="%s"'''%away)
        t2 = tuple(t2_series)
        for i in t2:
            self.t2_wins = i[0]
            self.t2_winsaway = i[1]
            self.t2_draws = i[2]
            self.t2_drawsaway = i[3]
            self.t2_loses = i[4]
            self.t2_losesaway = i[5]
            self.t2_nowins = i[6]
            self.t2_nowinsaway = i[7]
            self.t2_nodraws = i[8]
            self.t2_nodrawsaway = i[9]
            self.t2_noloses = i[10]
            self.t2_nolosesaway = i[11]
            self.t2_bts = i[12]
            self.t2_btsaway = i[13]
            self.t2_over = i[14]
            self.t2_overaway = i[15]
            self.t2_under = i[16]
            self.t2_underaway = i[17]
        ####
        # Odds
        ####
        # Convert the network prediction back into decimal odds (1, X, 2),
        # derive the double-chance odds (1X, X2), and floor everything at
        # 1.0 -- a decimal odd below 1 is meaningless.
        self.odds = self.simulation_prediction(home,away,'default',mode=1)
        self.odd_1 = round(self.odds_rescale(self.odds[0],self.odds_level),3)
        self.odd_x = round(self.odds_rescale(self.odds[1],self.odds_level),3)
        self.odd_2 = round(self.odds_rescale(self.odds[2],self.odds_level),3)
        self.odd_1x = round(1/((1/self.odd_1) + (1/self.odd_x)),3)
        self.odd_x2 = round(1/((1/self.odd_x) + (1/self.odd_2)),3)
        if self.odd_1 < 1:
            self.odd_1 = 1.0
        if self.odd_2 < 1:
            self.odd_2 = 1.0
        if self.odd_x < 1:
            self.odd_x = 1.0
        if self.odd_x2 < 1:
            self.odd_x2 = 1.0
        if self.odd_1x < 1:
            self.odd_1x = 1.0
def process_csv(self, results):
'''Calculates points,form,series etc.'''
date, team_home, team_away, goals_home, goals_away,odh,odd,oda = results
if goals_home > goals_away:
winner = 1 #home team won
if goals_home < goals_away:
winner = 2 #away team won
if goals_home == goals_away:
winner = 0 # draw
goal_diff = abs(goals_home - goals_away) # for margins of wins/loses
#######
#points BBrating variables
#######
max_ph = self.relations_base.execute('''SELECT max(pointsBBHome)
FROM league''')
max_ph, = max_ph.fetchone()
max_pa = self.relations_base.execute('''SELECT max(pointsBBAway)
FROM league''')
max_pa, = max_pa.fetchone()
max_f = self.relations_base.execute('''SELECT max(formBB)
FROM league''')
max_f, = max_f.fetchone()
team_home_p = self.relations_base.execute('''SELECT pointsBBHome
FROM league
WHERE team="%s"'''%team_home)
team_home_p = team_home_p.fetchone()
team_home_p = team_home_p[0]
team_away_p = self.relations_base.execute('''SELECT pointsBBAway
FROM league
WHERE team="%s"'''%team_away)
team_away_p = team_away_p.fetchone()
team_away_p = team_away_p[0]
team_home_f = self.relations_base.execute('''SELECT formBB
FROM league
WHERE team="%s"'''%team_home)
team_home_f = team_home_f.fetchone()
team_home_f = team_home_f[0]
team_away_f = self.relations_base.execute('''SELECT formBB
FROM league
WHERE team="%s"'''%team_away)
team_away_f = team_away_f.fetchone()
team_away_f = team_away_f[0]
if winner == 1:
if max_pa > 0:
bb_rating_h = 3 + ((team_away_p/float(max_pa))+ (team_away_f/float(max_f)))/2
else:
bb_rating_h = 3
if max_ph > 0:
bb_rating_a = 0 + ((team_home_p/float(max_ph))+ (team_home_f/float(max_f)))/2
else:
bb_rating_a = 0
points_h = 3
points_a = 0
form_h = 3
form_a = 0
#######
#margin of wins,margin of loses variables
#######
self.relations_base.execute('''UPDATE league SET
diffgwins=diffgwins+?,
diffgwinsHome=diffgwinsHome+?
WHERE team=?''',(goal_diff, goal_diff, team_home))
self.relations_base.execute('''UPDATE league SET
diffgloses=diffgloses+?,
diffglosesAway=diffglosesAway+?
WHERE team=?''',(goal_diff, goal_diff, team_away))
#######
# wine/lose/draw
#######
self.relations_base.execute('''UPDATE league SET
winhome=winhome+1
WHERE team=? ''', [(team_home)])
self.relations_base.execute('''UPDATE league SET
loseaway=loseaway+1
WHERE team=? ''', [(team_away)])
#######
# series
#######
self.relations_base.execute('''UPDATE series SET
series_wins=series_wins+1,
series_winshome=series_winshome+1,
series_draws=0,
series_drawshome=0,
series_loses=0,
series_loseshome=0,
series_noloses=series_noloses+1,
series_noloseshome=series_noloseshome+1,
series_nowins=0,
series_nowinshome=0,
series_nodraws=series_nodraws+1,
series_nodrawshome=series_nodrawshome+1
WHERE team=? ''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_loses=series_loses+1,
series_losesaway=series_losesaway+1,
series_draws=0,
series_drawsaway=0,
series_wins=0,
series_winsaway=0,
series_noloses=0,
series_nolosesaway=0,
series_nowins=series_nowins+1,
series_nowinsaway=series_nowinsaway+1,
series_nodraws=series_nodraws+1,
series_nodrawsaway=series_nodrawsaway+1
WHERE team=? ''', [(team_away)])
if winner == 2:
if max_pa > 0:
bb_rating_h = 0 + ((team_away_p/float(max_pa))+ (team_away_f/float(max_f)))/2
else:
bb_rating_h = 0
if max_ph > 0:
bb_rating_a = 3 + ((team_home_p/float(max_ph))+ (team_home_f/float(max_f)))/2
else:
bb_rating_a = 3
points_h = 0
points_a = 3
form_h = 0
form_a = 3
#######
#margin of wins,margin of loses variables
#######
self.relations_base.execute('''UPDATE league SET
diffgloses=diffgloses+?,
diffglosesHome=diffglosesHome+?
WHERE team=?''',(goal_diff, goal_diff, team_home))
self.relations_base.execute('''UPDATE league SET
diffgwins=diffgwins+?,
diffgwinsAway=diffgwinsAway+?
WHERE team=?''',(goal_diff, goal_diff, team_away))
#######
# wine/lose/draw
#######
self.relations_base.execute('''UPDATE league SET
losehome=losehome+1
WHERE team=? ''', [(team_home)])
self.relations_base.execute('''UPDATE league SET
winaway=winaway+1
WHERE team=? ''', [(team_away)])
#######
# series
#######
self.relations_base.execute('''UPDATE series SET
series_wins=0,
series_winshome=0,
series_draws=0,
series_drawshome=0,
series_loses=series_loses+1,
series_loseshome=series_loseshome+1,
series_noloses=0,
series_noloseshome=0,
series_nowins=series_nowins+1,
series_nowinshome=series_nowinshome+1,
series_nodraws=series_nodraws+1,
series_nodrawshome=series_nodrawshome+1
WHERE team=? ''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_loses=0,
series_losesaway=0,
series_draws=0,
series_drawsaway=0,
series_wins=series_wins+1,
series_winsaway=series_winsaway+1,
series_noloses=series_noloses+1,
series_nolosesaway=series_nolosesaway+1,
series_nowins=0,
series_nowinsaway=0,
series_nodraws=series_nodraws+1,
series_nodrawsaway=series_nodrawsaway+1
WHERE team=? ''', [(team_away)])
if winner == 0:
if max_pa > 0:
bb_rating_h = 1 + ((team_away_p/float(max_pa))+ (team_away_f/float(max_f)))/2
else:
bb_rating_h = 1
if max_ph > 0:
bb_rating_a = 1 + ((team_home_p/float(max_ph))+ (team_home_f/float(max_f)))/2
else:
bb_rating_a = 1
points_h = 1
points_a = 1
form_h = 1
form_a = 1
#######
# wine/lose/draw
#######
self.relations_base.execute('''UPDATE league SET
drawhome=drawhome+1
WHERE team=? ''', [(team_home)])
self.relations_base.execute('''UPDATE league SET
drawaway=drawaway+1
WHERE team=? ''', [(team_away)])
#######
# series
#######
self.relations_base.execute('''UPDATE series SET
series_wins=0,
series_winshome=0,
series_draws=series_draws+1,
series_drawshome=series_drawshome+1,
series_loses=0,
series_loseshome=0,
series_noloses=series_noloses+1,
series_noloseshome=series_noloseshome+1,
series_nowins=series_nowins+1,
series_nowinshome=series_nowinshome+1,
series_nodraws=0,
series_nodrawshome=0
WHERE team=? ''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_loses=0,
series_losesaway=0,
series_draws=series_draws+1,
series_drawsaway=series_drawsaway+1,
series_wins=0,
series_winsaway=0,
series_noloses=series_noloses+1,
series_nolosesaway=series_nolosesaway+1,
series_nowins=series_nowins+1,
series_nowinsaway=series_nowinsaway+1,
series_nodraws=0,
series_nodrawsaway=0
WHERE team=? ''', [(team_away)])
#######
#points Classic
#######
self.relations_base.execute('''UPDATE league SET
points=points+?,
pointsHome=pointsHome+?
WHERE team=? ''', (points_h, points_h, team_home))
self.relations_base.execute('''UPDATE league SET
points=points+?,
pointsAway=pointsAway+?
WHERE team=? ''', (points_a, points_a, team_away))
#######
#points BBrating
#######
self.relations_base.execute('''UPDATE league SET
pointsBB=pointsBB+?,
pointsBBHome=pointsBBHome+?
WHERE team=? ''', (bb_rating_h, bb_rating_h, team_home))
self.relations_base.execute('''UPDATE league SET
pointsBB=pointsBB+?,
pointsBBAway=pointsBBAway+?
WHERE team=? ''', (bb_rating_a, bb_rating_a, team_away))
#######
#form BBrating
#######
self.relations_base.execute('''UPDATE league SET
f4BB=f3BB,
f3BB=f2BB,
f2BB=f1BB,
f1BB=?,
f2BBHome=f1BBHome,
f1BBHome=?
WHERE team=? ''', (bb_rating_h, bb_rating_h, team_home))
self.relations_base.execute('''UPDATE league SET
f4BB=f3BB,
f3BB=f2BB,
f2BB=f1BB,
f1BB=?,
f2BBAway=f1BBAway,
f1BBAway=?
WHERE team=? ''', (bb_rating_a, bb_rating_a, team_away))
#######
#goals
#######
self.relations_base.execute('''UPDATE league SET
goalsscoredhome=goalsscoredhome+?,
goalslosthome=goalslosthome+?
WHERE team=?''',(goals_home, goals_away, team_home))
self.relations_base.execute('''UPDATE league SET
goalsscoredaway=goalsscoredaway+?,
goalslostaway=goalslostaway+?
WHERE team=?''',(goals_away, goals_home, team_away))
#######
#form Classic
#######
self.relations_base.execute('''UPDATE league SET
f4op=f3op,
f3op=f2op,
f2op=f1op,
f1op=?,
f4=f3,
f3=f2,
f2=f1,
f1=?,
f2opHome=f1opHome,
f1opHome=?,
f2Home=f1Home,
f1Home=?
WHERE team=? ''', (team_away, form_h, team_away, form_h, team_home))
self.relations_base.execute('''UPDATE league SET
f4op=f3op,
f3op=f2op,
f2op=f1op,
f1op=?,
f4=f3,
f3=f2,
f2=f1,
f1=?,
f2opAway=f1opAway,
f1opAway=?,
f2Away=f1Away,
f1Away=?
WHERE team=? ''', (team_home, form_a, team_home, form_a, team_away))
######
#matches/form/goals sum
######
self.relations_base.execute('''UPDATE league SET
matches=winhome+drawhome+losehome+winaway+drawaway+loseaway,
matchesHome=winhome+drawhome+losehome,
matchesAway=winaway+drawaway+loseaway,
form=f1+f2+f3+f4,
formHome=f1Home+f2Home,
formAway=f1Away+f2Away,
formBB=f1BB+f2BB+f3BB+f4BB,
formBBHome=f1BBHome+f2BBHome,
formBBAway=f1BBAway+f2BBAway,
goalsscored=goalsscoredHome+goalsscoredAway,
goalslost=goalslostHome+goalslostAway''')
######
#margin of winning,losing
######
self.relations_base.execute('''UPDATE league SET
mowins=diffgwins/matches,
moloses=diffgloses/matches,
mowinsHome=diffgwinsHome/matchesHome,
molosesHome=diffglosesHome/matchesHome,
mowinsAway=diffgwinsAway/matchesAway,
molosesAway=diffglosesAway/matchesAway''')
#####
#BTS
#####
if goals_home > 0 and goals_away > 0:
self.relations_base.execute('''UPDATE league SET
bts=bts+1,
btsHome=btsHome+1
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_bts=series_bts+1,
series_btsHome=series_btsHome+1
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE league SET
bts=bts+1,
btsAway=btsAway+1
WHERE team=?''', [(team_away)])
self.relations_base.execute('''UPDATE series SET
series_bts=series_bts+1,
series_btsAway=series_btsAway+1
WHERE team=?''', [(team_away)])
else:
self.relations_base.execute('''UPDATE series SET
series_bts=0,
series_btsHome=0
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_bts=0,
series_btsAway=0
WHERE team=?''', [(team_away)])
#####
#under/over 2.5
#####
if (goals_home+goals_away) > 2:
self.relations_base.execute('''UPDATE league SET
over25=over25+1,
over25Home=over25Home+1
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_over25=series_over25+1,
series_over25Home=series_over25Home+1
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE league SET
over25=over25+1,
over25Away=over25Away+1
WHERE team=?''', [(team_away)])
self.relations_base.execute('''UPDATE series SET
series_over25=series_over25+1,
series_over25Away=series_over25Away+1
WHERE team=?''', [(team_away)])
self.relations_base.execute('''UPDATE series SET
series_under25=0,
series_under25Home=0
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_under25=0,
series_under25Away=0
WHERE team=?''', [(team_away)])
elif (goals_home+goals_away) < 3:
self.relations_base.execute('''UPDATE league SET
under25=under25+1,
under25Home=under25Home+1
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE league SET
under25=under25+1,
under25Away=under25Away+1
WHERE team=?''', [(team_away)])
self.relations_base.execute('''UPDATE series SET
series_under25=series_under25+1,
series_under25Home=series_under25Home+1
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_under25=series_under25+1,
series_under25Away=series_under25Away+1
WHERE team=?''', [(team_away)])
self.relations_base.execute('''UPDATE series SET
series_over25=0,
series_over25Home=0
WHERE team=?''', [(team_home)])
self.relations_base.execute('''UPDATE series SET
series_over25=0,
series_over25Away=0
WHERE team=?''', [(team_away)])
def clear_tables(self):
    '''Removes all data from the tables so a new file can be processed.

    Each DELETE is wrapped in its own try/except so that a failure on one
    table does not stop the remaining tables from being cleared.
    '''
    # NOTE(review): "WHERE id" deletes every row with a truthy id; bare
    # except clauses below hide the real error type -- consider catching
    # the specific sqlite3 exception instead.
    try:
        self.relations_base.execute('''DELETE FROM league
        WHERE id''')
    except:
        print 'League table deletion error'
    try:
        self.relations_base.execute('''DELETE FROM results WHERE id''')
    except:
        print 'League results deletion error'
    try:
        self.relations_base.execute('''DELETE FROM series WHERE id''')
    except:
        print 'Series table deletion error'
    try:
        self.relations_base.execute('''DELETE FROM scaled WHERE id''')
    except:
        print 'Scaled table deletion error'
    try:
        self.relations_base.execute('''DELETE FROM odds WHERE id''')
    except:
        print 'Odds table deletion error'
def simulation_prediction(self, home, away, net, mode=0):
    ''' Predicts the outcome of a match using the given net.

    mode 0: predicting outcomes (1, x, 2)
    mode 1: predicting odds
    '''
    # Directories holding the trained FANN networks (trailing separator kept
    # so the filename can be appended directly).
    path_net = os.path.join('net','')
    path_odds = os.path.join('odds','')
    input_list = []
    # Home-side feature vector: every scaled statistic for the home team.
    # NOTE(review): the team name is interpolated straight into the SQL
    # string; a name containing a double quote would break the query --
    # consider a parameterized query (team=?) instead.
    t1 = self.relations_base.execute('''SELECT
    matches,
    points,
    pointsHome,
    pointsBB,
    pointsBBHome,
    form,
    formHome,
    formBB,
    formBBHome,
    points_b,
    pointsHome_b,
    pointsBB_b,
    pointsBBHome_b,
    form_b,
    formHome_b,
    formBB_b,
    formBBHome_b,
    winhome,
    drawhome,
    losehome,
    winhome_b,
    drawhome_b,
    losehome_b,
    goalsscored,
    goalslost,
    goalsscoredhome,
    goalslosthome,
    goalsscored_b,
    goalslost_b,
    goalsscoredhome_b,
    goalslosthome_b,
    mowins,
    moloses,
    mowinsHome,
    molosesHome,
    f1,
    f2,
    f3,
    f4,
    f1Home,
    f2Home,
    f1BB,
    f2BB,
    f3BB,
    f4BB,
    f1BBHome,
    f2BBHome,
    bts,
    btsHome,
    over25,
    over25Home,
    under25,
    under25Home,
    series_wins,
    series_draws,
    series_loses,
    series_winshome,
    series_drawshome,
    series_loseshome,
    series_noloses,
    series_noloseshome,
    series_nowins,
    series_nowinshome,
    series_nodraws,
    series_nodrawshome,
    series_bts,
    series_btsHome,
    series_over25,
    series_over25Home,
    series_under25,
    series_under25Home
    FROM scaled
    WHERE team="%s"'''%home)
    t1 = tuple(t1)
    # The column order above defines the feature order the net was trained on.
    for i in t1[0]:
        input_list.append(i)
    # Away-side feature vector, mirrored with the Away columns.
    t2 = self.relations_base.execute('''SELECT
    matches,
    points,
    pointsAway,
    pointsBB,
    pointsBBAway,
    form,
    formAway,
    formBB,
    formBBAway,
    points_b,
    pointsAway_b,
    pointsBB_b,
    pointsBBAway_b,
    form_b,
    formAway_b,
    formBB_b,
    formBBAway_b,
    winaway,
    drawaway,
    loseaway,
    winaway_b,
    drawaway_b,
    loseaway_b,
    goalsscored,
    goalslost,
    goalsscoredaway,
    goalslostaway,
    goalsscored_b,
    goalslost_b,
    goalsscoredaway_b,
    goalslostaway_b,
    mowins,
    moloses,
    mowinsAway,
    molosesAway,
    f1,
    f2,
    f3,
    f4,
    f1Away,
    f2Away,
    f1BB,
    f2BB,
    f3BB,
    f4BB,
    f1BBAway,
    f2BBAway,
    bts,
    btsAway,
    over25,
    over25Away,
    under25,
    under25Away,
    series_wins,
    series_draws,
    series_loses,
    series_winsaway,
    series_drawsaway,
    series_losesaway,
    series_noloses,
    series_nolosesaway,
    series_nowins,
    series_nowinsaway,
    series_nodraws,
    series_nodrawsaway,
    series_bts,
    series_btsAway,
    series_over25,
    series_over25Away,
    series_under25,
    series_under25Away
    FROM scaled
    WHERE team="%s"'''%away)
    t2 = tuple(t2)
    for i in t2[0]:
        input_list.append(i)
    # FANN parses floats from the .net files; force the "C" locale so the
    # decimal separator is always '.'.
    locale.setlocale(locale.LC_ALL, "C")
    # First net: predict the odds from the team features.
    ann = libfann.neural_net()
    ann.create_from_file(path_odds+'odds.net')
    odds = ann.run(input_list[:])
    # The predicted odds are appended to the feature vector and fed into
    # the outcome net.
    for i in odds:
        input_list.append(i)
    if mode == 0:
        ann = libfann.neural_net()
        ann.create_from_file(path_net+str(net))
        prediction = ann.run(input_list[:])
        self.prediction = prediction[0]
    if mode == 0: #prediction
        return self.prediction
    elif mode == 1: #odds
        return odds
def return_teams(self, folder, name):
    ''' Adds all matches from a CSV file to sql and returns the list of teams.'''
    self.clear_tables()
    # NOTE(review): the file handle is never closed explicitly; consider
    # wrapping it in a 'with open(...)' block.
    file_open = reader(open(folder+name))
    for line in file_open:
        date = line[0]
        # Drop the second date separator so the date string parses as a number.
        date = date[0:7]+date[8:]
        date_num = float(date)
        fth = line[3]   # full-time home goals
        fta = line[4]   # full-time away goals
        if fth == '' or fta =='':
            # Missing scores are stored as the literal string 'NULL'.
            fth = 'NULL'
            fta = 'NULL'
        # Normalize team names to plain ASCII (Python 2 str/unicode dance:
        # decode bytes to unicode, strip diacritics, re-encode).
        home_txt = line[1].decode('utf8', 'replace')
        away_txt = line[2].decode('utf8', 'replace')
        home = unicodedata.normalize('NFD', home_txt).encode('ascii', 'ignore')
        away = unicodedata.normalize('NFD', away_txt).encode('ascii', 'ignore')
        self.relations_base.execute('''INSERT INTO results(
        date_txt,
        date_num,
        home,
        away,
        gHomeEnd,
        gAwayEnd,
        odd_home,
        odd_draw,
        odd_away) VALUES(?,?,?,?,?,?,?,?,?)''',
        (
        line[0],
        date_num,
        home,
        away,
        fth,
        fta,
        line[5],
        line[6],
        line[7]))
    #always sort results according to date
    # Copy into a temp table ordered by date, then rebuild 'results' from it.
    self.relations_base.execute('''CREATE TEMPORARY TABLE results_copy
    AS SELECT * FROM results ORDER BY date_num ASC''')
    self.relations_base.execute('''DELETE FROM results''')
    self.relations_base.execute('''INSERT INTO results(
    date_txt,
    date_num,
    home,
    away,
    gHomeEnd,
    gAwayEnd,
    odd_home,
    odd_draw,
    odd_away)
    SELECT
    date_txt,
    date_num,
    home,
    away,
    gHomeEnd,
    gAwayEnd,
    odd_home,
    odd_draw,
    odd_away
    FROM results_copy''')
    self.relations_base.execute('''DROP TABLE results_copy''')
    # remove duplicates: keep the row with the lowest id per (date, home, away)
    self.relations_base.execute('''delete from results
    where exists (select * from results t2
    where results.date_num = t2.date_num
    and results.home = t2.home
    and results.away = t2.away
    and results.id > t2.id);''')
    # Collect the distinct team names from both the home and away columns.
    teams = self.relations_base.execute('''SELECT DISTINCT home
    FROM results''')
    teams = teams.fetchall()
    self.relations_base.execute('''SELECT DISTINCT away FROM results''')
    for i in self.relations_base:
        if i not in teams:
            teams.append(i)
    teams.sort()
    return teams
def main():
    ''' Main entry point: constructs the Database object.'''
    print 'print a'
    x = Database()
    # Example invocation kept for reference:
    #x.load_csv(os.path.join('leagues', 'current', ''), 'default', expt_name='jhjh',mode = 1)
if __name__ == '__main__':
    main()
| |
import collections
import collections.abc
import datetime
import hashlib
import http.cookiejar
import logging
import re
import time
import urllib.parse
import xml.etree.ElementTree as ET
from enum import Enum

import requests
import requests.exceptions
import requests.packages.urllib3
import requests_toolbelt
from requests.auth import HTTPDigestAuth

from pytos.common.logging.definitions import REQUESTS_LOGGER_NAME
from pytos.common.exceptions import REST_HTTP_Exception, REST_Bad_Gateway, REST_Service_Unavailable_Error, \
    REST_Unauthorized_Error
from pytos.common.functions.xml import get_xml_text_value
requests.packages.urllib3.disable_warnings()
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
# Uncomment the two lines below to get more debugging information from httplib
# import http.client
# http.client.HTTPConnection.debuglevel = 1
logger = logging.getLogger(REQUESTS_LOGGER_NAME)
class RESTAuthMethods(Enum):
    """Supported HTTP authentication schemes for REST requests."""
    Digest = "digest"
    Basic = "basic"
class REST_Request(object):
    """
    This class is the base class from which all other Request objects inherit.
    :cvar TIMEOUT: The default timeout for requests.
    :cvar MAX_RETRIES: The default amount of times to retry requests that result in connection errors.
    :cvar RETRY_INTERVAL: The default interval between request retries, in seconds.
    :cvar RETRY_BACKOFF: The default exponential backoff for retries.
    :cvar MAX_URI_LENGTH: The maximum allowed URI length, in characters.
    """
    RETRY_BACKOFF = 2
    TIMEOUT = 300
    MAX_RETRIES = 5
    RETRY_INTERVAL = 5
    MAX_URI_LENGTH = 6500

    def __init__(self, hostname, uri, protocol="https", **kwargs):
        """
        Constructor for REST_Request
        :param uri: The URI the request will access.
        :type uri: str
        :param protocol: The protocol the request will use.
        :type protocol: str
        :keyword timeout: (Optional) Set the timeout for the request (Default is 300 seconds).
        :type timeout: float
        :keyword login_data: The username and password that will be used for HTTP basic authentication for the request
            ({"username" : username,"password" : password})
        :type login_data: dict
        :keyword verify_ssl: If set to False, SSL verification for requests is disabled, otherwise it is enabled.
        :type verify_ssl: bool
        :keyword cookies: If set, the contents will be appended to the cookies sent with the request.
        :type cookies: str/dict/cookielib.CookieJar
        :keyword headers: Headers to be sent with the request.
        :type headers: dict
        :keyword max_retries: The amount of times to retry the request if a connection error occurs.
        :type max_retries: int
        :keyword retry_interval: The interval between retries in seconds.
        :type retry_interval: int
        :keyword retry_backoff: The exponential backoff for retries.
        :type retry_backoff: int
        :keyword expected_status_codes: A single integer or a list of integers representing HTTP status codes.
        :type expected_status_codes: int or list of ints
        :raise REST_HTTP_Exception: If expected_status_codes is specified and the response does not contain at
            least one of the status codes, a REST_HTTP_Exception is raised.
        :raise requests.exceptions.Timeout: If a timeout error occurs while trying to perform the request.
        :raise requests.exceptions.ConnectionError: If an error occurs while trying to connect the specified host.
        """
        self.response, self.request = None, None
        self.expected_status_codes = None
        self.body = None
        self.auth_method = kwargs.get("auth_method", RESTAuthMethods.Basic)
        if protocol not in ["http", "https"]:
            raise ValueError("Protocol must be either http or https!")
        else:
            self.protocol = protocol
        verify_ssl = kwargs.get("verify_ssl")
        if verify_ssl is not None:
            logger.debug("Setting verify_ssl to '%s'", verify_ssl)
            self.verify_ssl = verify_ssl
        else:
            logger.debug("verify_ssl not set, setting to True by default.")
            self.verify_ssl = True
        session = kwargs.get("session")
        if session is None:
            self.session = requests.Session()
        else:
            self.session = session
        proxies = kwargs.get("proxies")
        if proxies is not None:
            self.session.proxies = proxies
        self.hostname = hostname
        uri_length = len(uri)
        if uri_length <= REST_Request.MAX_URI_LENGTH:
            self.uri = uri
        else:
            raise ValueError("Maximum URI length ({}) exceeded , current URI length is {}, URI is '{}'".format(
                REST_Request.MAX_URI_LENGTH, uri_length, uri))
        login_data = kwargs.get("login_data")
        if login_data is not None:
            if all(login_data.values()):
                if self.auth_method == RESTAuthMethods.Digest:
                    self.auth_tuple = HTTPDigestAuth(login_data["username"], login_data["password"])
                else:
                    # Only a SHA256 hash of the password is written to the log.
                    password_hash = hashlib.sha256()
                    password_hash.update(login_data["password"].encode("ascii"))
                    password_hash = password_hash.hexdigest()
                    logger.debug("Setting login_data to username '%s', SHA256 hashed password '%s'.",
                                 login_data["username"], password_hash)
                    self.auth_tuple = (login_data["username"], login_data["password"])
            else:
                raise ValueError("Both username and password must be set.")
        else:
            self.auth_tuple = None
        timeout = kwargs.get("timeout")
        if timeout is not None:
            logger.debug("Setting request timout to '%s'", timeout)
            self.timeout = timeout
        else:
            self.timeout = REST_Request.TIMEOUT
        max_retries = kwargs.get("max_retries")
        if max_retries is not None:
            logger.debug("Setting maximum retry count to '%s'", max_retries)
            self.max_retries = max_retries
        else:
            self.max_retries = REST_Request.MAX_RETRIES
        retry_backoff = kwargs.get("retry_backoff")
        if retry_backoff is not None:
            logger.debug("Setting retry backoff multiplier to '%s'", retry_backoff)
            self.retry_backoff = retry_backoff
        else:
            self.retry_backoff = REST_Request.RETRY_BACKOFF
        retry_interval = kwargs.get("retry_interval")
        if retry_interval is not None:
            logger.debug("Setting retry interval to '%s'", retry_interval)
            self.retry_interval = retry_interval
        else:
            self.retry_interval = REST_Request.RETRY_INTERVAL
        expected_status_codes = kwargs.get("expected_status_codes")
        if expected_status_codes is not None:
            logger.debug("Setting expected_status_codes to '%s'", expected_status_codes)
            self.expected_status_codes = expected_status_codes
        cookies = kwargs.get("cookies")
        if cookies is not None:
            logger.debug("Setting cookies to '%s'", cookies)
            if isinstance(cookies, http.cookiejar.CookieJar):
                self.cookie_jar = cookies
            else:
                logger.warning("Unknown cookie type '%s'", type(cookies))
                # BUGFIX: instantiate an empty CookieJar instead of storing the
                # class object itself (the original was missing the call parentheses).
                self.cookie_jar = http.cookiejar.CookieJar()
        else:
            self.cookie_jar = http.cookiejar.CookieJar()
        headers = kwargs.get("headers")
        self.headers = {}
        if headers is not None:
            self.headers.update(headers)
            logger.debug("Setting headers to '%s'", headers)
        self.url = "{protocol}://{hostname}{uri}".format(protocol=self.protocol, hostname=self.hostname, uri=self.uri)

    def get_created_item_id(self):
        """Extract the ID of a created item from the response's Location header.

        :return: An int ID, a list of int IDs (comma-separated header), a
            task/name string, or None if no response/header is available.
        """
        try:
            item_id = self.response.headers["location"].split("/")[-1]
            if "?" in item_id:
                logger.debug("ID contains a reference to a parameter.")
                item_id = re.sub(r"\?.*", "", item_id)
            if "-" in item_id:
                logger.debug("ID refers to a task.")
                return item_id
            elif item_id[0].isalpha():
                logger.debug("ID refers to a name.")
                return item_id
            elif "," in item_id:
                return [int(item) for item in item_id.split(",")]
            return int(item_id)
        except (AttributeError, KeyError):
            return None

    def _ensure_response_status(self):
        """Check if the self.response object contains at least one of HTTP status code in self.expected_status_codes.
        :return: Returns True if the specified status code was found in the self.response member object.
        :rtype: bool
        @raise requests.HTTPError: If the specified status code was not found in the self.response member object.
        """
        status_code_ok = True
        if not self.expected_status_codes:
            return True
        try:
            self.response.raise_for_status()
        except requests.exceptions.HTTPError as local_request_exception:
            request_exception = local_request_exception
            logger.error("Got the following error while performing request: '%s'.", request_exception)
            status_code_ok = False
        if status_code_ok:
            # BUGFIX: collections.Iterable was removed in Python 3.10;
            # the ABC lives in collections.abc.
            if isinstance(self.expected_status_codes, collections.abc.Iterable):
                if self.response.status_code not in self.expected_status_codes:
                    status_code_ok = False
            elif isinstance(self.expected_status_codes, int):
                if self.expected_status_codes != self.response.status_code:
                    status_code_ok = False
            else:
                raise ValueError("self.expected_status_codes must either be an int or list of ints.")
        if not status_code_ok:
            error_message = ""
            try:
                error_response_xml = ET.fromstring(self.response.content)
                api_error_message = get_xml_text_value(error_response_xml, "message")
                api_error_code = error_response_xml.find("code").text
                if api_error_message is not None:
                    error_message = "Message from API is '{}'.\n".format(api_error_message)
                    logger.error(error_message)
                error_message += "Error from API is '{}'.".format(api_error_code)
                logger.error(error_message)
            except (ParseError, AttributeError):
                error_message = "Could not parse response from API."
                logger.error(error_message)
            logger.error("Status code for request is '%s'.", self.response.status_code)
            http_exception = REST_HTTP_Exception.get_exception(self.response, self.expected_status_codes)
            raise http_exception
        else:
            logger.info("Status code for request is '%s'.", self.response.status_code)
            return True

    def _perform_request(self):
        """Send the prepared request, retrying on connection/5xx errors.

        Retries up to self.max_retries times with exponential backoff; raises
        the last captured exception if the final attempt fails.
        """
        start_time = datetime.datetime.now()
        exception_copy = None
        unauthorized_error = False
        for retry_count in range(self.max_retries + 1):
            # BUGFIX: clear the captured error at the start of every attempt so
            # a successful retry does not re-raise an exception from an earlier,
            # failed attempt after the loop.
            exception_copy = None
            try:
                self.response = self.session.send(self.request, verify=self.verify_ssl, timeout=self.timeout)
            except requests.exceptions.SSLError as request_exception:
                exception_copy = request_exception
                logger.error("Connection to '%s://%s%s' failed ('%s').", self.protocol, self.hostname, self.uri,
                             request_exception.args[0])
            except requests.exceptions.ConnectionError as request_exception:
                exception_copy = request_exception
                # BUGFIX: the original format string had no placeholder for the
                # failure reason, so str.format silently dropped it.
                message = "Connection to {}://{}{} failed ('{}')."
                try:
                    message = message.format(self.protocol, self.hostname, self.uri, request_exception.args[0].reason)
                except AttributeError:
                    message = message.format(self.protocol, self.hostname, self.uri, request_exception.args[0])
                logger.error(message)
            except requests.exceptions.Timeout as request_exception:
                exception_copy = request_exception
                logger.error("Connection to '%s://%s%s' timed out ('%s' seconds).", self.protocol, self.hostname,
                             self.uri,
                             self.timeout)
            else:
                logger.debug("Sent headers: '%s.", self.headers)
                if self.body is not None:
                    logger.debug("Sent body: '%s'.", self.body)
                try:
                    self._ensure_response_status()
                    request_duration = datetime.datetime.now() - start_time
                    logger.debug("Request took '%s' seconds.", request_duration)
                    logger.info("Received status: '%s'.", self.response.status_code)
                    logger.debug("Received headers: '%s'.", self.response.headers)
                    if self.response.content:
                        logger.debug("Received response body: '%s'", self.response.content)
                    break
                except (REST_Bad_Gateway, REST_Service_Unavailable_Error) as request_exception:
                    exception_copy = request_exception
                    self.log_error_details(request_exception)
                except REST_Unauthorized_Error as request_exception:
                    # One free retry on 401 (e.g. expired cookie); a second 401
                    # is treated as a hard failure.
                    if unauthorized_error:
                        exception_copy = request_exception
                        self.log_error_details(request_exception)
                        break
                    else:
                        unauthorized_error = True
                        # NOTE(review): this sets a class attribute that is not
                        # read anywhere in this module -- presumably meant to
                        # reset cached cookies; verify against callers.
                        REST_Request.cookies = None
                except REST_HTTP_Exception as request_exception:
                    exception_copy = request_exception
                    self.log_error_details(request_exception)
                    break
            logger.debug("Sleeping for '%s' seconds between retries.", self.retry_interval)
            time.sleep(self.retry_interval)
            logger.info("Retrying request to '%s', Retry '%s' out of '%s'.", self.url, retry_count + 1,
                        self.max_retries)
            if self.retry_backoff != 1:
                self.retry_interval *= self.retry_backoff
                logger.debug("Multiplied retry interval with backoff ('%s'), retry_interval is now '%s'.",
                             self.retry_backoff, self.retry_interval)
        if exception_copy is not None:
            raise exception_copy

    def log_error_details(self, request_exception):
        """Log the failed request's headers/body and the server's response."""
        logger.error("Request to '%s://%s%s' resulted in an error from the server: '%s'.",
                     self.protocol,
                     self.hostname,
                     self.uri, request_exception)
        logger.error("Sent headers: '%s.", self.headers)
        if self.body is not None:
            logger.error("Sent body: '%s'.", self.body)
        logger.error("Received headers: '%s'.", self.response.headers)
        if self.response.content:
            logger.error("Received response body: '%s'", self.response.content)

    def _encode_body_params(self, params):
        """URL-encode *params* into self.body as an application/x-www-form-urlencoded string."""
        logger.debug("Params: '%s'.", params)
        for index, key in enumerate(params.keys()):
            if index == 0:
                self.body = "{}={}".format(key, urllib.parse.quote_plus(str(params[key])))
            else:
                self.body += "&{}={}".format(key, urllib.parse.quote_plus(str(params[key])))
class GET_Request(REST_Request):
    """
    This class wraps a requests GET request.
    """
    def __init__(self, hostname, uri, protocol="https", **kwargs):
        """Build, prepare and immediately send an HTTP GET request."""
        super().__init__(hostname, uri, protocol, **kwargs)
        logger.info("Sending GET request to '%s'", self.url)
        unprepared = requests.Request(
            "GET",
            self.url,
            auth=self.auth_tuple,
            params=kwargs.get("params"),
            headers=self.headers,
            cookies=kwargs.get('cookies'),
        )
        # Prepare through the session when one exists so its state is applied.
        self.request = self.session.prepare_request(unprepared) if self.session else unprepared.prepare()
        self._perform_request()
class POST_Request(REST_Request):
    """
    This class wraps a requests POST request.
    """
    def __init__(self, hostname, uri, body=None, protocol="https", cgi=False, **kwargs):
        """
        Constructor
        :param body: Body contents to be sent with the request
        :type body: str|dict
        :param cgi: If set to True, the content type header for the request will be set to
            "application/x-www-form-urlencoded", otherwise it will be set to "application/xml"
        :type cgi: bool
        :keyword params: If set, these parameters will be URL encoded and included in the request body.
        :type params: dict
        :keyword multi_part_form_params: A tuple of parameters that will be encoded in multipart/form encoding.
            If the tuple contains 2 items, the first one will be used as the parameter name, the second
            will be the parameter value.
            If the tuple contains 3 items, the first will be used as the parameter name, the second will
            be a open file handle, the third will be the name for the file to be sent.
        :type multi_part_form_params: tuple
        """
        super().__init__(hostname, uri, protocol, **kwargs)
        # Handle parameters in dict form
        params = kwargs.get("params")
        # Handle files
        files = kwargs.get("files")
        # Handle multi part params
        multi_part_form_params = kwargs.get("multi_part_form_params")
        if multi_part_form_params is not None:
            logger.debug("Got the following multi-part form params '%s'", multi_part_form_params)
        # body, params and multi_part_form_params are mutually exclusive.
        data_types = (params, multi_part_form_params, body)
        true_count = sum(1 for data_type in data_types if data_type)
        if true_count > 1:
            raise ValueError("Only one data type to be sent can be used: body, params or multi_part_form_params.")
        if multi_part_form_params is not None:
            multi_part_form = requests_toolbelt.MultipartEncoder(fields=multi_part_form_params)
            self.headers["Content-Type"] = multi_part_form.content_type
            self.body = multi_part_form.to_string()
            # BUGFIX: always produce a str for the header value -- the fallback
            # branch previously stored a raw int, unlike the primary branch.
            if hasattr(multi_part_form, 'len'):
                multi_part_form_length = str(multi_part_form.len)
            else:
                multi_part_form_length = str(len(multi_part_form))
            self.headers["Content-Size"] = multi_part_form_length
            self.headers["Accept"] = "*/*"
        else:
            if params is not None:
                self._encode_body_params(params)
            else:
                self.body = body
            if "Content-Type" not in self.headers:
                if cgi:
                    self.headers["Content-Type"] = "application/x-www-form-urlencoded"
                else:
                    self.headers["Content-Type"] = "application/xml"
        logger.info("Sending POST request to '%s'", self.url)
        request_obj = requests.Request("POST", self.url, data=self.body, auth=self.auth_tuple, headers=self.headers,
                                       files=files, cookies=kwargs.get('cookies'))
        if self.session:
            self.request = self.session.prepare_request(request_obj)
        else:
            self.request = request_obj.prepare()
        self._perform_request()
class PUT_Request(REST_Request):
    """
    This class wraps a requests PUT request.
    """
    def __init__(self, hostname, uri, body=None, protocol="https", cgi=False, **kwargs):
        """
        Constructor
        :param body: Body contents to be sent with the request
        :type body: str|dict
        :param cgi: If set to True, the content type header for the request will be set to
            "application/x-www-form-urlencoded", otherwise it will be set to "application/xml"
        :type cgi: bool
        :keyword params: If set, these parameters will be URL encoded and included in the request body.
        :type params: dict
        """
        super().__init__(hostname, uri, protocol, **kwargs)
        # Handle parameters in dict form
        params = kwargs.get("params")
        # body and params are mutually exclusive.
        data_types = (params, body)
        true_count = sum(1 for data_type in data_types if data_type)
        if true_count > 1:
            # BUGFIX: the message said "POSTed" -- copy-paste from POST_Request.
            raise ValueError("Only one data type to be sent can be used: body or params.")
        if params is not None:
            self._encode_body_params(params)
        else:
            self.body = body
        if self.body is not None:
            if "Content-Type" not in self.headers:
                if cgi:
                    self.headers["Content-Type"] = "application/x-www-form-urlencoded"
                else:
                    self.headers["Content-Type"] = "application/xml"
        logger.info("Sending PUT request to '%s'", self.url)
        request_obj = requests.Request("PUT", self.url, data=self.body, auth=self.auth_tuple, headers=self.headers,
                                       cookies=kwargs.get('cookies'))
        if self.session:
            self.request = self.session.prepare_request(request_obj)
        else:
            self.request = request_obj.prepare()
        self._perform_request()
class DELETE_Request(REST_Request):
    """
    This class wraps a requests DELETE request.
    """
    def __init__(self, hostname, uri, protocol="https", cgi=False, **kwargs):
        """
        Constructor
        :param cgi: If set to True, the content type header for the request will be set to
            "application/x-www-form-urlencoded", otherwise it will be set to "application/xml"
        :type cgi: bool
        """
        super().__init__(hostname, uri, protocol, **kwargs)
        # Only supply a default content type when the caller has not set one.
        default_content_type = "application/x-www-form-urlencoded" if cgi else "application/xml"
        self.headers.setdefault("Content-Type", default_content_type)
        logger.info("Sending DELETE request to '%s'", self.url)
        unprepared = requests.Request("DELETE", self.url, auth=self.auth_tuple, headers=self.headers,
                                      cookies=kwargs.get('cookies'))
        self.request = self.session.prepare_request(unprepared) if self.session else unprepared.prepare()
        self._perform_request()
| |
"""The tests for the Monoprice Blackbird media player platform."""
from collections import defaultdict
import unittest
from unittest import mock
import pytest
import voluptuous as vol
from homeassistant.components.blackbird.const import DOMAIN, SERVICE_SETALLZONES
from homeassistant.components.blackbird.media_player import (
DATA_BLACKBIRD,
PLATFORM_SCHEMA,
setup_platform,
)
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import STATE_OFF, STATE_ON
import tests.common
class AttrDict(dict):
    """Dictionary whose entries can also be read and written as attributes."""

    def __setattr__(self, key, value):
        """Store an attribute assignment as a dict entry."""
        self[key] = value

    def __getattr__(self, key):
        """Resolve attribute access through dict lookup (KeyError if absent)."""
        return self[key]
class MockBlackbird:
    """Mock for pyblackbird object."""

    def __init__(self):
        """Init mock object."""
        # Every zone starts powered on with source 1 selected.
        self.zones = defaultdict(lambda: AttrDict(power=True, av=1))

    def zone_status(self, zone_id):
        """Get zone status."""
        state = self.zones[zone_id]
        state.zone = zone_id
        # Hand back a fresh AttrDict built from the stored state.
        return AttrDict(state)

    def set_zone_source(self, zone_id, source_idx):
        """Set source for zone."""
        self.zones[zone_id].av = source_idx

    def set_zone_power(self, zone_id, power):
        """Turn zone on/off."""
        self.zones[zone_id].power = power

    def set_all_zone_source(self, source_idx):
        """Set source for all zones."""
        # The mock only tracks zone 3, which is the zone the tests configure.
        self.zones[3].av = source_idx
class TestBlackbirdSchema(unittest.TestCase):
    """Test Blackbird schema."""

    def test_valid_serial_schema(self):
        """Test valid schema."""
        config = {
            "platform": "blackbird",
            "port": "/dev/ttyUSB0",
            "zones": {idx: {"name": "a"} for idx in range(1, 9)},
            "sources": {idx: {"name": "a"} for idx in range(1, 9)},
        }
        PLATFORM_SCHEMA(config)

    def test_valid_socket_schema(self):
        """Test valid schema."""
        config = {
            "platform": "blackbird",
            "host": "192.168.1.50",
            "zones": {idx: {"name": "a"} for idx in range(1, 6)},
            "sources": {idx: {"name": "a"} for idx in range(1, 5)},
        }
        PLATFORM_SCHEMA(config)

    def test_invalid_schemas(self):
        """Test invalid schemas."""
        bad_configs = (
            {},  # Empty
            None,  # None
            # Port and host used concurrently
            {
                "platform": "blackbird",
                "port": "/dev/ttyUSB0",
                "host": "192.168.1.50",
                "name": "Name",
                "zones": {1: {"name": "a"}},
                "sources": {1: {"name": "b"}},
            },
            # Port or host missing
            {
                "platform": "blackbird",
                "name": "Name",
                "zones": {1: {"name": "a"}},
                "sources": {1: {"name": "b"}},
            },
            # Invalid zone number
            {
                "platform": "blackbird",
                "port": "/dev/ttyUSB0",
                "name": "Name",
                "zones": {11: {"name": "a"}},
                "sources": {1: {"name": "b"}},
            },
            # Invalid source number
            {
                "platform": "blackbird",
                "port": "/dev/ttyUSB0",
                "name": "Name",
                "zones": {1: {"name": "a"}},
                "sources": {9: {"name": "b"}},
            },
            # Zone missing name
            {
                "platform": "blackbird",
                "port": "/dev/ttyUSB0",
                "name": "Name",
                "zones": {1: {}},
                "sources": {1: {"name": "b"}},
            },
            # Source missing name
            {
                "platform": "blackbird",
                "port": "/dev/ttyUSB0",
                "name": "Name",
                "zones": {1: {"name": "a"}},
                "sources": {1: {}},
            },
        )
        for bad_config in bad_configs:
            with pytest.raises(vol.MultipleInvalid):
                PLATFORM_SCHEMA(bad_config)
class TestBlackbirdMediaPlayer(unittest.TestCase):
    """Test the media_player module.

    Sets up a single blackbird zone (zone 3, named "Zone name") backed by a
    MockBlackbird and exercises the media_player entity built on top of it.
    """

    def setUp(self):
        """Set up the test case."""
        self.blackbird = MockBlackbird()
        self.hass = tests.common.get_test_home_assistant()
        self.hass.start()
        # Note, source dictionary is unsorted!
        # Patch the device factory so setup_platform talks to the mock
        # instead of opening a real serial port.
        with mock.patch(
            "homeassistant.components.blackbird.media_player.get_blackbird",
            new=lambda *a: self.blackbird,
        ):
            setup_platform(
                self.hass,
                {
                    "platform": "blackbird",
                    "port": "/dev/ttyUSB0",
                    "zones": {3: {"name": "Zone name"}},
                    "sources": {
                        1: {"name": "one"},
                        3: {"name": "three"},
                        2: {"name": "two"},
                    },
                },
                lambda *args, **kwargs: None,
                {},
            )
            self.hass.block_till_done()
        # Entities are registered under "<port>-<zone>" in hass.data.
        self.media_player = self.hass.data[DATA_BLACKBIRD]["/dev/ttyUSB0-3"]
        self.media_player.hass = self.hass
        self.media_player.entity_id = "media_player.zone_3"

    def tearDown(self):
        """Tear down the test case."""
        self.hass.stop()

    def test_setup_platform(self, *args):
        """Test setting up platform."""
        # One service must be registered
        assert self.hass.services.has_service(DOMAIN, SERVICE_SETALLZONES)
        assert len(self.hass.data[DATA_BLACKBIRD]) == 1
        assert self.hass.data[DATA_BLACKBIRD]["/dev/ttyUSB0-3"].name == "Zone name"

    def test_setallzones_service_call_with_entity_id(self):
        """Test set all zone source service call with entity id."""
        self.media_player.update()
        assert "Zone name" == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert "one" == self.media_player.source
        # Call set all zones service
        self.hass.services.call(
            DOMAIN,
            SERVICE_SETALLZONES,
            {"entity_id": "media_player.zone_3", "source": "three"},
            blocking=True,
        )
        # Check that source was changed
        assert 3 == self.blackbird.zones[3].av
        self.media_player.update()
        assert "three" == self.media_player.source

    def test_setallzones_service_call_without_entity_id(self):
        """Test set all zone source service call without entity id."""
        self.media_player.update()
        assert "Zone name" == self.media_player.name
        assert STATE_ON == self.media_player.state
        assert "one" == self.media_player.source
        # Call set all zones service
        self.hass.services.call(
            DOMAIN, SERVICE_SETALLZONES, {"source": "three"}, blocking=True
        )
        # Check that source was changed
        assert 3 == self.blackbird.zones[3].av
        self.media_player.update()
        assert "three" == self.media_player.source

    def test_update(self):
        """Test updating values from blackbird."""
        # Entity state is lazily populated: unknown until the first update().
        assert self.media_player.state is None
        assert self.media_player.source is None
        self.media_player.update()
        assert STATE_ON == self.media_player.state
        assert "one" == self.media_player.source

    def test_name(self):
        """Test name property."""
        assert "Zone name" == self.media_player.name

    def test_state(self):
        """Test state property."""
        assert self.media_player.state is None
        self.media_player.update()
        assert STATE_ON == self.media_player.state
        # Toggling the mock's power flag must be reflected after update().
        self.blackbird.zones[3].power = False
        self.media_player.update()
        assert STATE_OFF == self.media_player.state

    def test_supported_features(self):
        """Test supported features property."""
        assert (
            SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
            == self.media_player.supported_features
        )

    def test_source(self):
        """Test source property."""
        assert self.media_player.source is None
        self.media_player.update()
        assert "one" == self.media_player.source

    def test_media_title(self):
        """Test media title property."""
        assert self.media_player.media_title is None
        self.media_player.update()
        assert "one" == self.media_player.media_title

    def test_source_list(self):
        """Test source list property."""
        # Note, the list is sorted!
        assert ["one", "two", "three"] == self.media_player.source_list

    def test_select_source(self):
        """Test source selection methods."""
        self.media_player.update()
        assert "one" == self.media_player.source
        self.media_player.select_source("two")
        assert 2 == self.blackbird.zones[3].av
        self.media_player.update()
        assert "two" == self.media_player.source
        # Trying to set unknown source: must be a no-op.
        self.media_player.select_source("no name")
        assert 2 == self.blackbird.zones[3].av
        self.media_player.update()
        assert "two" == self.media_player.source

    def test_turn_on(self):
        """Testing turning on the zone."""
        self.blackbird.zones[3].power = False
        self.media_player.update()
        assert STATE_OFF == self.media_player.state
        self.media_player.turn_on()
        assert self.blackbird.zones[3].power
        self.media_player.update()
        assert STATE_ON == self.media_player.state

    def test_turn_off(self):
        """Testing turning off the zone."""
        self.blackbird.zones[3].power = True
        self.media_player.update()
        assert STATE_ON == self.media_player.state
        self.media_player.turn_off()
        assert not self.blackbird.zones[3].power
        self.media_player.update()
        assert STATE_OFF == self.media_player.state
| |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
VDSM configuration plugin.
"""
import distutils.version
import gettext
import sys
import time
from otopi import plugin, util
from ovirt_engine_setup import config as osetupconfig
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine import vdcoption
from ovirt_engine_setup.engine_common import constants as oengcommcons
def _(m):
    """Look up message *m* in the 'ovirt-engine-setup' gettext domain."""
    return gettext.dgettext('ovirt-engine-setup', m)
@util.export
class Plugin(plugin.PluginBase):
    """
    VDSM configuration plugin.

    All-in-one setup: once the engine is reachable, creates a local data
    center and cluster, registers the local host as a VDSM host, waits for it
    to become operational and adds a localfs storage domain.
    """

    # Engine REST API polling: up to 60 attempts, 5 seconds apart.
    ENGINE_RETRIES = 60
    ENGINE_DELAY = 5
    # VDSM host activation polling: up to 600 attempts, 1 second apart.
    VDSM_RETRIES = 600
    VDSM_DELAY = 1

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        self._enabled = False

    def _waitVDSMHostUp(self, engine_api, host):
        """Poll the engine until *host* reports state 'up'.

        Returns True when the host became operational, False on timeout or
        when the host entered a failed state.
        """
        self.logger.info(_(
            'Waiting for VDSM host to become operational. '
            'This may take several minutes...'
        ))
        tries = self.VDSM_RETRIES
        isUp = False
        while not isUp and tries > 0:
            tries -= 1
            try:
                state = engine_api.hosts.get(host).status.state
            except Exception as exc:
                # sadly all ovirtsdk errors inherit only from Exception
                self.logger.debug(
                    'Error fetching host state: {error}'.format(
                        error=str(exc),
                    )
                )
                state = ''
            if 'failed' in state:
                self.logger.error(_(
                    'The VDSM host was found in a failed state. '
                    'Please check engine and bootstrap installation logs.'
                ))
                # tries = -1 distinguishes a hard failure from a timeout so
                # the "timed out" message below (tries == 0) is not logged.
                tries = -1  # Error state
            elif state == 'up':
                isUp = True
                self.logger.info(_('The VDSM Host is now operational'))
            else:
                self.logger.debug(
                    'VDSM host in {state} state'.format(
                        state=state,
                    )
                )
                # Reassure the user every 30 polls (~30 seconds).
                if tries % 30 == 0:
                    self.logger.info(_(
                        'Still waiting for VDSM host to become operational...'
                    ))
                time.sleep(self.VDSM_DELAY)
        if not isUp and tries == 0:
            self.logger.error(_(
                'Timed out while waiting for host to start. '
                'Please check the logs.'
            ))
        return isUp

    def _waitEngineUp(self):
        """Poll the engine REST API until it answers; return an SDK handle.

        Raises RuntimeError if the engine never becomes reachable.
        """
        self.logger.debug('Waiting Engine API response')
        tries = self.ENGINE_RETRIES
        isUp = False
        sdk = None
        while not isUp and tries > 0:
            tries -= 1
            try:
                # Now we are using the SDK to authenticate vs the API
                # to check if the engine is up.
                # Maybe in the future we can just rely on a
                # not authenticated health API URL
                sdk = self._ovirtsdk_api.API(
                    url='https://localhost:{port}/eayunos/api'.format(
                        port=self.environment[
                            oengcommcons.ConfigEnv.PUBLIC_HTTPS_PORT
                        ],
                    ),
                    username=self.environment[
                        oenginecons.ConfigEnv.ADMIN_USER
                    ],
                    password=self.environment[
                        oenginecons.ConfigEnv.ADMIN_PASSWORD
                    ],
                    insecure=True,
                )
                isUp = True
            except self._ovirtsdk_errors.RequestError:
                self.logger.debug(
                    'Cannot connect to engine',
                    exc_info=True,
                )
                time.sleep(self.ENGINE_DELAY)
        if not isUp:
            raise RuntimeError(_('Engine unreachable'))
        return sdk

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        """Seed the environment with default DC/cluster/host names."""
        self.environment.setdefault(
            oenginecons.AIOEnv.LOCAL_DATA_CENTER,
            oenginecons.AIODefaults.DEFAULT_LOCAL_DATA_CENTER
        )
        self.environment.setdefault(
            oenginecons.AIOEnv.LOCAL_CLUSTER,
            oenginecons.AIODefaults.DEFAULT_LOCAL_CLUSTER
        )
        self.environment.setdefault(
            oenginecons.AIOEnv.LOCAL_HOST,
            oenginecons.AIODefaults.DEFAULT_LOCAL_HOST
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        condition=lambda self: self.environment[
            oenginecons.AIOEnv.CONFIGURE
        ],
    )
    def _validation(self):
        """Import the oVirt SDK lazily; only needed when AIO is configured."""
        import ovirtsdk.api
        import ovirtsdk.xml
        import ovirtsdk.infrastructure.errors
        self._ovirtsdk_api = ovirtsdk.api
        self._ovirtsdk_xml = ovirtsdk.xml
        self._ovirtsdk_errors = ovirtsdk.infrastructure.errors

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: self.environment[
            oenginecons.AIOEnv.CONFIGURE
        ],
    )
    def _misc(self):
        """Request an Apache restart so the API is served before closeup."""
        self.environment[oengcommcons.ApacheEnv.NEED_RESTART] = True

    def _getSupportedClusterLevels(self):
        """Load VDSM's dsaversion module and return its cluster levels.

        Returns a (clusterLevels, raw_version_revision) tuple; sys.path is
        always restored even if loading fails.
        """
        self.logger.debug('Attempting to load the dsaversion vdsm module')
        savedPath = sys.path
        vSupportedClusterLevels = []
        raw_version_revision = ''
        try:
            sys.path.append(oenginecons.FileLocations.AIO_VDSM_PATH)
            dsaversion = util.loadModule(
                path=oenginecons.FileLocations.AIO_VDSM_PATH,
                name='dsaversion',
            )
            vSupportedClusterLevels = dsaversion.version_info['clusterLevels']
            raw_version_revision = dsaversion.raw_version_revision
        finally:
            sys.path = savedPath
        return vSupportedClusterLevels, raw_version_revision

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        name=oenginecons.Stages.AIO_CONFIG_VDSM,
        condition=lambda self: self.environment[
            oenginecons.AIOEnv.CONFIGURE
        ],
        after=(
            oenginecons.Stages.AIO_CONFIG_STORAGE,
            oenginecons.Stages.AIO_CONFIG_SSH,
            oengcommcons.Stages.CORE_ENGINE_START,
            oengcommcons.Stages.APACHE_RESTART,
        ),
    )
    def _closeup(self):
        """Create DC, cluster, host and storage domain via the engine API.

        The call order matters: data center -> cluster -> host -> (host up)
        -> storage domain.
        """
        self.logger.debug('Connecting to the Engine')
        engine_api = self._waitEngineUp()
        eSupportedClusterLevels = vdcoption.VdcOption(
            statement=self.environment[
                oenginecons.EngineDBEnv.STATEMENT
            ]
        ).getVdcOption(
            name='SupportedClusterLevels'
        )
        self.logger.debug(
            'engine SupportedClusterLevels [{levels}], '
            'PACKAGE_VERSION [{pv}],'.format(
                levels=eSupportedClusterLevels,
                pv=osetupconfig.PACKAGE_VERSION,
            )
        )
        vSupportedClusterLevels, raw_version_revision = \
            self._getSupportedClusterLevels()
        self.logger.debug(
            'VDSM SupportedClusterLevels [{levels}], '
            'VDSM VERSION [{rv}],'.format(
                levels=vSupportedClusterLevels,
                rv=raw_version_revision,
            )
        )
        # Pick the highest cluster level supported by BOTH engine and VDSM.
        try:
            v = max(
                distutils.version.LooseVersion(vs).version
                for vs in (
                    set(eSupportedClusterLevels.split(',')) &
                    set(vSupportedClusterLevels)
                )
            )
        except ValueError:
            # max() on an empty intersection raises ValueError.
            self.logger.debug('exception', exc_info=True)
            raise RuntimeError(_(
                'Failed to find a ClusterLevel supported '
                'by both engine and VDSM'
            ))
        engine_version = self._ovirtsdk_xml.params.Version(
            major=v[0],
            minor=v[1],
        )
        self.logger.debug('Creating the local data center')
        engine_api.datacenters.add(
            self._ovirtsdk_xml.params.DataCenter(
                name=self.environment[oenginecons.AIOEnv.LOCAL_DATA_CENTER],
                storage_type='localfs',
                version=engine_version,
            )
        )
        self.logger.debug(
            'Creating the local cluster into the local data center'
        )
        engine_api.clusters.add(
            self._ovirtsdk_xml.params.Cluster(
                name=self.environment[oenginecons.AIOEnv.LOCAL_CLUSTER],
                cpu=self._ovirtsdk_xml.params.CPU(
                    id=self.environment[oenginecons.AIOEnv.VDSM_CPU]
                ),
                data_center=engine_api.datacenters.get(
                    self.environment[oenginecons.AIOEnv.LOCAL_DATA_CENTER]
                ),
                version=engine_version
            )
        )
        self.logger.debug('Adding the local host to the local cluster')
        # At this stage sshd is already running
        engine_api.hosts.add(
            self._ovirtsdk_xml.params.Host(
                name=self.environment[oenginecons.AIOEnv.LOCAL_HOST],
                address=self.environment[osetupcons.ConfigEnv.FQDN],
                reboot_after_installation=False,
                override_iptables=False,
                cluster=engine_api.clusters.get(
                    self.environment[oenginecons.AIOEnv.LOCAL_CLUSTER]
                ),
                ssh=self._ovirtsdk_xml.params.SSH(
                    authentication_method='publickey',
                    port=self.environment[oenginecons.AIOEnv.SSHD_PORT],
                ),
            )
        )
        if not self._waitVDSMHostUp(
            engine_api=engine_api,
            host=self.environment[oenginecons.AIOEnv.LOCAL_HOST],
        ):
            # Best-effort: a missing storage domain is recoverable manually,
            # so warn instead of failing the whole setup.
            self.logger.warning(_(
                'Local storage domain not added because '
                'the VDSM host was not up. Please add it manually.'
            ))
        else:
            self.logger.debug('Adding local storage domain')
            storage = self._ovirtsdk_xml.params.Storage(
                path=self.environment[
                    oenginecons.AIOEnv.STORAGE_DOMAIN_DIR
                ].rstrip('/'),
            )
            storage.set_type('localfs')
            storage_domain = self._ovirtsdk_xml.params.StorageDomain(
                name=self.environment[oenginecons.AIOEnv.STORAGE_DOMAIN_NAME],
                data_center=engine_api.datacenters.get(
                    self.environment[oenginecons.AIOEnv.LOCAL_DATA_CENTER]
                ),
                storage_format='v3',
                host=engine_api.hosts.get(
                    self.environment[oenginecons.AIOEnv.LOCAL_HOST]
                ),
                storage=storage
            )
            storage_domain.set_type('data')
            engine_api.storagedomains.add(storage_domain)
# vim: expandtab tabstop=4 shiftwidth=4
| |
"""
.. module:: crf
:synopsis: conditional random field
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.sparse as sparse
import model.utils as utils
class CRF_L(nn.Module):
    """Conditional Random Field (CRF) layer, Ma et al. 2016 variant.

    Has more parameters than CRF_S: a single linear projection emits a full
    (from_tag, to_tag) score table per token.

    args:
        hidden_dim : input dim size
        tagset_size: target_set_size
        if_bias: whether allow bias in linear trans
    """

    def __init__(self, hidden_dim, tagset_size, if_bias=True):
        super(CRF_L, self).__init__()
        self.tagset_size = tagset_size
        self.hidden2tag = nn.Linear(
            hidden_dim, tagset_size * tagset_size, bias=if_bias
        )

    def rand_init(self):
        """Randomly (re-)initialize the linear projection."""
        utils.init_linear(self.hidden2tag)

    def forward(self, feats):
        """Map features to CRF score tables.

        args:
            feats (batch_size, seq_len, hidden_dim): input from previous layers
        return:
            scores of shape ((batch_size * seq_len), tag_size, tag_size)
        """
        flat_scores = self.hidden2tag(feats)
        return flat_scores.view(-1, self.tagset_size, self.tagset_size)
class CRF_S(nn.Module):
    """Conditional Random Field (CRF) layer, Lample et al. 2016 variant.

    Has fewer parameters than CRF_L: per-tag emission scores from a small
    linear layer plus one shared tag-to-tag transition matrix.

    args:
        hidden_dim: input dim size
        tagset_size: target_set_size
        if_bias: whether allow bias in linear trans
    """

    def __init__(self, hidden_dim, tagset_size, if_bias=True):
        super(CRF_S, self).__init__()
        self.tagset_size = tagset_size
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size, bias=if_bias)
        self.transitions = nn.Parameter(
            torch.Tensor(tagset_size, tagset_size)
        )

    def rand_init(self):
        """Randomly initialize the emission layer and zero the transitions."""
        utils.init_linear(self.hidden2tag)
        self.transitions.data.zero_()

    def forward(self, feats):
        """Combine emissions and transitions into per-token score tables.

        args:
            feats (batch_size, seq_len, hidden_dim): input from previous layers
        return:
            scores of shape ((batch_size * seq_len), tag_size, tag_size),
            where scores[i, j, k] = emission(i, j) + transition(j, k)
        """
        size = self.tagset_size
        emissions = self.hidden2tag(feats).view(-1, size, 1)
        count = emissions.size(0)
        broadcast_emit = emissions.expand(count, size, size)
        broadcast_trans = self.transitions.view(1, size, size).expand(
            count, size, size
        )
        return broadcast_emit + broadcast_trans
class CRFRepack:
    """Batch re-packer for the word level model.

    Transposes batch-first tensors into the (seq_len, batch) layout the CRF
    code expects, optionally moving them to the GPU.

    args:
        tagset_size: target_set_size
        if_cuda: whether use GPU
    """

    def __init__(self, tagset_size, if_cuda):
        self.tagset_size = tagset_size
        self.if_cuda = if_cuda

    def repack_vb(self, feature, target, mask):
        """Repack inputs for the viterbi loss.

        args:
            feature, target, mask: each (Seq_len, Batch_size)
        return:
            feature (Seq_len, Batch_size), target (Seq_len, Batch_size, 1),
            mask (Seq_len, Batch_size)
        """
        if self.if_cuda:
            return (
                feature.transpose(0, 1).cuda(),
                target.transpose(0, 1).unsqueeze(2).cuda(),
                mask.transpose(0, 1).cuda(),
            )
        return (
            feature.transpose(0, 1),
            target.transpose(0, 1).contiguous().unsqueeze(2),
            mask.transpose(0, 1).contiguous(),
        )

    def repack_gd(self, feature, target, current):
        """Repack inputs for the greedy loss.

        args:
            feature, target, current: each (Seq_len, Batch_size)
        return:
            feature (Seq_len, Batch_size), target (Seq_len * Batch_size),
            current (Seq_len * Batch_size, 1, 1)
        """
        if self.if_cuda:
            return (
                feature.transpose(0, 1).cuda(),
                target.transpose(0, 1).cuda().view(-1),
                current.transpose(0, 1).cuda().view(-1, 1, 1),
            )
        return (
            feature.transpose(0, 1),
            target.transpose(0, 1).contiguous().view(-1),
            current.transpose(0, 1).contiguous().view(-1, 1, 1),
        )

    def convert_for_eval(self, target):
        """Map padded training labels back to the original label range.

        args:
            target: input labels used in training
        return:
            output labels used in test
        """
        return target % self.tagset_size
class CRFRepack_WC:
    """Packer for model with char-level and word-level

    args:
        tagset_size: target_set_size
        if_cuda: whether use GPU
    """

    def __init__(self, tagset_size, if_cuda):
        self.tagset_size = tagset_size
        self.if_cuda = if_cuda

    def repack_vb(self, fc_feature, fc_position, bc_feature, bc_position, word_feature, target, mask, batch_len):
        """packer for viterbi loss

        Trims every tensor to the longest real char/word length in the batch
        (dropping all-padding columns) before transposing to seq-first.

        args:
            fc_feature (Char_Seq_len, Batch_size) : forward_char input feature
            fc_position (Word_Seq_len, Batch_size) : forward_char input position
            bc_feature (Char_Seq_len, Batch_size) : backward_char input feature
            bc_position (Word_Seq_len, Batch_size) : backward_char input position
            word_feature (Word_Seq_len, Batch_size) : input word feature
            target (Seq_len, Batch_size) : output target
            mask (Word_Seq_len, Batch_size) : padding mask
            batch_len (Batch_size, 2) : length of instances in one batch
                (column 0: char length, column 1: word length)
        return:
            f_f (Char_Reduced_Seq_len, Batch_size), f_p (Word_Reduced_Seq_len, Batch_size), b_f (Char_Reduced_Seq_len, Batch_size), b_p (Word_Reduced_Seq_len, Batch_size), w_f (size Word_Seq_Len, Batch_size), target (Reduced_Seq_len, Batch_size), mask (Word_Reduced_Seq_len, Batch_size)
        """
        # Max char length (mlen[0]) and max word length (mlen[1]) in batch.
        mlen, _ = batch_len.max(0)
        mlen = mlen.squeeze()
        ocl = bc_feature.size(1)
        if self.if_cuda:
            fc_feature = fc_feature[:, 0:mlen[0]].transpose(0, 1).cuda()
            fc_position = fc_position[:, 0:mlen[1]].transpose(0, 1).cuda()
            # Backward-char features are right-aligned, so trim from the left
            # and shift positions by the amount of padding removed.
            bc_feature = bc_feature[:, -mlen[0]:].transpose(0, 1).cuda()
            bc_position = (bc_position[:, 0:mlen[1]] - ocl + mlen[0]).transpose(0, 1).cuda()
            word_feature = word_feature[:, 0:mlen[1]].transpose(0, 1).cuda()
            tg_v = target[:, 0:mlen[1]].transpose(0, 1).unsqueeze(2).cuda()
            mask_v = mask[:, 0:mlen[1]].transpose(0, 1).cuda()
        else:
            fc_feature = fc_feature[:, 0:mlen[0]].transpose(0, 1)
            fc_position = fc_position[:, 0:mlen[1]].transpose(0, 1)
            bc_feature = bc_feature[:, -mlen[0]:].transpose(0, 1)
            bc_position = (bc_position[:, 0:mlen[1]] - ocl + mlen[0]).transpose(0, 1)
            word_feature = word_feature[:, 0:mlen[1]].transpose(0, 1)
            tg_v = target[:, 0:mlen[1]].transpose(0, 1).unsqueeze(2)
            mask_v = mask[:, 0:mlen[1]].transpose(0, 1).contiguous()
        return fc_feature, fc_position, bc_feature, bc_position, word_feature, tg_v, mask_v

    def convert_for_eval(self, target):
        """convert for eval

        args:
            target: input labels used in training
        return:
            output labels used in test
        """
        return target % self.tagset_size
class CRFLoss_gd(nn.Module):
    r"""loss for greedy decode loss, i.e., although its for CRF Layer, we calculate the loss as

    .. math::
        \sum_{j=1}^n \log (p(\hat{y}_{j+1}|z_{j+1}, \hat{y}_{j}))

    instead of

    .. math::
        \sum_{j=1}^n \log (\phi(\hat{y}_{j-1}, \hat{y}_j, \mathbf{z}_j)) - \log (\sum_{\mathbf{y}' \in \mathbf{Y}(\mathbf{Z})} \prod_{j=1}^n \phi(y'_{j-1}, y'_j, \mathbf{z}_j) )

    args:
        tagset_size: target_set_size
        start_tag: ind for <start> (kept for interface parity with CRFLoss_vb; unused)
        end_tag: ind for <pad> (kept for interface parity with CRFLoss_vb; unused)
        average_batch: whether average the loss among batch
    """

    def __init__(self, tagset_size, start_tag, end_tag, average_batch=True):
        super(CRFLoss_gd, self).__init__()
        self.tagset_size = tagset_size
        self.average_batch = average_batch
        # `size_average` is deprecated (and dropped in modern PyTorch);
        # `reduction` expresses the identical choice: mean over instances
        # when averaging the batch, plain sum otherwise.
        self.crit = nn.CrossEntropyLoss(
            reduction='mean' if self.average_batch else 'sum'
        )

    def forward(self, scores, target, current):
        """Compute the greedy (locally normalized) loss.

        args:
            scores (Word_Seq_len, Batch_size, target_size_from, target_size_to): crf scores
            target: golden tag indices, flattened to (ins_num,)
            current (ins_num, 1, 1): previous-tag index for every instance
        return:
            crf greedy loss (scalar tensor)
        """
        ins_num = current.size(0)
        # Select, for each instance, the score row conditioned on its
        # current (previous) tag, yielding per-tag logits.
        current = current.expand(ins_num, 1, self.tagset_size)
        scores = scores.view(ins_num, self.tagset_size, self.tagset_size)
        current_score = torch.gather(scores, 1, current).squeeze()
        return self.crit(current_score, target)
class CRFLoss_vb(nn.Module):
    r"""loss for viterbi decode

    Negative log-likelihood of the gold path under the CRF:

    .. math::
        \sum_{j=1}^n \log (\phi(\hat{y}_{j-1}, \hat{y}_j, \mathbf{z}_j)) - \log (\sum_{\mathbf{y}' \in \mathbf{Y}(\mathbf{Z})} \prod_{j=1}^n \phi(y'_{j-1}, y'_j, \mathbf{z}_j) )

    args:
        tagset_size: target_set_size
        start_tag: ind for <start>
        end_tag: ind for <pad>
        average_batch: whether average the loss among batch
    """

    def __init__(self, tagset_size, start_tag, end_tag, average_batch=True):
        super(CRFLoss_vb, self).__init__()
        self.tagset_size = tagset_size
        self.start_tag = start_tag
        self.end_tag = end_tag
        self.average_batch = average_batch

    def forward(self, scores, target, mask):
        """Compute partition - gold-path energy (the CRF NLL).

        args:
            scores (seq_len, bat_size, target_size_from, target_size_to) : crf scores
            target (seq_len, bat_size, 1) : golden state, indexing the
                flattened (from * tagset_size + to) score table
            mask (size seq_len, bat_size) : mask for padding
                (assumed a byte/bool tensor usable by masked_select — TODO confirm)
        return:
            loss
        """
        # calculate batch size and seq len
        seq_len = scores.size(0)
        bat_size = scores.size(1)
        # calculate sentence score: gather the gold (from, to) score at every
        # step, then keep only non-padding positions.
        tg_energy = torch.gather(scores.view(seq_len, bat_size, -1), 2, target).view(seq_len, bat_size)  # seq_len * bat_size
        tg_energy = tg_energy.masked_select(mask).sum()
        # calculate forward partition score
        # build iter
        seq_iter = enumerate(scores)
        # the first score should start with <start>
        _, inivalues = seq_iter.__next__()  # bat_size * from_target_size * to_target_size
        # only need start from start_tag
        partition = inivalues[:, self.start_tag, :].clone()  # bat_size * to_target_size
        # iter over last scores
        for idx, cur_values in seq_iter:
            # previous to_target is current from_target
            # partition: previous results log(exp(from_target)), #(batch_size * from_target)
            # cur_values: bat_size * from_target * to_target
            cur_values = cur_values + partition.contiguous().view(bat_size, self.tagset_size, 1).expand(bat_size, self.tagset_size, self.tagset_size)
            cur_partition = utils.log_sum_exp(cur_values, self.tagset_size)
            # (bat_size * from_target * to_target) -> (bat_size * to_target)
            # partition = utils.switch(partition, cur_partition, mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size)).view(bat_size, -1)
            # In-place update: padded positions keep the old partition value,
            # real positions take the freshly computed one.
            mask_idx = mask[idx, :].view(bat_size, 1).expand(bat_size, self.tagset_size)
            partition.masked_scatter_(mask_idx, cur_partition.masked_select(mask_idx))  # 0 for partition, 1 for cur_partition
        # only need end at end_tag
        partition = partition[:, self.end_tag].sum()
        # average = mask.sum()
        # average_batch
        if self.average_batch:
            loss = (partition - tg_energy) / bat_size
        else:
            loss = (partition - tg_energy)
        return loss
class CRFDecode_vb():
    """Batch-mode viterbi decode

    args:
        tagset_size: target_set_size
        start_tag: ind for <start>
        end_tag: ind for <pad>
        average_batch: whether average the loss among batch
    """

    def __init__(self, tagset_size, start_tag, end_tag, average_batch=True):
        self.tagset_size = tagset_size
        self.start_tag = start_tag
        self.end_tag = end_tag
        self.average_batch = average_batch

    def decode(self, scores, mask):
        """Find the optimal path with viterbe decode

        args:
            scores (size seq_len, bat_size, target_size_from, target_size_to) : crf scores
            mask (seq_len, bat_size) : mask for padding
                (assumed a 0/1 byte tensor; inverted below — TODO confirm dtype)
        return:
            decoded sequence (size seq_len-1, bat_size)
        """
        # calculate batch size and seq len
        seq_len = scores.size(0)
        bat_size = scores.size(1)
        # Invert: after this, 1 marks PADDING positions.
        mask = 1 - mask
        # One decoded tag per transition, hence seq_len - 1 rows.
        decode_idx = torch.LongTensor(seq_len-1, bat_size)
        # calculate forward score and checkpoint
        # build iter
        seq_iter = enumerate(scores)
        # the first score should start with <start>
        _, inivalues = seq_iter.__next__()  # bat_size * from_target_size * to_target_size
        # only need start from start_tag
        forscores = inivalues[:, self.start_tag, :]  # bat_size * to_target_size
        back_points = list()
        # iter over last scores
        for idx, cur_values in seq_iter:
            # previous to_target is current from_target
            # partition: previous results log(exp(from_target)), #(batch_size * from_target)
            # cur_values: bat_size * from_target * to_target
            cur_values = cur_values + forscores.contiguous().view(bat_size, self.tagset_size, 1).expand(bat_size, self.tagset_size, self.tagset_size)
            # Best predecessor per target tag; remember it for backtracking.
            forscores, cur_bp = torch.max(cur_values, 1)
            # Padded steps are forced to point at end_tag so the backtrace
            # passes straight through padding.
            cur_bp.masked_fill_(mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size), self.end_tag)
            back_points.append(cur_bp)
        # Backtrack from the end_tag at the last step.
        pointer = back_points[-1][:, self.end_tag]
        decode_idx[-1] = pointer
        for idx in range(len(back_points)-2, -1, -1):
            back_point = back_points[idx]
            index = pointer.contiguous().view(-1, 1)
            pointer = torch.gather(back_point, 1, index).view(-1)
            decode_idx[idx] = pointer
        return decode_idx
| |
#! /usr/bin/env python
#
# This tool is distributed under a BSD licence. A copy of this
# should have been included with this file.
#
# Copyright (c) 2007, Martyn Ruks
#
# This tool is designed for the purpose of performing security
# testing only and is not intended to be used for unlawful
# activities
#
# This tool can be used to execute a PCF Inquire Q Manager command.
# You must have the appropriate level of access to issue this.
#
# If you wish to use a client certificate with this tool uncomment
# the relevant lines in this file and add location of certificate
# and private key. The script will let you enter the passphrase at
# runtime if required.
#
# Author: Martyn Ruks
# Version: 0.0.5
#
# Further information: martyn ({dot}) ruks <(at)> mwrinfosecurity {(dot)} com
#
# Add various required functions
import string
import optparse
import time
import binascii
import socket
import sys
import time
from OpenSSL import SSL
from shared_classes import *
from mq_strings import *
from struct import *
from optparse import OptionParser
#
# Extend optparse to make target options required
#
class OptionParser(optparse.OptionParser):
    """optparse.OptionParser extended to treat selected options as mandatory."""

    def check_required(self, opt):
        """Abort with a parser error unless option *opt* was supplied."""
        opt_obj = self.get_option(opt)
        supplied = getattr(self.values, opt_obj.dest)
        if supplied is None:
            self.error("%s option not supplied" % opt_obj)
#
# Command Line Options
#
parser = OptionParser()
parser.add_option("-t", "--target", action="store", dest="target", metavar="TARGET", help="Target IP address or hostname (required)")
parser.add_option("-p", "--port", action="store", dest="port", type= "int", metavar="PORT", help="Port to connect to (required)")
parser.add_option("-c", "--channel", action="store", dest="channel", metavar="CHANNEL", help="Channel to connect to (defaults to SYSTEM.DEF.SVRCONN)")
parser.add_option("-s", "--ssl", action="store", dest="ssl", metavar="SSL", type="choice", default="0", choices=["0","1"], help="Use SSL 1=yes 0=no cipher and SSL version must be specified (defaults to no)")
parser.add_option("-v", "--verbose", action="store", dest="verbose", metavar="VERBOSE", default="0", choices=["0","1"], help="Verbose output 1=yes 0=no (defaults to no)")
parser.add_option("-i", "--cipher", action="store", dest="cipher", metavar="CIPHER", help="SSL cipher to use for connection (defaults to NULL-SHA)")
parser.add_option("-e", "--version", action="store", dest="version", metavar="VERSION", type="choice", default="0", choices=["0","1"], help="SSL version to use for connection 0=SSLv3 1=TLSv1 (defaults to SSLv3)")
# Get the command line options
(options, args) = parser.parse_args()
# Mandate the use of a target and port; check_required aborts via
# parser.error() when the option is missing.
parser.check_required("-t")
parser.check_required("-p")
# Copy each option into a plain module-level variable, applying defaults.
if options.target:
    target = options.target
else:
    sys.exit(0)
if options.port:
    port = options.port
else:
    sys.exit(0)
if options.channel:
    channel = options.channel
else:
    channel = 'SYSTEM.DEF.SVRCONN'
# "0"/"1" string choices are converted to ints here.
if options.verbose:
    verbose = int(options.verbose)
else:
    verbose = 0
if options.ssl:
    ssl = int(options.ssl)
else:
    ssl = 0
if options.cipher:
    cipher = options.cipher
else:
    cipher = 'NULL-SHA'
if options.version:
    version = int(options.version)
else:
    version = 0
# Set up the MQ strings object
mq = build_packets()
# Prepare the first handshake string
send_string = mq.get_handshake(channel)
# Set up the connection to our target: plain TCP socket, or a pyOpenSSL
# connection when -s 1 was requested.
if ssl == 1:
    if version == 0:
        ctx = SSL.Context(SSL.SSLv3_METHOD)
    else:
        ctx = SSL.Context(SSL.TLSv1_METHOD)
    # verify_cb presumably comes from the shared_classes wildcard import
    # — TODO confirm.
    ctx.set_verify(SSL.VERIFY_NONE, verify_cb) # Don't need a certificate
    ctx.set_cipher_list(cipher)
    # Uncomment these to use a client cert
    #ctx.use_privatekey_file('server.key')
    #ctx.use_certificate_file('server.crt')
    outgoing = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    try:
        outgoing.connect( ( target, port ) )
    except Exception:
        print 'Error, cannot connect to host', target, 'on port '+str(port)+' using SSL'
        sys.exit(0)
    else:
        print 'SSL Connection suceeded to target host', target, 'on port '+str(port)
else:
    outgoing = socket.socket ( socket.AF_INET,socket.SOCK_STREAM )
    try:
        outgoing.connect ( ( target, port ) )
    except Exception:
        print 'Error, cannot connect to host', target, 'on port '+str(port)
        sys.exit(0)
    else:
        print 'Connection suceeded to target host', target, 'on port '+str(port)
# --- MQ wire protocol exchange: handshake, connect, open queues, then issue
# --- the PCF Inquire Queue Manager command and read back its replies.
# Send the first handshake string
outgoing.send(send_string)
# Receive the response
data = read_data(outgoing,ssl)
# Print Packet Response
print "Received Handshake Response"
# Check its MQ
check_mq(data,verbose)
# Get the queue manager name
queue_manager = check_status(data,verbose)
if queue_manager == 0:
    check_return_code(queue_manager, outgoing, "", 1)
    sys.exit(0)
# Set the flags, message size and heartbeat for the next communication
# NOTE(review): the byte offsets below assume a fixed layout of the
# handshake reply — presumably version-dependent; confirm against target MQ.
flags = string.atoi(str(binascii.hexlify(data[33:34])),16)
heartbeat = string.atol(str(binascii.hexlify(data[124:128])),16)
message_size = string.atol(str(binascii.hexlify(data[44:48])),16)
# Send the second handshake string
send_string = mq.get_handshake2(channel, flags, message_size, heartbeat)
# Send the second handshake string
outgoing.send(send_string)
# Print Packet Response
print "Received 2nd Handshake Response"
# Receive the response
data = read_data(outgoing,ssl)
flags = check_handshake(data)
# If the handshake is not complete yet, repeat it once with the flags the
# server handed back.
if flags != "complete":
    send_string = mq.get_handshake2(channel, flags, message_size, heartbeat)
    outgoing.send(send_string)
    data = read_data(outgoing,ssl)
    check_handshake(data)
# NEED TO ADD BETTER HANDLING IF 3rd HANDSHAKE PACKET IS NEEDED
# Send the connection string
send_string = mq.get_connection(queue_manager)
outgoing.send(send_string)
# Print Packet Response
print "Received Connection Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
send_string = mq.get_open('', 5, '', 0x20)
outgoing.send(send_string)
# Print Packet Response
print "Received Open Queue Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
# Object handle returned by the open call, reused for inquire requests.
object_handle = string.atol(str(binascii.hexlify(data[40:44])),16)
send_string = mq.get_inquire(object_handle, 1, 0, 48, 2003)
outgoing.send(send_string)
# Print Packet Response
print "Received Inquire Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
# Open the admin command queue that will receive the PCF command.
send_string = mq.get_open('', 1, 'SYSTEM.ADMIN.COMMAND.QUEUE', 0x10)
outgoing.send(send_string)
# Print Packet Response
print "Received Open Admin Queue Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
object_handle_admin = string.atol(str(binascii.hexlify(data[40:44])),16)
dynamic_queue_name = get_dynamic_queue_name(data)
object_queue_manager = get_object_queue_manager_name(data)
# Open a model queue; the resulting dynamic queue collects the replies.
send_string = mq.get_open('', 1, 'SYSTEM.DEFAULT.MODEL.QUEUE', 0x04)
outgoing.send(send_string)
# Print Packet Response
print "Received Open Model Queue Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
dynamic_queue_name = get_dynamic_queue_name(data)
object_queue_manager = get_object_queue_manager_name(data)
object_handle_model = string.atol(str(binascii.hexlify(data[40:44])),16)
send_string = mq.get_inquire(object_handle, 1, 1, 48, 31)
outgoing.send(send_string)
# Print Packet Response
print "Received 2nd Inquire Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
# Issue the PCF Inquire Queue Manager command itself.
send_string = mq.get_pcf_inquire_qmgr(object_handle_admin, dynamic_queue_name, object_queue_manager)
outgoing.send(send_string)
# Print Packet Response
print "Received Inquire Queue Manager Response"
# Receive the response
data = read_data(outgoing,ssl)
# Check the status of the response
check_reason(data,verbose)
# Poll the dynamic reply queue until get_queue_data_loop reports completion.
send_string = mq.get_get(object_handle_model, '')
loop = 0
channel_number = 1
while loop < 1:
    outgoing.send(send_string)
    time.sleep(0.5)
    # Receive the response
    data = read_data(outgoing,ssl)
    # Check the status of the response
    check_reason(data,verbose)
    # Get the data from the packet
    loop = get_queue_data_loop(data, channel_number)
    channel_number = channel_number+1
outgoing.close()
| |
#!/usr/bin/env python
import sys, os
import shutil
import subprocess
import multiprocessing
from localconfig import *
from pconsc import predict_all
from folding.rosetta import prepare_input
from folding.rosetta import fold
from folding.rosetta import extract
sys.stderr.write("""
****************************************************************************
PconsFold : Improved contact predictions improve protein models
****************************************************************************
If you use PconsFold for protein structure prediction please cite:
"PconsFold: Improved contact predictions improve protein models."
M Michel, S Hayat, MJ Skwark, C Sander, DS Marks and A Elofsson.
-----------------------------------------------------------------------------
""")
### parse parameters
if len(sys.argv) < 4:
sys.stderr.write('Usage: ./%s [-c n_cores] [-n n_decoys] [-m n_models]\n' % sys.argv[0].strip('./'))
sys.stderr.write(' [-f factor] [--norelax] [--nohoms]\n')
sys.stderr.write(' <hhblits db> <jackhmmer db> <sequence file>\n')
sys.exit(0)
sys.stderr.write('\nTesting dependencies...\n')
if rosetta_flag:
### Check Rosetta ###
try:
f = open(os.devnull, "w")
x = subprocess.call([rosetta_make_fragments, '-h'], stderr=f, stdout=f)
f.close()
pass
except:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('There might be something wrong with your Rosetta installation in:\n')
sys.stderr.write(rosettadir + '\n')
sys.stderr.write('Please check the path to the Rosetta root directory\n')
sys.stderr.write('and use Rosetta 3.5 or higher (weekly).\n')
sys.stderr.write('Please ensure that the following Rosetta executable\n')
sys.stderr.write('is present and working:\n')
sys.stderr.write(rosetta_make_fragments + '\n')
sys.exit(1)
try:
f = open(os.devnull, "w")
x = subprocess.call([rosetta_abinitiorelax, '-h'], stderr=f, stdout=f)
f.close()
pass
except:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('There might be something wrong with your Rosetta installation in:\n')
sys.stderr.write(rosettadir + '\n')
sys.stderr.write('Please check the path to the Rosetta root directory\n')
sys.stderr.write('and use Rosetta 3.5 or higher (weekly).\n')
sys.stderr.write('Please ensure that the following Rosetta executable\n')
sys.stderr.write('is present and working:\n')
sys.stderr.write(rosetta_abinitiorelax + '\n')
sys.exit(1)
try:
f = open(os.devnull, "w")
x = subprocess.call([rosetta_extract, '-h'], stderr=f, stdout=f)
f.close()
pass
except:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('There might be something wrong with your Rosetta installation in:\n')
sys.stderr.write(rosettadir + '\n')
sys.stderr.write('Please check the path to the Rosetta root directory\n')
sys.stderr.write('and use Rosetta 3.5 or higher (weekly).\n')
sys.stderr.write('Please ensure that the following Rosetta executable\n')
sys.stderr.write('is present and working:\n')
sys.stderr.write(rosetta_extract + '\n')
sys.exit(1)
try:
f = open(os.devnull, "w")
x = subprocess.call([rosetta_relax, '-h'], stderr=f, stdout=f)
f.close()
pass
except:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('There might be something wrong with your Rosetta installation in:\n')
sys.stderr.write(rosettadir + '\n')
sys.stderr.write('Please check the path to the Rosetta root directory\n')
sys.stderr.write('and use Rosetta 3.5 or higher (weekly).\n')
sys.stderr.write('Please ensure that the following Rosetta executable\n')
sys.stderr.write('is present and working:\n')
sys.stderr.write(rosetta_relax + '\n')
sys.exit(1)
else:
### Check Jackhmmer ###
try:
f = open(os.devnull, "w")
x = subprocess.call([jackhmmer, '-h'], stdout=f, stderr=f)
f.close()
except Exception as e:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('Chosen jackhmmer binary does not seem to work!\n')
sys.exit(1)
### Check HHblits ###
try:
f = open(os.devnull, "w")
x = subprocess.call([hhblits, '-h'], stderr=f, stdout=f)
f.close()
pass
except:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('Chosen HHblits binary does not seem to work!\n')
sys.exit(1)
### Check PSICOV ###
try:
f = open(os.devnull, "w")
x = subprocess.call([psicov, root + '/extras/psicovtest.fas'], stdout=f, stderr=f)
f.close()
except Exception as e:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('Chosen PSICOV binary does not seem to work!\n')
sys.exit(1)
if x == 255 and not psicovfail:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('Your version of PSICOV refuses to handle low-complexity alignments.\n')
sys.stderr.write('We recommend patching the PSICOV code to allow this. See 00README\n')
sys.stderr.write('If you _really_ do not want to do that, please change psicovfail flag in \n')
sys.stderr.write(os.path.abspath(sys.argv[0]) + ' to True.\n')
sys.stderr.write('This will (most probably) affect the prediction performance.\n')
sys.exit(1)
### Check plmDCA ###
if plmdca:
try:
f = open(os.devnull, "w")
x = subprocess.call([plmdca, '-h'], stdout=f, stderr=f)
f.close()
except Exception as e:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('Chosen plmdca binary does not seem to work!\n')
sys.exit(1)
elif matlab:
try:
f = open(os.devnull, "w")
x = subprocess.call([matlab, '-h'], stdout=f, stderr=f)
f.close()
except:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('Chosen MATLAB binary does not seem to work!\n')
sys.stderr.write('You can get MCR \n')
sys.stderr.write('http://www.mathworks.se/products/compiler/mcr/\n')
sys.exit(1)
else:
sys.stderr.write('*****************\n ERROR!\n*****************\n\n')
sys.stderr.write('You must set one of plmdca or matlab in localconfig.py!\n')
sys.exit(1)
sys.stderr.write('Dependencies OK.\n')
nohoms_flag = False
relax_flag = True
factor = 1.0
n_models = 10
n_decoys = 2000
if '-c' in sys.argv:
idx = sys.argv.index('-c')
try:
n_cores = int(sys.argv[idx+1])
except:
print 'Number of cores -c must be an integer, %r is not. Default is %s.' % (sys.argv[idx+1], n_cores)
sys.exit(1)
del sys.argv[idx]
del sys.argv[idx]
if '-n' in sys.argv:
idx = sys.argv.index('-n')
try:
n_decoys = int(sys.argv[idx+1])
except:
print 'Number of decoys -n must be an integer, %r is not. Default is %s.' % (sys.argv[idx+1], n_decoys)
sys.exit(1)
del sys.argv[idx]
del sys.argv[idx]
if '-m' in sys.argv:
idx = sys.argv.index('-m')
try:
n_models = int(sys.argv[idx+1])
except:
print 'Number of models to extract -m must be an integer, %r is not. Default is %s.' % (sys.argv[idx+1], n_models)
sys.exit(1)
del sys.argv[idx]
del sys.argv[idx]
if '-f' in sys.argv:
idx = sys.argv.index('-f')
try:
factor = int(sys.argv[idx+1])
except:
print 'Factor of sequence length (determining number of constraints to be used during folding) -f must be float, %r is not. Default is %s.' % (sys.argv[idx+1], factor)
sys.exit(1)
del sys.argv[idx]
del sys.argv[idx]
if '--nohoms' in sys.argv:
idx = sys.argv.index('--nohoms')
nohoms_flag = True
del sys.argv[idx]
if '--norelax' in sys.argv:
idx = sys.argv.index('--norelax')
relax_flag = False
del sys.argv[idx]
hhblitsdb = os.path.abspath(sys.argv[1])
jackhmmerdb = os.path.abspath(sys.argv[2])
seqfile = os.path.abspath(sys.argv[3])
contactfile = seqfile + '.pconsc.out'
constraintfile = contactfile + '-' + str(factor) + '.constraints'
shutil.copyfile(root + '../localconfig.py', root + 'localconfig.py')
shutil.copyfile(root + '../localconfig.py', root + '../folding/rosetta/localconfig.py')
predict_all.main(hhblitsdb, jackhmmerdb, seqfile, n_cores=n_cores)
rundir_postfix = 'rosetta'
prepare_input.main(seqfile, contactfile, factor=factor, nohoms_flag=nohoms_flag)
fold.main(seqfile, constraintfile, n_cores=n_cores, n_decoys=n_decoys, rundir_postfix=rundir_postfix)
extract.main(seqfile, n_cores=n_cores, n_models=n_models, relax_flag=relax_flag, rundir_postfix=rundir_postfix)
### collect the results in seperate folder
call(['mkdir', rundir + 'rosetta_results'])
call('mv %s/%s/*.run_*.*.pdb %s/rosetta_results' % (rundir, rundir_postfix, rundir), shell=True)
if os.path.exists(rundir + 'native.pdb'):
call(['mv', rundir + rundir_postfix + '/TMscores.txt', rundir + 'rosetta_results'])
| |
import tensorflow as tf
def triplet_loss(labels, embeddings, alpha, target, labels_size, target_size, penalize_ratio, squared=True, epsilon=1e-8, name = 'batch_all_triplet_loss'):
    """Build the batch-all triplet loss over a batch of embeddings.

    We generate all the valid triplets and average the loss over the
    positive (non-zero loss) ones.

    Args:
        labels: labels of the batch, of size (batch_size,)
        embeddings: tensor of shape (batch_size, embed_dim)
        alpha: margin for triplet loss
        target: tensor of target labels; pairs touching these labels get
            their distances shifted by `penalize_ratio`
        labels_size: batch size (int)
        target_size: number of target labels (int)
        penalize_ratio: additive distance penalty for target pairs
        squared: Boolean. If true, use the pairwise squared euclidean
            distance matrix. If false, use the pairwise euclidean distance
            matrix.
        epsilon: unused; kept for backward compatibility with callers.
        name: variable-scope name.

    Returns:
        triplet_loss: scalar tensor containing the triplet loss
        fraction_positive_triplets: fraction of valid triplets with loss > 0
    """
    with tf.variable_scope(name):
        # Get the pairwise distance matrix.  Bug fix: forward `squared`
        # instead of hard-coding squared=True, so the documented parameter
        # actually takes effect (default keeps old behavior).
        pairwise_dist = _pairwise_distances(embeddings, squared=squared)

        # Penalize distances of pairs whose labels appear in `target`:
        # positives are pushed apart, negatives pulled together.
        ap_mask = pos_penalize_mask(labels, target, labels_size, target_size)
        an_mask = neg_penalize_mask(labels, target, labels_size, target_size)
        anchor_positive_dist = tf.where(ap_mask, pairwise_dist + penalize_ratio, pairwise_dist)
        anchor_negative_dist = tf.where(an_mask, pairwise_dist - penalize_ratio, pairwise_dist)

        anchor_positive_dist = tf.expand_dims(anchor_positive_dist, 2)  # shape (batch_size, batch_size, 1)
        assert anchor_positive_dist.shape[2] == 1, "{}".format(anchor_positive_dist.shape)
        anchor_negative_dist = tf.expand_dims(anchor_negative_dist, 1)  # shape (batch_size, 1, batch_size)
        assert anchor_negative_dist.shape[1] == 1, "{}".format(anchor_negative_dist.shape)

        # Broadcast to a (batch_size, batch_size, batch_size) tensor where
        # entry [i, j, k] is the loss of (anchor=i, positive=j, negative=k).
        triplet_loss = anchor_positive_dist - anchor_negative_dist + alpha

        # Put to zero the invalid triplets
        # (where label(a) != label(p) or label(n) == label(a) or a == p)
        mask = tf.to_float(_get_triplet_mask(labels))
        triplet_loss = tf.multiply(mask, triplet_loss)

        # Remove negative losses (i.e. the easy triplets)
        triplet_loss = tf.maximum(triplet_loss, 0.0)

        # Count number of positive triplets (where triplet_loss > 0)
        valid_triplets = tf.to_float(tf.greater(triplet_loss, 1e-16))
        num_positive_triplets = tf.reduce_sum(valid_triplets)
        num_valid_triplets = tf.reduce_sum(mask)
        fraction_positive_triplets = num_positive_triplets / (num_valid_triplets + 1e-16)

        # Get final mean triplet loss over the positive valid triplets
        triplet_loss = tf.reduce_sum(triplet_loss) / (num_positive_triplets + 1e-16)

        return triplet_loss, fraction_positive_triplets
def _pairwise_distances(embeddings, squared=False):
    """Compute the (batch_size, batch_size) matrix of pairwise euclidean distances.

    Args:
        embeddings: tensor of shape (batch_size, embed_dim)
        squared: Boolean. If true, return squared distances; otherwise true
            euclidean distances.

    Returns:
        pairwise_distances: tensor of shape (batch_size, batch_size)
    """
    # Gram matrix of inner products <e_i, e_j> — shape (batch_size, batch_size).
    gram = tf.matmul(embeddings, tf.transpose(embeddings))

    # ||e_i||^2 is exactly the diagonal of the Gram matrix; reusing it keeps
    # the diagonal of the distance matrix at exactly 0.
    squared_norms = tf.diag_part(gram)

    # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2, broadcast over both axes.
    dists = tf.expand_dims(squared_norms, 1) - 2.0 * gram + tf.expand_dims(squared_norms, 0)

    # Floating-point error can produce tiny negatives; clamp at zero.
    dists = tf.maximum(dists, 0.0)

    if squared:
        return dists

    # sqrt has an infinite gradient at 0 (e.g. on the diagonal), so shift
    # exact zeros by a tiny epsilon before the sqrt ...
    zero_mask = tf.to_float(tf.equal(dists, 0.0))
    dists = tf.sqrt(dists + zero_mask * 1e-16)
    # ... and force those entries back to exactly 0 afterwards.
    return dists * (1.0 - zero_mask)
def _get_triplet_mask(labels):
    """Return a 3D boolean mask where mask[a, p, n] marks valid triplets.

    A triplet (i, j, k) is valid if:
        - i, j, k are distinct indices
        - labels[i] == labels[j] and labels[i] != labels[k]

    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]
    """
    # Pairwise "different index" matrix, broadcast to 3D for each pair role.
    diagonal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
    off_diagonal = tf.logical_not(diagonal)
    a_ne_p = tf.expand_dims(off_diagonal, 2)
    a_ne_n = tf.expand_dims(off_diagonal, 1)
    p_ne_n = tf.expand_dims(off_diagonal, 0)
    all_distinct = tf.logical_and(tf.logical_and(a_ne_p, a_ne_n), p_ne_n)

    # Label agreement: anchor must share the positive's label and differ
    # from the negative's.
    same_label = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    a_eq_p = tf.expand_dims(same_label, 2)
    a_eq_n = tf.expand_dims(same_label, 1)
    labels_valid = tf.logical_and(a_eq_p, tf.logical_not(a_eq_n))

    return tf.logical_and(all_distinct, labels_valid)
def pos_penalize_mask(labels, target, labels_size, target_size):
    """Boolean (labels_size, labels_size) mask of positive pairs to penalize.

    args:
        labels: tensor list of labels
        target: tensor list of target labels
        labels_size: integer
        target_size: integer
    return:
        mask to determine which positive pairwise distance to penalize
    """
    same_label = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    off_diagonal = tf.logical_not(tf.cast(tf.eye(labels_size), tf.bool))

    # Membership test: tile each label against the target list and count hits.
    tiled_labels = tf.tile(tf.expand_dims(labels, axis = 1), [1, target_size])
    hit_counts = tf.reduce_sum(
        tf.cast(tf.equal(tiled_labels, target), dtype = tf.int32), axis = 1)
    in_target = tf.cast(hit_counts, dtype = tf.bool)

    # Both members of a positive pair must belong to the target set.
    pair_in_target = tf.logical_and(tf.expand_dims(in_target, axis=0),
                                    tf.expand_dims(in_target, axis=1))

    return tf.logical_and(tf.logical_and(pair_in_target, same_label), off_diagonal)
def neg_penalize_mask(labels, target, labels_size, target_size):
    """Boolean (labels_size, labels_size) mask of negative pairs to penalize.

    args:
        labels: tensor list of labels
        target: tensor list of target labels
        labels_size: integer
        target_size: integer
    return:
        mask to determine which negative pairwise distance to penalize
    """
    different_label = tf.not_equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    off_diagonal = tf.logical_not(tf.cast(tf.eye(labels_size), tf.bool))

    # Membership test: tile each label against the target list and count hits.
    tiled_labels = tf.tile(tf.expand_dims(labels, axis = 1), [1, target_size])
    hit_counts = tf.reduce_sum(
        tf.cast(tf.equal(tiled_labels, target), dtype = tf.int32), axis = 1)
    in_target = tf.cast(hit_counts, dtype = tf.bool)

    # For negatives it is enough that EITHER member belongs to the target set
    # (logical_or, unlike the logical_and used for positive pairs).
    pair_touches_target = tf.logical_or(tf.expand_dims(in_target, axis=0),
                                        tf.expand_dims(in_target, axis=1))

    return tf.logical_and(tf.logical_and(pair_touches_target, different_label), off_diagonal)
def _get_anchor_positive_triplet_mask(labels):
    """Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.

    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]
    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Distinct indices: everything off the diagonal.
    off_diagonal = tf.logical_not(tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool))
    # Same label, via (1, batch) vs (batch, 1) broadcasting.
    same_label = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    return tf.logical_and(off_diagonal, same_label)
def _get_anchor_negative_triplet_mask(labels):
    """Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.

    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]
    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Broadcast (1, batch) against (batch, 1) and negate the equality.
    same_label = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    return tf.logical_not(same_label)
def positive_penalize_fn(positive_similarity, penalize_ratio):
    """Lower positive-pair similarities by `penalize_ratio`, clipped below at -1.0."""
    shifted = tf.subtract(positive_similarity, penalize_ratio)
    return tf.maximum(shifted, -1.0)
def negative_penalize_fn(negative_similarity, penalize_ratio):
    """Raise negative-pair similarities by `penalize_ratio`, clipped above at 1.0."""
    shifted = tf.add(negative_similarity, penalize_ratio)
    return tf.minimum(shifted, 1.0)
def nth(tensor):
    """Identity pass-through, used as the no-op branch of tf.where calls."""
    return tensor
def histogram_loss(labels, embeddings, target, labels_size, target_size, penalize_ratio, name = 'batch_all_histogram_loss'):
    """Build the histogram loss over a batch of embeddings.

    Positive- and negative-pair cosine similarities are binned into soft
    histograms; the loss is the overlap between the negative histogram and
    the cumulative positive histogram.

    Args:
        labels: labels of the batch, of size (batch_size,)
        embeddings: tensor of shape (batch_size, embed_dim); assumed to be
            L2-normalized so the dot product is a cosine similarity in
            [-1, 1] -- TODO confirm with callers
        target: tensor of target labels (pairs touching these labels get
            their similarities shifted by `penalize_ratio`)
        labels_size: batch size (int)
        target_size: number of target labels (int)
        penalize_ratio: similarity shift applied to penalized pairs
        name: variable-scope name
    Returns:
        histogram_loss: scalar tensor containing the histogram loss
    """
    with tf.variable_scope(name):
        # The number of histogram bins equals the embedding dimension.
        dim = embeddings.shape[1]
        R = tf.constant(dim, tf.int32)
        # Get the pairwise cosine similarity matrix
        pairwise_similarity = tf.matmul(embeddings, embeddings, transpose_b = True) # (batchsize, batchsize) matrix with pairwise similarity
        positive_mask = tf.to_float(_get_anchor_positive_triplet_mask(labels)) #(batchsize, batchsize) matrix with 1's at valid positive pairs indices
        negative_mask = tf.to_float(_get_anchor_negative_triplet_mask(labels)) #(batchsize, batchsize) matrix with 1's at valid negative pairs indices

        # Positive pairs: zero out invalid entries, then shift similarities
        # of penalized pairs down via positive_penalize_fn.
        positive_similarity = tf.multiply(positive_mask, pairwise_similarity)
        positive_similarity = tf.where(pos_penalize_mask(labels, target, labels_size, target_size),
                                       positive_penalize_fn(positive_similarity, penalize_ratio),
                                       nth(positive_similarity))
        lower_positive_similarity = tf.matrix_band_part(positive_similarity, -1, 0) #lower triangular (batchsize, batchsize) matrix with positive pair's pairwise similarity
        flat_positive_similarity = tf.gather_nd(lower_positive_similarity, tf.where(tf.not_equal(lower_positive_similarity, 0.0))) #flatten the matrix

        # Negative pairs: same treatment, shifted up via negative_penalize_fn.
        negative_similarity = tf.multiply(negative_mask, pairwise_similarity)
        negative_similarity = tf.where(neg_penalize_mask(labels, target, labels_size, target_size),
                                       negative_penalize_fn(negative_similarity, penalize_ratio),
                                       nth(negative_similarity))
        lower_negative_similarity = tf.matrix_band_part(negative_similarity, -1, 0) #lower triangular (batchsize, batchsize) matrix with negative pair's pairwise similarity
        flat_negative_similarity = tf.gather_nd(lower_negative_similarity, tf.where(tf.not_equal(lower_negative_similarity, 0.0))) #flatten the matrix

        # Scale similarities from [-1, 1] into bin coordinates [-bins/2, bins/2].
        nbr_pos_bins = tf.Variable(dim, dtype=tf.int32)
        nbr_neg_bins = tf.Variable(dim, dtype=tf.int32)
        flat_positive_similarity = tf.multiply(flat_positive_similarity, tf.divide(tf.to_float(nbr_pos_bins), 2.0))
        flat_negative_similarity = tf.multiply(flat_negative_similarity, tf.divide(tf.to_float(nbr_neg_bins), 2.0))
        sorted_flat_positive_similarity = tf.contrib.framework.sort(flat_positive_similarity)
        sorted_flat_negative_similarity = tf.contrib.framework.sort(flat_negative_similarity)

        # Linear "soft" binning: each similarity contributes to its floor bin
        # with weight (ceil - x) and to its ceil bin with weight (x - floor).
        floor_pos_pos = tf.map_fn(lambda x: tf.floor(x), sorted_flat_positive_similarity, dtype = tf.float32)
        floor_pos_value = tf.map_fn(lambda x: tf.subtract(tf.ceil(x), x), sorted_flat_positive_similarity, dtype=tf.float32)
        ceil_pos_pos = tf.map_fn(lambda x: tf.ceil(x), sorted_flat_positive_similarity, dtype = tf.float32)
        ceil_pos_value = tf.map_fn(lambda x: tf.subtract(x, tf.floor(x)), sorted_flat_positive_similarity, dtype=tf.float32)
        floor_neg_pos = tf.map_fn(lambda x: tf.floor(x), sorted_flat_negative_similarity, dtype = tf.float32)
        floor_neg_value = tf.map_fn(lambda x: tf.subtract(tf.ceil(x), x), sorted_flat_negative_similarity, dtype=tf.float32)
        ceil_neg_pos = tf.map_fn(lambda x: tf.ceil(x), sorted_flat_negative_similarity, dtype = tf.float32)
        ceil_neg_value = tf.map_fn(lambda x: tf.subtract(x, tf.floor(x)), sorted_flat_negative_similarity, dtype=tf.float32)

        # Bin centers run over [-R/2, R/2); comparing bin positions against
        # this range turns each contribution list into a histogram via matmul.
        multiples = [dim,1]
        compare = tf.range(-R/2, R/2, 1) ###
        compare = tf.expand_dims(compare,axis=-1)

        floor_pos_pos = tf.expand_dims(floor_pos_pos, axis = 0)
        floor_pos_pos = tf.tile(floor_pos_pos, multiples)
        temp1 = tf.cast(tf.equal(floor_pos_pos, tf.to_float(compare)), dtype = tf.float32)
        floor_pos_hist = tf.matmul(temp1, tf.expand_dims(floor_pos_value, axis=0), transpose_b = True)
        ceil_pos_pos = tf.expand_dims(ceil_pos_pos, axis = 0)
        ceil_pos_pos = tf.tile(ceil_pos_pos, multiples)
        temp2 = tf.cast(tf.equal(ceil_pos_pos, tf.to_float(compare)), dtype = tf.float32)
        ceil_pos_hist = tf.matmul(temp2, tf.expand_dims(ceil_pos_value, axis=0), transpose_b = True)
        total_pos_hist = tf.add(floor_pos_hist, ceil_pos_hist)
        # Normalize by the number of positive pairs (mask counts each pair twice).
        total_pos_hist = tf.divide(total_pos_hist, tf.divide(tf.reduce_sum(positive_mask), 2.0))

        floor_neg_pos = tf.expand_dims(floor_neg_pos, axis = 0)
        floor_neg_pos = tf.tile(floor_neg_pos, multiples)
        temp3 = tf.cast(tf.equal(floor_neg_pos, tf.to_float(compare)), dtype = tf.float32)
        floor_neg_hist = tf.matmul(temp3, tf.expand_dims(floor_neg_value, axis=0), transpose_b = True)
        ceil_neg_pos = tf.expand_dims(ceil_neg_pos, axis = 0)
        ceil_neg_pos = tf.tile(ceil_neg_pos, multiples)
        temp4 = tf.cast(tf.equal(ceil_neg_pos, tf.to_float(compare)), dtype = tf.float32)
        ceil_neg_hist = tf.matmul(temp4, tf.expand_dims(ceil_neg_value, axis=0), transpose_b = True)
        total_neg_hist = tf.add(floor_neg_hist, ceil_neg_hist)
        total_neg_hist = tf.divide(total_neg_hist, tf.divide(tf.reduce_sum(negative_mask), 2.0))

        # Loss = sum over bins of p_neg(bin) * CDF_pos(bin): large when
        # negative similarities overlap the high end of the positive CDF.
        cum_total_pos_hist = tf.cumsum(total_pos_hist)
        hist_loss = tf.multiply(total_neg_hist, cum_total_pos_hist)
        total_hist_loss = tf.reduce_sum(hist_loss)
        return total_hist_loss
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for Federation Extension."""
import re
import jsonschema
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone import exception
from keystone.i18n import _, _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# JSON Schema (draft 4) describing a federation mapping document: a non-empty
# list of rules, each with `local` transformations and `remote` requirements.
# Every remote requirement must match exactly one of the `empty`,
# `any_one_of` or `not_any_of` forms defined below; evaluation semantics
# live in RuleProcessor.  Consumed by validate_mapping_structure().
MAPPING_SCHEMA = {
    "type": "object",
    "required": ['rules'],
    "properties": {
        "rules": {
            "minItems": 1,
            "type": "array",
            "items": {
                "type": "object",
                "required": ['local', 'remote'],
                "additionalProperties": False,
                "properties": {
                    "local": {
                        "type": "array"
                    },
                    "remote": {
                        "minItems": 1,
                        "type": "array",
                        "items": {
                            "type": "object",
                            "oneOf": [
                                {"$ref": "#/definitions/empty"},
                                {"$ref": "#/definitions/any_one_of"},
                                {"$ref": "#/definitions/not_any_of"}
                            ],
                        }
                    }
                }
            }
        }
    },
    "definitions": {
        # A remote entry carrying only an attribute `type` (no value checks).
        "empty": {
            "type": "object",
            "required": ['type'],
            "properties": {
                "type": {
                    "type": "string"
                },
            },
            "additionalProperties": False,
        },
        # Attribute value must satisfy one of the listed values; `regex`
        # switches the comparison to regular-expression matching.
        "any_one_of": {
            "type": "object",
            "additionalProperties": False,
            "required": ['type', 'any_one_of'],
            "properties": {
                "type": {
                    "type": "string"
                },
                "any_one_of": {
                    "type": "array"
                },
                "regex": {
                    "type": "boolean"
                }
            }
        },
        # Attribute value must satisfy none of the listed values; `regex`
        # switches the comparison to regular-expression matching.
        "not_any_of": {
            "type": "object",
            "additionalProperties": False,
            "required": ['type', 'not_any_of'],
            "properties": {
                "type": {
                    "type": "string"
                },
                "not_any_of": {
                    "type": "array"
                },
                "regex": {
                    "type": "boolean"
                }
            }
        }
    }
}
def validate_mapping_structure(ref):
    """Validate `ref` against MAPPING_SCHEMA.

    :param ref: mapping document to validate
    :raises exception.ValidationError: with all schema violations joined
        into a single newline-terminated message
    """
    validator = jsonschema.Draft4Validator(MAPPING_SCHEMA)
    # Sort errors for a deterministic message, one violation per line.
    error_text = ''.join(err.message + "\n"
                         for err in sorted(validator.iter_errors(ref), key=str))
    if error_text:
        raise exception.ValidationError(error_text)
def validate_expiration(token_ref):
    """Raise Unauthorized when the federation token has already expired."""
    is_expired = timeutils.utcnow() > token_ref.expires
    if is_expired:
        raise exception.Unauthorized(_('Federation token is expired'))
def validate_groups_cardinality(group_ids, mapping_id):
    """Check if groups list is non-empty.

    :param group_ids: list of group ids
    :type group_ids: list of str
    :param mapping_id: id of the mapping that produced the groups
    :type mapping_id: str

    :raises exception.MissingGroups: if ``group_ids`` cardinality is 0
    """
    if group_ids:
        return
    raise exception.MissingGroups(mapping_id=mapping_id)
def validate_idp(idp, assertion):
    """Check if the IdP providing the assertion is the one registered for
    the mapping.

    :param idp: registered identity provider dict; ``remote_id`` is compared
        against the assertion
    :param assertion: assertion attributes received from the environment
    :raises exception.ValidationError: when the configured remote-id
        attribute is absent from the assertion
    :raises exception.Forbidden: when the asserted identifier does not match
        the registered one
    """
    remote_id_parameter = CONF.federation.remote_id_attribute
    if not remote_id_parameter or not idp['remote_id']:
        LOG.warning(_LW('Impossible to identify the IdP %s '),
                    idp['id'])
        # If nothing is defined, the administrator may want to
        # allow the mapping of every IdP
        return
    try:
        idp_remote_identifier = assertion[remote_id_parameter]
    except KeyError:
        msg = _('Could not find Identity Provider identifier in '
                'environment, check [federation] remote_id_attribute '
                'for details.')
        raise exception.ValidationError(msg)
    if idp_remote_identifier != idp['remote_id']:
        # Bug fix: corrected the typo "accepeted" -> "accepted" in this
        # user-facing error message.
        msg = _('Incoming identity provider identifier not included '
                'among the accepted identifiers.')
        raise exception.Forbidden(msg)
def validate_groups_in_backend(group_ids, mapping_id, identity_api):
    """Iterate over group ids and make sure they are present in the backend.

    This call is not transactional.

    :param group_ids: IDs of the groups to be checked
    :type group_ids: list of str
    :param mapping_id: id of the mapping used for this operation
    :type mapping_id: str
    :param identity_api: Identity Manager object used for communication with
                         backend
    :type identity_api: identity.Manager
    :raises: exception.MappedGroupNotFound
    """
    for gid in group_ids:
        try:
            identity_api.get_group(gid)
        except exception.GroupNotFound:
            # Re-raise with mapping context so the caller knows which
            # mapping referenced the missing group.
            raise exception.MappedGroupNotFound(
                group_id=gid, mapping_id=mapping_id)
def validate_groups(group_ids, mapping_id, identity_api):
    """Check group ids cardinality and check their existence in the backend.

    This call is not transactional.

    :param group_ids: IDs of the groups to be checked
    :type group_ids: list of str
    :param mapping_id: id of the mapping used for this operation
    :type mapping_id: str
    :param identity_api: Identity Manager object used for communication with
                         backend
    :type identity_api: identity.Manager
    :raises: exception.MappedGroupNotFound
    :raises: exception.MissingGroups
    """
    # Fail fast on an empty list before hitting the backend at all.
    validate_groups_cardinality(group_ids, mapping_id)
    validate_groups_in_backend(group_ids, mapping_id, identity_api)
# TODO(marek-denis): Optimize this function, so the number of calls to the
# backend are minimized.
def transform_to_group_ids(group_names, mapping_id,
                           identity_api, assignment_api):
    """Transform groups identified by name/domain to their ids.

    Function accepts list of groups identified by a name and domain giving
    a list of group ids in return.

    Example of group_names parameter::

        [
            {
                "name": "group_name",
                "domain": {
                    "id": "domain_id"
                },
            },
            {
                "name": "group_name_2",
                "domain": {
                    "name": "domain_name"
                }
            }
        ]

    :param group_names: list of group identified by name and its domain.
    :type group_names: list
    :param mapping_id: id of the mapping used for mapping assertion into
        local credentials
    :type mapping_id: str
    :param identity_api: identity_api object
    :param assignment_api: assignment_api object
    :returns: generator object with group ids
    :raises: exception.MappedGroupNotFound: in case asked group doesn't
        exist in the backend.
    """

    def resolve_domain(domain):
        """Return domain id.

        Input is a dictionary with a domain identified either by a ``id`` or a
        ``name``. In the latter case system will attempt to fetch domain object
        from the backend.

        :returns: domain's id
        :rtype: str
        """
        # Prefer the explicit id; fall back to a by-name backend lookup.
        domain_id = (domain.get('id') or
                     assignment_api.get_domain_by_name(
                         domain.get('name')).get('id'))
        return domain_id

    for group in group_names:
        try:
            group_dict = identity_api.get_group_by_name(
                group['name'], resolve_domain(group['domain']))
            yield group_dict['id']
        except exception.GroupNotFound:
            # Surface the group NAME in group_id, preserving the original
            # behavior of this error path.
            raise exception.MappedGroupNotFound(
                group_id=group['name'], mapping_id=mapping_id)
def get_assertion_params_from_env(context):
    """Yield (name, value) pairs for environment entries carrying assertion data.

    Only entries whose key starts with the configured
    ``[federation] assertion_prefix`` are yielded.
    """
    LOG.debug('Environment variables: %s', context['environment'])
    prefix = CONF.federation.assertion_prefix
    environment = context['environment']
    for name, value in environment.items():
        if name.startswith(prefix):
            yield (name, value)
class RuleProcessor(object):
"""A class to process assertions and mapping rules."""
    class _EvalType(object):
        """Mapping rule evaluation types."""

        # Identifiers for the two remote-requirement comparison forms
        # declared in MAPPING_SCHEMA.
        ANY_ONE_OF = 'any_one_of'
        NOT_ANY_OF = 'not_any_of'
    def __init__(self, rules):
        """Initialize RuleProcessor.

        Example rules can be found at:
        :class:`keystone.tests.mapping_fixtures`

        :param rules: rules from a mapping
        :type rules: dict

        """
        # Stored verbatim; evaluated lazily by process().
        self.rules = rules
    def process(self, assertion_data):
        """Transform assertion to a dictionary of user name and group ids
        based on mapping rules.

        This function will iterate through the mapping rules to find
        assertions that are valid.

        :param assertion_data: an assertion containing values from an IdP
        :type assertion_data: dict

        Example assertion_data::

            {
                'Email': 'testacct@example.com',
                'UserName': 'testacct',
                'FirstName': 'Test',
                'LastName': 'Account',
                'orgPersonType': 'Tester'
            }

        :returns: dictionary with user and group_ids

        The expected return structure is::

            {
                'name': 'foobar',
                'group_ids': ['abc123', 'def456'],
                'group_names': [
                    {
                        'name': 'group_name_1',
                        'domain': {
                            'name': 'domain1'
                        }
                    },
                    {
                        'name': 'group_name_1_1',
                        'domain': {
                            'name': 'domain1'
                        }
                    },
                    {
                        'name': 'group_name_2',
                        'domain': {
                            'id': 'xyz132'
                        }
                    }
                ]
            }

        """
        # Assertions will come in as string key-value pairs, and will use a
        # semi-colon to indicate multiple values, i.e. groups.
        # This will create a new dictionary where the values are arrays, and
        # any multiple values are stored in the arrays.
        LOG.debug('assertion data: %s', assertion_data)
        assertion = dict((n, v.split(';')) for n, v in assertion_data.items()
                         if isinstance(v, six.string_types))
        LOG.debug('assertion: %s', assertion)
        identity_values = []

        LOG.debug('rules: %s', self.rules)
        for rule in self.rules:
            direct_maps = self._verify_all_requirements(rule['remote'],
                                                        assertion)

            # If the compare comes back as None, then the rule did not apply
            # to the assertion data, go on to the next rule
            if direct_maps is None:
                continue

            # If there are no direct mappings, then add the local mapping
            # directly to the array of saved values. However, if there is
            # a direct mapping, then perform variable replacement.
            if not direct_maps:
                identity_values += rule['local']
            else:
                for local in rule['local']:
                    new_local = self._update_local_mapping(local, direct_maps)
                    identity_values.append(new_local)

        LOG.debug('identity_values: %s', identity_values)
        # Collapse the accumulated local mappings into the final
        # name/group_ids/group_names structure.
        mapped_properties = self._transform(identity_values)
        LOG.debug('mapped_properties: %s', mapped_properties)
        return mapped_properties
    def _transform(self, identity_values):
        """Transform local mappings, to an easier to understand format.

        Transform the incoming array to generate the return value for
        the process function. Generating content for Keystone tokens will
        be easier if some pre-processing is done at this level.

        :param identity_values: local mapping from valid evaluations
        :type identity_values: array of dict

        Example identity_values::

            [{'group': {'id': '0cd5e9'}, 'user': {'email': 'bob@example.com'}}]

        :returns: dictionary with user name, group_ids and group_names.
        """

        def extract_groups(groups_by_domain):
            # Yield each named group once per domain, deduplicated by name.
            for groups in groups_by_domain.values():
                for group in {g['name']: g for g in groups}.values():
                    yield group

        # initialize the group_ids as a set to eliminate duplicates
        user_name = None
        group_ids = set()
        group_names = list()
        groups_by_domain = dict()

        for identity_value in identity_values:
            if 'user' in identity_value:
                # if a mapping outputs more than one user name, log it
                if user_name is not None:
                    LOG.warning(_LW('Ignoring user name %s'),
                                identity_value['user']['name'])
                else:
                    user_name = identity_value['user']['name']
            if 'group' in identity_value:
                group = identity_value['group']
                if 'id' in group:
                    group_ids.add(group['id'])
                elif 'name' in group:
                    # Named groups are bucketed per domain so duplicates
                    # within a domain can be removed by extract_groups().
                    domain = (group['domain'].get('name') or
                              group['domain'].get('id'))
                    groups_by_domain.setdefault(domain, list()).append(group)

        # NOTE(review): the original file's indentation was lost; placing the
        # extend after the loop (each group emitted once) matches upstream
        # keystone -- confirm against the project history.
        group_names.extend(extract_groups(groups_by_domain))

        return {'name': user_name,
                'group_ids': list(group_ids),
                'group_names': group_names}
def _update_local_mapping(self, local, direct_maps):
"""Replace any {0}, {1} ... values with data from the assertion.
:param local: local mapping reference that needs to be updated
:type local: dict
:param direct_maps: list of identity values, used to update local
:type direct_maps: list
Example local::
{'user': {'name': '{0} {1}', 'email': '{2}'}}
Example direct_maps::
['Bob', 'Thompson', 'bob@example.com']
:returns: new local mapping reference with replaced values.
The expected return structure is::
{'user': {'name': 'Bob Thompson', 'email': 'bob@example.org'}}
"""
LOG.debug('direct_maps: %s', direct_maps)
LOG.debug('local: %s', local)
new = {}
for k, v in six.iteritems(local):
if isinstance(v, dict):
new_value = self._update_local_mapping(v, direct_maps)
else:
new_value = v.format(*direct_maps)
new[k] = new_value
return new
def _verify_all_requirements(self, requirements, assertion):
"""Go through the remote requirements of a rule, and compare against
the assertion.
If a value of ``None`` is returned, the rule with this assertion
doesn't apply.
If an array of zero length is returned, then there are no direct
mappings to be performed, but the rule is valid.
Otherwise, then it will return the values, in order, to be directly
mapped, again, the rule is valid.
:param requirements: list of remote requirements from rules
:type requirements: list
Example requirements::
[
{
"type": "UserName"
},
{
"type": "orgPersonType",
"any_one_of": [
"Customer"
]
}
]
:param assertion: dict of attributes from an IdP
:type assertion: dict
Example assertion::
{
'UserName': ['testacct'],
'LastName': ['Account'],
'orgPersonType': ['Tester'],
'Email': ['testacct@example.com'],
'FirstName': ['Test']
}
:returns: list of direct mappings or None.
"""
direct_maps = []
for requirement in requirements:
requirement_type = requirement['type']
regex = requirement.get('regex', False)
any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
if any_one_values is not None:
if self._evaluate_requirement(any_one_values,
requirement_type,
self._EvalType.ANY_ONE_OF,
regex,
assertion):
continue
else:
return None
not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
if not_any_values is not None:
if self._evaluate_requirement(not_any_values,
requirement_type,
self._EvalType.NOT_ANY_OF,
regex,
assertion):
continue
else:
return None
# If 'any_one_of' or 'not_any_of' are not found, then values are
# within 'type'. Attempt to find that 'type' within the assertion.
direct_map_values = assertion.get(requirement_type)
if direct_map_values:
LOG.debug('updating a direct mapping: %s', direct_map_values)
direct_maps += direct_map_values
return direct_maps
def _evaluate_values_by_regex(self, values, assertion_values):
for value in values:
for assertion_value in assertion_values:
if re.search(value, assertion_value):
return True
return False
def _evaluate_requirement(self, values, requirement_type,
eval_type, regex, assertion):
"""Evaluate the incoming requirement and assertion.
If the requirement type does not exist in the assertion data, then
return False. If regex is specified, then compare the values and
assertion values. Otherwise, grab the intersection of the values
and use that to compare against the evaluation type.
:param values: list of allowed values, defined in the requirement
:type values: list
:param requirement_type: key to look for in the assertion
:type requirement_type: string
:param eval_type: determine how to evaluate requirements
:type eval_type: string
:param regex: perform evaluation with regex
:type regex: boolean
:param assertion: dict of attributes from the IdP
:type assertion: dict
:returns: boolean, whether requirement is valid or not.
"""
assertion_values = assertion.get(requirement_type)
if not assertion_values:
return False
if regex:
any_match = self._evaluate_values_by_regex(values,
assertion_values)
else:
any_match = bool(set(values).intersection(set(assertion_values)))
if any_match and eval_type == self._EvalType.ANY_ONE_OF:
return True
if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
return True
return False
def assert_enabled_identity_provider(federation_api, idp_id):
    """Raise Forbidden unless the referenced identity provider is enabled.

    :param federation_api: federation driver used to look up the IdP
    :param idp_id: id of the identity provider to check
    :raises exception.Forbidden: when the IdP's ``enabled`` flag is
        missing or anything other than ``True``
    """
    idp_ref = federation_api.get_idp(idp_id)
    if idp_ref.get('enabled') is not True:
        msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id}
        LOG.debug(msg)
        raise exception.Forbidden(msg)
def assert_enabled_service_provider_object(service_provider):
    """Raise Forbidden unless the given service provider is enabled.

    :param service_provider: service provider reference (dict-like)
    :raises exception.Forbidden: when the provider's ``enabled`` flag is
        missing or anything other than ``True``
    """
    if service_provider.get('enabled') is not True:
        sp_id = service_provider['id']
        msg = _('Service Provider %(sp)s is disabled') % {'sp': sp_id}
        LOG.debug(msg)
        raise exception.Forbidden(msg)
| |
# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import os
import sys
import textwrap
from typ import main
from typ import test_case
from typ import Host
from typ import VERSION
from typ.fakes import test_result_server_fake
from typ.fakes import host_fake
# True when running under Python 3; several tests below branch on this.
# (The comparison already yields a bool, so no bool() wrapper is needed.)
is_python3 = sys.version_info.major == 3

if is_python3:  # pragma: python3
    # Provide the Python 2 name for code that still references it.
    # pylint: disable=redefined-builtin,invalid-name
    unicode = str

# Shorthand used throughout this module for dedenting file literals.
d = textwrap.dedent
ARTIFACTS_TEST_PY = """
import unittest
import os
from typ import test_case
class ArtifactTest(test_case.TestCase):
def test_produce_artifact_for_retries(self):
self.artifacts.CreateArtifact('artifact_name', 'test.txt', 'content')
self.fail()
"""
FLAKY_TEST_PY = """
import unittest
class FlakyTest(unittest.TestCase):
_retry_count = 0
def test_flaky(self):
cls = self.__class__
if cls._retry_count < 3:
cls._retry_count += 1
self.fail()
return
"""
SKIP_TEST_PY = """
import unittest
class SkipTest(unittest.TestCase):
def test_skip(self):
self.skipTest('SKIPPING TEST')
"""
SKIP_TEST_FILES = {'skip_test.py': SKIP_TEST_PY}
PASS_TEST_PY = """
import unittest
import time
class PassingTest(unittest.TestCase):
def test_pass(self):
# Add sleep to make the time assertion in
# main_test.TestCli.test_write_full_results_to not flaky.
time.sleep(0.1)
pass
"""
PASS_TEST_FILES = {'pass_test.py': PASS_TEST_PY}
FAIL_TEST_PY = """
import unittest
class FailingTest(unittest.TestCase):
def test_fail(self):
self.fail()
"""
FAIL_TEST_FILES = {'fail_test.py': FAIL_TEST_PY}
OUTPUT_TEST_PY = """
import sys
import unittest
class PassTest(unittest.TestCase):
def test_out(self):
sys.stdout.write("hello on stdout\\n")
sys.stdout.flush()
def test_err(self):
sys.stderr.write("hello on stderr\\n")
class FailTest(unittest.TestCase):
def test_out_err_fail(self):
sys.stdout.write("hello on stdout\\n")
sys.stdout.flush()
sys.stderr.write("hello on stderr\\n")
self.fail()
"""
OUTPUT_TEST_FILES = {'output_test.py': OUTPUT_TEST_PY}
SF_TEST_PY = """
import sys
import unittest
class SkipMethods(unittest.TestCase):
@unittest.skip('reason')
def test_reason(self):
self.fail()
@unittest.skipIf(True, 'reason')
def test_skip_if_true(self):
self.fail()
@unittest.skipIf(False, 'reason')
def test_skip_if_false(self):
self.fail()
class SkipSetup(unittest.TestCase):
def setUp(self):
self.skipTest('setup failed')
def test_notrun(self):
self.fail()
@unittest.skip('skip class')
class SkipClass(unittest.TestCase):
def test_method(self):
self.fail()
class SetupClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.stdout.write('in setupClass\\n')
sys.stdout.flush()
assert False, 'setupClass failed'
def test_method1(self):
pass
def test_method2(self):
pass
class ExpectedFailures(unittest.TestCase):
@unittest.expectedFailure
def test_fail(self):
self.fail()
@unittest.expectedFailure
def test_pass(self):
pass
"""
SF_TEST_FILES = {'sf_test.py': SF_TEST_PY}
LOAD_TEST_PY = """
import unittest
class BaseTest(unittest.TestCase):
pass
def method_fail(self):
self.fail()
def method_pass(self):
pass
def load_tests(_, _2, _3):
setattr(BaseTest, "a/b/fail", method_fail)
setattr(BaseTest, "a/b/pass", method_pass)
suite = unittest.TestSuite()
suite.addTest(BaseTest("a/b/fail"))
suite.addTest(BaseTest("a/b/pass"))
return suite
"""
LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
path_to_main = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'runner.py')
class TestCli(test_case.MainTestCase):
    # Each check() run invokes typ's runner.py in a fresh subprocess.
    prog = [sys.executable, path_to_main]
    # Compiled bytecode differs between runs; ignore it when diffing.
    files_to_ignore = ['*.pyc']
def test_bad_arg(self):
    # Unknown flags and ambiguous abbreviations are rejected by argparse.
    self.check(['--bad-arg'], ret=2, out='',
               rerr='.*: error: unrecognized arguments: --bad-arg\n')
    self.check(['-help'], ret=2, out='',
               rerr=(".*: error: argument -h/--help: "
                     "ignored explicit argument 'elp'\n"))
def test_bad_metadata(self):
    # --metadata values must be key=value pairs.
    self.check(['--metadata', 'foo'], ret=2, err='',
               out='Error: malformed --metadata "foo"\n')
def test_basic(self):
    # Smoke test: a single passing test produces the standard summary.
    self.check([], files=PASS_TEST_FILES,
               ret=0,
               out=('[1/1] pass_test.PassingTest.test_pass passed\n'
                    '1 test passed, 0 skipped, 0 failures.\n'), err='')
def test_coverage(self):
    # -c prints a coverage report after the run when the coverage
    # package is importable, and errors out cleanly when it isn't.
    # TODO(crbug.com/1217850): Figure out why this isn't working
    # in py3. Do we need to update coverage?
    if sys.version_info.major == 3:
        return
    try:
        import coverage  # pylint: disable=W0612
        files = {
            'pass_test.py': PASS_TEST_PY,
            'fail_test.py': FAIL_TEST_PY,
        }
        self.check(['-c', 'pass_test'], files=files, ret=0, err='',
                   out=d("""\
                       [1/1] pass_test.PassingTest.test_pass passed
                       1 test passed, 0 skipped, 0 failures.
                       Name Stmts Miss Cover
                       ----------------------------------
                       fail_test.py 4 4 0%
                       pass_test.py 6 0 100%
                       ----------------------------------
                       TOTAL 10 4 60%
                       """))
    except ImportError:  # pragma: no cover
        # We can never cover this line, since running coverage means
        # that import will succeed.
        self.check(['-c'], files=PASS_TEST_FILES, ret=1,
                   out='Error: coverage is not installed.\n', err='')
def test_debugger(self):
    # -d drops into pdb; only exercised on Python 2.
    if sys.version_info.major == 3:  # pragma: python3
        return
    else:  # pragma: python2
        _, out, _, _ = self.check(['-d'], stdin='quit()\n',
                                  files=PASS_TEST_FILES, ret=0, err='')
        self.assertIn('(Pdb) ', out)
def test_dryrun(self):
    # -n reports tests as passed without actually running them.
    self.check(['-n'], files=PASS_TEST_FILES, ret=0, err='',
               out=d("""\
                   [1/1] pass_test.PassingTest.test_pass passed
                   1 test passed, 0 skipped, 0 failures.
                   """))
def test_error(self):
    # A test raising an unexpected exception counts as a failure.
    files = {'err_test.py': d("""\
        import unittest
        class ErrTest(unittest.TestCase):
            def test_err(self):
                foo = bar
        """)}
    _, out, _, _ = self.check([''], files=files, ret=1, err='')
    self.assertIn('[1/1] err_test.ErrTest.test_err failed unexpectedly',
                  out)
    self.assertIn('0 tests passed, 0 skipped, 1 failure', out)
def test_pass_repeat(self):
    # --repeat runs each test N times but counts it once in the summary.
    self.check(
        ['--repeat', '2'], files=PASS_TEST_FILES, ret=0, err='',
        out=d("""\
            [1/2] pass_test.PassingTest.test_pass passed
            [2/2] pass_test.PassingTest.test_pass passed
            1 test passed, 0 skipped, 0 failures.
            """))
def test_expectations(self):
    # -X supplies an expectations file; -x supplies tags that select
    # which expectation lines apply.
    files = {
        'expectations.txt': d('''\
            # tags: [ foo bar ]
            # results: [ Failure ]
            crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
            '''),
        'fail_test.py': FAIL_TEST_PY,
    }
    # No tags are passed, so this should fail unexpectedly.
    #_, out, _, _ = self.check(['-X', 'expectations.txt'],
    #                          files=files, ret=1)
    # A matching tag is passed, so the test should fail as expected.
    self.check(['-X', 'expectations.txt', '-x', 'foo'], files=files, ret=0)
    # A tag that doesn't match is passed, so the test should fail
    # unexpectedly.
    self.check(['-X', 'expectations.txt', '-x', 'bar'], files=files, ret=1)
    # Passing a tag without an expectations file doesn't make sense.
    self.check(['-x', 'bar'], files=files, ret=1)
def test_expectations_with_globs(self):
    # Expectation lines may use trailing '*' globs to match test names.
    files = {
        'expectations.txt': d('''\
            # results: [ Failure ]
            crbug.com/12345 fail_test.FailingTest.* [ Failure ]
            '''),
        'fail_test.py': FAIL_TEST_PY,
    }
    self.check(['-X', 'expectations.txt'], files=files, ret=0)
def test_multiple_expectations_files_do_not_work(self):
    # Passing -X more than once is rejected.
    files = {
        'expectations_1.txt': d('''\
            # tags: [ foo bar ]
            # results: [ Failure ]
            crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
            '''),
        'expectations_2.txt': d('''\
            # tags: [ foo bar ]
            # results: [ Skip ]
            crbug.com/12345 [ foo ] fail_test.FailingTest.test_skip [ Skip ]
            '''),
        'fail_test.py': FAIL_TEST_PY,
    }
    # This isn't supported yet.
    self.check(['-X', 'expectations_1.txt', '-X', 'expectations_2.txt',
                '-x', 'foo'], files=files, ret=1)
def test_expectations_file_has_syntax_error(self):
    # A malformed expectations file (unterminated tag list) is an error.
    files = {
        'expectations.txt': d('''\
            # tags: [
            crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
            '''),
        'fail_test.py': FAIL_TEST_PY,
    }
    self.check(['-X', 'expectations.txt', '-x', 'foo'], files=files, ret=1)
def test_fail(self):
    # A failing test is reported as an unexpected failure.
    _, out, _, _ = self.check([], files=FAIL_TEST_FILES, ret=1, err='')
    self.assertIn('fail_test.FailingTest.test_fail failed unexpectedly',
                  out)
def test_fail_repeat(self):
    # With --repeat, every iteration of a failing test is reported.
    _, out, _, _ = self.check(
        ['--repeat', '2'], files=FAIL_TEST_FILES, ret=1, err='')
    self.assertIn(
        '[1/2] fail_test.FailingTest.test_fail failed unexpectedly', out)
    self.assertIn(
        '[2/2] fail_test.FailingTest.test_fail failed unexpectedly', out)
def test_fail_then_pass(self):
    # A test that fails once then passes on retry ends up green, and the
    # JSON results record the full 'FAIL PASS' history.
    files = {'fail_then_pass_test.py': d("""\
        import unittest
        count = 0
        class FPTest(unittest.TestCase):
            def test_count(self):
                global count
                count += 1
                if count == 1:
                    self.fail()
        """)}
    _, out, _, files = self.check(['--retry-limit', '3',
                                   '--write-full-results-to',
                                   'full_results.json'],
                                  files=files, ret=0, err='')
    self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
    self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
    self.assertIn('1 test passed, 0 skipped, 0 failures.\n', out)
    results = json.loads(files['full_results.json'])
    self.assertEqual(
        results['tests'][
            'fail_then_pass_test']['FPTest']['test_count']['actual'],
        'FAIL PASS')
def test_fail_then_pass_repeat(self):
    # --repeat combined with retries records each repetition's own
    # fail/retry history in the JSON results.
    files = {'fail_then_pass_test.py': d("""\
        import unittest
        count = 0
        class FPTest(unittest.TestCase):
            def test_count(self):
                global count
                count += 1
                if count % 2 == 1:
                    self.fail()
        """)}
    _, out, _, files = self.check(['--retry-limit', '3',
                                   '--write-full-results-to',
                                   'full_results.json',
                                   '--repeat', '2'],
                                  files=files, ret=0, err='')
    results = json.loads(files['full_results.json'])
    self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
    self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
    self.assertEqual(
        results['tests'][
            'fail_then_pass_test']['FPTest']['test_count']['actual'],
        'FAIL PASS FAIL PASS')
def test_fail_then_skip(self):
    # A failure followed by a skip on retry counts as skipped, with the
    # 'FAIL SKIP' history preserved in the JSON results.
    files = {'fail_then_skip_test.py': d("""\
        import unittest
        count = 0
        class FPTest(unittest.TestCase):
            def test_count(self):
                global count
                count += 1
                if count == 1:
                    self.fail()
                elif count == 2:
                    self.skipTest('')
        """)}
    _, out, _, files = self.check(['--retry-limit', '3',
                                   '--write-full-results-to',
                                   'full_results.json'],
                                  files=files, ret=0, err='')
    self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
    self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
    self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
    results = json.loads(files['full_results.json'])
    self.assertEqual(
        results['tests'][
            'fail_then_skip_test']['FPTest']['test_count']['actual'],
        'FAIL SKIP')
def test_failures_are_not_elided(self):
    # Failure output must be shown in full even on a narrow terminal.
    _, out, _, _ = self.check(['--terminal-width=20'],
                              files=FAIL_TEST_FILES, ret=1, err='')
    self.assertIn('[1/1] fail_test.FailingTest.test_fail failed '
                  'unexpectedly:\n', out)
def test_file_list(self):
    # -f reads test names from a file, or from stdin when given '-';
    # module, class, and full test names are all accepted.
    files = PASS_TEST_FILES
    self.check(['-f', '-'], files=files, stdin='pass_test\n', ret=0)
    self.check(['-f', '-'], files=files, stdin='pass_test.PassingTest\n',
               ret=0)
    self.check(['-f', '-'], files=files,
               stdin='pass_test.PassingTest.test_pass\n',
               ret=0)
    files = {'pass_test.py': PASS_TEST_PY,
             'test_list.txt': 'pass_test.PassingTest.test_pass\n'}
    self.check(['-f', 'test_list.txt'], files=files, ret=0)
def test_find(self):
    # -l lists matching tests; names, file paths, and directories are
    # all valid selectors.
    files = PASS_TEST_FILES
    self.check(['-l'], files=files, ret=0,
               out='pass_test.PassingTest.test_pass\n')
    self.check(['-l', 'pass_test'], files=files, ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
    self.check(['-l', 'pass_test.py'], files=files, ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
    self.check(['-l', './pass_test.py'], files=files, ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
    self.check(['-l', '.'], files=files, ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
    self.check(['-l', 'pass_test.PassingTest.test_pass'], files=files,
               ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
    self.check(['-l', '.'], files=files, ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
def test_find_from_subdirs(self):
    # Listing works from a sibling directory, via relative paths,
    # package names, or an explicit --path.
    files = {
        'foo/__init__.py': '',
        'foo/pass_test.py': PASS_TEST_PY,
        'bar/__init__.py': '',
        'bar/tmp': '',
    }
    self.check(['-l', '../foo/pass_test.py'], files=files, cwd='bar',
               ret=0, err='',
               out='foo.pass_test.PassingTest.test_pass\n')
    self.check(['-l', 'foo'], files=files, cwd='bar',
               ret=0, err='',
               out='foo.pass_test.PassingTest.test_pass\n')
    self.check(['-l', '--path', '../foo', 'pass_test'],
               files=files, cwd='bar', ret=0, err='',
               out='pass_test.PassingTest.test_pass\n')
def test_multiple_top_level_dirs(self):
    # Tests can be discovered across several top-level directories,
    # given either as positional args or via --top-level-dirs.
    files = {
        'foo/bar/__init__.py': '',
        'foo/bar/pass_test.py': PASS_TEST_PY,
        'baz/quux/__init__.py': '',
        'baz/quux/second_test.py': PASS_TEST_PY,
    }
    self.check(['-l', 'foo/bar', 'baz/quux'], files=files,
               ret=0, err='',
               out=(
                   'bar.pass_test.PassingTest.test_pass\n'
                   'quux.second_test.PassingTest.test_pass\n'
               ))
    self.check(['-l', 'foo/bar/pass_test.py', 'baz/quux'], files=files,
               ret=0, err='',
               out=(
                   'bar.pass_test.PassingTest.test_pass\n'
                   'quux.second_test.PassingTest.test_pass\n'
               ))
    self.check(['-l', '--top-level-dirs', 'foo', '--top-level-dirs', 'baz'],
               files=files,
               ret=0, err='',
               out=(
                   'bar.pass_test.PassingTest.test_pass\n'
                   'quux.second_test.PassingTest.test_pass\n'
               ))
def test_list_with_repeat(self):
    # --repeat does not duplicate entries in -l listings.
    files = {
        'foo/bar/__init__.py': '',
        'foo/bar/pass_test.py': PASS_TEST_PY,
        'baz/quux/__init__.py': '',
        'baz/quux/second_test.py': PASS_TEST_PY,
    }
    self.check(['-l', 'foo/bar', 'baz/quux', '--repeat', '10'],
               files=files,
               ret=0, err='',
               out=(
                   'bar.pass_test.PassingTest.test_pass\n'
                   'quux.second_test.PassingTest.test_pass\n'
               ))
def test_single_top_level_dir(self):
    # --top-level-dir restricts discovery to one directory tree.
    files = {
        'foo/bar/__init__.py': '',
        'foo/bar/pass_test.py': PASS_TEST_PY,
        'baz/quux/__init__.py': '',
        'baz/quux/second_test.py': PASS_TEST_PY,
    }
    self.check(['-l', '--top-level-dir', 'foo'],
               files=files,
               ret=0, err='',
               out=(
                   'bar.pass_test.PassingTest.test_pass\n'
               ))
def test_can_not_have_both_top_level_flags(self):
    # --top-level-dir and --top-level-dirs are mutually exclusive.
    files = {
        'foo/bar/__init__.py': '',
        'foo/bar/pass_test.py': PASS_TEST_PY,
        'baz/quux/__init__.py': '',
        'baz/quux/second_test.py': PASS_TEST_PY,
    }
    self.check(
        ['-l', '--top-level-dir', 'foo', '--top-level-dirs', 'bar'],
        files=files,
        ret=1, out='',
        err='Cannot specify both --top-level-dir and --top-level-dirs\n')
def test_help(self):
    # --help exits 0 and prints usage; content is not pinned down.
    self.check(['--help'], ret=0, rout='.*', err='')
def test_import_failure_missing_file(self):
    # Listing a nonexistent module reports the load failure.
    _, out, _, _ = self.check(['-l', 'foo'], ret=1, err='')
    self.assertIn('Failed to load "foo" in find_tests', out)
    self.assertIn('No module named', out)
def test_import_failure_missing_package(self):
    # A test module importing a missing package reports the load failure.
    files = {'foo.py': d("""\
        import unittest
        import package_that_does_not_exist
        class ImportFailureTest(unittest.TestCase):
            def test_case(self):
                pass
        """)}
    _, out, _, _ = self.check(['-l', 'foo.py'], files=files, ret=1, err='')
    self.assertIn('Failed to load "foo.py" in find_tests', out)
    self.assertIn('No module named', out)
def test_import_failure_no_tests(self):
    # A module with no test cases lists nothing but is not an error.
    files = {'foo.py': 'import unittest'}
    self.check(['-l', 'foo'], files=files, ret=0, err='',
               out='\n')
def test_import_failure_syntax_error(self):
    # A syntax error in a test module is surfaced as an import failure.
    files = {'syn_test.py': d("""\
        import unittest
        class SyntaxErrorTest(unittest.TestCase):
            def test_syntax_error_in_test(self):
                syntax error
        """)}
    _, out, _, _ = self.check([], files=files, ret=1, err='')
    self.assertIn('Failed to import test module: syn_test', out)
    self.assertIn('SyntaxError: invalid syntax', out)
def test_interrupt(self):
    # KeyboardInterrupt in a test aborts the run with exit code 130.
    files = {'interrupt_test.py': d("""\
        import unittest
        class Foo(unittest.TestCase):
            def test_interrupt(self):
                raise KeyboardInterrupt()
        """)}
    self.check(['-j', '1'], files=files, ret=130, out='',
               err='interrupted, exiting\n')
def test_isolate(self):
    # --isolate runs matching tests in their own process; output is
    # unchanged for a passing test.
    self.check(['--isolate', '*test_pass*'], files=PASS_TEST_FILES, ret=0,
               out=('[1/1] pass_test.PassingTest.test_pass passed\n'
                    '1 test passed, 0 skipped, 0 failures.\n'), err='')
def test_load_tests_failure(self):
    # An exception raised from load_tests() is reported to the user.
    files = {'foo_test.py': d("""\
        import unittest
        def load_tests(_, _2, _3):
            raise ValueError('this should fail')
        """)}
    _, out, _, _ = self.check([], files=files, ret=1, err='')
    self.assertIn('this should fail', out)
def test_load_tests_single_worker(self):
    # load_tests()-provided names run deterministically with one worker.
    files = LOAD_TEST_FILES
    _, out, _, _ = self.check([
        '-j', '1', '-v', '--test-name-prefix',
        'load_test.BaseTest.'], files=files, ret=1, err='')
    self.assertIn('[1/2] a/b/fail failed', out)
    self.assertIn('[2/2] a/b/pass passed', out)
    self.assertIn('1 test passed, 0 skipped, 1 failure.\n', out)
def test_load_tests_multiple_workers(self):
    # TODO(crbug.com/1217853) Figure out why this isn't working under
    # py3 and/or possibly running in parallel on mac.
    if sys.platform in ('darwin', 'win32'):
        return
    _, out, _, _ = self.check([], files=LOAD_TEST_FILES, ret=1, err='')
    # The output for this test is nondeterministic since we may run
    # two tests in parallel. So, we just test that some of the substrings
    # we care about are present.
    self.assertIn('a/b/pass passed', out)
    self.assertIn('a/b/fail failed', out)
    self.assertIn('1 test passed, 0 skipped, 1 failure.\n', out)
def test_missing_builder_name(self):
    # --test-results-server requires builder, master, and test-type
    # flags; each missing one produces its own error line.
    self.check(['--test-results-server', 'localhost'], ret=2,
               out=('Error: --builder-name must be specified '
                    'along with --test-result-server\n'
                    'Error: --master-name must be specified '
                    'along with --test-result-server\n'
                    'Error: --test-type must be specified '
                    'along with --test-result-server\n'), err='')
def test_ninja_status_env(self):
    # The NINJA_STATUS env var overrides the progress-line prefix.
    self.check(['-v', 'output_test.PassTest.test_out'],
               files=OUTPUT_TEST_FILES, aenv={'NINJA_STATUS': 'ns: '},
               out=d("""\
                   ns: output_test.PassTest.test_out passed
                   1 test passed, 0 skipped, 0 failures.
                   """), err='')
def test_output_for_failures(self):
    # stdout/stderr captured from a failing test is echoed in the report.
    _, out, _, _ = self.check(['output_test.FailTest'],
                              files=OUTPUT_TEST_FILES,
                              ret=1, err='')
    self.assertIn('[1/1] output_test.FailTest.test_out_err_fail '
                  'failed unexpectedly:\n'
                  '  hello on stdout\n'
                  '  hello on stderr\n', out)
def test_quiet(self):
    # -q suppresses all normal output for a passing run.
    self.check(['-q'], files=PASS_TEST_FILES, ret=0, err='', out='')
def test_retry_limit(self):
    # --retry-limit N retries a failure N times: 1 initial + 2 retries
    # means three failure reports in total.
    _, out, _, _ = self.check(['--retry-limit', '2'],
                              files=FAIL_TEST_FILES, ret=1, err='')
    self.assertIn('Retrying failed tests', out)
    lines = out.splitlines()
    self.assertEqual(len([l for l in lines
                          if 'test_fail failed unexpectedly:' in l]),
                     3)
def test_skip(self):
    # --skip marks matching tests as skipped rather than running them.
    _, out, _, _ = self.check(['--skip', '*test_fail*'],
                              files=FAIL_TEST_FILES, ret=0)
    self.assertIn('0 tests passed, 1 skipped, 0 failures.', out)
    files = {'fail_test.py': FAIL_TEST_PY,
             'pass_test.py': PASS_TEST_PY}
    self.check(['-j', '1', '--skip', '*test_fail*'], files=files, ret=0,
               out=('[1/2] fail_test.FailingTest.test_fail was skipped\n'
                    '[2/2] pass_test.PassingTest.test_pass passed\n'
                    '1 test passed, 1 skipped, 0 failures.\n'), err='')
    # This tests that we print test_started updates for skipped tests
    # properly. It also tests how overwriting works.
    _, out, _, _ = self.check(['-j', '1', '--overwrite', '--skip',
                               '*test_fail*'], files=files, ret=0,
                              err='', universal_newlines=False)
    # We test this string separately and call out.strip() to
    # avoid the trailing \r\n we get on windows, while keeping
    # the \r's elsewhere in the string.
    self.assertMultiLineEqual(
        out.strip(),
        ('[0/2] fail_test.FailingTest.test_fail\r'
         ' \r'
         '[1/2] fail_test.FailingTest.test_fail was skipped\r'
         ' \r'
         '[1/2] pass_test.PassingTest.test_pass\r'
         ' \r'
         '[2/2] pass_test.PassingTest.test_pass passed\r'
         ' \r'
         '1 test passed, 1 skipped, 0 failures.'))
def test_skip_via_expectations(self):
    # A [ Skip ] expectation prevents the matching test from running.
    files = {'expectations.txt':
             '# results: [ Skip ]\ncrbug.com/23456 fail_test.FailingTest.test_fail [ Skip ]\n',
             'fail_test.py': FAIL_TEST_PY,
             'pass_test.py': PASS_TEST_PY}
    self.check(['-X', 'expectations.txt'], files=files, ret=0)
def test_skips_and_failures(self):
    # End-to-end check of every skip/expected-failure flavor in
    # SF_TEST_PY under a single serialized, verbose run.
    _, out, _, _ = self.check(['-j', '1', '-v', '-v'], files=SF_TEST_FILES,
                              ret=1, err='')
    # We do a bunch of assertIn()'s to work around the non-portable
    # tracebacks.
    self.assertIn(('[1/9] sf_test.ExpectedFailures.test_fail failed as expected:\n'
                   '  Traceback '), out)
    self.assertIn(('[2/9] sf_test.ExpectedFailures.test_pass '
                   'passed unexpectedly'), out)
    self.assertIn(('[3/9] sf_test.SetupClass.test_method1 '
                   'failed unexpectedly:\n'
                   '  in setupClass\n'), out)
    self.assertIn(('[4/9] sf_test.SetupClass.test_method2 '
                   'failed unexpectedly:\n'
                   '  in setupClass\n'), out)
    self.assertIn(('[5/9] sf_test.SkipClass.test_method was skipped:\n'
                   '  skip class\n'), out)
    self.assertIn(('[6/9] sf_test.SkipMethods.test_reason was skipped:\n'
                   '  reason\n'), out)
    self.assertIn(('[7/9] sf_test.SkipMethods.test_skip_if_false '
                   'failed unexpectedly:\n'
                   '  Traceback'), out)
    self.assertIn(('[8/9] sf_test.SkipMethods.test_skip_if_true '
                   'was skipped:\n'
                   '  reason\n'
                   '[9/9] sf_test.SkipSetup.test_notrun was skipped:\n'
                   '  setup failed\n'
                   '1 test passed, 4 skipped, 4 failures.\n'), out)
def test_skip_and_all(self):
    # --all should override --skip
    _, out, _, _ = self.check(['--skip', '*test_pass'],
                              files=PASS_TEST_FILES, ret=0, err='')
    self.assertIn('0 tests passed, 1 skipped, 0 failures.', out)
    _, out, _, _ = self.check(['--all', '--skip', '*test_pass'],
                              files=PASS_TEST_FILES, ret=0, err='')
    self.assertIn('1 test passed, 0 skipped, 0 failures.', out)
def test_skip_decorators_and_all(self):
    # --all forces decorator-skipped tests to run (and hence fail here).
    _, out, _, _ = self.check(['--all', '-j', '1', '-v', '-v'],
                              files=SF_TEST_FILES, ret=1, err='')
    self.assertIn('sf_test.SkipClass.test_method failed', out)
    self.assertIn('sf_test.SkipMethods.test_reason failed', out)
    self.assertIn('sf_test.SkipMethods.test_skip_if_true failed', out)
    self.assertIn('sf_test.SkipMethods.test_skip_if_false failed', out)
    # --all does not override explicit calls to skipTest(), only
    # the decorators.
    self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)
def test_sharding(self):
    # Verify that --shard-index/--total-shards partition the test set:
    # each shard runs exactly its own subset, in order.
    def run(shard_index, total_shards, tests):
        # Run one shard and check it executes exactly `tests`.
        files = {'shard_test.py': textwrap.dedent(
            """\
            import unittest
            class ShardTest(unittest.TestCase):
                def test_01(self):
                    pass
                def test_02(self):
                    pass
                def test_03(self):
                    pass
                def test_04(self):
                    pass
                def test_05(self):
                    pass
            """)}
        _, out, _, _ = self.check(
            ['--shard-index', str(shard_index),
             '--total-shards', str(total_shards),
             '--jobs', '1'],
            files=files)
        exp_out = ''
        total_tests = len(tests)
        for i, test in enumerate(tests):
            exp_out += ('[%d/%d] shard_test.ShardTest.test_%s passed\n' %
                        (i + 1, total_tests, test))
        exp_out += '%d test%s passed, 0 skipped, 0 failures.\n' % (
            total_tests, "" if total_tests == 1 else "s")
        self.assertEqual(out, exp_out)

    run(0, 1, ['01', '02', '03', '04', '05'])
    run(0, 2, ['01', '03', '05'])
    run(1, 2, ['02', '04'])
    run(0, 6, ['01'])
def test_subdir(self):
    # Tests inside nested packages are found and named by dotted path.
    files = {
        'foo/__init__.py': '',
        'foo/bar/__init__.py': '',
        'foo/bar/pass_test.py': PASS_TEST_PY
    }
    self.check(['foo/bar'], files=files, ret=0, err='',
               out=d("""\
                   [1/1] foo.bar.pass_test.PassingTest.test_pass passed
                   1 test passed, 0 skipped, 0 failures.
                   """))
def test_timing(self):
    # -t appends per-test and total wall-clock times to the output.
    self.check(['-t'], files=PASS_TEST_FILES, ret=0, err='',
               rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed '
                     r'\d+.\d+s\n'
                     r'1 test passed in \d+.\d+s, 0 skipped, 0 failures.'))
def test_test_results_server(self):
    # Results are POSTed to the configured results server; verify the
    # single upload's payload against a fake server.
    # TODO(crbug.com/1217853) Figure out why this isn't working under
    # py3 (and/or possibly running in parallel on mac).
    if sys.platform in ('darwin', 'win32'):
        return
    server = test_result_server_fake.start()
    self.assertNotEqual(server, None, 'could not start fake server')
    try:
        self.check(['--test-results-server',
                    'http://%s:%d' % server.server_address,
                    '--master-name', 'fake_master',
                    '--builder-name', 'fake_builder',
                    '--test-type', 'typ_tests',
                    '--metadata', 'foo=bar'],
                   files=PASS_TEST_FILES, ret=0, err='',
                   out=('[1/1] pass_test.PassingTest.test_pass passed\n'
                        '1 test passed, 0 skipped, 0 failures.\n'))
    finally:
        posts = server.stop()
    self.assertEqual(len(posts), 1)
    payload = posts[0][2].decode('utf8')
    self.assertIn('"test_pass": {"expected": "PASS", "actual": "PASS"',
                  payload)
    self.assertTrue(payload.endswith('--\r\n'))
    self.assertNotEqual(server.log.getvalue(), '')
# TODO(crbug.com/1032848) The typ unit tests hang whenever they run on
# mac with multiple processes. We need to investigate the root cause
# and fix it.
def disabled_test_test_results_server_error(self):
    # A 5xx from the results server makes the run fail but still report
    # the test outcome; disabled pending crbug.com/1032848.
    server = test_result_server_fake.start(code=500)
    self.assertNotEqual(server, None, 'could not start fake server')
    try:
        self.check(['--test-results-server',
                    'http://%s:%d' % server.server_address,
                    '--master-name', 'fake_master',
                    '--builder-name', 'fake_builder',
                    '--test-type', 'typ_tests',
                    '--metadata', 'foo=bar'],
                   files=PASS_TEST_FILES, ret=1, err='',
                   out=('[1/1] pass_test.PassingTest.test_pass passed\n'
                        '1 test passed, 0 skipped, 0 failures.\n'
                        'Uploading the JSON results raised '
                        '"HTTP Error 500: Internal Server Error"\n'))
    finally:
        _ = server.stop()
# TODO(crbug.com/1032848) The typ unit tests hang whenever they run on
# mac with multiple processes. We need to investigate the root cause
# and fix it.
def disabled_test_test_results_server_not_running(self):
    # An unreachable results server fails the run after the tests pass;
    # disabled pending crbug.com/1032848.
    self.check(['--test-results-server', 'http://localhost:99999',
                '--master-name', 'fake_master',
                '--builder-name', 'fake_builder',
                '--test-type', 'typ_tests',
                '--metadata', 'foo=bar'],
               files=PASS_TEST_FILES, ret=1, err='',
               rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed\n'
                     '1 test passed, 0 skipped, 0 failures.\n'
                     'Uploading the JSON results raised .*\n'))
def test_unexpected_skip(self):
    # A test expected to Pass that gets skipped is an unexpected skip.
    files = {
        'expectations.txt':
        '# results: [ Pass ]\ncrbug.com/23456 skip_test.SkipSetup.test_notrun [ Pass ]\n',
        'skip_test.py': SF_TEST_PY
    }
    _, out, _, _ = self.check(['-X', 'expectations.txt',
                               'skip_test.SkipSetup.test_notrun'],
                              files=files, ret=1, err='')
    self.assertIn('skip_test.SkipSetup.test_notrun was skipped unexpectedly'
                  ,out)
def test_retry_only_retry_on_failure_tests(self):
    # Without a RetryOnFailure expectation, --retry-only-retry-on-failure-tests
    # prevents any retries, so the flaky test stays failed.
    files = {'flaky_test.py': FLAKY_TEST_PY}
    _, out, _, files = self.check(['--write-full-results-to',
                                   'full_results.json',
                                   '--retry-limit','6',
                                   '--retry-only-retry-on-failure-tests'],
                                  files=files, ret=1, err='')
    self.assertIn('[1/1] flaky_test.FlakyTest.test_flaky failed unexpectedly:\n',
                  out)
    self.assertNotIn('[1/1] flaky_test.FlakyTest.test_flaky passed\n',
                     out)
    self.assertIn('0 tests passed, 0 skipped, 1 failure.\n', out)
    results = json.loads(files['full_results.json'])
    results = results['tests']['flaky_test']['FlakyTest']['test_flaky']
    self.assertEqual(results['actual'],'FAIL')
    self.assertEqual(results['expected'],'PASS')
    self.assertIn('is_unexpected', results)
    self.assertIn('is_regression', results)
def test_retryonfailure_test_fails(self):
    """A RetryOnFailure test that always fails is retried and reported."""
    expectations = d("""\
        # tags: [ Foo ]
        # tags: [ Bar ]
        # results: [ RetryOnFailure ]
        crbug.com/12345 [ foo bar ] test_fail [ RetryOnFailure ]
        """)
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '-X', 'expectations.txt',
         '-x', 'Foo',
         '-x', 'Bar',
         '--retry-limit', '3',
         '--retry-only-retry-on-failure-tests',
         '--test-name-prefix', 'fail_test.FailingTest.'],
        files={'fail_test.py': FAIL_TEST_PY, 'expectations.txt': expectations},
        ret=1, err='')
    self.assertIn('[1/1] test_fail failed unexpectedly:\n', out)
    self.assertIn('0 tests passed, 0 skipped, 1 failure.\n', out)
    result = json.loads(out_files['full_results.json'])['tests']['test_fail']
    # One initial run plus three retries, all failing.
    self.assertEqual(result['actual'], 'FAIL FAIL FAIL FAIL')
    self.assertEqual(result['expected'], 'PASS')
    self.assertIn('is_unexpected', result)
    self.assertIn('is_regression', result)
def test_retryonfailure_test_passes(self):
    """A RetryOnFailure test that eventually passes is not a regression."""
    expectations = d("""\
        # tags: [ foo bar ]
        # results: [ RetryOnFailure ]
        crbug.com/12345 [ foo ] flaky_test.FlakyTest.test_flaky [ RetryOnFailure ]
        """)
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '-X', 'expectations.txt',
         '-x', 'foo',
         '--retry-limit', '6',
         '--retry-only-retry-on-failure-tests'],
        files={'flaky_test.py': FLAKY_TEST_PY,
               'expectations.txt': expectations},
        ret=0, err='')
    self.assertIn(
        '[1/1] flaky_test.FlakyTest.test_flaky failed unexpectedly:\n', out)
    self.assertIn('[1/1] flaky_test.FlakyTest.test_flaky passed\n', out)
    self.assertIn('1 test passed, 0 skipped, 0 failures.\n', out)
    result = json.loads(out_files['full_results.json'])
    result = result['tests']['flaky_test']['FlakyTest']['test_flaky']
    self.assertEqual(result['actual'], 'FAIL FAIL FAIL PASS')
    self.assertEqual(result['expected'], 'PASS')
    self.assertNotIn('is_unexpected', result)
    self.assertNotIn('is_regression', result)
def test_retryonfailure_test_fails_no_regression(self):
    """An expected failure with RetryOnFailure is retried, not a regression.

    The run exits 0 because the failure matched the Failure expectation.
    """
    expectations = d("""\
        # tags: [ foo bar ]
        # results: [ RetryOnFailure Failure ]
        crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ RetryOnFailure Failure ]
        """)
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '-X', 'expectations.txt',
         '-x', 'foo',
         '-x', 'bar',
         '--retry-limit', '3',
         '--retry-only-retry-on-failure-tests', '-vv'],
        files={'fail_test.py': FAIL_TEST_PY, 'expectations.txt': expectations},
        ret=0, err='')
    self.assertIn(
        '[1/1] fail_test.FailingTest.test_fail failed as expected:\n', out)
    self.assertIn('0 tests passed, 0 skipped, 1 failure.\n', out)
    results = json.loads(out_files['full_results.json'])
    test_results = results['tests']['fail_test']['FailingTest']['test_fail']
    self.assertEqual(test_results['actual'], 'FAIL FAIL FAIL FAIL')
    self.assertEqual(test_results['expected'], 'FAIL')
    self.assertEqual(results['metadata']['expectations_files'],
                     ['expectations.txt'])
    # Bug fix: the original asserted against the top-level `results` dict,
    # which never contains these keys, making the checks vacuous. The
    # per-test entry is what must not be flagged as unexpected/regression.
    self.assertNotIn('is_unexpected', test_results)
    self.assertNotIn('is_regression', test_results)
    self.assertEqual(results['metadata']['tags'], ['foo', 'bar'])
def test_skip_test_with_expectations_file_skip_expectation(self):
    """A Skip expectation causes the test to be skipped as expected."""
    expectations = d("""\
        # tags: [ foo bar
        # bat
        # ]
        # results: [ Skip ]
        crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Skip ]
        """)
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '-X', 'expectations.txt',
         '-x', 'foo'],
        files={'fail_test.py': FAIL_TEST_PY, 'expectations.txt': expectations},
        ret=0, err='')
    self.assertIn('[1/1] fail_test.FailingTest.test_fail was skipped\n', out)
    self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
    result = json.loads(out_files['full_results.json'])
    result = result['tests']['fail_test']['FailingTest']['test_fail']
    self.assertEqual(result['actual'], 'SKIP')
    self.assertEqual(result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', result)
    self.assertNotIn('is_regression', result)
def test_skip_test_with_expectations_file_skip_with_cmd_args(self):
    """--skip overrides a Pass expectation and yields an expected skip."""
    expectations = d("""\
        # tags: [ foo bar ]
        # results: [ Pass ]
        crbug.com/12345 [ foo ] pass_test.PassingTest.test_pass [ Pass ]
        """)
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '-X', 'expectations.txt',
         '-x', 'foo',
         '--skip', '*test_pass'],
        files={'pass_test.py': PASS_TEST_PY, 'expectations.txt': expectations},
        ret=0, err='')
    self.assertIn('[1/1] pass_test.PassingTest.test_pass was skipped\n', out)
    self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
    result = json.loads(out_files['full_results.json'])
    result = result['tests']['pass_test']['PassingTest']['test_pass']
    self.assertEqual(result['actual'], 'SKIP')
    self.assertEqual(result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', result)
    self.assertNotIn('is_regression', result)
def test_skip_test_no_expectations_file(self):
    """A test that skips itself is an expected skip without expectations."""
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json'],
        files={'skip_test.py': SKIP_TEST_PY}, ret=0, err='')
    self.assertIn('[1/1] skip_test.SkipTest.test_skip was skipped\n', out)
    self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
    result = json.loads(out_files['full_results.json'])
    result = result['tests']['skip_test']['SkipTest']['test_skip']
    self.assertEqual(result['actual'], 'SKIP')
    self.assertEqual(result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', result)
    self.assertNotIn('is_regression', result)
def test_skip_test_no_expectations_file_cmd_args_skip(self):
    """--skip with no expectations file yields an expected skip."""
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '--skip', '*test_pass'],
        files=PASS_TEST_FILES, ret=0, err='')
    self.assertIn('[1/1] pass_test.PassingTest.test_pass was skipped\n', out)
    self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
    result = json.loads(out_files['full_results.json'])
    result = result['tests']['pass_test']['PassingTest']['test_pass']
    self.assertEqual(result['actual'], 'SKIP')
    self.assertEqual(result['expected'], 'SKIP')
    self.assertNotIn('is_unexpected', result)
    self.assertNotIn('is_regression', result)
def test_relative_paths_used_for_expectations_files_in_metadata(self):
    """Expectation file paths in metadata are relative to the repository."""
    test_expectations = (
        '# tags: [ foo bar ]\n'
        '# results: [ Failure ]\n'
        'crbug.com/12345 [ foo ] test_dir.failing_test.FailingTest.test_fail '
        '[ Failure ]\n')
    test_files = {
        'src/test_dir/failing_test.py': FAIL_TEST_PY,
        'src/test_dir/test_expectations/test_expectations.txt':
            test_expectations,
        'src/test_dir/__init__.py': '',
    }
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json', 'src/test_dir',
         '-X', 'src/test_dir/test_expectations/test_expectations.txt',
         '-x', 'foo', '--repository-absolute-path', 'src/'],
        files=test_files, ret=0, err='')
    self.assertIn(' test_dir.failing_test.FailingTest.test_fail failed', out)
    results = json.loads(out_files['full_results.json'])
    expected_path = '/'.join(
        ['', '', 'test_dir', 'test_expectations', 'test_expectations.txt'])
    self.assertEqual([expected_path],
                     results['metadata']['expectations_files'])
def test_implement_test_name_prefix_exclusion_in_finished_test_output(self):
    """The test-name prefix is stripped from finished-test output lines."""
    _, out, _, _ = self.check(
        ['--write-full-results-to', 'full_results.json',
         '--test-name-prefix', 'pass_test.PassingTest.'],
        files=PASS_TEST_FILES, ret=0, err='')
    self.assertIn('[1/1] test_pass passed\n', out)
def test_implement_test_name_prefix_exclusion_in_test_filter(self):
    """--test-filter globs match names with the prefix stripped."""
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '--test-name-prefix', 'output_test.',
         '--test-filter', '*test_out'],
        files=OUTPUT_TEST_FILES, ret=0, err='')
    results = json.loads(out_files['full_results.json'])
    self.assertEqual(len(results['tests']), 1)
    self.assertIn('[1/1] PassTest.test_out passed\n', out)
def test_implement_test_name_prefix_exclusion_in_expectations_files(self):
    """Expectation lines match test names with the prefix stripped."""
    expectations = d("""\
        # tags: [ foo bar ]
        # results: [ Failure ]
        crbug.com/12345 [ foo ] test_fail [ Failure ]
        """)
    _, out, _, _ = self.check(
        ['--write-full-results-to', 'full_results.json',
         '--test-name-prefix', 'fail_test.FailingTest.',
         '-X', 'expectations.txt', '-x', 'foo', '-vv'],
        files={'fail_test.py': FAIL_TEST_PY, 'expectations.txt': expectations},
        ret=0, err='')
    self.assertIn('[1/1] test_fail failed as expected:\n', out)
def test_implement_test_name_prefix_exclusion_in_skip_glob(self):
    """--skip globs match names with the prefix stripped."""
    _, out, _, _ = self.check(
        ['--test-name-prefix', 'fail_test.FailingTest.', '--skip', 'test_*'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=0, err='')
    self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
def test_implement_test_name_prefix_exclusion_in_json_results(self):
    """JSON results use prefix-stripped names and record the prefix."""
    _, _, _, out_files = self.check(
        ['--test-name-prefix', 'fail_test.FailingTest.',
         '--write-full-results-to', 'full_results.json'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=1, err='')
    results = json.loads(out_files['full_results.json'])
    self.assertEqual(results['tests']['test_fail']['actual'], 'FAIL')
    # The test_name_prefix key value pair must also be in the JSON results.
    self.assertEqual(results['metadata']['test_name_prefix'],
                     'fail_test.FailingTest.')
def test_implement_test_name_prefix_exclusion_in_trace_results(self):
    """Trace events use prefix-stripped names and record the prefix."""
    _, _, _, out_files = self.check(
        ['--test-name-prefix', 'fail_test.FailingTest.',
         '--write-trace-to', 'full_trace.json'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=1, err='')
    trace = json.loads(out_files['full_trace.json'])
    self.assertEqual(trace['traceEvents'][0]['name'], 'test_fail')
    # The test_name_prefix key value pair must also be in the trace output.
    self.assertEqual(trace['otherData']['test_name_prefix'],
                     'fail_test.FailingTest.')
def test_test_name_prefix_is_optional_field_in_json_results(self):
    """test_name_prefix is omitted from JSON results when not passed."""
    _, _, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=1, err='')
    results = json.loads(out_files['full_results.json'])
    self.assertNotIn('test_name_prefix', results)
def test_implement_test_name_prefix_exclusion_for_tests_args(self):
    """Positional test arguments are interpreted with the prefix stripped."""
    _, out, _, _ = self.check(
        ['test_fail',
         '--write-full-results-to', 'full_results.json',
         '--test-name-prefix', 'fail_test.FailingTest.'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=1, err='')
    self.assertIn('0 tests passed, 0 skipped, 1 failure.', out)
def test_implement_test_name_prefix_exclusion_for_file_list_arg(self):
    """Names in a -f test-list file are interpreted with the prefix stripped."""
    test_files = {'fail_test.py': FAIL_TEST_PY,
                  'test_list.txt': 'test_fail\n'}
    _, out, _, _ = self.check(
        ['--write-full-results-to', 'full_results.json',
         '--test-name-prefix', 'fail_test.FailingTest.',
         '-f', 'test_list.txt'],
        files=test_files, ret=1, err='')
    self.assertIn('0 tests passed, 0 skipped, 1 failure.', out)
def test_artifacts_added_for_retries(self):
    """Artifacts produced by retries are stored in per-retry subdirectories."""
    _, _, _, out_files = self.check(
        ['--test-name-prefix', 'artifacts_test.ArtifactTest.',
         '--write-full-results-to', 'full_results.json', '--retry-limit=1'],
        files={'artifacts_test.py': ARTIFACTS_TEST_PY}, ret=1, err='')
    results = json.loads(out_files['full_results.json'])
    artifacts = (
        results['tests']['test_produce_artifact_for_retries']['artifacts'])
    first_run_path = os.path.join('test_produce_artifact_for_retries',
                                  'test.txt')
    retry_path = os.path.join('test_produce_artifact_for_retries',
                              'retry_1', 'test.txt')
    self.assertEqual(artifacts['artifact_name'],
                     [first_run_path, retry_path])
    self.assertIn(os.path.join('artifacts', first_run_path), out_files)
    self.assertIn(os.path.join('artifacts', retry_path), out_files)
def test_matches_partial_filter(self):
    """--partial-match-filter selects tests whose names contain the string."""
    _, out, _, _ = self.check(
        ['--test-name-prefix', 'output_test.',
         '--partial-match-filter', 'PassTest'],
        files=OUTPUT_TEST_FILES, ret=0, err='')
    self.assertIn('2 tests passed, 0 skipped, 0 failures.', out)
def test_test_prefix_exclusion_in_partial_filter_match(self):
    """Partial-match filtering applies after the prefix is stripped."""
    _, out, _, _ = self.check(
        ['--test-name-prefix', 'output_test.',
         '--partial-match-filter', 'output_test.'],
        files=OUTPUT_TEST_FILES, ret=0, err='')
    # The prefix no longer appears in any test name, so nothing matches.
    self.assertIn('0 tests passed, 0 skipped, 0 failures.', out)
def test_implement_test_name_prefix_exclusion_in_test_started_output(self):
    """Queued/started output lines use prefix-stripped test names."""
    _, out, _, _ = self.check(
        ['--test-name-prefix', 'fail_test.FailingTest.', '-vvv',
         '--overwrite'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=1, err='')
    self.assertIn('[0/1] test_fail queued\n', out)
    self.assertIn('[0/1] test_fail\n', out)
def test_implement_test_name_prefix_exclusion_in_list_only_arg(self):
    """--list-only prints prefix-stripped test names."""
    _, out, _, _ = self.check(
        ['--test-name-prefix', 'fail_test.FailingTest.', '--list-only'],
        files={'fail_test.py': FAIL_TEST_PY}, ret=0, err='')
    self.assertIn('test_fail', out)
def test_verbose_2(self):
    """-vv includes captured stdout/stderr for passing tests."""
    expected_out = d("""\
        [1/2] output_test.PassTest.test_err passed:
        hello on stderr
        [2/2] output_test.PassTest.test_out passed:
        hello on stdout
        2 tests passed, 0 skipped, 0 failures.
        """)
    self.check(['-vv', '-j', '1', 'output_test.PassTest'],
               files=OUTPUT_TEST_FILES, ret=0, out=expected_out, err='')
def test_verbose_3(self):
    """-vvv additionally reports each test as it is queued."""
    expected_out = d("""\
        [0/2] output_test.PassTest.test_err queued
        [1/2] output_test.PassTest.test_err passed:
        hello on stderr
        [1/2] output_test.PassTest.test_out queued
        [2/2] output_test.PassTest.test_out passed:
        hello on stdout
        2 tests passed, 0 skipped, 0 failures.
        """)
    self.check(['-vvv', '-j', '1', 'output_test.PassTest'],
               files=OUTPUT_TEST_FILES, ret=0, out=expected_out, err='')
def test_version(self):
    """--version prints the typ version string followed by a newline."""
    expected = VERSION + '\n'
    self.check('--version', ret=0, out=expected)
def test_write_full_results_to(self):
    """--write-full-results-to produces a well-formed results JSON file."""
    _, _, _, out_files = self.check(
        ['--write-full-results-to', 'results.json'], files=PASS_TEST_FILES)
    self.assertIn('results.json', out_files)
    results = json.loads(out_files['results.json'])
    self.assertEqual(results['interrupted'], False)
    self.assertEqual(results['path_delimiter'], '.')
    # The time it takes to run the test varies, so check that we got a
    # single entry greater than zero, then delete it so we can do an
    # exact match on the rest of the trie.
    result = results['tests']['pass_test']['PassingTest']['test_pass']
    self.assertEqual(len(result['times']), 1)
    self.assertGreater(result['times'][0], 0)
    result.pop('times')
    expected_trie = {
        u'pass_test': {
            u'PassingTest': {
                u'test_pass': {
                    u'actual': u'PASS',
                    u'expected': u'PASS',
                },
            },
        },
    }
    self.assertEqual(expected_trie, results['tests'])
def test_write_trace_to(self):
    """--write-trace-to produces a Chrome-trace-format JSON file."""
    _, _, _, out_files = self.check(['--write-trace-to', 'trace.json'],
                                    files=PASS_TEST_FILES)
    self.assertIn('trace.json', out_files)
    trace_obj = json.loads(out_files['trace.json'])
    self.assertEqual(trace_obj['otherData'], {})
    self.assertEqual(len(trace_obj['traceEvents']), 5)
    first_event = trace_obj['traceEvents'][0]
    self.assertEqual(first_event['name'], 'pass_test.PassingTest.test_pass')
    self.assertEqual(first_event['ph'], 'X')
    self.assertEqual(first_event['tid'], 1)
    self.assertEqual(first_event['args']['expected'], ['PASS'])
    self.assertEqual(first_event['args']['actual'], 'PASS')
def test_expected_failure_does_not_get_retried(self):
    """A test that fails as expected is not retried despite --retry-limit."""
    expectations = d("""\
        # tags: [ foo bar
        # bat
        # ]
        # results: [ Failure ]
        crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
        """)
    _, out, _, out_files = self.check(
        ['--write-full-results-to', 'full_results.json',
         '--retry-limit', '3',
         '-X', 'expectations.txt',
         '-x', 'foo'],
        files={'fail_test.py': FAIL_TEST_PY, 'expectations.txt': expectations},
        ret=0, err='')
    results = json.loads(out_files['full_results.json'])
    result = results['tests']['fail_test']['FailingTest']['test_fail']
    self.assertIn('test_fail failed as expected', out)
    self.assertIn('0 tests passed, 0 skipped, 1 failure.', out)
    self.assertNotIn('Retrying failed tests', out)
    self.assertEqual(result['expected'], 'FAIL')
    self.assertEqual(result['actual'], 'FAIL')
    self.assertNotIn('is_unexpected', result)
class TestMain(TestCli):
    """Runs the TestCli suite in-process through main() instead of a subprocess."""

    prog = []

    def make_host(self):
        return Host()

    def call(self, host, argv, stdin, env):
        """Invokes main() with captured I/O and returns (ret, out, err)."""
        host.stdin = io.StringIO(unicode(stdin))
        if env:
            host.getenv = env.get
        host.capture_output()
        saved_sys_path = sys.path[:]
        saved_modules = list(sys.modules.keys())
        try:
            ret = main(argv + ['-j', '1'], host)
        finally:
            out, err = host.restore_output()
            # Unload any modules the run imported so later tests start clean,
            # and restore sys.path in case the run modified it.
            newly_loaded = [name for name in sys.modules
                            if name not in saved_modules]
            for name in newly_loaded:
                del sys.modules[name]
            sys.path = saved_sys_path
        return ret, out, err

    def test_debugger(self):
        # TODO: this test seems to hang under coverage.
        pass
| |
"""ConversationEvent base class and subclasses.
These classes are wrappers for ClientEvent instances from the API. Parsing is
done through property methods, which prefer logging warnings to raising
exceptions.
"""
import logging
import re
from hangups import parsers, message_parser, user, schemas
logger = logging.getLogger(__name__)
chat_message_parser = message_parser.ChatMessageParser()
class ConversationEvent(object):

    """An event which becomes part of the permanent record of a conversation.

    This corresponds to ClientEvent in the API, and is the base class for
    more specific event types.
    """

    def __init__(self, client_event):
        # The wrapped ClientEvent instance from the API.
        self._event = client_event

    @property
    def timestamp(self):
        """A timestamp of when the event occurred."""
        return parsers.from_timestamp(self._event.timestamp)

    @property
    def user_id(self):
        """A UserID indicating who created the event."""
        sender = self._event.sender_id
        return user.UserID(chat_id=sender.chat_id, gaia_id=sender.gaia_id)

    @property
    def conversation_id(self):
        """The ID of the conversation the event belongs to."""
        return self._event.conversation_id.id_

    @property
    def id_(self):
        """The ID of the ConversationEvent."""
        return self._event.event_id
class ChatMessageSegment(object):

    """A segment of a chat message."""

    def __init__(self, text, segment_type=None,
                 is_bold=False, is_italic=False, is_strikethrough=False,
                 is_underline=False, link_target=None):
        """Create a new chat message segment."""
        # When no explicit type is given, infer it: a segment with a link
        # target is a LINK, otherwise it's plain TEXT.
        if segment_type is not None:
            self.type_ = segment_type
        elif link_target is not None:
            self.type_ = schemas.SegmentType.LINK
        else:
            self.type_ = schemas.SegmentType.TEXT
        self.text = text
        self.is_bold = is_bold
        self.is_italic = is_italic
        self.is_strikethrough = is_strikethrough
        self.is_underline = is_underline
        self.link_target = link_target

    @staticmethod
    def from_str(text):
        """Generate ChatMessageSegment list parsed from a string.

        This method handles automatically finding line breaks, URLs and
        parsing simple formatting markup (simplified Markdown and HTML).
        """
        return [ChatMessageSegment(parsed.text, **parsed.params)
                for parsed in chat_message_parser.parse(text)]

    @staticmethod
    def deserialize(segment):
        """Create a chat message segment from a parsed MESSAGE_SEGMENT."""
        # The formatting and link_data fields are both optional.
        if segment.formatting is None:
            styles = dict(is_bold=False, is_italic=False,
                          is_strikethrough=False, is_underline=False)
        else:
            styles = dict(
                is_bold=bool(segment.formatting.bold),
                is_italic=bool(segment.formatting.italic),
                is_strikethrough=bool(segment.formatting.strikethrough),
                is_underline=bool(segment.formatting.underline))
        if segment.link_data is None:
            link_target = None
        else:
            link_target = segment.link_data.link_target
        return ChatMessageSegment(segment.text, segment_type=segment.type_,
                                  link_target=link_target, **styles)

    def serialize(self):
        """Serialize the segment to pblite."""
        style_flags = [1 if flag else 0 for flag in
                       (self.is_bold, self.is_italic,
                        self.is_strikethrough, self.is_underline)]
        return [self.type_.value, self.text, style_flags, [self.link_target]]
class ChatMessageEvent(ConversationEvent):

    """An event containing a chat message.

    Corresponds to ClientChatMessage in the API.
    """

    @property
    def text(self):
        """A textual representation of the message."""
        lines = ['']
        for segment in self.segments:
            if segment.type_ == schemas.SegmentType.LINE_BREAK:
                lines.append('')
            elif segment.type_ in (schemas.SegmentType.TEXT,
                                   schemas.SegmentType.LINK):
                lines[-1] += segment.text
            else:
                logger.warning('Ignoring unknown chat message segment type: {}'
                               .format(segment.type_))
        lines.extend(self.attachments)
        return '\n'.join(lines)

    @property
    def segments(self):
        """List of ChatMessageSegments in the message."""
        raw_segments = self._event.chat_message.message_content.segment
        # The field is optional, so it may be None instead of a list.
        if raw_segments is None:
            return []
        return [ChatMessageSegment.deserialize(raw) for raw in raw_segments]

    @property
    def attachments(self):
        """Attachments in the message."""
        raw_attachments = self._event.chat_message.message_content.attachment
        if raw_attachments is None:
            raw_attachments = []
        attachments = []
        for attachment in raw_attachments:
            if attachment.embed_item.type_ == [249]:  # PLUS_PHOTO
                # Try to parse an image message. Image messages contain no
                # message segments, and thus have no automatic textual
                # fallback.
                try:
                    embed_data = attachment.embed_item.data
                    if embed_data is None:
                        # Some clients (iOS) serialize the fields in a
                        # different byte-stream order; fall back to data_.
                        embed_data = attachment.embed_item.data_
                    attachments.append(embed_data['27639957'][0][3])
                except (KeyError, TypeError, IndexError):
                    logger.warning(
                        'Failed to parse PLUS_PHOTO attachment: {}'
                        .format(attachment))
            elif attachment.embed_item.type_ == [340, 335, 0]:
                pass  # Google Maps URL that's already in the text.
            else:
                logger.warning('Ignoring unknown chat message attachment: {}'
                               .format(attachment))
        return attachments
class RenameEvent(ConversationEvent):

    """An event that renames a conversation.

    Corresponds to ClientConversationRename in the API.
    """

    @property
    def old_name(self):
        """The conversation's old name.

        An empty string if the conversation had no previous name.
        """
        return self._event.conversation_rename.old_name

    @property
    def new_name(self):
        """The conversation's new name.

        An empty string if the conversation's name was cleared.
        """
        return self._event.conversation_rename.new_name
class MembershipChangeEvent(ConversationEvent):

    """An event that adds or removes a conversation participant.

    Corresponds to ClientMembershipChange in the API.
    """

    @property
    def type_(self):
        """The membership change type (MembershipChangeType)."""
        return self._event.membership_change.type_

    @property
    def participant_ids(self):
        """Return the UserIDs involved in the membership change.

        Multiple users may be added to a conversation at the same time.
        """
        ids = []
        for participant_id in self._event.membership_change.participant_ids:
            ids.append(user.UserID(chat_id=participant_id.chat_id,
                                   gaia_id=participant_id.gaia_id))
        return ids
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2015 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
try:
import telescope.telescope.utils as telescope_utils
except ImportError:
raise Exception(('Could not find Telescope library. '
'Please verify all submodules are checked out.'))
import site_metadata
import telescope_data_parser
def _ensure_dir_exists(dir_path):
"""Ensures that a given directory path exists (creating it if necessary).
Creates a directory path for a given file path if the directory path does
not already exist. For example, if dir_path='foo/bar/baz/' and only
directory 'foo' exists, this function will create 'foo/bar/baz'.
Args:
dir_path: (str) Directory path to create.
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def _generate_output_path(group_key, output_dir, output_type):
    """Generates the output path for an output file.

    Generates the output path (including output directory and filename),
    given a group key and type of output data to be written to the file.

    Args:
        group_key: (str) The key that identifies this dataset.
        output_dir: (str) The directory to which this file will be written.
        output_type: (str) The type of data to be written (either 'daily' or
            'hourly').

    Returns:
        (str) A generated path for the output file (stripped of illegal
        filename characters).
    """
    raw_filename = '%s_%s.csv' % (group_key, output_type)
    safe_filename = telescope_utils.strip_special_chars(raw_filename)
    return os.path.join(output_dir, safe_filename)
def _write_valid_keys_file(valid_keys, valid_keys_file):
"""Writes the valid result group keys to a file.
Writes the valid keys file, indicating the keys for which we generated
output data. The keys are written in plaintext with one key per line in
alphabetically sorted order.
Args:
valid_keys: (list) A list of strings indicating the valid keys.
valid_keys_file: (file) File to which to write the keys.
"""
keys_sorted = sorted(valid_keys)
valid_keys_file.write(os.linesep.join(keys_sorted))
class ResultConverter(object):
    """Converts Telescope data into Observatory format."""

    def __init__(self, result_grouper, result_reducer, observatory_file_writer,
                 output_dir, valid_keys_path):
        """Creates a converter from Telescope data to Observatory data.

        Args:
            result_grouper: Result grouper, which groups Telescope results
                according to their metadata.
            result_reducer: Result reducer, which reduces sets of raw results
                into aggregate values compatible with Observatory.
            observatory_file_writer: File writer to write processed results
                into a file format that Observatory can read from.
            output_dir: (str) The directory to which to write converted
                results.
            valid_keys_path: (str) The file path to which to write the valid
                group keys created during the convert operation.
        """
        self._logger = logging.getLogger('telescope-convert')
        self._result_grouper = result_grouper
        self._result_reducer = result_reducer
        self._observatory_file_writer = observatory_file_writer
        self._output_dir = output_dir
        self._valid_keys_path = valid_keys_path

    def convert_to_observatory_format(self, input_filenames):
        """Converts a list of files in Telescope format into Observatory format.

        Parses a list of files output from Telescope and converts them to files
        that Observatory can read, placing the results into self._output_dir.

        Args:
            input_filenames: (list) A list of files created by Telescope.
        """
        result_readers = [
            telescope_data_parser.SingleTelescopeResultReader(filename)
            for filename in input_filenames]
        result_groups = self._result_grouper.group_results(result_readers)
        self._convert_result_groups(result_groups)

    def _convert_result_groups(self, result_groups):
        """Converts Telescope result groups into Observatory format.

        Args:
            result_groups: (dict) A dictionary of raw Telescope results, keyed
                by group key, then by metric name, for example:
                {
                    'lga01_comcast': {
                        'download_throughput': [
                            (<datetime-2014-10-22@12:35:01>, 24.5),
                            (<datetime-2014-10-01@04:42:23>, 14.3),
                            ...
                        ],
                        'upload_throughput': ...,
                    },
                    'sea01_verizon': {...},
                    ...
                }
        """
        group_keys = sorted(result_groups.keys())
        for index, key in enumerate(group_keys):
            self._logger.info('Converting result group %s (%u/%u)', key,
                              index + 1, len(group_keys))
            # The first three characters of the group key name the metro
            # (e.g. 'lga01_comcast' -> 'lga').
            metro = key[:3]
            result_group_local = self._adjust_result_group_timezone(
                metro, result_groups[key])
            self._convert_result_group_by_day(key, result_group_local)
            self._convert_result_group_by_hour(key, result_group_local)
        _ensure_dir_exists(os.path.dirname(self._valid_keys_path))
        with open(self._valid_keys_path, 'w') as valid_keys_file:
            _write_valid_keys_file(group_keys, valid_keys_file)

    def _adjust_result_group_timezone(self, metro, metric_results):
        """Converts the timestamps on a result group to local time.

        Given a result group associated with a particular metro, creates a new
        result group where all timestamps are local to the given metro.

        Args:
            metro: (str) Name of a metropolitan region associated with these
                results (e.g. 'lga' or 'lax').
            metric_results: (dict) A dictionary of raw Telescope results, keyed
                by metric name, where each value is a list of
                (timestamp, value) pairs.

        Returns:
            (dict) A dictionary in the same form as metric_results, but with
            the timestamps converted to the local timezone.
        """
        timezone = site_metadata.get_metro_timezone(metro)
        metric_results_local = {}
        # Bug fix: use items() instead of the Python 2-only iteritems() so
        # the module also runs under Python 3 (identical behavior on both).
        for metric, values in metric_results.items():
            metric_results_local[metric] = [
                (timestamp_utc.astimezone(timezone), value)
                for timestamp_utc, value in values]
        return metric_results_local

    def _convert_result_group_by_day(self, group_key, metric_results):
        """Converts one result group to a daily-aggregated Observatory file."""
        self._convert_result_group(
            group_key, metric_results, 'daily',
            self._result_reducer.reduce_by_day,
            self._observatory_file_writer.write_daily_datafile)

    def _convert_result_group_by_hour(self, group_key, metric_results):
        """Converts one result group to an hourly-aggregated Observatory file."""
        self._convert_result_group(
            group_key, metric_results, 'hourly',
            self._result_reducer.reduce_by_hour_of_day_per_month,
            self._observatory_file_writer.write_hourly_datafile)

    def _convert_result_group(self, group_key, metric_results, output_type,
                              reducer_func, writer_func):
        """Converts a group of Telescope results into Observatory files.

        Args:
            group_key: (str) The key that identifies this result group (e.g.
                lga01_comcast).
            metric_results: (dict) A dictionary of raw Telescope results,
                keyed by metric name, where each value is a list of
                (timestamp, value) pairs.
            output_type: (str) The type of data to be written (either 'daily'
                or 'hourly').
            reducer_func: (function) Function to reduce sets of raw results
                into aggregate metrics that Observatory can display.
            writer_func: (function) Function to write results to an
                Observatory-compatible file.
        """
        results_reduced = reducer_func(metric_results)
        _ensure_dir_exists(self._output_dir)
        output_path = _generate_output_path(group_key, self._output_dir,
                                            output_type)
        with open(output_path, 'w') as output_file:
            writer_func(results_reduced, output_file)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks in multi-worker training with TF2."""
import tensorflow.compat.v2 as tf
import json
import os
from absl.testing import parameterized
from keras import callbacks
from keras.distribute import distributed_file_utils
from keras.distribute import multi_worker_testing_utils
def checkpoint_exists(filepath):
    """Returns whether the checkpoint `filepath` refers to exists."""
    # HDF5 checkpoints are single files, so just check for the file itself.
    if filepath.endswith('.h5'):
        return tf.io.gfile.exists(filepath)
    # A TF-format checkpoint is either a SavedModel path or a weights-only
    # checkpoint, whose presence is signalled by a '<filepath>.index' file.
    return (tf.io.gfile.exists(filepath) or
            tf.io.gfile.exists(filepath + '.index'))
def _model_setup(test_obj, file_format):
    """Set up a MNIST Keras model for testing purposes.

    This function builds a MNIST Keras model and returns relevant information
    for testing.

    Args:
        test_obj: The `TestCase` testing object.
        file_format: File format for checkpoints. 'tf' or 'h5'.

    Returns:
        A tuple of (model, saving_filepath, train_ds, steps) where train_ds is
        the training dataset.
    """
    batch_size = 64
    steps = 2
    strategy = tf.distribute.MultiWorkerMirroredStrategy()
    with strategy.scope():
        # TODO(b/142509827): In rare cases this errors out at C++ level with
        # the "Connect failed" error message.
        train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
            batch_size, steps)
        model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
        # Pass saving_filepath from the parent thread to ensure every worker
        # has the same filepath to save.
        saving_filepath = os.path.join(test_obj.get_temp_dir(),
                                       'checkpoint.' + file_format)
        return model, saving_filepath, train_ds, steps
def get_tf_config_task():
  """Return the `task` section of the `TF_CONFIG` environment variable."""
  tf_config = json.loads(os.environ['TF_CONFIG'])
  return tf_config['task']
def get_tf_config_cluster_spec():
  """Return the `cluster` section of the `TF_CONFIG` environment variable."""
  tf_config = json.loads(os.environ['TF_CONFIG'])
  return tf_config['cluster']
def get_task_type():
  """Return this process's task type (e.g. 'worker') from `TF_CONFIG`."""
  # Inlined equivalent of get_tf_config_task()['type'].
  return json.loads(os.environ['TF_CONFIG'])['task']['type']
def get_task_index():
  """Return this process's task index (an int) from `TF_CONFIG`."""
  # Inlined equivalent of get_tf_config_task()['index'].
  return json.loads(os.environ['TF_CONFIG'])['task']['index']
def is_chief():
  """Return True iff this process should act as chief.

  When the cluster has no dedicated 'chief' job, worker 0 takes over the
  chief's duties (e.g. writing checkpoints to the real path).
  """
  tf_config = json.loads(os.environ['TF_CONFIG'])
  cluster_spec = tf_config['cluster']
  task = tf_config['task']
  return ('chief' not in cluster_spec
          and task['type'] == 'worker'
          and task['index'] == 0)
class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
  # Each test spawns a 2-worker cluster via the multi-process runner and runs
  # a `proc_*` closure inside every worker process; assertions therefore
  # execute in each worker, with `is_chief()` distinguishing worker 0.

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'],
          file_format=['h5', 'tf'],
          save_weights_only=[True, False]))
  def test_model_checkpoint_saves_on_chief_but_not_otherwise(
      self, file_format, mode, save_weights_only):
    # Verifies that `ModelCheckpoint` leaves a real checkpoint only on the
    # chief worker; non-chief workers write to a temp path that gets deleted.

    def proc_model_checkpoint_saves_on_chief_but_not_otherwise(
        test_obj, file_format):
      model, saving_filepath, train_ds, steps = _model_setup(
          test_obj, file_format)
      num_epoch = 2
      extension = os.path.splitext(saving_filepath)[1]
      # Incorporate type/index information and thread id in saving_filepath to
      # ensure every worker has a unique path. Note that in normal use case the
      # saving_filepath will be the same for all workers, but we use different
      # ones here just to test out chief saves checkpoint but non-chief doesn't.
      task_config = get_tf_config_task()
      saving_filepath = os.path.join(
          test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' %
          (task_config['type'], task_config['index'], extension))
      # The saving_filepath shouldn't exist at the beginning (as it's unique).
      test_obj.assertFalse(checkpoint_exists(saving_filepath))
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          validation_data=train_ds,
          validation_steps=steps,
          callbacks=[
              callbacks.ModelCheckpoint(
                  filepath=saving_filepath, save_weights_only=save_weights_only)
          ])
      # If it's chief, the model should be saved; if not, the model shouldn't.
      test_obj.assertEqual(checkpoint_exists(saving_filepath), is_chief())
      # If it's chief, the model should be saved (`write_filepath` should
      # simply return `saving_filepath`); if not, i.e. for non-chief workers,
      # the temporary path generated by `write_filepath` should no longer
      # contain the checkpoint that has been deleted.
      test_obj.assertEqual(
          checkpoint_exists(
              distributed_file_utils.write_filepath(
                  saving_filepath, model._distribution_strategy)), is_chief())

    tf.__internal__.distribute.multi_process_runner.run(
        proc_model_checkpoint_saves_on_chief_but_not_otherwise,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self, file_format))

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_model_checkpoint_works_with_same_file_path(self, mode):
    # Verifies `ModelCheckpoint` works when all workers share one filepath
    # (the normal production configuration).

    def proc_model_checkpoint_works_with_same_file_path(
        test_obj, saving_filepath):
      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      num_epoch = 2
      # The saving_filepath shouldn't exist at the beginning (as it's unique).
      test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)])
      test_obj.assertTrue(tf.io.gfile.exists(saving_filepath))

    # Computed in the parent process so every worker receives the same path.
    saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint')
    tf.__internal__.distribute.multi_process_runner.run(
        proc_model_checkpoint_works_with_same_file_path,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self, saving_filepath))

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_backupandrestore_checkpoint_works_with_interruption(self, mode):
    # Simulates a mid-training failure at epoch 2 and verifies that
    # `BackupAndRestore` resumes from the backup (and deletes it when done).

    class InterruptingCallback(callbacks.Callback):
      # Raises partway through training to emulate a worker failure.

      def on_epoch_begin(self, epoch, logs=None):
        if epoch == 2:
          raise RuntimeError('Interrupting!')

    class AssertCallback(callbacks.Callback):

      def on_epoch_begin(self, epoch, logs=None):
        # the interruption happened on epoch 2 as specified in
        # InterruptingCallback, so the initial epoch after restart will begin
        # at 2.
        assert epoch > 1

    def proc_model_checkpoint_works_with_same_file_path(test_obj,
                                                        saving_filepath):
      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      num_epoch = 4
      # The saving_filepath shouldn't exist at the beginning (as it's unique).
      test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
      bar_dir = os.path.join(os.path.dirname(saving_filepath), 'backup')
      try:
        model.fit(
            x=train_ds,
            epochs=num_epoch,
            steps_per_epoch=steps,
            callbacks=[
                callbacks.ModelCheckpoint(filepath=saving_filepath),
                callbacks.BackupAndRestore(backup_dir=bar_dir),
                InterruptingCallback()
            ])
      except RuntimeError as e:
        # Only the synthetic interruption is expected; anything else is a
        # genuine failure and must propagate.
        if 'Interrupting!' not in str(e):
          raise
      # Synchronize workers before inspecting files written by the chief.
      tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
      backup_filepath = os.path.join(bar_dir, 'chief', 'checkpoint')
      test_obj.assertTrue(tf.io.gfile.exists(backup_filepath))
      test_obj.assertTrue(tf.io.gfile.exists(saving_filepath))
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          callbacks=[
              callbacks.ModelCheckpoint(filepath=saving_filepath),
              callbacks.BackupAndRestore(backup_dir=bar_dir),
              AssertCallback()
          ])
      tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
      # After a successful run the backup is cleaned up but the checkpoint
      # remains.
      test_obj.assertFalse(tf.io.gfile.exists(backup_filepath))
      test_obj.assertTrue(tf.io.gfile.exists(saving_filepath))

    saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint')
    tf.__internal__.distribute.multi_process_runner.run(
        proc_model_checkpoint_works_with_same_file_path,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self, saving_filepath))

  @tf.__internal__.distribute.combinations.generate(
      tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_profiler_saves_on_both_chief_and_non_chief(self, mode):
    # Unlike summaries, profiler output is written by every worker.

    def proc_profiler_saves_on_both_chief_and_non_chief(test_obj):
      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      num_epoch = 2
      task_config = get_tf_config_task()
      saving_filepath = os.path.join(
          test_obj.get_temp_dir(),
          'logfile_%s_%d' % (task_config['type'], task_config['index']))
      # The saving_filepath shouldn't exist at the beginning (as it's unique).
      test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          callbacks=[
              callbacks.TensorBoard(
                  log_dir=saving_filepath, profile_batch=[2, 4])
          ])
      # Profiler dir should be created on both chief and non-chief node
      profiler_dir_path = os.path.join(saving_filepath, 'plugins', 'profile')
      test_obj.assertTrue(tf.io.gfile.exists(profiler_dir_path))

    tf.__internal__.distribute.multi_process_runner.run(
        proc_profiler_saves_on_both_chief_and_non_chief,
        cluster_spec=
        tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
            num_workers=2),
        args=(self,))

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode):
    # Summaries (with profiling disabled) must only be written by the chief.

    def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj):
      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      num_epoch = 2
      # Incorporate type/index information and thread id in saving_filepath to
      # ensure every worker has a unique path. Note that in normal use case the
      # saving_filepath will be the same for all workers, but we use different
      # ones here just to test out chief saves summaries but non-chief doesn't.
      task_config = get_tf_config_task()
      saving_filepath = os.path.join(
          test_obj.get_temp_dir(),
          'logfile_%s_%d' % (task_config['type'], task_config['index']))
      # The saving_filepath shouldn't exist at the beginning (as it's unique).
      test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          # disabling profiler by setting profile_batch to zero
          callbacks=[
              callbacks.TensorBoard(log_dir=saving_filepath, profile_batch=0)
          ])
      # If it's chief, the summaries should be saved in the filepath; if not,
      # the directory should be empty (although created). Using
      # `file_io.list_directory()` since the directory may be created at this
      # point.
      test_obj.assertEqual(
          bool(tf.io.gfile.listdir(saving_filepath)), is_chief())

    tf.__internal__.distribute.multi_process_runner.run(
        proc_tensorboard_saves_on_chief_but_not_otherwise,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self,))

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode):
    # Pre-creates the worker temp dir to check TensorBoard tolerates leftovers
    # from a previous (e.g. crashed) run.

    def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj):
      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      num_epoch = 2
      saving_filepath = os.path.join(
          test_obj.get_temp_dir(),
          'logfile_%s' % (get_tf_config_task()['type']))
      saving_filepath_for_temp = os.path.join(saving_filepath, 'workertemp_1')
      os.mkdir(saving_filepath)
      os.mkdir(saving_filepath_for_temp)
      # Verifies that even if `saving_filepath_for_temp` exists, tensorboard
      # can still save to temporary directory.
      test_obj.assertTrue(tf.io.gfile.exists(saving_filepath_for_temp))
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])

    tf.__internal__.distribute.multi_process_runner.run(
        proc_tensorboard_can_still_save_to_temp_even_if_it_exists,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self,))

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_tensorboard_works_with_same_file_path(self, mode):
    # Verifies TensorBoard works when all workers log to the same directory.

    def proc_tensorboard_works_with_same_file_path(test_obj, saving_filepath):
      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      num_epoch = 2
      # The saving_filepath shouldn't exist at the beginning (as it's unique).
      test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
      # Barriers keep the existence checks on all workers ordered relative to
      # the chief's writes.
      tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
      model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
      tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
      test_obj.assertTrue(tf.io.gfile.listdir(saving_filepath))

    saving_filepath = os.path.join(self.get_temp_dir(), 'logfile')
    tf.__internal__.distribute.multi_process_runner.run(
        proc_tensorboard_works_with_same_file_path,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self, saving_filepath))

  @tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['eager']))
  def test_early_stopping(self, mode):
    # Verifies EarlyStopping halts multi-worker training well before the
    # requested epoch count.

    def proc_early_stopping(test_obj):

      class EpochCounterCallback(callbacks.Callback):
        # Records the last epoch that started, so the test can bound it.

        def on_epoch_begin(self, epoch, logs):
          self.last_epoch = epoch

      model, _, train_ds, steps = _model_setup(test_obj, file_format='')
      epoch_counter_cbk = EpochCounterCallback()
      cbks = [
          callbacks.EarlyStopping(
              monitor='loss', min_delta=0.05, patience=1, verbose=1),
          epoch_counter_cbk
      ]
      # Empirically, it is expected that `model.fit()` terminates around the
      # 22nd epoch. Asserting that it should have been stopped before the 50th
      # epoch to avoid flakiness and be more predictable.
      model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
      test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)

    tf.__internal__.distribute.multi_process_runner.run(
        proc_early_stopping,
        cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec(num_workers=2),
        args=(self,))
if __name__ == '__main__':
  # Hand control to the multi-process test runner, which forks the worker
  # processes before executing the tests above.
  tf.__internal__.distribute.multi_process_runner.test_main()
| |
#
# Copyright (c) 2015 Open-RnD Sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""System parameters"""
from __future__ import absolute_import
from ros3ddevcontroller.param.parameter import Parameter, ReadOnlyParameter
from ros3ddevcontroller.param.evaluators import *
# Names of parameters related to the servo/rig subsystem.
# NOTE(review): presumably these are the parameters sourced from the servo
# controller -- confirm against the consumers of this list.
SERVO_PARAMETERS = [
    'baseline_mm',
    'focus_distance_m',
    'focal_length_mm',
    'aperture'
]
# Names of parameters related to the camera subsystem.
# NOTE(review): presumably these are the parameters exchanged with the camera
# modules -- confirm against the consumers of this list.
# Fix: 'camera_operator' appeared twice in the original list; the duplicate
# entry has been removed (set of names is unchanged).
CAMERA_PARAMETERS = [
    'iso',
    'camera_type',
    'record_framerate',
    'shutter_us',
    'scene_no',
    'shot_no',
    'take_no',
    'start_absolute_timecode',
    'project_framerate',
    'director',
    'director_of_photography',
    'copyright',
    'camera_id',
    'clip_id',
    'reel_id',
    'camera_operator',
    'location',
    'frame_width_px',
    'frame_height_px',
    'frame_format',
    'production_name',
    'record_date',
    'record_time',
    'sensor_width_mm',
    'sensor_height_mm',
    'sensor_width_px',
    'sensor_height_px'
]
# Full set of system parameters: each entry is a Parameter (writable) or
# ReadOnlyParameter, with name, default value, type and optionally an
# evaluator that derives the value from other parameters.
SYSTEM_PARAMETERS = [
    # shot parameters
    Parameter('scene_no', '', str),
    Parameter('scene_name', '', str),
    Parameter('shot_no', '', str),
    Parameter('location', '', str),
    Parameter('notes', '', str),
    # clip parameters
    ReadOnlyParameter('camera_id', 'A', str),
    Parameter('record_framerate', 25, float),
    Parameter('shutter_deg', 180, float),
    Parameter('shutter_us', 20000, float, evaluator=ShutterUSCalc),
    Parameter('iso', 800, int),
    Parameter('filters', '', str),
    ReadOnlyParameter('reel_id', '001', str),
    ReadOnlyParameter('clip_id', '001', str),
    Parameter('take_no', '3', str),
    ReadOnlyParameter('record_date', '', str),
    ReadOnlyParameter('record_time', '', str),
    ReadOnlyParameter('start_absolute_timecode', '', str),
    Parameter('frames', 0, int),
    Parameter('rating', 0, int),
    Parameter('circle', False, bool),
    Parameter('script_notes', '', str),
    Parameter('camera_notes', '', str),
    Parameter('edit_notes', '', str),
    Parameter('post_notes', '', str),
    # lens & rig parameters
    ReadOnlyParameter('lens_description', '', str),
    ReadOnlyParameter('focal_length_mm', 35, float),
    ReadOnlyParameter('aperture', 2.0, float),
    ReadOnlyParameter('aperture_text', '2.0', str),
    ReadOnlyParameter('focus_distance_m', 5.0, float),
    Parameter('dof_near_m', 0, float, evaluator=DofNearCalc),
    Parameter('dof_far_m', 0, float, evaluator=DofFarCalc),
    Parameter('dof_total_m', 0, float, evaluator=DofTotalCalc),
    Parameter('fov_horizontal_deg', 0, float, evaluator=FovHorizontalDegCalc),
    Parameter('fov_vertical_deg', 0, float, evaluator=FovVerticalDegCalc),
    Parameter('fov_diagonal_deg', 0, float, evaluator=FovDiagonalDegCalc),
    Parameter('baseline_mm', 80, float),
    Parameter('convergence_deg', 0, float, evaluator=ConvergenceDegCalc),
    Parameter('convergence_px', 0, float, evaluator=ConvergencePxCalc),
    # scene
    Parameter('distance_near_m', 2, float),
    Parameter('distance_screen_m', 2, float),
    Parameter('distance_far_m', 6, float),
    Parameter('distance_object1_m', 0, float),
    Parameter('distance_object2_m', 0, float),
    Parameter('description_near', '', str),
    Parameter('description_screen', '', str),
    Parameter('description_far', '', str),
    Parameter('description_object1', '', str),
    Parameter('description_object2', '', str),
    Parameter('parallax_near_percent', 0, float, evaluator=ParallaxNearPercentCalc),
    # NOTE(review): screen-plane parallax entries carry no evaluator, unlike
    # their near/far/object siblings -- presumably intentional; confirm.
    Parameter('parallax_screen_percent', 0, float),
    Parameter('parallax_far_percent', 0, float, evaluator=ParallaxFarPercentCalc),
    Parameter('parallax_object1_percent', 0, float, evaluator=ParallaxObject1PercentCalc),
    Parameter('parallax_object2_percent', 0, float, evaluator=ParallaxObject2PercentCalc),
    Parameter('parallax_near_mm', 0, float, evaluator=ParallaxNearMMCalc),
    Parameter('parallax_screen_mm', 0, float),
    Parameter('parallax_far_mm', 0, float, evaluator=ParallaxFarMMCalc),
    Parameter('parallax_object1_mm', 0, float, evaluator=ParallaxObject1MMCalc),
    Parameter('parallax_object2_mm', 0, float, evaluator=ParallaxObject2MMCalc),
    Parameter('real_width_near_m', 0, float, evaluator=RealWidthNearCalc),
    Parameter('real_height_near_m', 0, float, evaluator=RealHeightNearCalc),
    Parameter('real_width_screen_m', 0, float, evaluator=RealWidthScreenCalc),
    Parameter('real_height_screen_m', 0, float, evaluator=RealHeightScreenCalc),
    # Fix: was evaluator=RealHeightFarCalc, which duplicated the height
    # evaluator used on the next line; every other width parameter uses the
    # matching RealWidth*Calc evaluator.
    Parameter('real_width_far_m', 0, float, evaluator=RealWidthFarCalc),
    Parameter('real_height_far_m', 0, float, evaluator=RealHeightFarCalc),
    Parameter('real_width_object1_m', 0, float, evaluator=RealWidthObject1Calc),
    Parameter('real_height_object1_m', 0, float, evaluator=RealHeightObject1Calc),
    Parameter('real_width_object2_m', 0, float, evaluator=RealWidthObject2Calc),
    Parameter('real_height_object2_m', 0, float, evaluator=RealHeightObject2Calc),
    # camera
    ReadOnlyParameter('stereoscopic_set', True, bool),
    ReadOnlyParameter('stereo_setup', 'C', str),
    ReadOnlyParameter('camera_type', 'RED Mysterium-X', str),
    ReadOnlyParameter('sensor_width_mm', 27.7, float),
    ReadOnlyParameter('sensor_width_px', 5120, int),
    ReadOnlyParameter('sensor_height_mm', 14.6, float),
    ReadOnlyParameter('sensor_height_px', 2700, int),
    Parameter('frame_format', '4K', str),
    ReadOnlyParameter('frame_width_mm', 0, float, evaluator=FrameWidthMMCalc),
    ReadOnlyParameter('frame_width_px', 4096, int),
    ReadOnlyParameter('frame_height_mm', 0, float, evaluator=FrameHeightMMCalc),
    ReadOnlyParameter('frame_height_px', 2160, int),
    ReadOnlyParameter('frame_diagonal_mm', 0, float, evaluator=FrameDiagonalMMCalc),
    ReadOnlyParameter('frame_horizontal_crop', 0, float, evaluator=FrameHorizontalCropCalc),
    ReadOnlyParameter('frame_vertical_crop', 0, float, evaluator=FrameVerticalCropCalc),
    ReadOnlyParameter('frame_diagonal_crop', 0, float, evaluator=FrameDiagonalCropCalc),
    Parameter('coc_px', 2, float),
    Parameter('coc_um', 0, float, evaluator=CocUmCalc),
    ReadOnlyParameter('record_state', 0, int),
    # integration
    Parameter('rig_controller_url', '', str),
    ReadOnlyParameter('aladin_module_enable', False, bool),
    ReadOnlyParameter('aladin_module_status', False, bool),
    ReadOnlyParameter('aladin_f_mode', 2, int),
    ReadOnlyParameter('aladin_i_mode', 2, int),
    ReadOnlyParameter('aladin_z_mode', 2, int),
    ReadOnlyParameter('aladin_c_mode', 2, int),
    ReadOnlyParameter('aladin_ia_mode', 1, int),
    ReadOnlyParameter('red_module_enable', True, bool),
    ReadOnlyParameter('red_module_status', False, bool),
    ReadOnlyParameter('phantom_module_enable', False, bool),
    ReadOnlyParameter('camera_center_hostname', '100.10.10.101', str),
    ReadOnlyParameter('camera_left_hostname', '100.10.10.101', str),
    ReadOnlyParameter('camera_right_hostname', '100.10.10.102', str),
    # screen
    Parameter('screen_type', 'TV 50-inch', str),
    Parameter('screen_width_m', 1.08, float),
    Parameter('screen_height_m', 0.67, float),
    # NOTE(review): 'screen_distance_n' looks like a typo for an '_m' suffix,
    # but renaming it would change the public parameter name -- left as-is;
    # confirm against clients of this parameter.
    Parameter('screen_distance_n', 2, float),
    Parameter('screen_distance_m', 0, float, evaluator=ScreenDistanceCalc),
    Parameter('interpupillary_distance_mm', 65, float),
    Parameter('spectator_fov_horizontal_deg', 0, float,
              evaluator=SpectatorFovHorizontalDegCalc),
    Parameter('perceived_position_near_percent', 0, float,
              evaluator=PerceivedPositionNearPercCalc),
    Parameter('perceived_position_screen_percent', 0, float,
              evaluator=PerceivedPositionScreenPercCalc),
    Parameter('perceived_position_far_percent', 0, float,
              evaluator=PerceivedPositionFarPercCalc),
    Parameter('perceived_position_object1_percent', 0,
              float, evaluator=PerceivedPositionObject1PercCalc),
    Parameter('perceived_position_object2_percent', 0, float,
              evaluator=PerceivedPositionObject2PercCalc),
    Parameter('perceived_position_near_m', 0, float,
              evaluator=PerceivedPositionNearMCalc),
    Parameter('perceived_position_screen_m', 0, float,
              evaluator=PerceivedPositionScreenMCalc),
    Parameter('perceived_position_far_m', 0, float,
              evaluator=PerceivedPositionFarMCalc),
    Parameter('perceived_position_object1_m', 0, float,
              evaluator=PerceivedPositionObject1MCalc),
    Parameter('perceived_position_object2_m', 0, float,
              evaluator=PerceivedPositionObject2MCalc),
    # project
    Parameter('production_name', '', str),
    Parameter('director', '', str),
    Parameter('director_of_photography', '', str),
    Parameter('camera_operator', '', str),
    Parameter('stereographer', '', str),
    ReadOnlyParameter('copyright', '', str),
    ReadOnlyParameter('project_framerate', 25, float),
]
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gated Recurrent Unit layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
import uuid
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer
from keras.engine.input_spec import InputSpec
from keras.layers.rnn import gru_lstm_utils
from keras.layers.rnn import rnn_utils
from keras.layers.rnn.base_rnn import RNN
from keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from keras.utils import tf_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# Message logged when `recurrent_dropout` forces the cell to fall back from
# the fused `implementation=2` to `implementation=1` (see GRUCell.__init__).
RECURRENT_DROPOUT_WARNING_MSG = (
    'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
    'Using `implementation=1`.')
@keras_export('keras.layers.GRUCell', v1=[])
class GRUCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
  """Cell class for the GRU layer.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  This class processes one step within the whole time sequence input, whereas
  `tf.keras.layer.GRU` processes the whole sequence.

  For example:

  >>> inputs = tf.random.normal([32, 10, 8])
  >>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))
  >>> output = rnn(inputs)
  >>> print(output.shape)
  (32, 4)

  >>> rnn = tf.keras.layers.RNN(
  ...    tf.keras.layers.GRUCell(4),
  ...    return_sequences=True,
  ...    return_state=True)
  >>> whole_sequence_output, final_state = rnn(inputs)
  >>> print(whole_sequence_output.shape)
  (32, 10, 4)
  >>> print(final_state.shape)
  (32, 4)

  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use. Default: hyperbolic tangent
      (`tanh`). If you pass None, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use for the recurrent step.
      Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
      applied (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs. Default:
      `glorot_uniform`.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent state.
      Default: `orthogonal`.
    bias_initializer: Initializer for the bias vector. Default: `zeros`.
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix. Default: `None`.
    bias_regularizer: Regularizer function applied to the bias vector. Default:
      `None`.
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_constraint: Constraint function applied to the `recurrent_kernel`
      weights matrix. Default: `None`.
    bias_constraint: Constraint function applied to the bias vector. Default:
      `None`.
    dropout: Float between 0 and 1. Fraction of the units to drop for the
      linear transformation of the inputs. Default: 0.
    recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
      the linear transformation of the recurrent state. Default: 0.
    reset_after: GRU convention (whether to apply reset gate after or
      before matrix multiplication). False = "before",
      True = "after" (default and cuDNN compatible).

  Call arguments:
    inputs: A 2D tensor, with shape of `[batch, feature]`.
    states: A 2D tensor with shape of `[batch, units]`, which is the state from
      the previous time step. For timestep 0, the initial state provided by user
      will be feed to cell.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. Only relevant when `dropout` or
      `recurrent_dropout` is used.
  """

  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               reset_after=True,
               **kwargs):
    # Fix: previously `units < 0`, which contradicted the error message and
    # allowed `units == 0` (degenerate zero-width weights). `units` must be a
    # positive integer, as the docstring states.
    if units <= 0:
      raise ValueError(f'Received an invalid value for argument `units`, '
                       f'expected a positive integer, got {units}.')
    # By default use cached variable under v2 mode, see b/143699808.
    if tf.compat.v1.executing_eagerly_outside_functions():
      self._enable_caching_device = kwargs.pop('enable_caching_device', True)
    else:
      self._enable_caching_device = kwargs.pop('enable_caching_device', False)
    super(GRUCell, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp dropout fractions into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    # `implementation=2` (fused matmuls) cannot apply per-gate recurrent
    # dropout masks, so fall back to `implementation=1` in that case.
    implementation = kwargs.pop('implementation', 2)
    if self.recurrent_dropout != 0 and implementation != 1:
      logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
      self.implementation = 1
    else:
      self.implementation = implementation
    self.reset_after = reset_after
    self.state_size = self.units
    self.output_size = self.units

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    """Creates kernel, recurrent kernel and (optionally) bias weights."""
    input_dim = input_shape[-1]
    default_caching_device = rnn_utils.caching_device(self)
    # Gate kernels for z, r, h are stored concatenated along the last axis.
    self.kernel = self.add_weight(
        shape=(input_dim, self.units * 3),
        name='kernel',
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        caching_device=default_caching_device)
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units * 3),
        name='recurrent_kernel',
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
        caching_device=default_caching_device)
    if self.use_bias:
      if not self.reset_after:
        bias_shape = (3 * self.units,)
      else:
        # separate biases for input and recurrent kernels
        # Note: the shape is intentionally different from CuDNNGRU biases
        # `(2 * 3 * self.units,)`, so that we can distinguish the classes
        # when loading and converting saved weights.
        bias_shape = (2, 3 * self.units)
      self.bias = self.add_weight(shape=bias_shape,
                                  name='bias',
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint,
                                  caching_device=default_caching_device)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs, states, training=None):
    """Runs one GRU step; returns (output, new_state)."""
    h_tm1 = states[0] if tf.nest.is_nested(
        states) else states  # previous memory
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=3)

    if self.use_bias:
      if not self.reset_after:
        input_bias, recurrent_bias = self.bias, None
      else:
        input_bias, recurrent_bias = tf.unstack(self.bias)

    if self.implementation == 1:
      # Per-gate matmuls, with an independent dropout mask per gate.
      if 0. < self.dropout < 1.:
        inputs_z = inputs * dp_mask[0]
        inputs_r = inputs * dp_mask[1]
        inputs_h = inputs * dp_mask[2]
      else:
        inputs_z = inputs
        inputs_r = inputs
        inputs_h = inputs

      x_z = backend.dot(inputs_z, self.kernel[:, :self.units])
      x_r = backend.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
      x_h = backend.dot(inputs_h, self.kernel[:, self.units * 2:])

      if self.use_bias:
        x_z = backend.bias_add(x_z, input_bias[:self.units])
        x_r = backend.bias_add(x_r, input_bias[self.units: self.units * 2])
        x_h = backend.bias_add(x_h, input_bias[self.units * 2:])

      if 0. < self.recurrent_dropout < 1.:
        h_tm1_z = h_tm1 * rec_dp_mask[0]
        h_tm1_r = h_tm1 * rec_dp_mask[1]
        h_tm1_h = h_tm1 * rec_dp_mask[2]
      else:
        h_tm1_z = h_tm1
        h_tm1_r = h_tm1
        h_tm1_h = h_tm1

      recurrent_z = backend.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
      recurrent_r = backend.dot(
          h_tm1_r, self.recurrent_kernel[:, self.units:self.units * 2])
      if self.reset_after and self.use_bias:
        recurrent_z = backend.bias_add(recurrent_z, recurrent_bias[:self.units])
        recurrent_r = backend.bias_add(
            recurrent_r, recurrent_bias[self.units:self.units * 2])

      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)

      # reset gate applied after/before matrix multiplication
      if self.reset_after:
        recurrent_h = backend.dot(
            h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
        if self.use_bias:
          recurrent_h = backend.bias_add(
              recurrent_h, recurrent_bias[self.units * 2:])
        recurrent_h = r * recurrent_h
      else:
        recurrent_h = backend.dot(
            r * h_tm1_h, self.recurrent_kernel[:, self.units * 2:])

      hh = self.activation(x_h + recurrent_h)
    else:
      if 0. < self.dropout < 1.:
        inputs = inputs * dp_mask[0]

      # inputs projected by all gate matrices at once
      matrix_x = backend.dot(inputs, self.kernel)
      if self.use_bias:
        # biases: bias_z_i, bias_r_i, bias_h_i
        matrix_x = backend.bias_add(matrix_x, input_bias)

      x_z, x_r, x_h = tf.split(matrix_x, 3, axis=-1)

      if self.reset_after:
        # hidden state projected by all gate matrices at once
        matrix_inner = backend.dot(h_tm1, self.recurrent_kernel)
        if self.use_bias:
          matrix_inner = backend.bias_add(matrix_inner, recurrent_bias)
      else:
        # hidden state projected separately for update/reset and new
        matrix_inner = backend.dot(
            h_tm1, self.recurrent_kernel[:, :2 * self.units])

      recurrent_z, recurrent_r, recurrent_h = tf.split(
          matrix_inner, [self.units, self.units, -1], axis=-1)

      z = self.recurrent_activation(x_z + recurrent_z)
      r = self.recurrent_activation(x_r + recurrent_r)

      if self.reset_after:
        recurrent_h = r * recurrent_h
      else:
        recurrent_h = backend.dot(
            r * h_tm1, self.recurrent_kernel[:, 2 * self.units:])

      hh = self.activation(x_h + recurrent_h)
    # previous and candidate state mixed by update gate
    h = z * h_tm1 + (1 - z) * hh
    new_state = [h] if tf.nest.is_nested(states) else h
    return h, new_state

  def get_config(self):
    """Returns the serializable config of the cell."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
        'dropout': self.dropout,
        'recurrent_dropout': self.recurrent_dropout,
        'implementation': self.implementation,
        'reset_after': self.reset_after
    }
    config.update(rnn_utils.config_for_enable_caching_device(self))
    base_config = super(GRUCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Returns an all-zeros initial state matching the cell's state size."""
    return rnn_utils.generate_zero_filled_state_for_cell(
        self, inputs, batch_size, dtype)
@keras_export('keras.layers.GRU', v1=[])
class GRU(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
  """Gated Recurrent Unit - Cho et al. 2014.

  See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
  for details about the usage of RNN API.

  Based on available runtime hardware and constraints, this layer
  will choose different implementations (cuDNN-based or pure-TensorFlow)
  to maximize the performance. If a GPU is available and all
  the arguments to the layer meet the requirement of the cuDNN kernel
  (see below for details), the layer will use a fast cuDNN implementation.

  The requirements to use the cuDNN implementation are:

  1. `activation` == `tanh`
  2. `recurrent_activation` == `sigmoid`
  3. `recurrent_dropout` == 0
  4. `unroll` is `False`
  5. `use_bias` is `True`
  6. `reset_after` is `True`
  7. Inputs, if use masking, are strictly right-padded.
  8. Eager execution is enabled in the outermost context.

  There are two variants of the GRU implementation. The default one is based on
  [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden
  state before matrix multiplication. The other one is based on
  [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.

  The second variant is compatible with CuDNNGRU (GPU-only) and allows
  inference on CPU. Thus it has separate biases for `kernel` and
  `recurrent_kernel`. To use this variant, set `reset_after=True` and
  `recurrent_activation='sigmoid'`.

  For example:

  >>> inputs = tf.random.normal([32, 10, 8])
  >>> gru = tf.keras.layers.GRU(4)
  >>> output = gru(inputs)
  >>> print(output.shape)
  (32, 4)
  >>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
  >>> whole_sequence_output, final_state = gru(inputs)
  >>> print(whole_sequence_output.shape)
  (32, 10, 4)
  >>> print(final_state.shape)
  (32, 4)

  Args:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      Default: hyperbolic tangent (`tanh`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use
      for the recurrent step.
      Default: sigmoid (`sigmoid`).
      If you pass `None`, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs. Default:
      `glorot_uniform`.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the recurrent
      state. Default: `orthogonal`.
    bias_initializer: Initializer for the bias vector. Default: `zeros`.
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix. Default: `None`.
    bias_regularizer: Regularizer function applied to the bias vector. Default:
      `None`.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation"). Default: `None`.
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix. Default: `None`.
    recurrent_constraint: Constraint function applied to the `recurrent_kernel`
      weights matrix. Default: `None`.
    bias_constraint: Constraint function applied to the bias vector. Default:
      `None`.
    dropout: Float between 0 and 1. Fraction of the units to drop for the linear
      transformation of the inputs. Default: 0.
    recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
      the linear transformation of the recurrent state. Default: 0.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence. Default: `False`.
    return_state: Boolean. Whether to return the last state in addition to the
      output. Default: `False`.
    go_backwards: Boolean (default `False`).
      If True, process the input sequence backwards and return the
      reversed sequence.
    stateful: Boolean (default False). If True, the last state
      for each sample at index i in a batch will be used as initial
      state for the sample of index i in the following batch.
    unroll: Boolean (default False).
      If True, the network will be unrolled,
      else a symbolic loop will be used.
      Unrolling can speed-up a RNN,
      although it tends to be more memory-intensive.
      Unrolling is only suitable for short sequences.
    time_major: The shape format of the `inputs` and `outputs` tensors.
      If True, the inputs and outputs will be in shape
      `[timesteps, batch, feature]`, whereas in the False case, it will be
      `[batch, timesteps, feature]`. Using `time_major = True` is a bit more
      efficient because it avoids transposes at the beginning and end of the
      RNN calculation. However, most TensorFlow data is batch-major, so by
      default this function accepts input and emits output in batch-major
      form.
    reset_after: GRU convention (whether to apply reset gate after or
      before matrix multiplication). False = "before",
      True = "after" (default and cuDNN compatible).

  Call arguments:
    inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
    mask: Binary tensor of shape `[samples, timesteps]` indicating whether
      a given timestep should be masked (optional, defaults to `None`).
      An individual `True` entry indicates that the corresponding timestep
      should be utilized, while a `False` entry indicates that the
      corresponding timestep should be ignored.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` is used (optional, defaults to `None`).
    initial_state: List of initial state tensors to be passed to the first
      call of the cell (optional, defaults to `None` which causes creation
      of zero-filled initial state tensors).
  """

  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               time_major=False,
               reset_after=True,
               **kwargs):
    # return_runtime is a flag for testing, which shows the real backend
    # implementation chosen by grappler in graph mode.
    self._return_runtime = kwargs.pop('return_runtime', False)
    implementation = kwargs.pop('implementation', 2)
    if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=2`.'
                      'Please update your layer call.')
    # `enable_caching_device` is forwarded to the cell, not kept on the layer.
    if 'enable_caching_device' in kwargs:
      cell_kwargs = {'enable_caching_device':
                     kwargs.pop('enable_caching_device')}
    else:
      cell_kwargs = {}
    cell = GRUCell(
        units,
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        kernel_constraint=kernel_constraint,
        recurrent_constraint=recurrent_constraint,
        bias_constraint=bias_constraint,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
        implementation=implementation,
        reset_after=reset_after,
        dtype=kwargs.get('dtype'),
        trainable=kwargs.get('trainable', True),
        **cell_kwargs)
    super(GRU, self).__init__(
        cell,
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        unroll=unroll,
        time_major=time_major,
        **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = [InputSpec(ndim=3)]
    # GPU kernel uses following setting by default and not configurable.
    # These conditions mirror the cuDNN requirements listed in the class
    # docstring; the decision is made once at construction time.
    self._could_use_gpu_kernel = (
        self.activation in (activations.tanh, tf.tanh) and
        self.recurrent_activation in (activations.sigmoid, tf.sigmoid) and
        recurrent_dropout == 0 and not unroll and use_bias and
        reset_after and tf.compat.v1.executing_eagerly_outside_functions())
    if tf.config.list_logical_devices('GPU'):
      # Only show the message when there is GPU available, user will not care
      # about the cuDNN if there isn't any GPU.
      if self._could_use_gpu_kernel:
        logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
      else:
        logging.warning(gru_lstm_utils.CUDNN_NOT_AVAILABLE_MSG % self.name)
    if gru_lstm_utils.use_new_gru_lstm_impl():
      self._defun_wrapper = gru_lstm_utils.DefunWrapper(
          time_major, go_backwards, 'gru')

  def call(self, inputs, mask=None, training=None, initial_state=None):
    """Runs the layer over the full sequence, dispatching to the best kernel."""
    # The input should be dense, padded with zeros. If a ragged input is fed
    # into the layer, it is padded and the row lengths are used for masking.
    inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
    is_ragged_input = (row_lengths is not None)
    self._validate_args_if_ragged(is_ragged_input, mask)
    # GRU does not support constants. Ignore it during process.
    inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
    if isinstance(mask, list):
      mask = mask[0]
    input_shape = backend.int_shape(inputs)
    timesteps = input_shape[0] if self.time_major else input_shape[1]
    if not self._could_use_gpu_kernel:
      # Fall back to the generic cell-based loop via backend.rnn.
      kwargs = {'training': training}
      self._maybe_reset_cell_dropout_mask(self.cell)

      def step(cell_inputs, cell_states):
        return self.cell(cell_inputs, cell_states, **kwargs)

      last_output, outputs, states = backend.rnn(
          step,
          inputs,
          initial_state,
          constants=None,
          go_backwards=self.go_backwards,
          mask=mask,
          unroll=self.unroll,
          input_length=row_lengths if row_lengths is not None else timesteps,
          time_major=self.time_major,
          zero_output_for_mask=self.zero_output_for_mask)
      # This is a dummy tensor for testing purpose.
      runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
    else:
      last_output, outputs, runtime, states = self._defun_gru_call(
          inputs, initial_state, training, mask, row_lengths)
    if self.stateful:
      # Carry the final state over to the next batch for stateful layers.
      updates = [tf.compat.v1.assign(self.states[0],
                                     tf.cast(states[0], self.states[0].dtype))]
      self.add_update(updates)
    if self.return_sequences:
      output = backend.maybe_convert_to_ragged(
          is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards)
    else:
      output = last_output
    if self.return_state:
      return [output] + list(states)
    elif self._return_runtime:
      return output, runtime
    else:
      return output

  # The properties below simply expose the wrapped cell's configuration.
  @property
  def units(self):
    return self.cell.units

  @property
  def activation(self):
    return self.cell.activation

  @property
  def recurrent_activation(self):
    return self.cell.recurrent_activation

  @property
  def use_bias(self):
    return self.cell.use_bias

  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer

  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer

  @property
  def bias_initializer(self):
    return self.cell.bias_initializer

  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer

  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer

  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer

  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint

  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint

  @property
  def bias_constraint(self):
    return self.cell.bias_constraint

  @property
  def dropout(self):
    return self.cell.dropout

  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout

  @property
  def implementation(self):
    return self.cell.implementation

  @property
  def reset_after(self):
    return self.cell.reset_after

  def get_config(self):
    """Returns the layer config; the wrapped cell's config is inlined."""
    config = {
        'units':
            self.units,
        'activation':
            activations.serialize(self.activation),
        'recurrent_activation':
            activations.serialize(self.recurrent_activation),
        'use_bias':
            self.use_bias,
        'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
        'recurrent_initializer':
            initializers.serialize(self.recurrent_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
        'recurrent_regularizer':
            regularizers.serialize(self.recurrent_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
        'recurrent_constraint':
            constraints.serialize(self.recurrent_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint),
        'dropout':
            self.dropout,
        'recurrent_dropout':
            self.recurrent_dropout,
        'implementation':
            self.implementation,
        'reset_after':
            self.reset_after
    }
    config.update(rnn_utils.config_for_enable_caching_device(self.cell))
    base_config = super(GRU, self).get_config()
    # The cell is reconstructed from the flattened keys above, so the nested
    # 'cell' entry from the base RNN config is dropped.
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config):
    """Creates a layer from its config, upgrading legacy `implementation=0`."""
    if 'implementation' in config and config['implementation'] == 0:
      config['implementation'] = 1
    return cls(**config)

  def _defun_gru_call(self, inputs, initial_state, training, mask,
                      sequence_lengths):
    """Runs the fused (defun/cuDNN-capable) GRU implementation."""
    # Use the new defun approach for backend implementation swap.
    # Note that different implementations need to have same function
    # signature, eg, the tensor parameters need to have same shape and dtypes.
    self.reset_dropout_mask()
    dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
    if dropout_mask is not None:
      inputs = inputs * dropout_mask[0]
    if gru_lstm_utils.use_new_gru_lstm_impl():
      gru_kwargs = {
          'inputs':
              inputs,
          'init_h':
              gru_lstm_utils.read_variable_value(initial_state[0]),
          'kernel':
              gru_lstm_utils.read_variable_value(self.cell.kernel),
          'recurrent_kernel':
              gru_lstm_utils.read_variable_value(self.cell.recurrent_kernel),
          'bias':
              gru_lstm_utils.read_variable_value(self.cell.bias),
          'mask':
              mask,
          'time_major':
              self.time_major,
          'go_backwards':
              self.go_backwards,
          'sequence_lengths':
              sequence_lengths,
          'zero_output_for_mask':
              self.zero_output_for_mask
      }
      (last_output, outputs, new_h,
       runtime) = self._defun_wrapper.defun_layer(**gru_kwargs)
    else:
      gpu_gru_kwargs = {
          'inputs':
              inputs,
          'init_h':
              gru_lstm_utils.read_variable_value(initial_state[0]),
          'kernel':
              gru_lstm_utils.read_variable_value(self.cell.kernel),
          'recurrent_kernel':
              gru_lstm_utils.read_variable_value(self.cell.recurrent_kernel),
          'bias':
              gru_lstm_utils.read_variable_value(self.cell.bias),
          'mask':
              mask,
          'time_major':
              self.time_major,
          'go_backwards':
              self.go_backwards,
          'sequence_lengths':
              sequence_lengths
      }
      # The generic kernel additionally honors zero_output_for_mask.
      normal_gru_kwargs = gpu_gru_kwargs.copy()
      normal_gru_kwargs.update({
          'zero_output_for_mask': self.zero_output_for_mask,
      })
      if tf.executing_eagerly():
        device_type = gru_lstm_utils.get_context_device_type()
        can_use_gpu = (
            # Either user specified GPU or unspecified but GPU is available.
            (device_type == gru_lstm_utils.GPU_DEVICE_NAME or
             (device_type is None and tf.config.list_logical_devices('GPU')))
            and
            (mask is None or
             gru_lstm_utils.is_cudnn_supported_inputs(mask, self.time_major)))
        # Under eager context, check the device placement and prefer the GPU
        # kernel when one is available and the inputs are cuDNN-compatible.
        if can_use_gpu:
          last_output, outputs, new_h, runtime = gpu_gru(**gpu_gru_kwargs)
        else:
          last_output, outputs, new_h, runtime = standard_gru(
              **normal_gru_kwargs)
      else:
        # In graph mode, defer the choice to grappler via the registered
        # function pair inside gru_with_backend_selection.
        last_output, outputs, new_h, runtime = gru_with_backend_selection(
            **normal_gru_kwargs)
    states = [new_h]
    return last_output, outputs, runtime, states
def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask,
                 time_major, go_backwards, sequence_lengths,
                 zero_output_for_mask):
  """GRU with standard kernel implementation.

  This implementation can be run on all types of hardware. All layer weights
  are lifted out into function parameters so the signature matches the cuDNN
  counterpart tensor-for-tensor. The step logic is simplified relative to
  `GRUCell` (no dropout, `reset_after` behavior only) because the cuDNN
  implementation does not support those options.

  Args:
    inputs: Input tensor of GRU layer.
    init_h: Initial state tensor for the cell output.
    kernel: Weights for cell kernel.
    recurrent_kernel: Weights for cell recurrent kernel.
    bias: Weights for cell kernel bias and recurrent bias. The bias contains
      the combined input_bias and recurrent_bias.
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether
      a given timestep should be masked. An individual `True` entry indicates
      that the corresponding timestep should be utilized, while a `False`
      entry indicates that the corresponding timestep should be ignored.
    time_major: Boolean, whether the inputs are in the format of
      [time, batch, feature] or [batch, time, feature].
    go_backwards: Boolean (default False). If True, process the input sequence
      backwards and return the reversed sequence.
    sequence_lengths: The lengths of all sequences coming from a variable
      length input, such as ragged tensors. If the input has a fixed timestep
      size, this should be None.
    zero_output_for_mask: Boolean, whether to output zero for masked timestep.

  Returns:
    last_output: output tensor for the last timestep, shape [batch, units].
    outputs: output tensor for all timesteps, shape [batch, time, units].
    state_0: the cell output, same shape as init_h.
    runtime: constant string tensor indicating the real runtime hardware. This
      value is for testing purposes and should not be used by users.
  """
  shape = backend.int_shape(inputs)
  num_timesteps = shape[0 if time_major else 1]
  # `bias` is stacked as (2, 3 * units): row 0 for inputs, row 1 recurrent.
  input_bias, recurrent_bias = tf.unstack(bias)

  def gru_step(step_inputs, step_states):
    """Single GRU timestep used by the Keras RNN backend."""
    prev_h = step_states[0]
    # Project the inputs through all three gate kernels at once: [z, r, h].
    gates_x = backend.bias_add(backend.dot(step_inputs, kernel), input_bias)
    x_z, x_r, x_h = tf.split(gates_x, 3, axis=1)
    # Project the previous hidden state through all recurrent kernels.
    gates_h = backend.bias_add(
        backend.dot(prev_h, recurrent_kernel), recurrent_bias)
    rec_z, rec_r, rec_h = tf.split(gates_h, 3, axis=1)
    update = tf.sigmoid(x_z + rec_z)
    reset = tf.sigmoid(x_r + rec_r)
    candidate = tf.tanh(x_h + reset * rec_h)
    # Previous and candidate state mixed by the update gate.
    new_h = update * prev_h + (1 - update) * candidate
    return new_h, [new_h]

  last_output, outputs, final_states = backend.rnn(
      gru_step,
      inputs, [init_h],
      constants=None,
      unroll=False,
      time_major=time_major,
      mask=mask,
      go_backwards=go_backwards,
      input_length=(sequence_lengths
                    if sequence_lengths is not None else num_timesteps),
      zero_output_for_mask=zero_output_for_mask)
  return last_output, outputs, final_states[0], gru_lstm_utils.runtime(
      gru_lstm_utils.RUNTIME_CPU)
def gpu_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major,
            go_backwards, sequence_lengths):
  """GRU with cuDNN implementation which is only available for GPU.

  Signature mirrors `standard_gru`, except `zero_output_for_mask` is not
  supported here: a `mask` is converted into explicit `sequence_lengths`
  (so masked inputs must be strictly right-padded).

  Returns:
    last_output, outputs, h, runtime — same structure as `standard_gru`,
    with `runtime` reporting RUNTIME_GPU.
  """
  if mask is not None:
    # cuDNN handles variable-length sequences via lengths, not masks.
    sequence_lengths = gru_lstm_utils.calculate_sequence_by_mask(
        mask, time_major)
  if not time_major and sequence_lengths is None:
    # The fixed-length CudnnRNN op expects time-major input; transpose here
    # and transpose back after the call.
    inputs = tf.transpose(inputs, perm=(1, 0, 2))
    seq_axis, batch_axis = (0, 1)
  else:
    seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
  # For init_h, cuDNN expects one more dim of num_layers before or after batch
  # dim for time major or batch major inputs respectively
  init_h = tf.expand_dims(init_h, axis=seq_axis)
  weights = tf.split(kernel, 3, axis=1)
  weights += tf.split(recurrent_kernel, 3, axis=1)
  # Note that the bias was initialized as shape (2, 3 * units), flatten it
  # into (6 * units).
  bias = tf.split(backend.flatten(bias), 6)
  if tf.sysconfig.get_build_info()['is_cuda_build']:
    # Note that the gate order for cuDNN is different from the canonical
    # format. canonical format is [z, r, h], whereas cuDNN is [r, z, h].
    # The swap needs to be done for kernel, recurrent_kernel, input_bias,
    # recurrent_bias.
    # z is update gate weights.
    # r is reset gate weights.
    # h is output gate weights.
    weights[0], weights[1] = weights[1], weights[0]
    weights[3], weights[4] = weights[4], weights[3]
    bias[0], bias[1] = bias[1], bias[0]
    bias[3], bias[4] = bias[4], bias[3]
  params = gru_lstm_utils.canonical_to_params(
      weights=weights,
      biases=bias,
      shape=tf.constant([-1]),
      transpose_weights=True)
  if sequence_lengths is not None:
    if go_backwards:
      # Three reversals are required. E.g.,
      # normal input = [1, 2, 3, 0, 0]  # where 0 need to be masked
      # reversed_input_to_cudnn = [3, 2, 1, 0, 0]
      # output_from_cudnn = [6, 5, 4, 0, 0]
      # expected_output = [0, 0, 6, 5 ,4]
      inputs = tf.reverse_sequence(
          inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
    outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV3(
        input=inputs,
        input_h=init_h,
        input_c=0,
        params=params,
        is_training=True,
        rnn_mode='gru',
        sequence_lengths=sequence_lengths,
        time_major=time_major)
    if go_backwards:
      outputs = tf.reverse_sequence(
          outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis)
      outputs = tf.reverse(outputs, axis=[seq_axis])
  else:
    if go_backwards:
      # Reverse axis 0 since the input is already converted to time major.
      inputs = tf.reverse(inputs, axis=[0])
    outputs, h, _, _ = tf.raw_ops.CudnnRNN(
        input=inputs, input_h=init_h, input_c=0, params=params,
        is_training=True, rnn_mode='gru')
  last_output = outputs[-1]
  if not time_major and sequence_lengths is None:
    outputs = tf.transpose(outputs, perm=[1, 0, 2])
  h = tf.squeeze(h, axis=seq_axis)
  # In the case of variable length input, the cudnn kernel will fill zeros for
  # the output, whereas the default keras behavior is to bring over the previous
  # output for t-1, so that in the return_sequence=False case, user can quickly
  # get the final effect output instead just 0s at the last timestep.
  # In order to mimic the default keras behavior, we copy the final h state as
  # the last_output, since it is numerically same as the output.
  if sequence_lengths is not None:
    last_output = h
  return last_output, outputs, h, gru_lstm_utils.runtime(
      gru_lstm_utils.RUNTIME_GPU)
def gru_with_backend_selection(inputs, init_h, kernel, recurrent_kernel, bias,
                               mask, time_major, go_backwards, sequence_lengths,
                               zero_output_for_mask):
  """Call the GRU with optimized backend kernel selection.

  Under the hood, this function will create two TF functions, one with the
  most generic kernel that can run under any device condition, and a second
  one with the cuDNN-specific kernel, which can only run on GPU.

  The generic function is the one actually called; the cuDNN one is only
  registered in the graph. Grappler then rewrites the graph and swaps in the
  optimized TF function based on the device placement.

  Args:
    inputs: Input tensor of GRU layer.
    init_h: Initial state tensor for the cell output.
    kernel: Weights for cell kernel.
    recurrent_kernel: Weights for cell recurrent kernel.
    bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
      is used in this case.
    mask: Boolean tensor for mask out the steps within sequence.
      An individual `True` entry indicates that the corresponding timestep
      should be utilized, while a `False` entry indicates that the
      corresponding timestep should be ignored.
    time_major: Boolean, whether the inputs are in the format of
      [time, batch, feature] or [batch, time, feature].
    go_backwards: Boolean (default False). If True, process the input sequence
      backwards and return the reversed sequence.
    sequence_lengths: The lengths of all sequences coming from a variable
      length input, such as ragged tensors. If the input has a fixed timestep
      size, this should be None.
    zero_output_for_mask: Boolean, whether to output zero for masked timestep.

  Returns:
    List of output tensors, same as standard_gru.
  """
  params = {
      'inputs': inputs,
      'init_h': init_h,
      'kernel': kernel,
      'recurrent_kernel': recurrent_kernel,
      'bias': bias,
      'mask': mask,
      'time_major': time_major,
      'go_backwards': go_backwards,
      'sequence_lengths': sequence_lengths,
      'zero_output_for_mask': zero_output_for_mask,
  }

  def gpu_gru_with_fallback(inputs, init_h, kernel, recurrent_kernel, bias,
                            mask, time_major, go_backwards, sequence_lengths,
                            zero_output_for_mask):
    """Use cuDNN kernel when mask is none or strictly right padded."""
    # With no mask the cuDNN kernel is always applicable; skip the tf.cond.
    if mask is None:
      return gpu_gru(
          inputs=inputs,
          init_h=init_h,
          kernel=kernel,
          recurrent_kernel=recurrent_kernel,
          bias=bias,
          mask=mask,
          time_major=time_major,
          go_backwards=go_backwards,
          sequence_lengths=sequence_lengths)

    def cudnn_gru_fn():
      return gpu_gru(
          inputs=inputs,
          init_h=init_h,
          kernel=kernel,
          recurrent_kernel=recurrent_kernel,
          bias=bias,
          mask=mask,
          time_major=time_major,
          go_backwards=go_backwards,
          sequence_lengths=sequence_lengths)

    def standard_gru_fn():
      return standard_gru(
          inputs=inputs,
          init_h=init_h,
          kernel=kernel,
          recurrent_kernel=recurrent_kernel,
          bias=bias,
          mask=mask,
          time_major=time_major,
          go_backwards=go_backwards,
          sequence_lengths=sequence_lengths,
          zero_output_for_mask=zero_output_for_mask)

    # Whether the mask is right-padded is only known at runtime, so the
    # choice between kernels must be a tf.cond in the graph.
    return tf.cond(
        gru_lstm_utils.is_cudnn_supported_inputs(mask, time_major),
        true_fn=cudnn_gru_fn,
        false_fn=standard_gru_fn)

  if gru_lstm_utils.use_new_gru_lstm_impl():
    # Chooses the implementation dynamically based on the running device.
    (last_output, outputs, new_h,
     runtime) = tf.__internal__.execute_fn_for_device(
         {
             gru_lstm_utils.CPU_DEVICE_NAME:
                 lambda: standard_gru(**params),
             gru_lstm_utils.GPU_DEVICE_NAME:
                 lambda: gpu_gru_with_fallback(**params)
         }, lambda: standard_gru(**params))
  else:
    # Each time a `tf.function` is called, we will give it a unique
    # identifiable API name, so that Grappler won't get confused when it
    # sees multiple GRU layers added into same graph, and it will be able
    # to pair up the different implementations across them.
    api_name = 'gru_' + str(uuid.uuid4())
    supportive_attribute = {
        'time_major': time_major,
        'go_backwards': go_backwards,
    }
    defun_standard_gru = gru_lstm_utils.generate_defun_backend(
        api_name, gru_lstm_utils.CPU_DEVICE_NAME, standard_gru,
        supportive_attribute)
    defun_gpu_gru = gru_lstm_utils.generate_defun_backend(
        api_name, gru_lstm_utils.GPU_DEVICE_NAME, gpu_gru_with_fallback,
        supportive_attribute)
    # Call the normal GRU impl and register the cuDNN impl function. The
    # grappler will kick in during session execution to optimize the graph.
    last_output, outputs, new_h, runtime = defun_standard_gru(**params)
    gru_lstm_utils.function_register(defun_gpu_gru, **params)
  return last_output, outputs, new_h, runtime
| |
import asyncio
from torba.testcase import AsyncioTestCase
from tests import dht_mocks
from lbry.dht import constants
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
# Expected (range_min, range_max) pairs for each kbucket after the splits
# performed in TestRouting.test_split_buckets. Each bucket covers twice the
# keyspace of the previous one; together the ranges must cover the full
# 2**384 node-id keyspace (asserted by the test).
expected_ranges = [
    (
        0,
        2462625387274654950767440006258975862817483704404090416746768337765357610718575663213391640930307227550414249394176
    ),
    (
        2462625387274654950767440006258975862817483704404090416746768337765357610718575663213391640930307227550414249394176,
        4925250774549309901534880012517951725634967408808180833493536675530715221437151326426783281860614455100828498788352
    ),
    (
        4925250774549309901534880012517951725634967408808180833493536675530715221437151326426783281860614455100828498788352,
        9850501549098619803069760025035903451269934817616361666987073351061430442874302652853566563721228910201656997576704
    ),
    (
        9850501549098619803069760025035903451269934817616361666987073351061430442874302652853566563721228910201656997576704,
        19701003098197239606139520050071806902539869635232723333974146702122860885748605305707133127442457820403313995153408
    ),
    (
        19701003098197239606139520050071806902539869635232723333974146702122860885748605305707133127442457820403313995153408,
        39402006196394479212279040100143613805079739270465446667948293404245721771497210611414266254884915640806627990306816
    )
]
class TestRouting(AsyncioTestCase):
    """Tests for the DHT routing table using an in-process mocked UDP network."""

    async def test_fill_one_bucket(self):
        """Adds 8 peers close in id-space and verifies they land in one bucket."""
        loop = asyncio.get_event_loop()
        # Sequential small ids so all remote peers fall in the same kbucket
        # relative to node 0 — presumably; verify against generate_id().
        peer_addresses = [
            (constants.generate_id(1), '1.2.3.1'),
            (constants.generate_id(2), '1.2.3.2'),
            (constants.generate_id(3), '1.2.3.3'),
            (constants.generate_id(4), '1.2.3.4'),
            (constants.generate_id(5), '1.2.3.5'),
            (constants.generate_id(6), '1.2.3.6'),
            (constants.generate_id(7), '1.2.3.7'),
            (constants.generate_id(8), '1.2.3.8'),
            (constants.generate_id(9), '1.2.3.9'),
        ]
        with dht_mocks.mock_network_loop(loop):
            nodes = {
                i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333, address)
                for i, (node_id, address) in enumerate(peer_addresses)
            }
            node_1 = nodes[0]
            contact_cnt = 0
            for i in range(1, len(peer_addresses)):
                # Routing table grows by exactly one peer per successful add.
                self.assertEqual(len(node_1.protocol.routing_table.get_peers()), contact_cnt)
                node = nodes[i]
                peer = node_1.protocol.peer_manager.get_kademlia_peer(
                    node.protocol.node_id, node.protocol.external_ip,
                    udp_port=node.protocol.udp_port
                )
                added = await node_1.protocol._add_peer(peer)
                self.assertEqual(True, added)
                contact_cnt += 1
            # All 8 remote peers fit in a single bucket (bucket size k=8).
            self.assertEqual(len(node_1.protocol.routing_table.get_peers()), 8)
            self.assertEqual(node_1.protocol.routing_table.buckets_with_contacts(), 1)
            for node in nodes.values():
                # NOTE(review): this test stops via node.protocol.stop() while
                # test_split_buckets uses node.stop() — confirm both clean up fully.
                node.protocol.stop()

    async def test_split_buckets(self):
        """Adds ~200 peers and verifies the resulting bucket ranges and counts."""
        loop = asyncio.get_event_loop()
        peer_addresses = [
            (constants.generate_id(1), '1.2.3.1'),
        ]
        for i in range(2, 200):
            peer_addresses.append((constants.generate_id(i), f'1.2.3.{i}'))
        with dht_mocks.mock_network_loop(loop):
            nodes = {
                i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333, address)
                for i, (node_id, address) in enumerate(peer_addresses)
            }
            node_1 = nodes[0]
            for i in range(1, len(peer_addresses)):
                node = nodes[i]
                peer = node_1.protocol.peer_manager.get_kademlia_peer(
                    node.protocol.node_id, node.protocol.external_ip,
                    udp_port=node.protocol.udp_port
                )
                # set all of the peers to good (as to not attempt pinging stale ones during split)
                # NOTE(review): report_last_replied is called twice — presumably
                # needed to cross the "good contact" threshold; confirm.
                node_1.protocol.peer_manager.report_last_replied(peer.address, peer.udp_port)
                node_1.protocol.peer_manager.report_last_replied(peer.address, peer.udp_port)
                await node_1.protocol._add_peer(peer)
                # check that bucket 0 is always the one covering the local node id
                self.assertEqual(True, node_1.protocol.routing_table.buckets[0].key_in_range(node_1.protocol.node_id))
            # Only a subset of the 199 added peers is retained after splits;
            # 40 is the expected count for this id distribution.
            self.assertEqual(40, len(node_1.protocol.routing_table.get_peers()))
            self.assertEqual(len(expected_ranges), len(node_1.protocol.routing_table.buckets))
            covered = 0
            for (expected_min, expected_max), bucket in zip(expected_ranges, node_1.protocol.routing_table.buckets):
                self.assertEqual(expected_min, bucket.range_min)
                self.assertEqual(expected_max, bucket.range_max)
                covered += bucket.range_max - bucket.range_min
            # The buckets must tile the entire 384-bit keyspace with no gaps.
            self.assertEqual(2**384, covered)
            for node in nodes.values():
                node.stop()
# from binascii import hexlify, unhexlify
#
# from twisted.trial import unittest
# from twisted.internet import defer
# from lbry.dht import constants
# from lbry.dht.routingtable import TreeRoutingTable
# from lbry.dht.contact import ContactManager
# from lbry.dht.distance import Distance
# from lbry.utils import generate_id
#
#
# class FakeRPCProtocol:
# """ Fake RPC protocol; allows lbry.dht.contact.Contact objects to "send" RPCs """
# def sendRPC(self, *args, **kwargs):
# return defer.succeed(None)
#
#
# class TreeRoutingTableTest(unittest.TestCase):
# """ Test case for the RoutingTable class """
# def setUp(self):
# self.contact_manager = ContactManager()
# self.nodeID = generate_id(b'node1')
# self.protocol = FakeRPCProtocol()
# self.routingTable = TreeRoutingTable(self.nodeID)
#
# def test_distance(self):
# """ Test to see if distance method returns correct result"""
# d = Distance(bytes((170,) * 48))
# result = d(bytes((85,) * 48))
# expected = int(hexlify(bytes((255,) * 48)), 16)
# self.assertEqual(result, expected)
#
# @defer.inlineCallbacks
# def test_add_contact(self):
# """ Tests if a contact can be added and retrieved correctly """
# # Create the contact
# contact_id = generate_id(b'node2')
# contact = self.contact_manager.make_contact(contact_id, '127.0.0.1', 9182, self.protocol)
# # Now add it...
# yield self.routingTable.addContact(contact)
# # ...and request the closest nodes to it (will retrieve it)
# closest_nodes = self.routingTable.findCloseNodes(contact_id)
# self.assertEqual(len(closest_nodes), 1)
# self.assertIn(contact, closest_nodes)
#
# @defer.inlineCallbacks
# def test_get_contact(self):
# """ Tests if a specific existing contact can be retrieved correctly """
# contact_id = generate_id(b'node2')
# contact = self.contact_manager.make_contact(contact_id, '127.0.0.1', 9182, self.protocol)
# # Now add it...
# yield self.routingTable.addContact(contact)
# # ...and get it again
# same_contact = self.routingTable.getContact(contact_id)
# self.assertEqual(contact, same_contact, 'getContact() should return the same contact')
#
# @defer.inlineCallbacks
# def test_add_parent_node_as_contact(self):
# """
# Tests the routing table's behaviour when attempting to add its parent node as a contact
# """
# # Create a contact with the same ID as the local node's ID
# contact = self.contact_manager.make_contact(self.nodeID, '127.0.0.1', 9182, self.protocol)
# # Now try to add it
# yield self.routingTable.addContact(contact)
# # ...and request the closest nodes to it using FIND_NODE
# closest_nodes = self.routingTable.findCloseNodes(self.nodeID, constants.k)
# self.assertNotIn(contact, closest_nodes, 'Node added itself as a contact')
#
# @defer.inlineCallbacks
# def test_remove_contact(self):
# """ Tests contact removal """
# # Create the contact
# contact_id = generate_id(b'node2')
# contact = self.contact_manager.make_contact(contact_id, '127.0.0.1', 9182, self.protocol)
# # Now add it...
# yield self.routingTable.addContact(contact)
# # Verify addition
# self.assertEqual(len(self.routingTable._buckets[0]), 1, 'Contact not added properly')
# # Now remove it
# self.routingTable.removeContact(contact)
# self.assertEqual(len(self.routingTable._buckets[0]), 0, 'Contact not removed properly')
#
# @defer.inlineCallbacks
# def test_split_bucket(self):
# """ Tests if the the routing table correctly dynamically splits k-buckets """
# self.assertEqual(self.routingTable._buckets[0].rangeMax, 2**384,
# 'Initial k-bucket range should be 0 <= range < 2**384')
# # Add k contacts
# for i in range(constants.k):
# node_id = generate_id(b'remote node %d' % i)
# contact = self.contact_manager.make_contact(node_id, '127.0.0.1', 9182, self.protocol)
# yield self.routingTable.addContact(contact)
#
# self.assertEqual(len(self.routingTable._buckets), 1,
# 'Only k nodes have been added; the first k-bucket should now '
# 'be full, but should not yet be split')
# # Now add 1 more contact
# node_id = generate_id(b'yet another remote node')
# contact = self.contact_manager.make_contact(node_id, '127.0.0.1', 9182, self.protocol)
# yield self.routingTable.addContact(contact)
# self.assertEqual(len(self.routingTable._buckets), 2,
# 'k+1 nodes have been added; the first k-bucket should have been '
# 'split into two new buckets')
# self.assertNotEqual(self.routingTable._buckets[0].rangeMax, 2**384,
# 'K-bucket was split, but its range was not properly adjusted')
# self.assertEqual(self.routingTable._buckets[1].rangeMax, 2**384,
# 'K-bucket was split, but the second (new) bucket\'s '
# 'max range was not set properly')
# self.assertEqual(self.routingTable._buckets[0].rangeMax,
# self.routingTable._buckets[1].rangeMin,
# 'K-bucket was split, but the min/max ranges were '
# 'not divided properly')
#
# @defer.inlineCallbacks
# def test_full_split(self):
# """
# Test that a bucket is not split if it is full, but the new contact is not closer than the kth closest contact
# """
#
# self.routingTable._parentNodeID = bytes(48 * b'\xff')
#
# node_ids = [
# b"100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
# b"010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
# ]
#
# # Add k contacts
# for nodeID in node_ids:
# # self.assertEquals(nodeID, node_ids[i].decode('hex'))
# contact = self.contact_manager.make_contact(unhexlify(nodeID), '127.0.0.1', 9182, self.protocol)
# yield self.routingTable.addContact(contact)
# self.assertEqual(len(self.routingTable._buckets), 2)
# self.assertEqual(len(self.routingTable._buckets[0]._contacts), 8)
# self.assertEqual(len(self.routingTable._buckets[1]._contacts), 2)
#
# # try adding a contact who is further from us than the k'th known contact
# nodeID = b'020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
# nodeID = unhexlify(nodeID)
# contact = self.contact_manager.make_contact(nodeID, '127.0.0.1', 9182, self.protocol)
# self.assertFalse(self.routingTable._shouldSplit(self.routingTable._kbucketIndex(contact.id), contact.id))
# yield self.routingTable.addContact(contact)
# self.assertEqual(len(self.routingTable._buckets), 2)
# self.assertEqual(len(self.routingTable._buckets[0]._contacts), 8)
# self.assertEqual(len(self.routingTable._buckets[1]._contacts), 2)
# self.assertNotIn(contact, self.routingTable._buckets[0]._contacts)
# self.assertNotIn(contact, self.routingTable._buckets[1]._contacts)
#
# class KeyErrorFixedTest(unittest.TestCase):
# """ Basic tests case for boolean operators on the Contact class """
#
# def setUp(self):
# own_id = (2 ** constants.key_bits) - 1
# # carefully chosen own_id. here's the logic
# # we want a bunch of buckets (k+1, to be exact), and we want to make sure own_id
# # is not in bucket 0. so we put own_id at the end so we can keep splitting by adding to the
# # end
#
# self.table = lbry.dht.routingtable.OptimizedTreeRoutingTable(own_id)
#
# def fill_bucket(self, bucket_min):
# bucket_size = lbry.dht.constants.k
# for i in range(bucket_min, bucket_min + bucket_size):
# self.table.addContact(lbry.dht.contact.Contact(long(i), '127.0.0.1', 9999, None))
#
# def overflow_bucket(self, bucket_min):
# bucket_size = lbry.dht.constants.k
# self.fill_bucket(bucket_min)
# self.table.addContact(
# lbry.dht.contact.Contact(long(bucket_min + bucket_size + 1),
# '127.0.0.1', 9999, None))
#
# def testKeyError(self):
#
# # find middle, so we know where bucket will split
# bucket_middle = self.table._buckets[0].rangeMax / 2
#
# # fill last bucket
# self.fill_bucket(self.table._buckets[0].rangeMax - lbry.dht.constants.k - 1)
# # -1 in previous line because own_id is in last bucket
#
# # fill/overflow 7 more buckets
# bucket_start = 0
# for i in range(0, lbry.dht.constants.k):
# self.overflow_bucket(bucket_start)
# bucket_start += bucket_middle / (2 ** i)
#
# # replacement cache now has k-1 entries.
# # adding one more contact to bucket 0 used to cause a KeyError, but it should work
# self.table.addContact(
# lbry.dht.contact.Contact(long(lbry.dht.constants.k + 2), '127.0.0.1', 9999, None))
#
# # import math
# # print ""
# # for i, bucket in enumerate(self.table._buckets):
# # print "Bucket " + str(i) + " (2 ** " + str(
# # math.log(bucket.rangeMin, 2) if bucket.rangeMin > 0 else 0) + " <= x < 2 ** "+str(
# # math.log(bucket.rangeMax, 2)) + ")"
# # for c in bucket.getContacts():
# # print " contact " + str(c.id)
# # for key, bucket in self.table._replacementCache.items():
# # print "Replacement Cache for Bucket " + str(key)
# # for c in bucket:
# # print " contact " + str(c.id)
| |
##
# \file reconstruct_volume.py
#
# \author Michael Ebner (michael.ebner.14@ucl.ac.uk)
# \date Nov 2016
#
import SimpleITK as sitk
import numpy as np
import os
import niftymic.base.stack as st
import niftymic.reconstruction.admm_solver as admm
import niftymic.reconstruction.tikhonov_solver as tk
import niftymic.utilities.brain_stripping as bs
import niftymic.utilities.intensity_correction as ic
import niftymic.base.psf as psf
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
import volumetricreconstructionfromprintedfilms.utilities.input_argparser as inargs
import volumetricreconstructionfromprintedfilms.utilities.utilities as utils
# noinspection PyPep8Naming
def main():
    """Reconstruct a volumetric representation from a motion-corrected stack.

    Reads the transformations estimated by 'correct_motion.py', resamples the
    stack to processing/reconstruction grids, optionally intensity-corrects it
    against a reference image, and runs an in-plane deconvolution step
    (Tikhonov, or Tikhonov-initialized ADMM for TV regularization). All
    intermediate and final volumes are written to the output directory.

    Returns:
        0 on success.
    """
    time_start = ph.start_timing()

    input_parser = inargs.InputArgparser(
        description="Based on the estimated transformations obtained by "
        "'correct_motion.py' a volumetric representation is "
        "reconstructed. An additional in-plane denoising "
        "step is performed for improved visual appearance",
    )
    input_parser.add_stack(required=True)
    input_parser.add_reference(required=True)
    input_parser.add_dir_input(required=True)
    input_parser.add_dir_output(
        required=True,
        help="Output directory to store volumetric "
        "reconstruction results")
    input_parser.add_regularization(default="TV")
    input_parser.add_alpha(
        default=5  # TV
        # default=0.3  # TK1
    )
    input_parser.add_option(
        option_string="--intensity-correction",
        type=int,
        default=1,
        help="Turn on/off intensity correction.")
    input_parser.add_option(
        option_string="--additional-frame",
        type=float,
        default=0,
        help="Specify extra surrounding frame (in mm) to be added in the "
        "in-plane dimensions for the reconstructed image. "
        "The slice-thickness remains unchanged.")
    input_parser.add_rho(default=0.5)
    input_parser.add_iterations(default=10)
    input_parser.add_iter_max(default=10)
    input_parser.add_sigma2(default=0.25)
    input_parser.add_resolution_processing(default=0.25)
    input_parser.add_resolution_reconstruction(default=1.)
    input_parser.add_verbose(default=False)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # ---------------------------------------------------------------------
    # Read reference image
    ph.print_title("Read Data")
    ph.print_info("Read reference image")
    reference_image = st.Stack.from_filename(args.reference)

    # ---------------------------------------------------------------------
    # Read motion correction results
    ph.print_info("Read motion correction results")
    slice_transforms_sitk, stack_corrected = \
        utils.read_results_motion_correction(args.dir_input)
    stack0 = st.Stack.from_filename(args.stack)

    # Extract filename without filename extension
    filename_stack = os.path.basename(args.stack).split(".")[0]

    # ---------------------------------------------------------------------
    # Define resampling and reconstruction grids
    ph.print_info("Define resampling and reconstruction grids")
    slice_thickness = stack0.sitk.GetSpacing()[2]
    # Multiplying by zero keeps the image geometry but blanks intensities so
    # the result can serve purely as a resampling target grid.
    resampling_grid_sitk = 0 * sitkh.get_downsampled_sitk_image(
        stack_corrected.sitk,
        new_spacing=(args.resolution_processing,
                     args.resolution_processing,
                     slice_thickness)
    )
    # Get enlarged FOV
    resampling_grid_sitk = sitkh.get_altered_field_of_view_sitk_image(
        resampling_grid_sitk,
        boundary_i=args.additional_frame,
        boundary_j=args.additional_frame,
        boundary_k=0,
        unit="mm")
    recon_grid_sitk = 0 * sitkh.get_downsampled_sitk_image(
        resampling_grid_sitk,
        new_spacing=(args.resolution_reconstruction,
                     args.resolution_reconstruction,
                     slice_thickness))
    recon_grid = st.Stack.from_sitk_image(recon_grid_sitk)

    # ---------------------------------------------------------------------
    # Correct for motion
    ph.print_title("Correct for motion")
    stack = st.Stack.from_stack(stack0)
    stack.update_motion_correction_of_slices(slice_transforms_sitk)

    # Default pixel value for resampling
    # Rationale: Due to high background noise, a zero pixel value would not be
    # suitable
    # noinspection PyTypeChecker
    default_pixel_value = np.percentile(
        np.array(sitk.GetArrayFromImage(stack.sitk)), 0.1)

    # Write results
    # BUG FIX: use floor division; plain '/' yields a float under Python 3,
    # which is not a valid sequence index for slice_transforms_sitk[i].
    i = len(slice_transforms_sitk) // 2  # select midslice trafo for alignment
    stack0.update_motion_correction(slice_transforms_sitk[i])
    stack_naivelyscaled_recon_grid = \
        stack0.get_resampled_stack_from_slices(
            resampling_grid=recon_grid_sitk,
            interpolator="BSpline",
            default_pixel_value=default_pixel_value)
    stack_naivelyscaled_recon_grid.set_filename(
        filename_stack + "_recon-space")
    stack_naivelyscaled_recon_grid.write(args.dir_output)
    stack_motioncorrected_recon_grid = \
        stack.get_resampled_stack_from_slices(
            resampling_grid=recon_grid_sitk,
            interpolator="BSpline",
            default_pixel_value=default_pixel_value)
    stack_motioncorrected_recon_grid.set_filename(
        filename_stack + "_motion-corrected")
    stack_motioncorrected_recon_grid.write(args.dir_output)

    # verbose:
    if args.verbose:
        sitkh.show_stacks([
            stack_naivelyscaled_recon_grid,
            stack_motioncorrected_recon_grid,
            reference_image.get_resampled_stack(
                resampling_grid=recon_grid_sitk,
                interpolator="BSpline")
        ])

    # ---------------------------------------------------------------------
    # Get brain mask for reference image
    ph.print_title("Get brain mask for reference image")
    brain_stripping = bs.BrainStripping()
    brain_stripping.set_input_image_sitk(reference_image.sitk)
    brain_stripping.run()
    reference_image_sitk_mask = brain_stripping.get_brain_mask_sitk()
    reference_image = st.Stack.from_sitk_image(
        reference_image.sitk,
        reference_image.get_filename(),
        reference_image_sitk_mask)

    # ---------------------------------------------------------------------
    # Resampling to processing and reconstruction grids
    ph.print_title("Resample motion-corrected stack to processing grid")
    # noinspection PyTypeChecker
    default_pixel_value = np.percentile(
        np.array(sitk.GetArrayFromImage(stack.sitk)), 0.1)
    stack_resampled = stack.get_resampled_stack_from_slices(
        resampling_grid=resampling_grid_sitk,
        interpolator="BSpline",
        default_pixel_value=default_pixel_value)
    reference_image_resampled = reference_image.get_resampled_stack(
        resampling_grid=resampling_grid_sitk, interpolator="BSpline")
    stack_resampled.set_filename(filename_stack + "_motion-corrected")

    ph.print_title("Resample original stack to reconstruction grid")
    stack0_resampled = st.Stack.from_stack(stack0)
    stack0_resampled = stack0_resampled.get_resampled_stack_from_slices(
        resampling_grid=recon_grid_sitk,
        interpolator="BSpline",
        default_pixel_value=default_pixel_value)
    stack0_resampled.set_filename(filename_stack)

    # ---------------------------------------------------------------------
    if args.intensity_correction:
        # Perform intensity correction
        # sitkh.show_stacks([stack0_resampled, stack_resampled], title=["0","1"])
        ph.print_title("Perform intensity correction")
        intensity_correction = ic.IntensityCorrection(
            stack=stack_resampled,
            reference=reference_image_resampled,
            use_reference_mask=True,
            use_verbose=True)
        intensity_correction.set_additional_stack(stack0_resampled)
        intensity_correction.use_individual_slice_correction(False)
        intensity_correction.run_affine_intensity_correction()
        # intensity_correction.use_individual_slice_correction(False)
        intensity_correction.run_lower_percentile_capping_of_stack(
            percentile=25)
        # intensity_correction.use_individual_slice_correction(True)
        intensity_correction.run_linear_intensity_correction()
        # noinspection PyPep8Naming
        stack_intensityCorrected = intensity_correction.get_intensity_corrected_stack()
        stack0_intensityCorrected = \
            intensity_correction.get_intensity_corrected_additional_stack()
        stack_intensityCorrected.set_filename(
            filename_stack + "_motion-corrected-ic")
        stack0_intensityCorrected.set_filename(
            filename_stack + "_recon-space-ic")
    else:
        stack_intensityCorrected = stack_resampled
        stack0_intensityCorrected = stack0_resampled

    # Write results
    if args.intensity_correction:
        stack_naivelyscaledic_recon_grid = stack0_intensityCorrected
        stack_naivelyscaledic_recon_grid.set_filename(
            filename_stack + "_recon-space-ic")
        stack_naivelyscaledic_recon_grid.write(args.dir_output)
        stack_motioncorrectedic_recon_grid = \
            stack_intensityCorrected.get_resampled_stack_from_slices(
                resampling_grid=recon_grid_sitk, interpolator="BSpline")
        stack_motioncorrectedic_recon_grid.set_filename(
            filename_stack + "_motion-corrected-ic")
        stack_motioncorrectedic_recon_grid.write(args.dir_output)

    # verbose:
    if args.verbose:
        if args.intensity_correction:
            sitkh.show_stacks(
                [stack_naivelyscaled_recon_grid,
                 stack_motioncorrected_recon_grid,
                 stack_naivelyscaledic_recon_grid,
                 stack_motioncorrectedic_recon_grid,
                 reference_image_resampled,
                 ])

    # ---------------------------------------------------------------------
    # Extract mask from reference
    ph.print_title("Extract mask from reference")
    stack_masked = stack_intensityCorrected

    # ---------------------------------------------------------------------
    # Perform SR step
    ph.print_title("Perform In-plane Deconvolution Step")
    # Deconvolution only in-plane: the through-plane covariance entry is kept
    # (near) zero so the slice-thickness direction is not deconvolved.
    if args.sigma2 < 0:
        # Estimate PSF automatically given original in-plane resolution
        cov = psf.PSF().get_gaussian_psf_covariance_matrix_from_spacing(
            stack0.sitk.GetSpacing())
        cov[2, 2] = 1e-5
    else:
        # Use predefined covariance
        cov = np.array([args.sigma2, args.sigma2, 1e-5])
    if args.regularization != "TV":
        volumetric_recon = tk.TikhonovSolver(
            stacks=[stack_masked],
            reconstruction=recon_grid,
            alpha=args.alpha,
            iter_max=args.iter_max,
            deconvolution_mode="predefined_covariance",
            predefined_covariance=cov,
            x_scale=1,
        )
    else:
        # TV: run a short Tikhonov solve first to get an initial value
        volumetric_recon = tk.TikhonovSolver(
            stacks=[stack_masked],
            reconstruction=recon_grid,
            alpha=0.02,
            iter_max=5,
            deconvolution_mode="predefined_covariance",
            predefined_covariance=cov,
            x_scale=1,
        )
        volumetric_recon.run()
        HR_volume0 = volumetric_recon.get_reconstruction()
        volumetric_recon = admm.ADMMSolver(
            stacks=[stack_masked],
            reconstruction=HR_volume0,
            alpha=args.alpha,
            iter_max=args.iter_max,
            deconvolution_mode="predefined_covariance",
            predefined_covariance=cov,
            rho=args.rho,
            iterations=args.iterations,
            x_scale=1,
        )
    volumetric_recon.run()
    stack_reconstructed = volumetric_recon.get_reconstruction()
    stack_reconstructed.set_filename(
        volumetric_recon.get_setting_specific_filename(prefix="recon_"))
    if args.verbose:
        tmp = [
            stack_naivelyscaled_recon_grid,
            stack_motioncorrected_recon_grid,
        ]
        if args.intensity_correction:
            tmp.append(stack_naivelyscaledic_recon_grid)
            tmp.append(stack_motioncorrectedic_recon_grid)
        tmp.append(stack_reconstructed)
        tmp.append(reference_image_resampled)
        sitkh.show_stacks(tmp)

    # Write results
    stack_reconstructed.write(
        args.dir_output,
        stack.get_filename() + "_" + stack_reconstructed.get_filename())

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary Motion Correction")
    ph.print_info("Computational time: %s" % elapsed_time)
    return 0
# Script entry point: run the full reconstruction pipeline.
if __name__ == '__main__':
    main()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The Slicer classes.
The main purpose of these classes is to have auto adjust of axes size to
the data with different layout of cuts.
"""
from __future__ import absolute_import
import numpy as np
from nipy.utils import is_iterable
from nipy.utils.skip_test import skip_if_running_nose
try:
import matplotlib as mpl
import pylab as pl
from matplotlib import transforms
except ImportError:
skip_if_running_nose('Could not import matplotlib')
# Local imports
from .coord_tools import coord_transform, get_bounds, get_mask_bounds, \
find_cut_coords
from .edge_detect import _edge_map
from . import cm
from ..datasets import VolumeImg
################################################################################
# Bugware to have transparency work OK with MPL < .99.1
if mpl.__version__ < '0.99.1':
    # We wrap the lut as a callable and replace its evaluation to put
    # alpha to zero where the mask is true. This is what is done in
    # MPL >= .99.1
    # NOTE(review): the guard compares version strings lexicographically;
    # this behaves correctly for the '0.9x' era releases it targets.
    from matplotlib import colors
    class CMapProxy(colors.Colormap):
        """Colormap wrapper that zeroes the alpha channel of masked entries."""
        def __init__(self, lut):
            # The wrapped lookup table (a Colormap-like callable).
            self.__lut = lut
        def __call__(self, arr, *args, **kwargs):
            # Evaluate the wrapped lut; for masked arrays, force the alpha
            # (last RGBA component) to 0 wherever the input is masked.
            results = self.__lut(arr, *args, **kwargs)
            if not isinstance(arr, np.ma.MaskedArray):
                return results
            else:
                results[arr.mask, -1] = 0
            return results
        def __getattr__(self, attr):
            # Dark magic: we are delegating any call to the lut instance
            # we wrap (own __dict__ first, then the wrapped lut).
            return self.__dict__.get(attr, getattr(self.__lut, attr))
def _xyz_order(map, affine):
    """Reorder *map* so its axes are in x, y, z order.

    Returns the reordered data array together with the matching affine.
    """
    as_img = VolumeImg(map, affine=affine, world_space='mine')
    as_img = as_img.xyz_ordered(resample=True, copy=False)
    return as_img.get_data(), as_img.affine
################################################################################
# class CutAxes
################################################################################
class CutAxes(object):
    """ An MPL axis-like object that displays a cut of 3D volumes
    """

    def __init__(self, ax, direction, coord):
        """ Create an object rendering one cut of 3D volumes.

        Parameters
        ==========
        ax: a MPL axes instance
            The axes in which the plots will be drawn
        direction: {'x', 'y', 'z'}
            The directions of the cut
        coord: float
            The coordinnate along the direction of the cut
        """
        self.ax = ax
        self.direction = direction
        self.coord = coord
        self._object_bounds = []

    def do_cut(self, map, affine):
        """ Extract the 2D slice of *map* at this axes' cut position.

        Parameters
        ==========
        map: 3D ndarray
            The 3D volume to cut
        affine: 4x4 ndarray
            The affine of the volume
        """
        axis_index = 'xyz'.index(self.direction)
        world_position = [0, 0, 0]
        world_position[axis_index] = self.coord
        # Convert the world-space cut coordinate to voxel indices.
        voxel = [int(np.round(c)) for c in
                 coord_transform(world_position[0],
                                 world_position[1],
                                 world_position[2],
                                 np.linalg.inv(affine))]
        if self.direction == 'x':
            return np.rot90(map[voxel[0], :, :])
        if self.direction == 'y':
            return np.rot90(map[:, voxel[1], :])
        if self.direction == 'z':
            return np.rot90(map[:, :, voxel[2]])
        raise ValueError('Invalid value for direction %s' %
                         self.direction)

    def draw_cut(self, cut, data_bounds, bounding_box,
                 type='imshow', **kwargs):
        """ Render an already-extracted slice with the given MPL method. """
        # kwargs massaging
        kwargs['origin'] = 'upper'
        if mpl.__version__ < '0.99.1':
            cmap = kwargs.get('cmap',
                              pl.cm.cmap_d[pl.rcParams['image.cmap']])
            kwargs['cmap'] = CMapProxy(cmap)
        # Which two entries of (data_bounds, bounding_box) apply depends on
        # the direction of the cut.
        bound_indices = {'y': (0, 2), 'x': (1, 2), 'z': (0, 1)}
        if self.direction not in bound_indices:
            raise ValueError('Invalid value for direction %s' %
                             self.direction)
        first, second = bound_indices[self.direction]
        xmin, xmax = data_bounds[first]
        zmin, zmax = data_bounds[second]
        xmin_, xmax_ = bounding_box[first]
        zmin_, zmax_ = bounding_box[second]
        getattr(self.ax, type)(cut, extent=(xmin, xmax, zmin, zmax),
                               **kwargs)
        self._object_bounds.append((xmin_, xmax_, zmin_, zmax_))
        self.ax.axis(self.get_object_bounds())

    def get_object_bounds(self):
        """ Return the bounds of the objects on this axes.
        """
        if not self._object_bounds:
            # Nothing has been plotted yet
            return -.01, .01, -.01, .01
        xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T
        return (min(xmins.min(), xmaxs.min()),
                max(xmaxs.max(), xmins.max()),
                min(ymins.min(), ymaxs.min()),
                max(ymaxs.max(), ymins.max()))

    def draw_left_right(self, size, bg_color, **kwargs):
        """ Annotate the cut with 'L' and 'R' labels (skipped for x cuts). """
        if self.direction == 'x':
            return
        label_box = dict(boxstyle="square,pad=0",
                         ec=bg_color, fc=bg_color, alpha=1)
        self.ax.text(.1, .95, 'L',
                     transform=self.ax.transAxes,
                     horizontalalignment='left',
                     verticalalignment='top',
                     size=size,
                     bbox=label_box,
                     **kwargs)
        self.ax.text(.9, .95, 'R',
                     transform=self.ax.transAxes,
                     horizontalalignment='right',
                     verticalalignment='top',
                     size=size,
                     bbox=label_box,
                     **kwargs)

    def draw_position(self, size, bg_color, **kwargs):
        """ Write the cut's direction and coordinate in the lower left. """
        self.ax.text(0, 0, '%s=%i' % (self.direction, self.coord),
                     transform=self.ax.transAxes,
                     horizontalalignment='left',
                     verticalalignment='bottom',
                     size=size,
                     bbox=dict(boxstyle="square,pad=0",
                               ec=bg_color, fc=bg_color, alpha=1),
                     **kwargs)
################################################################################
# class BaseSlicer
################################################################################
class BaseSlicer(object):
""" The main purpose of these class is to have auto adjust of axes size
to the data with different layout of cuts.
"""
# This actually encodes the figsize for only one axe
_default_figsize = [2.2, 2.6]
def __init__(self, cut_coords, axes=None, black_bg=False):
""" Create 3 linked axes for plotting orthogonal cuts.
Parameters
----------
cut_coords: 3 tuple of ints
The cut position, in world space.
axes: matplotlib axes object, optional
The axes that will be subdivided in 3.
black_bg: boolean, optional
If True, the background of the figure will be put to
black. If you whish to save figures with a black background,
you will need to pass "facecolor='k', edgecolor='k'" to
pylab's savefig.
"""
self._cut_coords = cut_coords
if axes is None:
axes = pl.axes((0., 0., 1., 1.))
axes.axis('off')
self.frame_axes = axes
axes.set_zorder(1)
bb = axes.get_position()
self.rect = (bb.x0, bb.y0, bb.x1, bb.y1)
self._black_bg = black_bg
self._init_axes()
@staticmethod
def find_cut_coords(data=None, affine=None, threshold=None,
cut_coords=None):
# Implement this as a staticmethod or a classmethod when
# subclassing
raise NotImplementedError
    @classmethod
    def init_with_figure(cls, data=None, affine=None, threshold=None,
                         cut_coords=None, figure=None, axes=None,
                         black_bg=False, leave_space=False):
        """ Build a slicer together with the matplotlib figure/axes it
        draws into.

        Reuses the figure of *axes* (or *figure* itself) when possible;
        otherwise creates a new figure sized to the number of cuts.
        NOTE(review): `figure` may be a pl.Figure, a figure number (int),
        or None -- all three are accepted by `pl.figure` below.
        """
        cut_coords = cls.find_cut_coords(data, affine, threshold,
                                         cut_coords)
        if isinstance(axes, pl.Axes) and figure is None:
            figure = axes.figure
        if not isinstance(figure, pl.Figure):
            # Make sure that we have a figure
            figsize = cls._default_figsize[:]
            # Adjust for the number of axes
            figsize[0] *= len(cut_coords)
            facecolor = 'k' if black_bg else 'w'
            if leave_space:
                # Widen the figure to leave room on the left
                figsize[0] += 3.4
            figure = pl.figure(figure, figsize=figsize,
                               facecolor=facecolor)
        else:
            if isinstance(axes, pl.Axes):
                assert axes.figure is figure, ("The axes passed are not "
                                               "in the figure")
        if axes is None:
            # Default rect: the whole figure, shifted right if space was left
            axes = [0., 0., 1., 1.]
            if leave_space:
                axes = [0.3, 0, .7, 1.]
        if is_iterable(axes):
            # A rect was given rather than an axes instance: create the axes
            axes = figure.add_axes(axes)
        # People forget to turn their axis off, or to set the zorder, and
        # then they cannot see their slicer
        axes.axis('off')
        return cls(cut_coords, axes, black_bg)
def title(self, text, x=0.01, y=0.99, size=15, color=None,
bgcolor=None, alpha=1, **kwargs):
""" Write a title to the view.
Parameters
----------
text: string
The text of the title
x: float, optional
The horizontal position of the title on the frame in
fraction of the frame width.
y: float, optional
The vertical position of the title on the frame in
fraction of the frame height.
size: integer, optional
The size of the title text.
color: matplotlib color specifier, optional
The color of the font of the title.
bgcolor: matplotlib color specifier, optional
The color of the background of the title.
alpha: float, optional
The alpha value for the background.
kwargs:
Extra keyword arguments are passed to matplotlib's text
function.
"""
if color is None:
color = 'k' if self._black_bg else 'w'
if bgcolor is None:
bgcolor = 'w' if self._black_bg else 'k'
self.frame_axes.text(x, y, text,
transform=self.frame_axes.transAxes,
horizontalalignment='left',
verticalalignment='top',
size=size, color=color,
bbox=dict(boxstyle="square,pad=.3",
ec=bgcolor, fc=bgcolor, alpha=alpha),
**kwargs)
def plot_map(self, map, affine, threshold=None, **kwargs):
""" Plot a 3D map in all the views.
Parameters
-----------
map: 3D ndarray
The 3D map to be plotted. If it is a masked array, only
the non-masked part will be plotted.
affine: 4x4 ndarray
The affine matrix giving the transformation from voxel
indices to world space.
threshold : a number, None, or 'auto'
If None is given, the maps are not thresholded.
If a number is given, it is used to threshold the maps:
values below the threshold are plotted as transparent.
kwargs:
Extra keyword arguments are passed to imshow.
"""
if threshold is not None:
if threshold == 0:
map = np.ma.masked_equal(map, 0, copy=False)
else:
map = np.ma.masked_inside(map, -threshold, threshold,
copy=False)
self._map_show(map, affine, type='imshow', **kwargs)
def contour_map(self, map, affine, **kwargs):
""" Contour a 3D map in all the views.
Parameters
-----------
map: 3D ndarray
The 3D map to be plotted. If it is a masked array, only
the non-masked part will be plotted.
affine: 4x4 ndarray
The affine matrix giving the transformation from voxel
indices to world space.
kwargs:
Extra keyword arguments are passed to contour.
"""
self._map_show(map, affine, type='contour', **kwargs)
def _map_show(self, map, affine, type='imshow', **kwargs):
map, affine = _xyz_order(map, affine)
data_bounds = get_bounds(map.shape, affine)
(xmin, xmax), (ymin, ymax), (zmin, zmax) = data_bounds
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
xmin, xmax, ymin, ymax, zmin, zmax
if hasattr(map, 'mask'):
not_mask = np.logical_not(map.mask)
xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \
get_mask_bounds(not_mask, affine)
if kwargs.get('vmin') is None and kwargs.get('vmax') is None:
# Avoid dealing with masked arrays: they are slow
if not np.any(not_mask):
# Everything is masked
vmin = vmax = 0
else:
masked_map = np.asarray(map)[not_mask]
vmin = masked_map.min()
vmax = masked_map.max()
if kwargs.get('vmin') is None:
kwargs['vmin'] = vmin
if kwargs.get('max') is None:
kwargs['vmax'] = vmax
else:
if not 'vmin' in kwargs:
kwargs['vmin'] = map.min()
if not 'vmax' in kwargs:
kwargs['vmax'] = map.max()
bounding_box = (xmin_, xmax_), (ymin_, ymax_), (zmin_, zmax_)
# For each ax, cut the data and plot it
for cut_ax in self.axes.values():
try:
cut = cut_ax.do_cut(map, affine)
except IndexError:
# We are cutting outside the indices of the data
continue
cut_ax.draw_cut(cut, data_bounds, bounding_box,
type=type, **kwargs)
    def edge_map(self, map, affine, color='r'):
        """ Plot the edges of a 3D map in all the views.

            Parameters
            -----------
            map: 3D ndarray
                The 3D map to be plotted. If it is a masked array, only
                the non-masked part will be plotted.
            affine: 4x4 ndarray
                The affine matrix giving the transformation from voxel
                indices to world space.
            color: matplotlib color: string or (r, g, b) value
                The color used to display the edge map
        """
        map, affine = _xyz_order(map, affine)
        # Single-color colormap from cm.alpha_cmap -- presumably varying
        # alpha so that non-edge pixels stay transparent; confirm in cm.
        kwargs = dict(cmap=cm.alpha_cmap(color=color))
        data_bounds = get_bounds(map.shape, affine)
        # For each ax, cut the data and plot it
        for cut_ax in self.axes.values():
            try:
                # Slice first, then extract the edge mask of the slice.
                cut = cut_ax.do_cut(map, affine)
                edge_mask = _edge_map(cut)
            except IndexError:
                # We are cutting outside the indices of the data
                continue
            cut_ax.draw_cut(edge_mask, data_bounds, data_bounds,
                            type='imshow', **kwargs)
def annotate(self, left_right=True, positions=True, size=12, **kwargs):
""" Add annotations to the plot.
Parameters
----------
left_right: boolean, optional
If left_right is True, annotations indicating which side
is left and which side is right are drawn.
positions: boolean, optional
If positions is True, annotations indicating the
positions of the cuts are drawn.
size: integer, optional
The size of the text used.
kwargs:
Extra keyword arguments are passed to matplotlib's text
function.
"""
kwargs = kwargs.copy()
if not 'color' in kwargs:
if self._black_bg:
kwargs['color'] = 'w'
else:
kwargs['color'] = 'k'
bg_color = ('k' if self._black_bg else 'w')
if left_right:
for cut_ax in self.axes.values():
cut_ax.draw_left_right(size=size, bg_color=bg_color,
**kwargs)
if positions:
for cut_ax in self.axes.values():
cut_ax.draw_position(size=size, bg_color=bg_color,
**kwargs)
################################################################################
# class OrthoSlicer
################################################################################
class OrthoSlicer(BaseSlicer):
    """ A class to create 3 linked axes for plotting orthogonal
        cuts of 3D maps.

        Attributes
        ----------
        axes: dictionary of axes
            The 3 axes used to plot each view.
        frame_axes: axes
            The axes framing the whole set of views.

        Notes
        -----
        The extent of the different axes are adjusted to fit the data
        best in the viewing area.
    """
    @staticmethod
    def find_cut_coords(data=None, affine=None, threshold=None,
                        cut_coords=None):
        # Pick (x, y, z) world coordinates for the three cuts.  NOTE: the
        # call below resolves to the module-level find_cut_coords helper,
        # not this static method (class scope is not visible from inside
        # method bodies).
        if cut_coords is None:
            if data is None or data is False:
                cut_coords = (0, 0, 0)
            else:
                x_map, y_map, z_map = find_cut_coords(data,
                                activation_threshold=threshold)
                cut_coords = coord_transform(x_map, y_map, z_map, affine)
        return cut_coords
    def _init_axes(self):
        # Lay out three equal-width axes (y, x, z views) inside self.rect;
        # the locator below rebalances their widths at draw time.
        x0, y0, x1, y1 = self.rect
        # Create our axes:
        self.axes = dict()
        for index, direction in enumerate(('y', 'x', 'z')):
            ax = pl.axes([0.3*index*(x1-x0) + x0, y0, .3*(x1-x0), y1-y0])
            ax.axis('off')
            coord = self._cut_coords['xyz'.index(direction)]
            cut_ax = CutAxes(ax, direction, coord)
            self.axes[direction] = cut_ax
            ax.set_axes_locator(self._locator)
    def _locator(self, axes, renderer):
        """ The locator function used by matplotlib to position axes.
            Here we put the logic used to adjust the size of the axes.
        """
        x0, y0, x1, y1 = self.rect
        width_dict = dict()
        cut_ax_dict = self.axes
        x_ax = cut_ax_dict['x']
        y_ax = cut_ax_dict['y']
        z_ax = cut_ax_dict['z']
        for cut_ax in cut_ax_dict.values():
            bounds = cut_ax.get_object_bounds()
            if not bounds:
                # This happens if the call to _map_show was not
                # successful. As it happens asynchronously (during a
                # refresh of the figure) we capture the problem and
                # ignore it: it only adds a non-informative traceback
                bounds = [0, 1, 0, 1]
            xmin, xmax, ymin, ymax = bounds
            width_dict[cut_ax.ax] = (xmax - xmin)
        # Normalize widths so the three views exactly tile [x0, x1].
        total_width = float(sum(width_dict.values()))
        for ax, width in width_dict.items():
            width_dict[ax] = width/total_width*(x1 -x0)
        # Fixed display order: y view, then x, then z.
        left_dict = dict()
        left_dict[y_ax.ax] = x0
        left_dict[x_ax.ax] = x0 + width_dict[y_ax.ax]
        left_dict[z_ax.ax] = x0 + width_dict[x_ax.ax] + width_dict[y_ax.ax]
        return transforms.Bbox([[left_dict[axes], y0],
                                [left_dict[axes] + width_dict[axes], y1]])
    def draw_cross(self, cut_coords=None, **kwargs):
        """ Draw a crossbar on the plot to show where the cut is
            performed.

            Parameters
            ----------
            cut_coords: 3-tuple of floats, optional
                The position of the cross to draw. If none is passed, the
                ortho_slicer's cut coordinates are used.
            kwargs:
                Extra keyword arguments are passed to axhline
        """
        if cut_coords is None:
            cut_coords = self._cut_coords
        x, y, z = cut_coords
        kwargs = kwargs.copy()
        if not 'color' in kwargs:
            if self._black_bg:
                kwargs['color'] = '.8'
            else:
                kwargs['color'] = 'k'
        # Each view shows the two coordinates orthogonal to its direction.
        ax = self.axes['y'].ax
        ax.axvline(x, ymin=.05, ymax=.95, **kwargs)
        ax.axhline(z, **kwargs)
        ax = self.axes['x'].ax
        ax.axvline(y, ymin=.05, ymax=.95, **kwargs)
        ax.axhline(z, xmax=.95, **kwargs)
        ax = self.axes['z'].ax
        ax.axvline(x, ymin=.05, ymax=.95, **kwargs)
        ax.axhline(y, **kwargs)
def demo_ortho_slicer():
    """ A small demo of the OrthoSlicer functionality.
    """
    pl.clf()
    from .anat_cache import _AnatCache
    anat, anat_affine, _ = _AnatCache.get_anat()
    slicer = OrthoSlicer(cut_coords=(0, 0, 0))
    slicer.plot_map(anat, anat_affine, cmap=pl.cm.gray)
    return slicer
################################################################################
# class BaseStackedSlicer
################################################################################
class BaseStackedSlicer(BaseSlicer):
    """ A class to create linked axes for plotting stacked
        cuts of 3D maps.

        Attributes
        ----------
        axes: dictionary of axes
            The axes used to plot each view.
        frame_axes: axes
            The axes framing the whole set of views.

        Notes
        -----
        The extent of the different axes are adjusted to fit the data
        best in the viewing area.
    """
    @classmethod
    def find_cut_coords(cls, data=None, affine=None, threshold=None,
                        cut_coords=None):
        """ Return 10 evenly-spaced cut positions along cls._direction.

            With no data, a default bounding box is used; otherwise the
            positions span the bounds of the data's (estimated) mask.
        """
        if cut_coords is None:
            if data is None or data is False:
                bounds = ((-40, 40), (-30, 30), (-30, 75))
            else:
                if hasattr(data, 'mask'):
                    mask = np.logical_not(data.mask)
                else:
                    # The mask will be anything that is fairly different
                    # from the values in the corners
                    edge_value = float(data[0, 0, 0] + data[0, -1, 0]
                                       + data[-1, 0, 0] + data[0, 0, -1]
                                       + data[-1, -1, 0] + data[-1, 0, -1]
                                       + data[0, -1, -1] + data[-1, -1, -1]
                                      )
                    # BUG FIX: eight corner voxels are summed above, so the
                    # mean requires dividing by 8 (this used to divide by 6).
                    edge_value /= 8
                    mask = np.abs(data - edge_value) > .005*data.ptp()
                xmin, xmax, ymin, ymax, zmin, zmax = \
                                get_mask_bounds(mask, affine)
                bounds = (xmin, xmax), (ymin, ymax), (zmin, zmax)
            lower, upper = bounds['xyz'.index(cls._direction)]
            cut_coords = np.linspace(lower, upper, 10).tolist()
        return cut_coords
    def _init_axes(self):
        """ Create one axes per cut coordinate, tiled side by side. """
        x0, y0, x1, y1 = self.rect
        # Create our axes:
        self.axes = dict()
        fraction = 1. / len(self._cut_coords)
        for index, coord in enumerate(self._cut_coords):
            coord = float(coord)
            ax = pl.axes([fraction * index * (x1 - x0) + x0, y0,
                          fraction * (x1 - x0), y1 - y0])
            ax.axis('off')
            cut_ax = CutAxes(ax, self._direction, coord)
            self.axes[coord] = cut_ax
            # Let matplotlib re-run _locator on each draw so the axes
            # track the aspect ratio of the plotted data.
            ax.set_axes_locator(self._locator)
    def _locator(self, axes, renderer):
        """ The locator function used by matplotlib to position axes.
            Here we put the logic used to adjust the size of the axes.
        """
        x0, y0, x1, y1 = self.rect
        width_dict = dict()
        cut_ax_dict = self.axes
        for cut_ax in cut_ax_dict.values():
            bounds = cut_ax.get_object_bounds()
            if not bounds:
                # This happens if the call to _map_show was not
                # successful. As it happens asynchronously (during a
                # refresh of the figure) we capture the problem and
                # ignore it: it only adds a non-informative traceback
                bounds = [0, 1, 0, 1]
            xmin, xmax, ymin, ymax = bounds
            width_dict[cut_ax.ax] = (xmax - xmin)
        # Normalize widths so the views exactly tile [x0, x1].
        total_width = float(sum(width_dict.values()))
        for ax, width in width_dict.items():
            width_dict[ax] = width / total_width * (x1 - x0)
        # Stack the axes left to right in cut-coordinate order.
        left_dict = dict()
        left = float(x0)
        for coord, cut_ax in sorted(cut_ax_dict.items()):
            left_dict[cut_ax.ax] = left
            left += width_dict[cut_ax.ax]
        return transforms.Bbox([[left_dict[axes], y0],
                                [left_dict[axes] + width_dict[axes], y1]])
    def draw_cross(self, cut_coords=None, **kwargs):
        """ Draw a crossbar on the plot to show where the cut is
            performed.

            Parameters
            ----------
            cut_coords: 3-tuple of floats, optional
                The position of the cross to draw. If none is passed, the
                slicer's cut coordinates are used.
            kwargs:
                Extra keyword arguments are passed to axhline
        """
        # Stacked views each show a single plane, so there is no second
        # axis on which a crossbar would be meaningful.
        return
class XSlicer(BaseStackedSlicer):
    # Stacked slicer cutting along the x axis (sagittal views).
    _direction = 'x'
    _default_figsize = [2.2, 2.3]
class YSlicer(BaseStackedSlicer):
    # Stacked slicer cutting along the y axis (coronal views).
    _direction = 'y'
    _default_figsize = [2.6, 2.3]
class ZSlicer(BaseStackedSlicer):
    # Stacked slicer cutting along the z axis (axial views).
    # NOTE(review): unlike XSlicer/YSlicer this does not override
    # _default_figsize -- confirm whether that omission is intentional.
    _direction = 'z'
# Registry mapping the user-facing display-mode name to its slicer class.
SLICERS = dict(ortho=OrthoSlicer,
               x=XSlicer,
               y=YSlicer,
               z=ZSlicer)
| |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build infrastructure for testing HA clusters"""
import unittest
import re
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from machinetest import MachineTestMixin
# DNS domain appended to every short node name used by this test suite.
dnsdomain = "aqd-unittest.ms.com"
def host_fqdn(host):
    """ Return FQDN given a node name. """
    return "{0}.{1}".format(host, dnsdomain)
# Static description of the topology under test.  Derived keys ("cluster",
# "ip" and per-host "machine") are computed later by reset_config().
config = {
    # Buildings to add
    "building": {
        "utb1": {"address": "Unit 1 Test Park", "city": "ny", "next_rackid": "1"},
        "utb2": {"address": "Unit 2 Test Park", "city": "ny", "next_rackid": "1"},
        "utb3": {"address": "Unit 3 Test Park", "city": "ny", "next_rackid": "1"},
    },
    # Service mappings required (by service name and instance)
    "map": {
        "afs": "q.ny.ms.com",
        "bootserver": "unittest",
        "dns": "unittest",
    },
    # Racks to add
    "rack": {
        "utb11": {"row": "1", "column": "a", "building": "utb1", "fullname": "utb1-1a"},
        "utb12": {"row": "1", "column": "b", "building": "utb1", "fullname": "utb1-1b"},
        "utb21": {"row": "1", "column": "a", "building": "utb2", "fullname": "utb2-1a"},
        "utb31": {"row": "1", "column": "a", "building": "utb3", "fullname": "utb3-1a"},
    },
    # Hosts to add and cluster
    "host": {
        "utbhost01": {"cluster": "utbvcs1a", "rack": "utb11"},
        "utbhost02": {"cluster": "utbvcs1a", "rack": "utb12"},
        "utbhost03": {"cluster": "utbvcs1b", "rack": "utb11"},
        "utbhost04": {"cluster": "utbvcs1b", "rack": "utb12"},
        "utbhost05": {"cluster": "utbvcs1c", "rack": "utb11"},
        "utbhost06": {"cluster": "utbvcs1c", "rack": "utb12"},
        "utbhost07": {"cluster": "utbvcs1d", "rack": "utb11"},
        "utbhost08": {"cluster": "utbvcs1d", "rack": "utb12"},
        "utbhost09": {"cluster": "utbvcs2a", "rack": "utb11"},
        "utbhost10": {"cluster": "utbvcs2a", "rack": "utb21"},
        "utbhost11": {"cluster": "utbvcs2b", "rack": "utb11"},
        "utbhost12": {"cluster": "utbvcs2b", "rack": "utb21"},
        "utbhost13": {"cluster": "utbvcs2c", "rack": "utb11"},
        "utbhost14": {"cluster": "utbvcs2c", "rack": "utb21"},
        "utbhost15": {"cluster": "utbvcs2d", "rack": "utb11"},
        "utbhost16": {"cluster": "utbvcs2d", "rack": "utb21"},
        "utbhost17": {"cluster": "utbvcs3a", "rack": "utb21"},
        "utbhost18": {"cluster": "utbvcs3a", "rack": "utb31"},
        "utbhost19": {"cluster": "utbvcs3b", "rack": "utb21"},
        "utbhost20": {"cluster": "utbvcs3b", "rack": "utb31"},
        "utbhost21": {"cluster": "utbvcs3c", "rack": "utb21"},
        "utbhost22": {"cluster": "utbvcs3c", "rack": "utb31"},
        "utbhost23": {"cluster": "utbvcs3d", "rack": "utb21"},
        "utbhost24": {"cluster": "utbvcs3d", "rack": "utb31"},
        "utbhost25": {"cluster": "utbvcs3e", "rack": "utb21"},
        "utbhost26": {"cluster": "utbvcs3e", "rack": "utb31"},
        "utbhost27": {"cluster": "utbvcs3f", "rack": "utb21"},
        "utbhost28": {"cluster": "utbvcs3f", "rack": "utb31"},
        "utbhost29": {"cluster": "utbvcs4a", "rack": "utb11"},
        "utbhost30": {"cluster": "utbvcs4a", "rack": "utb31"},
        "utbhost31": {"cluster": "utbvcs4b", "rack": "utb11"},
        "utbhost32": {"cluster": "utbvcs4b", "rack": "utb31"},
        "utbhost33": {"cluster": "utbvcs4c", "rack": "utb11"},
        "utbhost34": {"cluster": "utbvcs4c", "rack": "utb31"},
        "utbhost35": {"cluster": "utbvcs4d", "rack": "utb11"},
        "utbhost36": {"cluster": "utbvcs4d", "rack": "utb31"},
        "utbhost37": {"cluster": "utbvcs4e", "rack": "utb11"},
        "utbhost38": {"cluster": "utbvcs4e", "rack": "utb31"},
        "utbhost39": {"cluster": "utbvcs4f", "rack": "utb11"},
        "utbhost40": {"cluster": "utbvcs4f", "rack": "utb31"},
        "utbhost41": {"cluster": "utbvcs5a", "rack": "utb11"},
        "utbhost42": {"cluster": "utbvcs5a", "rack": "utb21"},
        "utbhost43": {"cluster": "utbvcs5a", "rack": "utb31"},
        "utbhost44": {"cluster": "utbvcs5b", "rack": "utb11"},
        "utbhost45": {"cluster": "utbvcs5b", "rack": "utb21"},
        "utbhost46": {"cluster": "utbvcs5b", "rack": "utb31"},
        "utbhost47": {"cluster": "utbvcs5c", "rack": "utb11"},
        "utbhost48": {"cluster": "utbvcs5c", "rack": "utb21"},
        "utbhost49": {"cluster": "utbvcs5c", "rack": "utb31"},
        "utbhost50": {"cluster": "utbvcs5d", "rack": "utb11"},
        "utbhost51": {"cluster": "utbvcs5d", "rack": "utb21"},
        "utbhost52": {"cluster": "utbvcs5d", "rack": "utb31"},
    },
}
def reset_config():
    """ Set or reset the computed keys in the config dict needed to
        support the test cases.

        Currently this only rebuilds the derived 'cluster' key (and the
        per-host 'machine' names) from the static 'host' entries.
    """
    build_cluster_key()
def build_cluster_key():
    """ Create 'cluster' key based on entries in 'host' key in config dict,
        and create 'machine' if key is missing. """
    clusters = {}
    for host, attrs in config["host"].items():
        members = clusters.setdefault(attrs["cluster"], {})
        members.setdefault("hosts", []).append(host)
        if "machine" not in attrs:
            # Machine names mirror host names: utbhostNN -> utbmachNN.
            attrs["machine"] = re.sub("host", "mach", host)
    for members in clusters.values():
        members["hosts"] = tuple(sorted(members["hosts"]))
    config["cluster"] = clusters
class TestBuildClusters(MachineTestMixin, TestBrokerCommand):
    # Drives the broker to create the buildings, racks, hosts and HA
    # clusters described in the module-level config dict.  The numeric
    # prefixes on the test methods enforce the required execution order
    # (locations before hosts before clusters).
    @classmethod
    def setUpClass(cls):
        """ Fill in the computed bits of config dict prior to test execution """
        super(TestBuildClusters, cls).setUpClass()
        reset_config()
    def test_100_add_building(self):
        """ Add buildings needed for the use case """
        for building in config["building"]:
            args = config["building"][building]
            self.dsdb_expect("add_building_aq -building_name %s -city %s "
                             "-building_addr %s" % (building, args["city"],
                                                    args["address"]))
            self.dsdb_expect_add_campus_building("ny", building)
            self.noouttest(["add_building", "--building", building] +
                           ["--%s=%s" % (a, args[a]) for a in args])
            self.dsdb_verify()
            # Each new building also gets the required service mappings.
            for service in config["map"]:
                command = ["map_service", "--service", service,
                           "--instance", config["map"][service],
                           "--building", building] + self.valid_just_tcm
                self.noouttest(command)
    def test_110_add_rack(self):
        """ Add racks needed for the use case """
        for rack in config["rack"]:
            args = config["rack"][rack]
            cmd = ["add_rack"] + ["--%s=%s" % (a, args[a]) for a in args]
            out = self.commandtest(cmd)
            self.matchoutput(out, rack, cmd)
    def test_120_add_host(self):
        """ Add hosts needed for the use case """
        # Hosts get consecutive usable addresses from the aapb_net network;
        # the chosen IP is recorded in config["ip"] for later tests.
        ipidx = 0
        config["ip"] = {}
        for host in config["host"]:
            args = config["host"][host]
            ip = config["ip"][host] = self.net["aapb_net"].usable[ipidx]
            self.create_host(host_fqdn(host), ip, args["machine"],
                             model="utrackmount", memory=65536,
                             cpuname="utcpu", cpucount=2,
                             sda_size=500, sda_controller="sas",
                             rack=args["rack"], personality="utpers-prod",
                             osname="linux",
                             osversion=self.config.get("unittest",
                                                       "linux_version_curr"))
            ipidx += 1
    def test_130_add_cluster(self):
        """ Add clusters needed for the use case """
        for cluster in config["cluster"]:
            args = config["cluster"][cluster]
            self.noouttest(["add_cluster", "--cluster", cluster,
                            "--archetype", "hacluster",
                            "--personality", "hapersonality",
                            "--down_hosts_threshold", "0",
                            "--hub", "ny",
                            "--domain", "unittest",
                            "--max_members", len(args["hosts"])])
            # Every cluster gets two resource groups, each with one
            # (non-boot) VxVM-backed filesystem.
            for i in range(0, 2):
                rgname = "%sas%02d" % (cluster, i + 1)
                self.noouttest(["add_resourcegroup", "--resourcegroup", rgname,
                                "--cluster", cluster])
                self.noouttest(["add_filesystem", "--resourcegroup", rgname,
                                "--filesystem", rgname, "--type", "ext",
                                "--mountpoint", "/d/%s/d%d" % (cluster, i),
                                "--blockdevice",
                                "/dev/vx/dsk/%s.gnr.0/gnr.0" % rgname,
                                "--nobootmount"])
    def test_140_cluster(self):
        """ Add hosts to clusters needed for the use case """
        for cluster in config["cluster"]:
            for host in config["cluster"][cluster]["hosts"]:
                self.ignoreoutputtest(["cluster", "--cluster", cluster,
                                       "--hostn", host_fqdn(host)])
            self.noouttest(["update_cluster", "--cluster", cluster,
                            "--fix_location"])
if __name__ == '__main__':
    # Allow running this module's tests directly, outside the full suite.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestBuildClusters)
    unittest.TextTestRunner(verbosity=2).run(suite)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file is based, although a rewrite, on MIT-licensed code from the Bottle web framework.
"""
import os
import sys
import optparse
import urllib
# Make the web2py directory the working directory and the first sys.path
# entry so that 'gluon' and friends import correctly no matter where this
# script was launched from.
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path] + [p for p in sys.path if not p == path]
class Servers:
    """ Registry of adapters that run the web2py WSGI app on a server.

    Each static method receives the WSGI application, an (ip, port)
    address tuple and optional keyword options, and blocks serving
    requests on that server implementation.
    """
    @staticmethod
    def cgi(app, address=None, **options):
        from wsgiref.handlers import CGIHandler
        CGIHandler().run(app)  # Just ignore host and port here

    @staticmethod
    def flup(app, address, **options):
        import flup.server.fcgi
        flup.server.fcgi.WSGIServer(app, bindAddress=address).run()

    @staticmethod
    def wsgiref(app, address, **options):  # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        options = {}

        class QuietHandler(WSGIRequestHandler):
            # Suppress the default per-request logging to stderr.
            def log_request(*args, **kw):
                pass
        options['handler_class'] = QuietHandler
        srv = make_server(address[0], address[1], app, **options)
        srv.serve_forever()

    @staticmethod
    def cherrypy(app, address, **options):
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer(address, app)
        server.start()

    @staticmethod
    def rocket(app, address, **options):
        from gluon.rocket import CherryPyWSGIServer
        server = CherryPyWSGIServer(address, app)
        server.start()

    @staticmethod
    def rocket_with_repoze_profiler(app, address, **options):
        from gluon.rocket import CherryPyWSGIServer
        from repoze.profile.profiler import AccumulatingProfileMiddleware
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'none'
        # Profiling results are accumulated in wsgi.prof and served at
        # the /__profile__ URL.
        wrapped = AccumulatingProfileMiddleware(
            app,
            log_filename='wsgi.prof',
            discard_first_request=True,
            flush_at_shutdown=True,
            path='/__profile__'
        )
        server = CherryPyWSGIServer(address, wrapped)
        server.start()

    @staticmethod
    def paste(app, address, **options):
        options = {}
        from paste import httpserver
        from paste.translogger import TransLogger
        httpserver.serve(app, host=address[0], port=address[1], **options)

    @staticmethod
    def fapws(app, address, **options):
        import fapws._evwsgi as evwsgi
        from fapws import base
        evwsgi.start(address[0], str(address[1]))
        evwsgi.set_base_module(base)

        # BUG FIX: this wrapper used to be named 'app', shadowing the WSGI
        # application argument; calling it then recursed on itself forever
        # instead of invoking the real application.
        def wsgi_app(environ, start_response):
            environ['wsgi.multiprocess'] = False
            return app(environ, start_response)
        evwsgi.wsgi_cb(('', wsgi_app))
        evwsgi.run()

    @staticmethod
    def gevent(app, address, **options):
        options = options['options']
        workers = options.workers
        from gevent import pywsgi
        from gevent.pool import Pool
        pywsgi.WSGIServer(address, app, spawn=workers and Pool(
            int(options.workers)) or 'default', log=None).serve_forever()

    @staticmethod
    def bjoern(app, address, **options):
        import bjoern
        bjoern.run(app, *address)

    @staticmethod
    def tornado(app, address, **options):
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(app)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(address=address[0], port=address[1])
        tornado.ioloop.IOLoop.instance().start()

    @staticmethod
    def twisted(app, address, **options):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
        reactor.listenTCP(address[1], factory, interface=address[0])
        reactor.run()

    @staticmethod
    def diesel(app, address, **options):
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(app, port=address[1])
        app.run()

    @staticmethod
    def gunicorn(app, address, **options):
        options = {}
        from gunicorn.app.base import Application
        config = {'bind': "%s:%d" % address}
        config.update(options)
        # gunicorn parses sys.argv itself; hide our own options from it.
        sys.argv = ['anyserver.py']

        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config

            def load(self):
                return app
        g = GunicornApplication()
        g.run()

    @staticmethod
    def eventlet(app, address, **options):
        from eventlet import wsgi, listen
        wsgi.server(listen(address), app)

    @staticmethod
    def mongrel2(app, address, **options):
        import uuid
        sys.path.append(os.path.abspath(os.path.dirname(__file__)))
        from mongrel2 import handler
        conn = handler.Connection(str(uuid.uuid4()),
                                  "tcp://127.0.0.1:9997",
                                  "tcp://127.0.0.1:9996")
        mongrel2_handler(app, conn, debug=False)

    @staticmethod
    def motor(app, address, **options):
        # https://github.com/rpedroso/motor
        import motor
        app = motor.WSGIContainer(app)
        http_server = motor.HTTPServer(app)
        http_server.listen(address=address[0], port=address[1])
        # http_server.start(2)
        motor.IOLoop.instance().start()

    @staticmethod
    def pulsar(app, address, **options):
        from pulsar.apps import wsgi
        sys.argv = ['anyserver.py']
        s = wsgi.WSGIServer(callable=app, bind="%s:%d" % address)
        s.start()
def mongrel2_handler(application, conn, debug=False):
    """
    Based on :
    https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py
    WSGI handler based on the Python wsgiref SimpleHandler.
    A WSGI application should return an iterable of StringTypes.
    Any encoding must be handled by the WSGI application itself.
    """
    from wsgiref.handlers import SimpleHandler
    try:
        import cStringIO as StringIO
    except:
        import StringIO
    # TODO - this wsgi handler executes the application and renders a page
    # in memory completely before returning it as a response to the client.
    # Thus, it does not "stream" the result back to the client. It should be
    # possible though. The SimpleHandler accepts file-like stream objects. So,
    # it should be just a matter of connecting 0MQ requests/response streams to
    # the SimpleHandler requests and response streams. However, the Python API
    # for Mongrel2 doesn't seem to support file-like stream objects for requests
    # and responses. Unless I have missed something.
    while True:
        if debug:
            print "WAITING FOR REQUEST"
        # receive a request
        req = conn.recv()
        if debug:
            print "REQUEST BODY: %r\n" % req.body
        if req.is_disconnect():
            if debug:
                print "DISCONNECT"
            continue  # effectively ignore the disconnect from the client
        # Set a couple of environment attributes a.k.a. header attributes
        # that are a must according to PEP 333
        environ = req.headers
        environ['SERVER_PROTOCOL'] = 'HTTP/1.1'  # SimpleHandler expects a server_protocol, lets assume it is HTTP 1.1
        environ['REQUEST_METHOD'] = environ['METHOD']
        if ':' in environ['Host']:
            environ['SERVER_NAME'] = environ['Host'].split(':')[0]
            environ['SERVER_PORT'] = environ['Host'].split(':')[1]
        else:
            environ['SERVER_NAME'] = environ['Host']
            environ['SERVER_PORT'] = ''
        environ['SCRIPT_NAME'] = ''  # empty for now
        environ['PATH_INFO'] = urllib.unquote(environ['PATH'])
        if '?' in environ['URI']:
            environ['QUERY_STRING'] = environ['URI'].split('?')[1]
        else:
            environ['QUERY_STRING'] = ''
        if 'Content-Length' in environ:
            environ['CONTENT_LENGTH'] = environ[
                'Content-Length']  # necessary for POST to work with Django
        environ['wsgi.input'] = req.body
        if debug:
            print "ENVIRON: %r\n" % environ
        # SimpleHandler needs file-like stream objects for
        # requests, errors and responses
        reqIO = StringIO.StringIO(req.body)
        errIO = StringIO.StringIO()
        respIO = StringIO.StringIO()
        # execute the application
        handler = SimpleHandler(reqIO, respIO, errIO, environ,
                                multithread=False, multiprocess=False)
        handler.run(application)
        # Get the response and filter out the response (=data) itself,
        # the response headers,
        # the response status code and the response status description
        # NOTE(review): this assumes the status line looks exactly like
        # "HTTP/1.1 200 OK\r\n" (code at [9:12], text from [13:]) and that
        # the body is the last "\r\n"-separated segment -- confirm for
        # multi-chunk bodies.
        response = respIO.getvalue()
        response = response.split("\r\n")
        data = response[-1]
        headers = dict([r.split(": ") for r in response[1:-2]])
        code = response[0][9:12]
        status = response[0][13:]
        # strip BOM's from response data
        # Especially the WSGI handler from Django seems to generate them (2 actually, huh?)
        # a BOM isn't really necessary and cause HTML parsing errors in Chrome and Safari
        # See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/
        # Although I still find this a ugly hack, it does work.
        data = data.replace('\xef\xbb\xbf', '')
        # Get the generated errors
        errors = errIO.getvalue()
        # return the response
        if debug:
            print "RESPONSE: %r\n" % response
        if errors:
            if debug:
                print "ERRORS: %r" % errors
            data = "%s\r\n\r\n%s" % (data, errors)
        conn.reply_http(
            req, data, code=code, status=status, headers=headers)
def run(servername, ip, port, softcron=True, logging=False, profiler=None,
        options=None):
    """ Start the web2py WSGI application on the named server adapter. """
    # Monkey-patching must happen before gluon.main gets imported.
    if servername == 'gevent':
        from gevent import monkey
        monkey.patch_all()
    elif servername == 'eventlet':
        import eventlet
        eventlet.monkey_patch()
    import gluon.main
    if not logging:
        application = gluon.main.wsgibase
    else:
        application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                            logfilename='httpserver.log',
                                            profiler_dir=profiler)
    if softcron:
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'soft'
    server_fn = getattr(Servers, servername)
    server_fn(application, (ip, int(port)), options=options)
def main():
usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
try:
version = open('VERSION','r')
except IOError:
version = ''
parser = optparse.OptionParser(usage, None, optparse.Option, version)
parser.add_option('-l',
'--logging',
action='store_true',
default=False,
dest='logging',
help='log into httpserver.log')
parser.add_option('-P',
'--profiler',
default=False,
dest='profiler_dir',
help='profiler dir')
servers = ', '.join(x for x in dir(Servers) if not x[0] == '_')
parser.add_option('-s',
'--server',
default='rocket',
dest='server',
help='server name (%s)' % servers)
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help='ip address')
parser.add_option('-p',
'--port',
default='8000',
dest='port',
help='port number')
parser.add_option('-w',
'--workers',
default=None,
dest='workers',
help='number of workers number')
(options, args) = parser.parse_args()
print 'starting %s on %s:%s...' % (
options.server, options.ip, options.port)
run(options.server, options.ip, options.port,
logging=options.logging, profiler=options.profiler_dir,
options=options)
if __name__ == '__main__':
    # Run directly: python anyserver.py [options]
    main()
| |
import ast
from itertools import groupby
import parallel
import screen
import util
import time
try:
from collections import OrderedDict
except ImportError:
# python < 2.7 compatability
from compat.OrderedDict import OrderedDict
try:
from collections import Counter
except ImportError:
# python < 2.7 compatability
from compat.Counter import Counter
# Print timing information for sorts when True.
DEBUG = False
# Set by the aggregate functions (count, avg, ...); tells _fields to stop
# after producing a single result row.
__is_aggregate = False
def do(stmt, data):
    """ Execute a parsed query statement against data (a list of rows).

    Applies, in order: where filter, group-by + projection, order-by
    (with optional 'desc'), and limit.
    """
    global __is_aggregate
    __is_aggregate = False  # reset before evaluating this statement
    rows = data
    if stmt.where:
        rows = _where(stmt.where, rows)
    if stmt.groupby:
        key = lambda row: row[stmt.groupby[0]]
        projected = [_fields(stmt.fields, list(group))
                     for _, group in groupby(_groupby(stmt.groupby, rows),
                                             key)]
        rows = _flatten(projected)
    else:
        rows = _fields(stmt.fields, rows)
    if stmt.orderby:
        rows = _orderby(stmt.orderby[0], rows)
        if stmt.orderby[1] == 'desc':
            rows = list(reversed(rows))
    if stmt.limit:
        start, count = stmt.limit[0], stmt.limit[1]
        rows = rows[start:start + count]
    return rows
def _where(where, data):
    """
    Compile the `where` AST into executable code and use it to run a
    parallel filter over the rows.
    """
    if where is None:
        return
    # Fill in line/column info so the AST can be compiled.
    ast.fix_missing_locations(where)
    return parallel.run(__where, data, "<where clause>", syntree=where)
@parallel.map
def __where(chunk, syntree):
    """ Evaluate the compiled where-clause once per row; keep matches.

    BUG FIX: column values used to be injected by assigning into
    locals(), which CPython does not guarantee to reflect into the frame
    (and does not at all in Python 3).  The row dict is now passed to
    eval() as the local namespace directly, with the module globals kept
    available for helper functions.
    """
    code = compile(syntree, '', 'eval')
    res = []
    for line in chunk:
        if eval(code, globals(), line):
            res.append(line)
    return res
def _groupby(fields, data):
    """ Sort rows on the group-by column so itertools.groupby can bucket
    adjacent equal keys. """
    return _orderby(fields, data, "<groupby>")
def _orderby(fields, data, name="<orderby>"):
    # Sort rows by the first field only; any additional fields are ignored.
    if DEBUG:
        print "starting sort for %s on %d lines" % (name, len(data))
    s = time.time()
    f = fields[0]
    newdata = sorted(data, key=lambda x: x[f])
    if DEBUG:
        print "sort for %s ran in %0.3f seconds" % (name, time.time() - s)
    return newdata
def _fields(fields, __data__):
    """
    Compile the select-list AST and evaluate it once per row.

    The dunder parameter names (__data__, __line__) reduce the chance of
    colliding with column names injected into the local scope below.
    """
    if fields is None:
        raise SyntaxError("What fields are you selecting?")
    ast.fix_missing_locations(fields)
    code = compile(fields, '', 'eval')
    resp = []
    for __line__ in __data__:
        # NOTE(review): writing into locals() is not guaranteed to create
        # real local variables (and does not in Python 3); this relies on
        # CPython 2 frame behaviour -- confirm before porting.
        for k in __line__.keys():
            locals()[k] = __line__[k]
        newrow = eval(code)
        # Presumably a bare-row selection evaluates to {'__line__': row};
        # unwrap it -- verify against the parser.
        if newrow.has_key('__line__'):
            newrow = newrow['__line__']
        resp.append(newrow)
        if __is_aggregate:
            # Aggregate functions collapse all rows into one result row.
            break
    return resp
def _flatten(l):
r = []
for i in l:
r += i
return r
def count(data, i):
    """ Aggregate: number of rows (the column argument is unused). """
    global __is_aggregate
    __is_aggregate = True
    return len(data)
def avg(data, column):
    # Aggregate: arithmetic mean of the (int-parseable) column values,
    # computed as a parallel sum/count pass followed by a reduction.
    global __is_aggregate
    __is_aggregate = True
    vals = [row[column] for row in data]
    # Each chunk yields a (sum, count) pair.
    data = parallel.run(parallel.map(
        lambda chunk: [(sum([int(line) for line in chunk]), len(chunk))]),
        vals,
        'avg()'
    )
    # NOTE(review): the extra sum(...) around the parallel.run results
    # suggests parallel.run returns per-worker partials -- confirm in the
    # parallel module.
    dividend = parallel.run(parallel.reduce(lambda data: sum([d[0] for d in data], 0.0)), data)
    divisor = parallel.run(parallel.reduce(lambda data: sum([d[1] for d in data])), data)
    return sum(dividend)/sum(divisor)
def mean(data, column):
    """ Aggregate: alias for avg(). """
    return avg(data, column)
def median(data, column):
    """ Aggregate: median of the values in the given column.

    BUG FIXES: the original indexed the *unsorted* `data` instead of the
    sorted list `d`, returned the whole row dict (not the column value)
    for odd-length input, and computed `a + b/2` instead of `(a + b)/2`
    for even-length input.
    """
    global __is_aggregate
    __is_aggregate = True
    d = sorted(data, key=lambda x: x[column])
    if len(d) & 0x01:
        # Odd number of rows: the middle value is the median.
        return d[(len(d) - 1) // 2][column]
    m = len(d) // 2
    a, b = d[m - 1:m + 1]
    # Even number of rows: average the two middle values (float division).
    return (a[column] + b[column]) / 2.0
def mode(data, column, ind=0):
    """ Aggregate: the ind-th most common value in the given column. """
    global __is_aggregate
    __is_aggregate = True
    rank = int(ind)
    counts = Counter(row[column] for row in data)
    value, _ = counts.most_common(rank + 1)[rank]
    return value
def max(data, column):
    # Aggregate: maximum of the (int-parseable) column values.
    global __is_aggregate
    __is_aggregate = True
    # Recover the builtin shadowed by this function's own name.
    # NOTE(review): __builtins__ is a dict only when this module is
    # imported; when run as __main__ it is a module and this subscription
    # fails -- confirm the module is never executed directly.
    max = __builtins__['max']
    vals = [row[column] for row in data]
    return max(parallel.run(parallel.reduce(lambda chunk: max([int(i) for i in chunk])), vals))
def min(data, column):
    # Aggregate: minimum of the (int-parseable) column values.
    global __is_aggregate
    __is_aggregate = True
    # Recover the builtin shadowed by this function's own name (see the
    # NOTE on max() about __builtins__ being a dict vs a module).
    min = __builtins__['min']
    vals = [row[column] for row in data]
    return min(parallel.run(parallel.reduce(lambda chunk: min([int(i) for i in chunk])), vals))
def div(data, a, b):
    """ Divide a by b, parsing both as ints when possible, else floats.

    The data argument is unused; it is present for the common scalar
    function calling convention.
    """
    try:
        a, b = int(a), int(b)
    except ValueError:
        a, b = float(a), float(b)
    return a / b
def year(data, d):
    """ Extract the year from a YYYYMMDD[hhmmss] timestamp.

    A string d names a column of the row dict `data`; anything else is
    stringified and sliced directly.
    """
    ts = data[d] if type(d) is str else str(d)
    return int(ts[:4])
def month(data, d):
    """ Extract the month from a YYYYMMDD[hhmmss] timestamp.

    A string d names a column of the row dict `data`; anything else is
    stringified and sliced directly.
    """
    ts = data[d] if type(d) is str else str(d)
    return int(ts[4:6])
def day(data, d):
    """ Extract the day of month from a YYYYMMDD[hhmmss] timestamp.

    A string d names a column of the row dict `data`; anything else is
    stringified and sliced directly.
    """
    ts = data[d] if type(d) is str else str(d)
    return int(ts[6:8])
def hour(data, d):
    """Hour (digits 8-9) of a YYYYMMDDhhmmss timestamp. *d* is either a
    column name looked up in the row *data*, or a raw timestamp value."""
    ts = data[d] if isinstance(d, str) else str(d)
    return int(ts[8:10])
def minute(data, d):
    """Minute (digits 10-11) of a YYYYMMDDhhmmss timestamp. *d* is either a
    column name looked up in the row *data*, or a raw timestamp value."""
    ts = data[d] if isinstance(d, str) else str(d)
    return int(ts[10:12])
def second(data, d):
    """Second (digits 12-13) of a YYYYMMDDhhmmss timestamp. *d* is either a
    column name looked up in the row *data*, or a raw timestamp value."""
    ts = data[d] if isinstance(d, str) else str(d)
    return int(ts[12:14])
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for loading and parsing kubeconfig."""
import os
from googlecloudsdk.core import log
from googlecloudsdk.core.util import files as file_utils
import yaml
class Error(Exception):
  """Base error for kubeconfig utilities (invalid file contents or
  conflicting arguments)."""
# TODO(user): marshal yaml directly into a type with a
# matching structure.
class Kubeconfig(object):
  """Interface for interacting with a kubeconfig file.

  Keeps clusters/users/contexts indexed by name and round-trips the
  structure to disk as YAML.
  """

  def __init__(self, raw_data, filename):
    self._filename = filename
    self._data = raw_data
    # Index the entries by name for O(1) lookup and removal.
    self.clusters = {}
    self.users = {}
    self.contexts = {}
    for cluster in self._data['clusters']:
      self.clusters[cluster['name']] = cluster
    for user in self._data['users']:
      self.users[user['name']] = user
    for context in self._data['contexts']:
      self.contexts[context['name']] = context

  @property
  def current_context(self):
    return self._data['current-context']

  def Clear(self, key):
    """Remove the cluster, user and context named *key*; reset
    current-context if it pointed at *key*."""
    self.contexts.pop(key, None)
    self.clusters.pop(key, None)
    self.users.pop(key, None)
    if self._data.get('current-context') == key:
      self._data['current-context'] = ''

  def SaveToFile(self):
    """Write the kubeconfig back to self._filename with mode 0600."""
    # list() so the dump also works on Python 3, where dict.values()
    # returns a view that yaml cannot serialize.
    self._data['clusters'] = list(self.clusters.values())
    self._data['users'] = list(self.users.values())
    self._data['contexts'] = list(self.contexts.values())
    # We use os.open here to explicitly set file mode 0600.
    # The flags mimic open(self._filename, 'w'): write with truncate,
    # creating the file if it does not exist.
    fd = os.open(self._filename, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0o600)
    with os.fdopen(fd, 'w') as fp:
      yaml.safe_dump(self._data, fp, default_flow_style=False)

  def SetCurrentContext(self, context):
    self._data['current-context'] = context

  @classmethod
  def _Validate(cls, data):
    """Raise Error unless *data* has list-valued clusters/users/contexts."""
    try:
      if not data:
        raise Error('empty file')
      for key in ('clusters', 'users', 'contexts'):
        if not isinstance(data[key], list):
          # Interpolate eagerly: Exception args are never %-formatted,
          # so the original multi-arg raise produced an unformatted tuple.
          raise Error('invalid type for %s: %s' % (data[key], type(data[key])))
    except KeyError as error:
      raise Error('expected key %s not found' % error)

  @classmethod
  def LoadFromFile(cls, filename):
    """Parse *filename* as YAML and return a validated Kubeconfig.

    Raises:
      Error: if the file is not valid YAML or fails validation.
    """
    try:
      with open(filename, 'r') as fp:
        # safe_load: a kubeconfig is plain data; yaml.load would honor
        # arbitrary python object tags from an untrusted file.
        data = yaml.safe_load(fp)
      cls._Validate(data)
      return cls(data, filename)
    except yaml.YAMLError as error:
      raise Error('unable to load kubeconfig for %s: %s' % (filename, error))

  @classmethod
  def LoadOrCreate(cls, filename):
    """Load *filename*, or (re)create an empty kubeconfig there on failure."""
    try:
      return cls.LoadFromFile(filename)
    except (Error, IOError) as error:
      log.debug('unable to load default kubeconfig: %s; recreating %s',
                error, filename)
      file_utils.MakeDir(os.path.dirname(filename))
      kubeconfig = cls(EmptyKubeconfig(), filename)
      kubeconfig.SaveToFile()
      return kubeconfig

  @classmethod
  def Default(cls):
    return cls.LoadOrCreate(Kubeconfig.DefaultPath())

  @staticmethod
  def DefaultPath():
    """$KUBECONFIG if set, else ~/.kube/config."""
    if os.environ.get('KUBECONFIG'):
      return os.environ['KUBECONFIG']
    return os.path.join(os.path.expanduser('~/'), '.kube/config')
def Cluster(name, server, ca_path=None, ca_data=None):
  """Generate and return a cluster kubeconfig entry.

  Exactly one of ca_path/ca_data may be given; with neither, TLS
  verification is disabled for the cluster.
  """
  if ca_path and ca_data:
    raise Error('cannot specify both ca_path and ca_data')
  cluster = {'server': server}
  if ca_path:
    cluster['certificate-authority'] = ca_path
  elif ca_data:
    cluster['certificate-authority-data'] = ca_data
  else:
    cluster['insecure-skip-tls-verify'] = True
  return {
      'name': name,
      'cluster': cluster
  }
def User(name, token=None, username=None, password=None,
         cert_path=None, cert_data=None, key_path=None, key_data=None):
  """Generate and return a user kubeconfig entry.

  Args:
    name: str, nickname for this user entry.
    token: str, bearer token.
    username: str, basic auth user.
    password: str, basic auth password.
    cert_path: str, path to client certificate file.
    cert_data: str, base64 encoded client certificate data.
    key_path: str, path to client key file.
    key_data: str, base64 encoded client key data.

  Returns:
    dict, valid kubeconfig user entry.

  Raises:
    Error: if no auth info is provided (token or username AND password),
      or both path and data variants of a credential are given.
  """
  if not token and (not username or not password):
    raise Error('either token or username,password must be provided')
  if cert_path and cert_data:
    raise Error('cannot specify both cert_path and cert_data')
  if key_path and key_data:
    raise Error('cannot specify both key_path and key_data')
  user = {'token': token} if token else {'username': username,
                                         'password': password}
  if cert_path:
    user['client-certificate'] = cert_path
  elif cert_data:
    user['client-certificate-data'] = cert_data
  if key_path:
    user['client-key'] = key_path
  elif key_data:
    user['client-key-data'] = key_data
  return {
      'name': name,
      'user': user
  }
def Context(name, cluster, user):
  """Generate and return a context kubeconfig entry binding *cluster* and
  *user* under the nickname *name*."""
  return {
      'name': name,
      'context': {'cluster': cluster, 'user': user},
  }
def EmptyKubeconfig():
  """Return a minimal v1 kubeconfig dict with no clusters, users or
  contexts and an empty current-context."""
  return {
      'apiVersion': 'v1',
      'kind': 'Config',
      'current-context': '',
      'preferences': {},
      'clusters': [],
      'contexts': [],
      'users': [],
  }
| |
#!/usr/bin/env python
import logging
l = logging.getLogger("claripy.frontends.frontend")
import ana
#pylint:disable=unidiomatic-typecheck
class Frontend(ana.Storable):
    """Abstract base for claripy solver frontends.

    Wraps a solver backend, caches the last solve ``result``, and defines
    the constraint-management API that concrete frontends implement.
    """
    def __init__(self, solver_backend):
        self._solver_backend = solver_backend
        # Cached Result of the last solve; None means "unknown".
        self.result = None
        self._simplified = False
    #
    # Storable support
    #
    @property
    def uuid(self):
        return self.ana_uuid
    def _ana_getstate(self):
        """Serialize as (backend class name, cached result); simplify first."""
        if not self._simplified: self.simplify()
        return self._solver_backend.__class__.__name__, self.result
    def _ana_setstate(self, s):
        """Restore from _ana_getstate(); the backend is looked up by name."""
        solver_backend_name, self.result = s
        self._solver_backend = _backends[solver_backend_name]
        self._simplified = True
    #
    # Constraint management
    #
    @staticmethod
    def _split_constraints(constraints):
        '''
        Returns independent constraints, split from this Frontend's constraints.

        Constraints are first split on And, then grouped transitively by
        shared variables; the result is a list of
        (variable set, constraint list) pairs.
        '''
        splitted = [ ]
        for i in constraints:
            splitted.extend(i.split(['And']))
        l.debug("... splitted of size %d", len(splitted))
        variable_connections = { }
        constraint_connections = { }
        for n,s in enumerate(splitted):
            l.debug("... processing constraint with %d variables", len(s.variables))
            connected_variables = set(s.variables)
            connected_constraints = { n }
            if len(connected_variables) == 0:
                # Variable-free constraints are all grouped under 'CONCRETE'.
                connected_variables.add('CONCRETE')
            for v in s.variables:
                if v in variable_connections:
                    connected_variables |= variable_connections[v]
                if v in constraint_connections:
                    connected_constraints |= constraint_connections[v]
            for v in connected_variables:
                variable_connections[v] = connected_variables
                constraint_connections[v] = connected_constraints
        unique_constraint_sets = set()
        for v in variable_connections:
            unique_constraint_sets.add((frozenset(variable_connections[v]), frozenset(constraint_connections[v])))
        results = [ ]
        for v,c_indexes in unique_constraint_sets:
            results.append((set(v), [ splitted[c] for c in c_indexes ]))
        return results
    def _constraint_filter(self, ec):
        """Drop trivially-true constraints and raise UnsatError on a
        trivially-false one. Accepts one constraint or a list/tuple/set;
        returns a tuple of the constraints that survived."""
        fc = [ ]
        for e in ec if type(ec) in (list, tuple, set) else (ec,):
            #e_simp = self._claripy.simplify(e)
            e_simp = e
            for b in _eager_backends + [ self._solver_backend ]:
                try:
                    o = b.convert(e_simp)
                    if b._is_false(o):
                        #filter_false += 1
                        raise UnsatError("expressions contain False")
                    elif b._has_true(o):
                        #filter_true +=1
                        break
                    else:
                        l.warning("Frontend._constraint_filter got non-boolean from model_backend")
                        raise ClaripyFrontendError()
                except BackendError:
                    pass
            else:
                # for-else: no backend decided the constraint, so keep it.
                fc.append(e_simp)
        return tuple(fc)
    def branch(self):
        """Return a sibling frontend sharing backend, result and flag."""
        s = self.__class__(self._solver_backend)
        s.result = self.result
        s._simplified = self._simplified
        return s
    #
    # Stuff that should be implemented by subclasses
    #
    def _add_constraints(self, constraints, invalidate_cache=True):
        raise NotImplementedError("_add_constraints() is not implemented")
    def _simplify(self):
        raise NotImplementedError("_simplify() is not implemented")
    def _solve(self, extra_constraints=()):
        raise NotImplementedError("_solve() is not implemented")
    def _eval(self, e, n, extra_constraints=()):
        raise NotImplementedError("_eval() is not implemented")
    def _min(self, e, extra_constraints=()):
        raise NotImplementedError("_min() is not implemented")
    def _max(self, e, extra_constraints=()):
        raise NotImplementedError("_max() is not implemented")
    def _solution(self, e, v, extra_constraints=()):
        raise NotImplementedError("_solution() is not implemented")
    def finalize(self):
        raise NotImplementedError("finalize() is not implemented")
    def merge(self, others, merge_flag, merge_values):
        raise NotImplementedError("merge() is not implemented")
    def combine(self, others):
        raise NotImplementedError("combine() is not implemented")
    def split(self):
        raise NotImplementedError("split() is not implemented")
    #
    # Solving
    #
    def add(self, constraints, invalidate_cache=True):
        """Filter and add *constraints*; returns the list actually added.
        May keep, downgrade, or invalidate the cached result."""
        if type(constraints) not in (list, tuple):
            constraints = [ constraints ]
        if len(constraints) == 0:
            return [ ]
        try:
            to_add = self._constraint_filter(constraints)
        except UnsatError:
            # A constraint is concretely false: record unsat and add `false`.
            self.result = UnsatResult()
            to_add = [ false ]
        for c in to_add:
            c.make_uuid()
            if not isinstance(c, Bool):
                raise ClaripyTypeError('constraint is not a boolean expression!')
        if self.result is not None and invalidate_cache:
            # Check whether the cached model still satisfies the new
            # constraints (Python 2 im_func unbound-method call).
            all_true = True
            for c in to_add:
                try:
                    v = LightFrontend._eval.im_func(self, c, 1)[0]
                    all_true &= v
                except ClaripyFrontendError:
                    all_true = False
                    break
        else:
            all_true = False
        self._add_constraints(to_add, invalidate_cache=invalidate_cache)
        self._simplified = False
        if invalidate_cache and self.result is not None and self.result.sat:
            if all_true:
                # The cached model still satisfies everything: keep it.
                new_result = SatResult()
                new_result.model.update(self.result.model)
                self.result = new_result
            else:
                self.result = None
        return to_add
    def simplify(self):
        """Simplify the constraint set once; later calls are no-ops."""
        if self._simplified:
            return
        s = self._simplify()
        self._simplified = True
        return s
    def solve(self, extra_constraints=()):
        """Solve, returning a (possibly cached) Result object."""
        l.debug("%s.solve() running with %d extra constraints...", self.__class__.__name__, len(extra_constraints))
        if self.result is not None:
            if not self.result.sat or len(extra_constraints) == 0:
                l.debug("... returning cached result (sat: %s)", self.result.sat)
                return self.result
        else:
            l.debug("... no cached result")
        try:
            extra_constraints = self._constraint_filter(extra_constraints)
        except UnsatError:
            l.debug("... returning unsat result due to false extra_constraints")
            return UnsatResult()
        l.debug("... conferring with the solver")
        r = self._solve(extra_constraints=extra_constraints)
        if len(extra_constraints) == 0 or (self.result is None and r.sat):
            l.debug("... caching result (sat: %s)", r.sat)
            self.result = r
        return r
    def satisfiable(self, extra_constraints=()):
        return self.solve(extra_constraints=extra_constraints).sat
    def eval(self, e, n, extra_constraints=()):
        """Evaluate up to *n* solutions for AST *e*."""
        extra_constraints = self._constraint_filter(extra_constraints)
        if not isinstance(e, Base):
            raise ValueError("Expressions passed to eval() MUST be Claripy ASTs (got %s)" % type(e))
        return self._eval(e, n, extra_constraints=extra_constraints)
    def max(self, e, extra_constraints=()):
        """Maximum value of AST *e*; cached, and the bound is re-added as a
        ULE constraint when no extra constraints were given."""
        extra_constraints = self._constraint_filter(extra_constraints)
        if not isinstance(e, Base):
            raise ValueError("Expressions passed to max() MUST be Claripy ASTs (got %s)" % type(e))
        if len(extra_constraints) == 0 and self.result is not None and e.uuid in self.result.max_cache:
            #cached_max += 1
            return self.result.max_cache[e.uuid]
        m = self._max(e, extra_constraints=extra_constraints)
        if len(extra_constraints) == 0:
            if self.result is not None: self.result.max_cache[e.uuid] = m
            self.add([ULE(e, m)], invalidate_cache=False)
        return m
    def min(self, e, extra_constraints=()):
        """Minimum value of AST *e*; mirror image of max() with UGE."""
        extra_constraints = self._constraint_filter(extra_constraints)
        if not isinstance(e, Base):
            raise ValueError("Expressions passed to min() MUST be Claripy ASTs (got %s)" % type(e))
        if len(extra_constraints) == 0 and self.result is not None and e.uuid in self.result.min_cache:
            #cached_min += 1
            return self.result.min_cache[e.uuid]
        m = self._min(e, extra_constraints=extra_constraints)
        if len(extra_constraints) == 0:
            if self.result is not None: self.result.min_cache[e.uuid] = m
            self.add([UGE(e, m)], invalidate_cache=False)
        return m
    def solution(self, e, v, extra_constraints=()):
        """Return whether *v* is a possible solution for AST *e*."""
        try:
            extra_constraints = self._constraint_filter(extra_constraints)
        except UnsatError:
            return False
        if not isinstance(e, Base):
            raise ValueError("Expressions passed to solution() MUST be Claripy ASTs (got %s)" % type(e))
        b = self._solution(e, v, extra_constraints=extra_constraints)
        # NOTE(review): e != v is learned only when extra_constraints WERE
        # given (> 0), yet under extra constraints "no solution" does not
        # hold in general — `== 0` looks like the intended condition; verify.
        if b is False and len(extra_constraints) > 0:
            self.add([e != v], invalidate_cache=False)
        return b
    #
    # Serialization and such.
    #
    def downsize(self): #pylint:disable=R0201
        # Shrink the cached result's memory footprint, if any.
        if self.result is not None:
            self.result.downsize()
from .frontends import LightFrontend
from .result import UnsatResult, SatResult
from .errors import UnsatError, BackendError, ClaripyFrontendError, ClaripyTypeError
from . import _eager_backends, _backends
from .ast.base import Base
from .ast.bool import false, Bool
from .ast.bv import UGE, ULE
| |
"""Base Command class, and related routines"""
import sys
import os
import socket
import urllib2
import urllib
from cStringIO import StringIO
import traceback
import time
from pip.log import logger
from pip.baseparser import parser, ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.exceptions import BadCommand, InstallationError, UninstallationError
from pip.venv import restart_in_venv
__all__ = ['command_dict', 'Command', 'load_all_commands',
'load_command', 'command_names']
command_dict = {}
class Command(object):
    """Base class for pip commands (Python 2 codebase).

    Subclasses set ``name``/``usage`` and implement ``run``; instantiating
    a subclass registers it in the module-level ``command_dict``.
    """
    name = None
    usage = None
    hidden = False
    def __init__(self):
        assert self.name
        self.parser = ConfigOptionParser(
            usage=self.usage,
            prog='%s %s' % (sys.argv[0], self.name),
            version=parser.version,
            formatter=UpdatingDefaultsHelpFormatter(),
            name=self.name)
        # Re-add the general options so they may also be given after the
        # subcommand name.
        for option in parser.option_list:
            if not option.dest or option.dest == 'help':
                # -h, --version, etc
                continue
            self.parser.add_option(option)
        command_dict[self.name] = self
    def merge_options(self, initial_options, options):
        # Make sure we have all global options carried over
        for attr in ['log', 'venv', 'proxy', 'venv_base', 'require_venv',
                     'respect_venv', 'log_explicit_levels', 'log_file',
                     'timeout', 'default_vcs', 'skip_requirements_regex']:
            setattr(options, attr, getattr(initial_options, attr) or getattr(options, attr))
        options.quiet += initial_options.quiet
        options.verbose += initial_options.verbose
    def setup_logging(self):
        # Hook for subclasses; the base implementation does nothing.
        pass
    def main(self, complete_args, args, initial_options):
        """Parse options, configure logging/venv/proxy, run the command.

        Returns the exit code: 0 on success, 1 for known install/uninstall
        or bad-command errors, 2 for unexpected exceptions (sys.exit(3) is
        used for virtualenv problems).
        """
        options, args = self.parser.parse_args(args)
        self.merge_options(initial_options, options)
        # Verbosity: each -v raises, each -q lowers the console log level.
        level = 1 # Notify
        level += options.verbose
        level -= options.quiet
        level = logger.level_for_integer(4-level)
        complete_log = []
        # Console gets the chosen level; complete_log captures everything.
        logger.consumers.extend(
            [(level, sys.stdout),
             (logger.DEBUG, complete_log.append)])
        if options.log_explicit_levels:
            logger.explicit_levels = True
        self.setup_logging()
        if options.require_venv and not options.venv:
            # If a venv is required check if it can really be found
            if not os.environ.get('VIRTUAL_ENV'):
                logger.fatal('Could not find an activated virtualenv (required).')
                sys.exit(3)
            # Automatically install in currently activated venv if required
            options.respect_venv = True
        if args and args[-1] == '___VENV_RESTART___':
            ## FIXME: We don't do anything with this value yet:
            venv_location = args[-2]
            args = args[:-2]
            options.venv = None
        else:
            # If given the option to respect the activated environment
            # check if no venv is given as a command line parameter
            if options.respect_venv and os.environ.get('VIRTUAL_ENV'):
                if options.venv and os.path.exists(options.venv):
                    # Make sure command line venv and environmental are the same
                    if (os.path.realpath(os.path.expanduser(options.venv)) !=
                            os.path.realpath(os.environ.get('VIRTUAL_ENV'))):
                        logger.fatal("Given virtualenv (%s) doesn't match "
                                     "currently activated virtualenv (%s)."
                                     % (options.venv, os.environ.get('VIRTUAL_ENV')))
                        sys.exit(3)
                else:
                    options.venv = os.environ.get('VIRTUAL_ENV')
                    logger.info('Using already activated environment %s' % options.venv)
        if options.venv:
            logger.info('Running in environment %s' % options.venv)
            site_packages=False
            if options.site_packages:
                site_packages=True
            restart_in_venv(options.venv, options.venv_base, site_packages,
                            complete_args)
            # restart_in_venv should actually never return, but for clarity...
            return
        ## FIXME: not sure if this should come before or after venv restart
        if options.log:
            log_fp = open_logfile(options.log, 'a')
            logger.consumers.append((logger.DEBUG, log_fp))
        else:
            log_fp = None
        socket.setdefaulttimeout(options.timeout or None)
        setup_proxy_handler(options.proxy)
        exit = 0
        try:
            self.run(options, args)
        except (InstallationError, UninstallationError), e:
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            exit = 1
        except BadCommand, e:
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            exit = 1
        except:
            logger.fatal('Exception:\n%s' % format_exc())
            exit = 2
        if log_fp is not None:
            log_fp.close()
        if exit:
            # On failure, dump the full captured log to options.log_file.
            log_fn = options.log_file
            text = '\n'.join(complete_log)
            logger.fatal('Storing complete log in %s' % log_fn)
            log_fp = open_logfile(log_fn, 'w')
            log_fp.write(text)
            log_fp.close()
        return exit
## FIXME: should get moved somewhere else:
def setup_proxy_handler(proxystr=''):
    """Set the proxy handler given the option passed on the command
    line. If an empty string is passed it looks at the HTTP_PROXY
    environment variable. """
    proxy = get_proxy(proxystr)
    if proxy:
        # Install a process-wide urllib2 opener routing http/ftp via proxy.
        proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy})
        opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler)
        urllib2.install_opener(opener)
def get_proxy(proxystr=''):
    """Get the proxy given the option passed on the command line. If an
    empty string is passed it looks at the HTTP_PROXY environment
    variable. Returns None when no proxy is configured; prompts for a
    password when 'user@host' is given without one."""
    proxystr = proxystr or os.environ.get('HTTP_PROXY', '')
    if not proxystr:
        return None
    if '@' not in proxystr:
        return proxystr
    user_password, server_port = proxystr.split('@', 1)
    if ':' in user_password:
        user, password = user_password.split(':', 1)
    else:
        user = user_password
        import getpass
        prompt = 'Password for %s@%s: ' % (user, server_port)
        password = urllib.quote(getpass.getpass(prompt))
    return '%s:%s@%s' % (user, password, server_port)
def format_exc(exc_info=None):
    """Return the traceback for *exc_info* (or the exception currently
    being handled) rendered as a single string."""
    if exc_info is None:
        exc_info = sys.exc_info()
    # traceback.format_exception already returns the rendered lines, so no
    # intermediate StringIO buffer is needed.
    return ''.join(traceback.format_exception(*exc_info))
def open_logfile(filename, mode='a'):
    """Open the named log file in append mode.

    If the file already exists, a separator will also be printed to
    the file to separate past activity from current activity.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    exists = os.path.exists(filename)
    log_fp = open(filename, mode)
    if exists:
        # write() instead of the Python 2-only `print >>` chevron syntax,
        # so this works on both Python 2 and 3.
        log_fp.write('-' * 60 + '\n')
        log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
    return log_fp
def load_command(name):
    """Import the pip.commands.<name> module; missing or broken commands
    are silently ignored."""
    full_name = 'pip.commands.%s' % name
    if full_name not in sys.modules:
        try:
            __import__(full_name)
        except ImportError:
            pass
def load_all_commands():
    """Import every command module found next to this package."""
    for command in command_names():
        load_command(command)
def command_names():
    """Return the module basenames of the .py files in the sibling
    'commands' directory."""
    commands_dir = os.path.join(os.path.dirname(__file__), 'commands')
    return [os.path.splitext(fname)[0]
            for fname in os.listdir(commands_dir)
            if fname.endswith('.py')
            and os.path.isfile(os.path.join(commands_dir, fname))]
| |
from direct.interval.IntervalGlobal import *
from BattleProps import *
from BattleSounds import *
from direct.directnotify import DirectNotifyGlobal
import MovieCamera
import random
import MovieUtil
import BattleParticles
import HealJokes
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
from toontown.toon import NPCToons
notify = DirectNotifyGlobal.directNotify.newCategory('MovieNPCSOS')
# Heal sound effects, indexed by gag level (see __getSoundTrack below).
soundFiles = ('AA_heal_tickle.mp3', 'AA_heal_telljoke.mp3', 'AA_heal_smooch.mp3', 'AA_heal_happydance.mp3', 'AA_heal_pixiedust.mp3', 'AA_heal_juggle.mp3')
# NPC spawn offset relative to the calling toon (used in doNPCTeleports).
offset = Point3(0, 4.0, 0)
def __cogsMiss(attack, level, hp):
    # Forwarding stub: lets NPCSOSfn_dict reference this before
    # __doCogsMiss is defined further down in the module.
    return __doCogsMiss(attack, level, hp)
def __toonsHit(attack, level, hp):
    # Forwarding stub: lets NPCSOSfn_dict reference this before
    # __doToonsHit is defined further down in the module.
    return __doToonsHit(attack, level, hp)
def __restockGags(attack, level, hp):
    # Forwarding stub: lets NPCSOSfn_dict reference this before
    # __doRestockGags is defined further down in the module.
    return __doRestockGags(attack, level, hp)
# Dispatch table: NPC SOS track constant -> movie builder for that track.
NPCSOSfn_dict = {ToontownBattleGlobals.NPC_COGS_MISS: __cogsMiss,
 ToontownBattleGlobals.NPC_TOONS_HIT: __toonsHit,
 ToontownBattleGlobals.NPC_RESTOCK_GAGS: __restockGags}
def doNPCSOSs(NPCSOSs):
    """Build the movie for a list of NPC SOS calls.

    Returns (track, Parallel(camera, text)) or (None, None) when the list
    is empty.
    """
    if len(NPCSOSs) == 0:
        return (None, None)
    track = Sequence()
    textTrack = Sequence()
    for n in NPCSOSs:
        ival, textIval = __doNPCSOS(n)
        if ival:
            track.append(ival)
            textTrack.append(textIval)
    camDuration = track.getDuration()
    if camDuration > 0.0:
        camTrack = MovieCamera.chooseHealShot(NPCSOSs, camDuration)
    else:
        camTrack = Sequence()
    return (track, Parallel(camTrack, textTrack))
def __doNPCSOS(sos):
    """Build the (movie, text) interval pair for one NPC SOS call.

    Dispatches on the NPC's track via NPCSOSfn_dict; an unknown NPC
    (track is None) falls back to the cogs-miss movie.
    """
    npcId = sos['npcId']
    track, level, hp = NPCToons.getNPCTrackLevelHp(npcId)
    if track != None:
        return NPCSOSfn_dict[track](sos, level, hp)
    else:
        # Removed an unreachable bare `return` that followed this
        # exhaustive if/else in the original.
        return __cogsMiss(sos, 0, 0)
def __healToon(toon, hp, ineffective = 0):
    """Make *toon* say a laugh line matching the heal outcome.

    ineffective=1 picks a 'missed' line; otherwise a heal near the maximum
    prop damage picks the stronger laughter set.
    """
    notify.debug('healToon() - toon: %d hp: %d ineffective: %d' % (toon.doId, hp, ineffective))
    if ineffective == 1:
        laughter = random.choice(TTLocalizer.MovieHealLaughterMisses)
    else:
        maxDam = ToontownBattleGlobals.AvPropDamage[0][1][0][1]
        if hp >= maxDam - 1:
            laughter = random.choice(TTLocalizer.MovieHealLaughterHits2)
        else:
            laughter = random.choice(TTLocalizer.MovieHealLaughterHits1)
    toon.setChatAbsolute(laughter, CFSpeech | CFTimeout)
def __getSoundTrack(level, delay, duration = None, node = None):
    """Return a Sequence that waits *delay*, then plays the heal sound for
    gag *level* (optionally clipped to *duration* and attached to *node*).
    Returns an empty Sequence if the sound cannot be loaded."""
    soundEffect = globalBattleSoundCache.getSound(soundFiles[level])
    soundIntervals = Sequence()
    if soundEffect:
        if duration:
            playSound = SoundInterval(soundEffect, duration=duration, node=node)
        else:
            playSound = SoundInterval(soundEffect, node=node)
        soundIntervals.append(Wait(delay))
        soundIntervals.append(playSound)
    return soundIntervals
def teleportIn(attack, npc, pos = Point3(0, 0, 0), hpr = Vec3(180.0, 0.0, 0.0)):
    """Sequence that teleports *npc* into the battle at pos/hpr, greets the
    calling toon, waves, and settles into the neutral loop."""
    a = Func(npc.reparentTo, attack['battle'])
    b = Func(npc.setPos, pos)
    c = Func(npc.setHpr, hpr)
    # Start posed on the final frame of the teleport animation.
    d = Func(npc.pose, 'teleport', npc.getNumFrames('teleport') - 1)
    e = npc.getTeleportInTrack()
    ee = Func(npc.addActive)
    f = Func(npc.setChatAbsolute, TTLocalizer.MovieNPCSOSGreeting % attack['toon'].getName(), CFSpeech | CFTimeout)
    g = ActorInterval(npc, 'wave')
    h = Func(npc.loop, 'neutral')
    i = Func(npc.clearChat)
    return Sequence(a, b, c, d, e, ee, f, g, h, i)
def teleportOut(attack, npc):
    """Sequence that bows/curtsies (by gender), says goodbye, then
    teleports the NPC out, detaches and deletes it."""
    if npc.style.getGender() == 'm':
        a = ActorInterval(npc, 'bow')
    else:
        a = ActorInterval(npc, 'curtsy')
    b = Func(npc.setChatAbsolute, TTLocalizer.MovieNPCSOSGoodbye, CFSpeech | CFTimeout)
    c = npc.getTeleportOutTrack()
    d = Func(npc.removeActive)
    e = Func(npc.detachNode)
    f = Func(npc.delete)
    return Sequence(a, b, c, d, e, f)
def __getPartTrack(particleEffect, startDelay, durationDelay, partExtraArgs):
    """Return a Sequence that waits *startDelay* and then plays the
    particle effect described by partExtraArgs ([effect, parent,
    worldRelative?]) for *durationDelay* seconds."""
    pEffect, parent = partExtraArgs[0], partExtraArgs[1]
    worldRelative = partExtraArgs[2] if len(partExtraArgs) == 3 else 1
    return Sequence(Wait(startDelay), ParticleInterval(pEffect, parent, worldRelative, duration=durationDelay, cleanup=True))
def __doSprinkle(attack, recipients, hp = 0):
    """Movie for a pixie-dust sprinkle: the NPC teleports in, sprinkles
    each target in attack[recipients] ('toons' or 'suits'), then teleports
    out. Returns None if the NPC could not be created."""
    toon = NPCToons.createLocalNPC(attack['npcId'])
    if toon == None:
        return
    targets = attack[recipients]
    level = 4
    battle = attack['battle']
    track = Sequence(teleportIn(attack, toon))
    def face90(target, toon, battle):
        # Turn 90 degrees off the target: (x, y) -> (-y, x).
        vec = Point3(target.getPos(battle) - toon.getPos(battle))
        vec.setZ(0)
        temp = vec[0]
        vec.setX(-vec[1])
        vec.setY(temp)
        targetPoint = Point3(toon.getPos(battle) + vec)
        toon.headsUp(battle, targetPoint)
    delay = 2.5
    effectTrack = Sequence()
    for target in targets:
        sprayEffect = BattleParticles.createParticleEffect(file='pixieSpray')
        dropEffect = BattleParticles.createParticleEffect(file='pixieDrop')
        explodeEffect = BattleParticles.createParticleEffect(file='pixieExplode')
        poofEffect = BattleParticles.createParticleEffect(file='pixiePoof')
        wallEffect = BattleParticles.createParticleEffect(file='pixieWall')
        # Particles, sound, sprinkle animation and the heal itself run in
        # parallel, staggered by the per-effect start delays.
        mtrack = Parallel(__getPartTrack(sprayEffect, 1.5, 0.5, [sprayEffect, toon, 0]), __getPartTrack(dropEffect, 1.9, 2.0, [dropEffect, target, 0]), __getPartTrack(explodeEffect, 2.7, 1.0, [explodeEffect, toon, 0]), __getPartTrack(poofEffect, 3.4, 1.0, [poofEffect, target, 0]), __getPartTrack(wallEffect, 4.05, 1.2, [wallEffect, toon, 0]), __getSoundTrack(level, 2, duration=3.1, node=toon), Sequence(Func(face90, target, toon, battle), ActorInterval(toon, 'sprinkle-dust')), Sequence(Wait(delay), Func(__healToon, target, hp)))
        effectTrack.append(mtrack)
    track.append(effectTrack)
    track.append(Func(toon.setHpr, Vec3(180.0, 0.0, 0.0)))
    track.append(teleportOut(attack, toon))
    return track
def __doSmooch(attack, hp = 0):
    """Movie for the smooch heal: the NPC teleports in, applies lipstick,
    blows a kiss at each toon in attack['toons'], then teleports out.
    Returns None if the NPC could not be created."""
    toon = NPCToons.createLocalNPC(attack['npcId'])
    if toon == None:
        return
    targets = attack['toons']
    level = 2
    battle = attack['battle']
    track = Sequence(teleportIn(attack, toon))
    lipstick = globalPropPool.getProp('lipstick')
    lipstick2 = MovieUtil.copyProp(lipstick)
    lipsticks = [lipstick, lipstick2]
    rightHands = toon.getRightHands()
    dScale = 0.5
    # Show the lipstick props in the hands, scale them in, hold for the
    # length of the smooch animation, then scale them back out.
    lipstickTrack = Sequence(Func(MovieUtil.showProps, lipsticks, rightHands, Point3(-0.27, -0.24, -0.95), Point3(-118, -10.6, -25.9)), MovieUtil.getScaleIntervals(lipsticks, dScale, MovieUtil.PNT3_NEARZERO, MovieUtil.PNT3_ONE), Wait(toon.getDuration('smooch') - 2.0 * dScale), MovieUtil.getScaleIntervals(lipsticks, dScale, MovieUtil.PNT3_ONE, MovieUtil.PNT3_NEARZERO))
    lips = globalPropPool.getProp('lips')
    dScale = 0.5
    tLips = 2.5
    # Frame 115 of 'smooch' is when the kiss is thrown.
    tThrow = 115.0 / toon.getFrameRate('smooch')
    dThrow = 0.5
    def getLipPos(toon = toon):
        # Pose the toon at the kiss frame to read the hand position.
        toon.pose('smooch', 57)
        toon.update(0)
        hand = toon.getRightHands()[0]
        return hand.getPos(render)
    effectTrack = Sequence()
    for target in targets:
        lipcopy = MovieUtil.copyProp(lips)
        lipsTrack = Sequence(Wait(tLips), Func(MovieUtil.showProp, lipcopy, render, getLipPos), Func(lipcopy.setBillboardPointWorld), LerpScaleInterval(lipcopy, dScale, Point3(3, 3, 3), startScale=MovieUtil.PNT3_NEARZERO), Wait(tThrow - tLips - dScale), LerpPosInterval(lipcopy, dThrow, Point3(target.getPos() + Point3(0, 0, target.getHeight()))), Func(MovieUtil.removeProp, lipcopy))
        delay = tThrow + dThrow
        mtrack = Parallel(lipstickTrack, lipsTrack, __getSoundTrack(level, 2, node=toon), Sequence(ActorInterval(toon, 'smooch')), Sequence(Wait(delay), ActorInterval(target, 'conked')), Sequence(Wait(delay), Func(__healToon, target, hp)))
        effectTrack.append(mtrack)
    effectTrack.append(Func(MovieUtil.removeProps, lipsticks))
    track.append(effectTrack)
    track.append(teleportOut(attack, toon))
    # NOTE(review): `target` here is the leftover loop variable, so only
    # the LAST toon's chat is cleared — verify this is intentional.
    track.append(Func(target.clearChat))
    return track
def __doToonsHit(attack, level, hp):
    # NPC_TOONS_HIT: sprinkle every toon, with play-by-play text.
    track = __doSprinkle(attack, 'toons', hp)
    pbpText = attack['playByPlayText']
    pbpTrack = pbpText.getShowInterval(TTLocalizer.MovieNPCSOSToonsHit, track.getDuration())
    return (track, pbpTrack)
def __doCogsMiss(attack, level, hp):
    # NPC_COGS_MISS: sprinkle every suit, with play-by-play text.
    track = __doSprinkle(attack, 'suits', hp)
    pbpText = attack['playByPlayText']
    pbpTrack = pbpText.getShowInterval(TTLocalizer.MovieNPCSOSCogsMiss, track.getDuration())
    return (track, pbpTrack)
def __doRestockGags(attack, level, hp):
    """NPC_RESTOCK_GAGS: smooch movie plus play-by-play text naming the
    restocked gag track (*level* is a track constant, or -1 for all)."""
    track = __doSmooch(attack, hp)
    pbpText = attack['playByPlayText']
    if level == ToontownBattleGlobals.HEAL_TRACK:
        text = TTLocalizer.MovieNPCSOSHeal
    elif level == ToontownBattleGlobals.TRAP_TRACK:
        text = TTLocalizer.MovieNPCSOSTrap
    elif level == ToontownBattleGlobals.LURE_TRACK:
        text = TTLocalizer.MovieNPCSOSLure
    elif level == ToontownBattleGlobals.SOUND_TRACK:
        text = TTLocalizer.MovieNPCSOSSound
    elif level == ToontownBattleGlobals.THROW_TRACK:
        text = TTLocalizer.MovieNPCSOSThrow
    elif level == ToontownBattleGlobals.SQUIRT_TRACK:
        text = TTLocalizer.MovieNPCSOSSquirt
    elif level == ToontownBattleGlobals.DROP_TRACK:
        text = TTLocalizer.MovieNPCSOSDrop
    elif level == -1:
        text = TTLocalizer.MovieNPCSOSAll
    # NOTE(review): a *level* matching none of the branches above leaves
    # ``text`` unbound and raises NameError below — verify all callers pass
    # a known track constant or -1.
    pbpTrack = pbpText.getShowInterval(TTLocalizer.MovieNPCSOSRestockGags % text, track.getDuration())
    return (track, pbpTrack)
def doNPCTeleports(attacks):
    """Build arrival and departure tracks for every NPC referenced by
    *attacks*.

    Returns (arrivals, unturns, npcs): arrivals teleports each NPC in and
    then turns them to their toon's heading; unturns faces them back
    (hpr 180) and plays the departures.
    """
    npcs = []
    npcDatas = []
    arrivals = Sequence()
    departures = Parallel()
    for attack in attacks:
        if attack.has_key('npcId'):
            npcId = attack['npcId']
            npc = NPCToons.createLocalNPC(npcId)
            if npc != None:
                npcs.append(npc)
                attack['npc'] = npc
                toon = attack['toon']
                battle = attack['battle']
                # Spawn slightly in front of the calling toon.
                pos = toon.getPos(battle) + offset
                hpr = toon.getHpr(battle)
                npcDatas.append((npc, battle, hpr))
                arrival = teleportIn(attack, npc, pos=pos)
                arrivals.append(arrival)
                departure = teleportOut(attack, npc)
                departures.append(departure)
    turns = Parallel()
    unturns = Parallel()
    hpr = Vec3(180.0, 0, 0)
    for npc in npcDatas:
        # npc is a (npc, battle, original-hpr) tuple from above.
        turns.append(Func(npc[0].setHpr, npc[1], npc[2]))
        unturns.append(Func(npc[0].setHpr, npc[1], hpr))
    arrivals.append(turns)
    unturns.append(departures)
    return (arrivals, unturns, npcs)
| |
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware', )
MIDDLEWARE_CLASSES = RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['dictionary.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ('gunicorn', )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
# See:http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto import S3BotoStorage


# PEP 8 (E731): use named functions instead of lambda assignments — the
# callables behave identically but carry real names in tracebacks.
def StaticRootS3BotoStorage():
    """Return an S3 storage rooted at the bucket's ``static/`` prefix."""
    return S3BotoStorage(location='static')


def MediaRootS3BotoStorage():
    """Return an S3 storage rooted at the bucket's ``media/`` prefix."""
    return S3BotoStorage(location='media')


DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='dictionary <noreply@dictionary.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[dictionary] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ("anymail", )
ANYMAIL = {
    # Required: no default supplied, so the env var must exist.
    "MAILGUN_API_KEY": env('DJANGO_MAILGUN_API_KEY'),
}
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Wrap the filesystem/app-directories loaders in the cached loader so
# templates are compiled only once per process in production.
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        # Append Redis database number 0 to the URL from the environment.
        'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'IGNORE_EXCEPTIONS': True,  # mimics memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}
# Sentry Configuration
# DSN is required; the client class can be overridden via env.
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
    'version': 1,
    # Replace any previously configured loggers with this scheme.
    'disable_existing_loggers': True,
    # Root logger: warnings and above go to Sentry.
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        # Ship ERROR and above to Sentry via raven.
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        # Keep noisy SQL logging on the console only, errors and up.
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        # Invalid Host header probes are reported to both console and Sentry.
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
    'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
| |
import numpy as np
import pandas as pd
import pytest
from pytest_regressions.testing import check_regression_fixture_workflow
@pytest.fixture
def no_regen(dataframe_regression, request):
    """Guard fixture: fail immediately if regeneration mode is active."""
    # NOTE: reads the fixture's private _force_regen flag first, then the
    # command-line option (same short-circuit order as before).
    regen_requested = (
        dataframe_regression._force_regen
        or request.config.getoption("force_regen")
    )
    if regen_requested:
        pytest.fail("--force-regen should not be used on this test.")
def test_usage_workflow(testdir, monkeypatch):
    """Run the shared record/compare/regenerate workflow for dataframe_regression.

    Generates a throwaway test module whose data comes from the patched
    ``sys.testing_get_data`` hook, then drives it through
    ``check_regression_fixture_workflow``.

    :type testdir: _pytest.pytester.TmpTestdir
    :type monkeypatch: _pytest.monkeypatch.monkeypatch
    """
    import sys

    # Fake data source the generated test module will call at run time.
    monkeypatch.setattr(
        sys, "testing_get_data", lambda: {"data": 1.1 * np.ones(50)}, raising=False
    )
    source = """
import sys
import pandas as pd
def test_1(dataframe_regression):
    contents = sys.testing_get_data()
    dataframe_regression.check(pd.DataFrame.from_dict(contents))
"""

    def get_csv_contents():
        # Read back the CSV file that the fixture recorded for test_1.
        filename = testdir.tmpdir / "test_file" / "test_1.csv"
        frame = pd.read_csv(str(filename))
        return {"data": frame["data"].values}

    def compare_arrays(obtained, expected):
        assert (obtained["data"] == expected["data"]).all()

    check_regression_fixture_workflow(
        testdir,
        source=source,
        data_getter=get_csv_contents,
        # Swap the fake data source so the regeneration step produces new data.
        data_modifier=lambda: monkeypatch.setattr(
            sys, "testing_get_data", lambda: {"data": 1.2 * np.ones(50)}, raising=False
        ),
        expected_data_1={"data": 1.1 * np.ones(50)},
        expected_data_2={"data": 1.2 * np.ones(50)},
        compare_fn=compare_arrays,
    )
def test_common_cases(dataframe_regression, no_regen):
    """Validate pass/fail reporting of the fixture on typical numeric frames."""
    # Most common case: Data is valid, is present and should pass
    data1 = 1.1 * np.ones(5000)
    data2 = 2.2 * np.ones(5000)
    dataframe_regression.check(pd.DataFrame.from_dict({"data1": data1, "data2": data2}))

    # Assertion error case 1: Data has one invalid place
    data1 = 1.1 * np.ones(5000)
    data2 = 2.2 * np.ones(5000)
    data1[500] += 0.1
    with pytest.raises(AssertionError) as excinfo:
        dataframe_regression.check(
            pd.DataFrame.from_dict({"data1": data1, "data2": data2})
        )
    obtained_error_msg = str(excinfo.value)
    # The failure banner always suggests the regeneration flag.
    expected = "\n".join(
        [
            "Values are not sufficiently close.",
            "To update values, use --force-regen option.",
        ]
    )
    assert expected in obtained_error_msg
    # The per-column report pinpoints row 500 with full float precision.
    expected = "\n".join(
        [
            "data1:",
            " obtained_data1 expected_data1 diff",
            "500 1.20000000000000018 1.10000000000000009 0.10000000000000009",
        ]
    )
    assert expected in obtained_error_msg

    # Assertion error case 2: More than one invalid data
    data1 = 1.1 * np.ones(5000)
    data2 = 2.2 * np.ones(5000)
    data1[500] += 0.1
    data1[600] += 0.2
    data2[700] += 0.3
    with pytest.raises(AssertionError) as excinfo:
        dataframe_regression.check(
            pd.DataFrame.from_dict({"data1": data1, "data2": data2})
        )
    obtained_error_msg = str(excinfo.value)
    expected = "\n".join(
        [
            "Values are not sufficiently close.",
            "To update values, use --force-regen option.",
        ]
    )
    assert expected in obtained_error_msg
    # Both offending rows of data1 are listed...
    expected = "\n".join(
        [
            "data1:",
            " obtained_data1 expected_data1 diff",
            "500 1.20000000000000018 1.10000000000000009 0.10000000000000009",
            "600 1.30000000000000004 1.10000000000000009 0.19999999999999996",
        ]
    )
    assert expected in obtained_error_msg
    # ...and data2 is reported in its own section.
    expected = "\n".join(
        [
            "data2:",
            " obtained_data2 expected_data2 diff",
            "700 2.5 2.20000000000000018 0.29999999999999982",
        ]
    )
    assert expected in obtained_error_msg

    # Assertion error case 3: More than one invalid data
    data1 = 1.1 * np.ones(5000)
    data2 = 2.2 * np.ones(5000)
    data1[500] += 0.01
    data2[500] += 0.01
    with pytest.raises(AssertionError) as excinfo:
        # Per-column tolerances: data1's loose atol absorbs its deviation,
        # so only data2 may show up in the report.
        dataframe_regression.check(
            pd.DataFrame.from_dict({"data1": data1, "data2": data2}),
            tolerances={
                "data1": dict(atol=1e-1, rtol=1e-17),
                "data2": dict(atol=1e-17, rtol=1e-17),
            },
        )
    obtained_error_msg = str(excinfo.value)
    assert " data1:" not in obtained_error_msg
    assert (
        "\n".join(
            [
                "Values are not sufficiently close.",
                "To update values, use --force-regen option.",
            ]
        )
        in obtained_error_msg
    )
    assert (
        "\n".join(
            [
                "data2:",
                " obtained_data2 expected_data2 diff",
                "500 2.20999999999999996 2.20000000000000018 0.00999999999999979",
            ]
        )
        in obtained_error_msg
    )
def test_different_data_types(dataframe_regression, no_regen):
    """A dtype mismatch with the recorded file must be reported, not coerced."""
    # Original CSV file contains integer data; booleans should not match it.
    bool_column = np.array([True] * 10)
    frame = pd.DataFrame.from_dict({"data1": bool_column})
    with pytest.raises(
        AssertionError,
        match="Data type for data data1 of obtained and expected are not the same.",
    ):
        dataframe_regression.check(frame)
class Foo:
    """Arbitrary non-numeric object used to trigger unsupported-dtype errors."""

    def __init__(self, bar):
        # Store the payload verbatim.
        self.bar = bar
# Parametrized with two non-numeric column payloads: a column of ndarrays
# and a column of arbitrary Foo objects.
@pytest.mark.parametrize(
    "array", [[np.random.randint(10, 99, 6)] * 6, [Foo(i) for i in range(4)]]
)
def test_non_numeric_data(dataframe_regression, array, no_regen):
    """Columns holding arrays or arbitrary objects are rejected up front."""
    data1 = pd.DataFrame()
    data1["data1"] = array
    with pytest.raises(
        AssertionError,
        # The error message names the offending column's dtype.
        match="Only numeric data is supported on dataframe_regression fixture.\n"
        " *Array with type '%s' was given." % (str(data1["data1"].dtype),),
    ):
        dataframe_regression.check(data1)
def test_arrays_with_different_sizes(dataframe_regression, no_regen):
    """A row count different from the recorded file must fail the check."""
    column = np.ones(10, dtype=np.float64)
    frame = pd.DataFrame.from_dict({"data1": column})
    with pytest.raises(
        AssertionError, match="Obtained and expected data shape are not the same."
    ):
        dataframe_regression.check(frame)
def test_integer_values_smoke_test(dataframe_regression, no_regen):
    """Integer columns round-trip through the regression file unchanged."""
    frame = pd.DataFrame.from_dict({"data1": np.ones(11, dtype=int)})
    dataframe_regression.check(frame)
def test_number_formats(dataframe_regression, no_regen):
    """Very large, very small and zero floats serialize consistently."""
    values = np.array([1.2345678e50, 1.2345678e-50, 0.0])
    dataframe_regression.check(pd.DataFrame.from_dict({"data1": values}))
def test_bool_array(dataframe_regression, no_regen):
    """Boolean columns are compared exactly; mismatches are listed per row."""
    data1 = np.array([True, True, True], dtype=bool)
    with pytest.raises(AssertionError) as excinfo:
        dataframe_regression.check(pd.DataFrame.from_dict({"data1": data1}))
    obtained_error_msg = str(excinfo.value)
    # The failure banner always suggests the regeneration flag.
    expected = "\n".join(
        [
            "Values are not sufficiently close.",
            "To update values, use --force-regen option.",
        ]
    )
    assert expected in obtained_error_msg
    # Every differing row is reported; the recorded file holds False values.
    expected = "\n".join(
        [
            "data1:",
            " obtained_data1 expected_data1 diff",
            "0 True False True",
            "1 True False True",
            "2 True False True",
        ]
    )
    assert expected in obtained_error_msg
def test_arrays_of_same_size(dataframe_regression):
    """Multiple equally-sized integer columns are accepted together."""
    frame = pd.DataFrame.from_dict(
        {
            "hello": np.zeros((1,), dtype=int),
            "world": np.zeros((1,), dtype=int),
        }
    )
    dataframe_regression.check(frame)
def test_string_array(dataframe_regression, no_regen):
    """String columns round-trip; mismatches are reported without numeric diffs."""
    data1 = {"potato": ["delicious", "nutritive", "yummy"]}
    dataframe_regression.check(pd.DataFrame.from_dict(data1))

    # A changed value fails; string columns get a "?" diff placeholder plus a
    # warning because a numeric diff cannot be computed.
    data1 = {"potato": ["delicious", "nutritive", "yikes"]}
    with pytest.raises(AssertionError) as excinfo:
        dataframe_regression.check(pd.DataFrame.from_dict(data1))
    obtained_error_msg = str(excinfo.value)
    assert "Values are not sufficiently close." in obtained_error_msg
    assert "To update values, use --force-regen option." in obtained_error_msg
    assert "2 yikes yummy ?" in obtained_error_msg
    assert (
        "WARNING: diffs for this kind of data type cannot be computed"
        in obtained_error_msg
    )
def test_non_pandas_dataframe(dataframe_regression):
    """Passing anything but a pandas DataFrame raises a descriptive error."""
    payload = np.ones(shape=(10, 10))
    message = (
        "Only pandas DataFrames are supported on dataframe_regression fixture.\n"
        " *Object with type '%s' was given." % (str(type(payload)),)
    )
    with pytest.raises(AssertionError, match=message):
        dataframe_regression.check(payload)
def test_dataframe_with_empty_strings(dataframe_regression):
    """Empty-string cells must survive the CSV round trip."""
    records = [
        {"a": "a", "b": "b"},
        {"a": "a1", "b": ""},
    ]
    dataframe_regression.check(pd.DataFrame.from_records(records))
| |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import tinctest
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
from gppylib.commands.base import REMOTE
from gppylib.commands.gp import GpLogFilter
from gppylib.gparray import GpArray
from gppylib.db.dbconn import DbURL, connect
# Default file that gathered cluster logs are written to.
_DEFAULT_OUT_FILE = '/tmp/cluster.logs'
# Connection defaults: the current OS user and PGPORT (5432 when unset).
_DEFAULT_USER = os.environ.get('USER')
_DEFAULT_PORT = int(os.environ.get('PGPORT', 5432))
class GpLogException(Exception):
    """Raised when gathering or scanning cluster logs fails."""
class GpLog(object):
    """
    This class lets users perform operations on logs from the cluster
    """

    @staticmethod
    def gather_log(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE,
                   dbname=_DEFAULT_USER, host='localhost', port=_DEFAULT_PORT,
                   user=_DEFAULT_USER,
                   errors_only=False, master_only=False):
        """
        Gather cluster log entries for the given window into a single file.

        @type start_time: date
        @param start_time: Start time of the duration for which logs should be gathered.
        @type end_time: date
        @param end_time: End time of the duration for which logs should be gathered.
        @type out_file: string
        @param out_file: File to which the gathered logs should be written to. Defaults to /tmp/cluster.logs
        @type host: string
        @param host: Host name for the connection. Defaults to localhost
        @type port: integer
        @param port: Port number for the connection to the cluster. Defaults to environment variable PGPORT.
        @type user: string
        @param user: Username for the connection to the cluster. Defaults to the current user.
        @type dbname: string
        @param dbname: Database name to use for the connection to the cluster. Defaults to the current user.
        @type errors_only: boolean
        @param errors_only: When set to true, gathers only errors from logs. Defaults to False.
        @type master_only: boolean
        @param master_only: When set to true, gathers logs only from the master host.
        """
        try:
            # TODO - When the cluster is down or this fails,
            # no exception is thrown from run_sql_command
            GpLog._gather_log_from_gp_toolkit(start_time=start_time,
                                              end_time=end_time,
                                              out_file=out_file,
                                              host=host,
                                              port=port,
                                              user=user,
                                              dbname=dbname,
                                              errors_only=errors_only,
                                              master_only=master_only)
        except Exception, e:
            # Wrap any failure in a domain-specific exception after logging it.
            tinctest.logger.exception("Gather log failed: %s" %e)
            raise GpLogException("Gathering log failed. Make sure you can connect to the cluster.")
        # TODO - use this as a backup if gp toolkit fails
        """
        GpLog._gather_log_from_gp_log_filter(start_time=start_time,
                                             end_time=end_time,
                                             out_file=out_file,
                                             host=host,
                                             port=port,
                                             user=user,
                                             dbname=dbname,
                                             errors_only=errors_only,
                                             master_only=master_only)
        """

    @staticmethod
    def check_log_for_errors(start_time, end_time=None, host='localhost',
                             user=_DEFAULT_USER, port=_DEFAULT_PORT,
                             dbname=_DEFAULT_USER):
        """
        Check logs in the given duration for any error messages.
        Returns True / False based on whether errors were found in the logs.
        @type start_time: date
        @param start_time: Start time of the duration for which logs should be gathered.
        @type end_time: date
        @param end_time: End time of the duration for which logs should be gathered.
        @type host: string
        @param host: Host name for the connection. Defaults to localhost
        @type port: integer
        @param port: Port number for the connection to the cluster. Defaults to environment variable PGPORT.
        @type user: string
        @param user: Username for the connection to the cluster. Defaults to the current user.
        @type dbname: string
        @param dbname: Database name to use for the connection to the cluster. Defaults to the current user.
        @rtype: boolean
        @return: Returns True if there are errors found in the log in the given duration, False otherwise.
        """
        # Default the end of the window to "now" when no end_time is given.
        format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time))
        if end_time:
            format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time))
        else:
            format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
        tinctest.logger.info("checking log from %s to %s" % (format_start_time, format_end_time))
        sql_cmd = "select logseverity,count(logseverity) from gp_toolkit.gp_log_database " + \
                  "where (logseverity=\'FATAL\' or logseverity=\'ERROR\' or logseverity='PANIC') " + \
                  "and (logtime >=\'%s\' and logtime <= \'%s\') group by logseverity;" % \
                  (format_start_time, format_end_time)
        try:
            result = PSQL.run_sql_command(sql_cmd, dbname=dbname, host=host, port=port, username=user, flags='-a -x')
            # Expanded psql output (-x) labels every returned row with a
            # "RECORD" header, so any occurrence means at least one
            # FATAL/ERROR/PANIC row matched the window.
            if "RECORD" in result:
                return True
        except Exception, e:
            tinctest.logger.exception("Failed while checking logs - %s" %e)
            raise GpLogException("Failed while checking logs. Make sure you can connect to the cluster")
        return False

    @staticmethod
    def _test_connection(host='localhost',port=_DEFAULT_PORT, user=_DEFAULT_USER,
                         dbname=_DEFAULT_USER):
        """Return True if a connection to the cluster can be opened, False otherwise."""
        try:
            connect(DbURL(hostname=host,
                          port=port,
                          dbname=dbname,
                          username=user))
        except Exception, expt:
            tinctest.logger.error("Failed to connect to hostname %s, port %s, database %s, as user %s"
                                  % (host, port, dbname, user))
            tinctest.logger.exception(expt)
            return False
        return True

    @staticmethod
    def _gather_log_from_gp_log_filter(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE, host='localhost',
                                       port=_DEFAULT_PORT, user=_DEFAULT_USER, dbname=_DEFAULT_USER, errors_only=False, master_only=False):
        """
        This retrieves log messages from all segments that happened within the last
        'duration' seconds. The format of start_time and end_time is YYYY-MM-DD [hh:mm[:ss]]
        The tuples returned are (dbid, hostname, datadir, logdata). sorted by dbid.
        Returns True/False based on whether matching log entries were found.
        """
        format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(start_time))
        if end_time:
            format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(end_time))
        else:
            format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime())
        tinctest.logger.info("Collecting log from %s to %s into the file -%s" % (format_start_time,format_end_time, out_file))
        # Segment layout is read from the catalog of the master we connect to.
        array = GpArray.initFromCatalog(DbURL(hostname=host, port=port, username=user, dbname=dbname), True)
        log_chunks = []
        for seg in array.getDbList():
            tinctest.logger.info("Collecting log for segment - %s : %s" %(seg.getSegmentHostName(), seg.getSegmentContentId()))
            # Content id -1 is the master; skip everything else in master_only mode.
            if master_only and seg.getSegmentContentId() != -1:
                continue
            # Run gplogfilter remotely against the newest csv log in pg_log.
            cmd = GpLogFilter('collect log chunk',
                              '\\`ls -rt %s | tail -1\\`' % os.path.join(seg.getSegmentDataDirectory(), 'pg_log', '*.csv'),
                              start=format_start_time, end=format_end_time,
                              trouble=errors_only,
                              ctxt=REMOTE,
                              remoteHost=seg.getSegmentHostName())
            cmd.run()
            rc = cmd.get_results().rc
            if rc:
                # Best-effort: log the failure and continue with other segments.
                tinctest.logger.warning("Failed command execution %s : %s" %(cmd, cmd.get_results().stderr))
                continue
            log_data = cmd.get_results().stdout
            if not log_data:
                tinctest.logger.warning("No log data returned for the given time frame.")
            else:
                log_chunks.append((seg.getSegmentContentId(),
                                   seg.getSegmentHostName(),
                                   seg.getSegmentDataDirectory(),
                                   log_data))
        if log_chunks:
            tinctest.logger.info("Writing log data to file - %s" %(out_file))
            # One banner-delimited section per segment chunk.
            with open(out_file, 'w') as f:
                for part in log_chunks:
                    f.write("-"*70)
                    f.write("\n DBID %s (%s:%s)\n" % (part[0], part[1], part[2]))
                    f.write("-"*70)
                    f.write("\n%s" % part[3])
                    f.write("\n\n")

    @staticmethod
    def _gather_log_from_gp_toolkit(start_time, end_time=None, out_file=_DEFAULT_OUT_FILE, host='localhost',
                                    port=_DEFAULT_PORT, user=_DEFAULT_USER, dbname=_DEFAULT_USER, errors_only=False, master_only=False):
        """
        This retrieves log messages from all segments that happened within the last
        'duration' seconds. The format of start_time and end_time is YYYY-MM-DD [hh:mm[:ss]]
        This function gathers logs by querying external tables in gptoolkit. If the cluster is not up and running,
        use _gather_log_from_gp_log_filter which uses the utility gplogfilter to gather logs.
        """
        format_start_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(start_time))
        if end_time:
            format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(end_time))
        else:
            format_end_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
        tinctest.logger.info("Collecting log from %s to %s" % (format_start_time,format_end_time))
        sql_cmd = "select * from gp_toolkit.gp_log_database where logtime >=\'%s\' and logtime <= \'%s\';" % \
                  (format_start_time, format_end_time)
        # NOTE(review): errors_only and master_only are accepted but not
        # honored here — the query returns all rows from all segments.
        PSQL.run_sql_command(sql_cmd, out_file=out_file, dbname=dbname,
                             host=host, port=port, username=user, flags='-a -x')
| |
import logging
import logging.handlers
import unittest
import redispatcher
from redispatcher import DebugRedis, Redis, fmtcmd, logcmd, wirecmd
# logging.NullHandler was added in Python 2.7; provide a fallback so the
# module imports cleanly on older interpreters.
try:
    NullHandler = logging.NullHandler
except AttributeError:
    class NullHandler(logging.Handler):
        # Swallow every record; exists only to silence "no handlers" warnings.
        def emit(self, record): pass

# Module-level logger, kept quiet unless the host application adds handlers.
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class Stub(object):
    """Self-recording test double.

    Every call is appended to ``called`` as ``(args, kwargs)``.  Per call the
    stub raises the next queued exception from ``raises``, else returns the
    next queued value from ``returns``, else returns a fresh Stub so call
    chains keep working.  ``patch``/``unpatch`` install and remove the stub
    as attribute ``attr`` on ``obj``.
    """

    def __init__(self, obj=None, attr=None, returns=None, raises=None):
        self.obj = obj
        self.attr = attr
        self.unpatched = None
        self.called = []
        # Bug fix: the original signature used mutable default arguments
        # (returns=[], raises=[]).  Those lists were shared by every Stub
        # constructed without explicit values and were mutated across
        # instances by pop(0) in __call__.
        self.returns = [] if returns is None else returns
        self.raises = [] if raises is None else raises

    def __call__(self, *args, **kwargs):
        # Record the invocation, then honor queued exceptions/returns.
        self.called.append((args, kwargs))
        if self.raises:
            raise self.raises.pop(0)
        elif self.returns:
            return self.returns.pop(0)
        # Default: chainable — hand back another stub of the same class.
        return self.__class__(self.obj, self.attr)

    def __getattr__(self, attr):
        # Any unknown attribute lookup yields a fresh stub.
        return self.__class__()

    def patch(self, stubs=None):
        """Install this stub as obj.attr, remembering the original value.

        If ``stubs`` is given, self is appended to it so callers can batch
        unpatch later.  Returns self for chaining.
        """
        self.unpatched = getattr(self.obj, self.attr)
        setattr(self.obj, self.attr, self)
        if stubs is not None:
            stubs.append(self)
        return self

    def unpatch(self):
        """Restore the original attribute saved by patch()."""
        setattr(self.obj, self.attr, self.unpatched)
        self.unpatched = None
class BaseTest(unittest.TestCase):
    """Common base class for every TestCase in this module."""
class TestUtils(BaseTest):
    """Tests for the wire-protocol helpers ``wirecmd`` and ``fmtcmd``.

    Fixed: the deprecated ``assertEquals`` alias is replaced with
    ``assertEqual`` throughout; behavior is otherwise unchanged.
    """

    def test_wirecmd_noargs(self):
        # A bare command serializes as a single-element multibulk.
        result = wirecmd("COMMAND", tuple())
        self.assertEqual(result, "*1\r\n$7\r\nCOMMAND\r\n")

    def test_wirecmd_args(self):
        result = wirecmd("COMMAND", ("arg1", "arg2"))
        self.assertEqual(result,
                         "*3\r\n$7\r\nCOMMAND\r\n$4\r\narg1\r\n$4\r\narg2\r\n")

    def test_wirecmd_separator(self):
        # The separator is injectable, which makes the framing readable here.
        result = wirecmd("COMMAND", ("arg1", "arg2"), separator="!")
        self.assertEqual(result,
                         "*3!$7!COMMAND!$4!arg1!$4!arg2!")

    def test_fmtcmd_noargs(self):
        result = fmtcmd("COMMAND", tuple())
        self.assertEqual(result, "%s")

    def test_fmtcmd_args(self):
        # One %r placeholder per argument, suitable for lazy logging.
        result = fmtcmd("COMMAND", ("arg1", "arg2"))
        self.assertEqual(result, "%s %r %r")

    def test_fmtcmd_separator(self):
        result = fmtcmd("COMMAND", ("arg1", "arg2"), separator="!")
        self.assertEqual(result, "%s!%r!%r")
def tmplog(name="tmp", size=100):
    """Build a quiet logger that accumulates records in ``logger.buffer``."""
    logger = logging.getLogger(name)
    # Keep records away from the root logger's handlers.
    logger.propagate = 0
    capture = logging.handlers.BufferingHandler(size)
    logger.addHandler(capture)
    # Expose the handler's record list directly on the logger object.
    logger.buffer = capture.buffer
    return logger
class TestLogging(BaseTest):
    """Tests for ``logcmd``, using a buffering logger to capture records."""

    def setUp(self):
        BaseTest.setUp(self)
        # Records emitted through the "tmp" logger land in self.log.buffer.
        self.log = tmplog()

    def test_logcmd_explicit_logger(self):
        # Passing a logger object directly bypasses the name lookup.
        logcmd(None, "COMMAND", ("arg1", "arg2"), log=self.log)
        self.assertEqual(len(self.log.buffer), 1)
        record = self.log.buffer[0]
        # The record carries a lazy format string plus args; formatting only
        # happens if the record is actually rendered.
        self.assertEqual(record.msg, "%s %r %r")
        self.assertEqual(record.args, ("COMMAND", "arg1", "arg2"))

    def test_logcmd_get_logger(self):
        # Passing a name makes logcmd resolve the logger itself.
        logcmd("tmp", "COMMAND", ("arg1", "arg2"))
        self.assertEqual(len(self.log.buffer), 1)
        record = self.log.buffer[0]
        self.assertEqual(record.msg, "%s %r %r")
        self.assertEqual(record.args, ("COMMAND", "arg1", "arg2"))
class TestRedis(BaseTest):
    """Tests for the Redis dispatcher with socket/asyncore machinery stubbed."""

    def setUp(self):
        BaseTest.setUp(self)
        # Stub socket creation and the asyncore dispatcher plumbing so the
        # tests never touch the network.
        self.socket = Stub(redispatcher.socket, "socket").patch()
        self.patched = [
            Stub(redispatcher.asyncore.dispatcher, "__init__").patch(),
            Stub(redispatcher.asyncore.dispatcher, "connect").patch(),
            Stub(redispatcher.asyncore.dispatcher, "set_socket").patch(),
            self.socket,
        ]
        self.redis = Redis()
        # Route module-level logging into an inspectable buffer.
        self.log = tmplog()
        self.log.orig = redispatcher.log
        redispatcher.log = self.log

    def tearDown(self):
        BaseTest.tearDown(self)
        # Undo every patch and restore the module logger.
        for stub in self.patched:
            stub.unpatch()
        redispatcher.log = self.log.orig

    def test_init(self):
        # Constructing a Redis instance must not raise with stubs in place.
        redis = Redis()

    def test_connect(self):
        redis = self.redis
        sock = Stub()
        redis.connect(sock=sock, data="data", callback="callback")
        # A synthetic CONNECT entry is queued so the callback fires once
        # the connection is established.
        self.assertEqual(redis.callbacks, [("CONNECT", (), "callback", "data")])

    def test_connect_build_sock(self):
        redis = self.redis
        # Without an explicit sock, connect() creates one via socket.socket.
        redis.connect()
        self.assertEqual(len(self.socket.called), 1)

    def test_do(self):
        redis = self.redis
        redis.do("callback", "data", "command", "arg1", "arg2")
        # The wire-format command is buffered for handle_write...
        self.assertEqual(redis.buffer,
                         "*3\r\n$7\r\ncommand\r\n$4\r\narg1\r\n$4\r\narg2\r\n")
        # ...and the callback is queued to be matched with the reply.
        self.assertEqual(redis.callbacks,
                         [('command', ('arg1', 'arg2'), 'callback', 'data')])

    def test_log_silent(self):
        # The base class logging hooks are no-ops; nothing may be recorded.
        redis = self.redis
        redis.log("log")
        redis.log_info("log_info")
        redis.log_send("log_send", ())
        redis.log_recv("log_recv")
        self.assertEqual(self.log.buffer, [])
class TestRedisWriter(BaseTest):
    """Tests for Redis.handle_write with send() stubbed out."""

    def setUp(self):
        BaseTest.setUp(self)
        self.redis = Redis()
        # send() reports zero bytes written unless a test patches it again.
        Stub(self.redis, "send", returns=[0]).patch()
        self.redis.buffer = self.buffer = "buffer"

    def test_handle_write_none(self):
        # Nothing was sent: the outgoing buffer must remain untouched.
        redis = self.redis
        result = redis.handle_write()
        self.assertEqual(result, None)
        self.assertEqual(redis.buffer, self.buffer)
        self.assertEqual(redis.send.called, [((self.buffer,), {})])

    def test_handle_write_some(self):
        # Partial send: only the unsent tail stays in the buffer.
        redis = self.redis
        sent = 3
        Stub(redis, "send", returns=[sent]).patch()
        result = redis.handle_write()
        self.assertEqual(redis.buffer, self.buffer[sent:])
class TestRedisReader(BaseTest):
    """Tests for Redis.handle_read and reply-to-callback dispatching."""

    def setUp(self):
        BaseTest.setUp(self)
        self.redis = Redis()
        # recv() and the protocol reader are stubbed; each test controls
        # what reader.gets() yields.
        Stub(self.redis, "recv").patch()
        Stub(self.redis, "reader").patch()
        self.redis.callbacks = self.callbacks = [("command", "args", "callback", "data")]

    def test_handle_read_incomplete(self):
        # An incomplete reply (gets() -> False) leaves the callbacks queued.
        redis = self.redis
        Stub(redis.reader, "gets", returns=[False]).patch()
        result = redis.handle_read()
        self.assertEqual(result, None)
        self.assertEqual(redis.callbacks, self.callbacks)

    def test_handle_read_exception(self):
        # A protocol error closes the connection and propagates; queued
        # callbacks stay untouched.
        redis = self.redis
        error = redispatcher.ProtocolError
        Stub(redis.reader, "gets", raises=[error()]).patch()
        Stub(redis, "close").patch()
        self.assertRaises(error, redis.handle_read)
        self.assertEqual(len(redis.close.called), 1)
        self.assertEqual(redis.callbacks, self.callbacks)

    def test_handle_read_callback(self):
        # A complete reply pops the oldest callback entry and invokes it
        # with (command, args, data, reply).
        redis = self.redis
        Stub(redis.reader, "gets", returns=["reply", False]).patch()
        callback = Stub()
        redis.callbacks = [("command", "args", callback, "data")]
        result = redis.handle_read()
        self.assertEqual(redis.callbacks, [])
        self.assertEqual(callback.called,
                         [(('command', 'args', 'data', 'reply'), {})])
class TestDebugRedis(BaseTest):
    """Tests for DebugRedis, whose logging hooks actually emit records."""

    def setUp(self):
        # Consistency fix: call the base implementation like every other
        # TestCase subclass in this module does.
        BaseTest.setUp(self)
        self.redis = DebugRedis()
        # Route module logging into an inspectable buffer at DEBUG level.
        self.log = tmplog()
        self.log.level = logging.DEBUG
        self.log.orig = redispatcher.log
        redispatcher.log = self.log
        self.patched = []

    def tearDown(self):
        BaseTest.tearDown(self)
        # Restore the module logger and undo any per-test patches.
        redispatcher.log = self.log.orig
        for stub in self.patched:
            stub.unpatch()

    def test_log(self):
        # log() emits the message verbatim with no formatting args.
        redis = self.redis
        message = "message"
        result = redis.log(message)
        self.assertEqual(result, None)
        self.assertEqual(len(self.log.buffer), 1)
        record = self.log.buffer[0]
        self.assertEqual(record.msg, message)
        self.assertEqual(record.args, ())

    def test_log_info(self):
        redis = self.redis
        message = "message"
        result = redis.log_info(message)
        self.assertEqual(result, None)
        self.assertEqual(len(self.log.buffer), 1)
        record = self.log.buffer[0]
        self.assertEqual(record.msg, message)
        self.assertEqual(record.args, ())

    def test_log_send(self):
        # Outgoing commands are delegated to logcmd on the client.tx logger.
        redis = self.redis
        logcmd = Stub(redispatcher, "logcmd").patch(self.patched)
        result = redis.log_send("command", "args")
        self.assertEqual(result, None)
        self.assertEqual(logcmd.called,
                         [(('redispatcher.client.tx', 'command', 'args'), {})])

    def test_log_recv(self):
        # Replies are logged lazily through a "%r" format string.
        redis = self.redis
        getLogger = Stub(logging, "getLogger", returns=[self.log]).patch(self.patched)
        message = "reply"
        result = redis.log_recv(message)
        self.assertEqual(result, None)
        self.assertEqual(len(self.log.buffer), 1)
        record = self.log.buffer[0]
        self.assertEqual(record.msg, "%r")
        self.assertEqual(record.args, (message,))
| |
# Copyright 2015 Openstack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The view list module handles creating Jenkins List views.
To create a list view specify ``list`` in the ``view-type`` attribute
to the :ref:`view_list` definition.
:View Parameters:
* **name** (`str`): The name of the view.
* **view-type** (`str`): The type of view.
* **description** (`str`): A description of the view. (default '')
* **filter-executors** (`bool`): Show only executors that can
execute the included views. (default false)
* **filter-queue** (`bool`): Show only included jobs in builder
queue. (default false)
* **job-name** (`list`): List of jobs to be included.
* **job-filters** (`dict`): Job filters to be included. Requires
:jenkins-wiki:`View Job Filters <View+Job+Filters>`
* **most-recent** (`dict`)
:most-recent:
* **max-to-include** (`int`): Maximum number of jobs
to include. (default 0)
* **check-start-time** (`bool`): Check job start
time. (default false)
* **build-duration** (`dict`)
:build-duration:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **build-duration-type** ('str'): Duration of the
build. (default Latest)
* **amount-type**: ('str'): Duration in hours,
days or builds. (default Hours)
* **amount**: ('int'): How far back to check.
(default 0)
* **less-than**: ('bool'): Check build duration
less than or more than. (default True)
* **build-duration-minutes**: ('int'): Build
duration minutes. (default 0)
* **build-trend** (`dict`)
:build-trend:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **build-trend-type** ('str'): Duration of the
build. (default Latest)
* **amount-type**: ('str'): Duration in hours,
days or builds. (default Hours)
* **amount**: ('int'): How far back to check.
(default 0)
* **status**: ('str'): Job status.
(default Completed)
* **job-status** (`dict`)
:job-status:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **unstable** ('bool'): Jobs with status
unstable. (default False)
* **failed** ('bool'): Jobs with status
failed. (default False)
* **aborted** ('bool'): Jobs with status
aborted. (default False)
* **disabled** ('bool'): Jobs with status
disabled. (default False)
* **stable** ('bool'): Jobs with status
stable. (default False)
* **fallback** (`dict`)
:fallback:
* **fallback-type** ('str'): Fallback type to include/exclude
for all jobs in a view, if no jobs have been included by
previous filters. (default REMOVE_ALL_IF_ALL_INCLUDED)
* **build-status** (`dict`)
:build-status:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **never-built** ('bool'): Jobs that are never
built. (default False)
* **building** ('bool'): Jobs that are being
built. (default False)
* **in-build-queue** ('bool'): Jobs that are in
the build queue. (default False)
* **user-relevence** (`dict`)
:user-relevence:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **build-count** ('str'): Count of builds.
(default AtLeastOne)
* **amount-type**: ('str'): Duration in hours,
days or builds. (default Hours)
* **amount**: ('int'): How far back to check.
(default 0)
* **match-user-id** ('bool'): Jobs matching
user-id. (default False)
* **match-user-fullname** ('bool'): Jobs
matching user fullname. (default False)
* **ignore-case** ('bool'): Ignore case.
(default False)
* **ignore-whitespace** ('bool'): Ignore
whitespace. (default False)
* **ignore-non-alphaNumeric** ('bool'): Ignore
non-alphaNumeric. (default False)
* **match-builder** ('bool'): Jobs matching
builder. (default False)
* **match-email** ('bool'): Jobs matching
email. (default False)
* **match-scm-changes** ('bool'): Jobs matching
scm changes. (default False)
* **regex-job** (`dict`)
:regex-job:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **regex-name** ('str'): Regular expression name.
(default '')
* **regex** ('str'): Regular expression. (default '')
* **job-type** (`dict`)
:job-type:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **job-type** ('str'): Type of Job.
(default hudson.model.FreeStyleProject)
* **parameter** (`dict`)
:parameter:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **name** ('str'): Job name to match. (default '')
* **value** ('str'): Value to match. (default '')
* **desc** ('str'): Description to match. (default '')
* **use-default-value** ('bool'): Use default value.
(default False)
* **match-builds-in-progress** ('bool'): Match build in
progress. (default False)
* **match-all-builds** ('bool'): Match all builds.
(default False)
* **max-builds-to-match** ('int'): Maximum builds to match.
(default 0)
* **other-views** (`dict`)
:other-views:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **view-name** ('str'): View name.
(default select a view other than this one)
* **scm** (`dict`)
:scm:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **scm-type** ('str'): Type of SCM.
(default hudson.scm.NullSCM)
* **secured-job** (`dict`)
:secured-job:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **user-permissions** (`dict`)
:user-permissions:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **configure** ('bool'): User with configure permissions.
(default false)
* **amount-type**: ('bool'): User with build permissions.
(default false)
* **amount**: ('bool'): User with workspace permissions.
(default false)
* **permission-check**: ('str'): Match user permissions.
(default MustMatchAll)
* **upstream-downstream** (`dict`)
:upstream-downstream:
* **include-upstream** ('bool'): Jobs that match upstream.
(default False)
* **include-downstream** ('bool'): Jobs that match downstream.
(default False)
* **recursive** ('bool'): Jobs that are recursive.
(default False)
* **exclude-originals** ('bool'): Jobs that are originals.
(default False)
* **unclassified** (`dict`)
:unclassified:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **columns** (`list`): List of columns to be shown in view.
* **regex** (`str`): Regular expression for selecting jobs
(optional)
* **recurse** (`bool`): Recurse in subfolders. (default false)
* **status-filter** (`bool`): Filter job list by enabled/disabled
status. (optional)
Example:
.. literalinclude::
/../../tests/views/fixtures/view_list001.yaml
Example:
.. literalinclude::
/../../tests/views/fixtures/view_list002.yaml
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
import jenkins_jobs.modules.view_jobfilters as view_jobfilters
# Mapping from the short column names accepted in YAML to the Jenkins column
# implementation emitted in the view XML.  Each value is either:
#   * a plain string: the XML tag name (may embed a plugin="..." attribute as
#     raw text via implicit string-literal concatenation), or
#   * a list: [[tag-name, attribute-dict], raw-xml-child, ...] for columns
#     that need XML attributes plus fixed child elements.
COLUMN_DICT = {
    'status': 'hudson.views.StatusColumn',
    'weather': 'hudson.views.WeatherColumn',
    'job': 'hudson.views.JobColumn',
    'last-success': 'hudson.views.LastSuccessColumn',
    'last-failure': 'hudson.views.LastFailureColumn',
    'last-duration': 'hudson.views.LastDurationColumn',
    'build-button': 'hudson.views.BuildButtonColumn',
    'last-stable': 'hudson.views.LastStableColumn',
    'robot-list': 'hudson.plugins.robot.view.RobotListViewColumn',
    'find-bugs': 'hudson.plugins.findbugs.FindBugsColumn',
    'jacoco': 'hudson.plugins.jacococoveragecolumn.JaCoCoColumn',
    'git-branch': 'hudson.plugins.git.GitBranchSpecifierColumn',
    'schedule-build':
        'org.jenkinsci.plugins.schedulebuild.ScheduleBuildButtonColumn',
    'priority-sorter': 'jenkins.advancedqueue.PrioritySorterJobColumn',
    'build-filter': 'hudson.views.BuildFilterColumn',
    'desc': 'jenkins.branch.DescriptionColumn',
    'policy-violations':
        'com.sonatype.insight.ci.hudson.QualityColumn '
        'plugin="sonatype-clm-ci"',
    'member-graph-view':
        'com.barchart.jenkins.cascade.GraphViewColumn '
        'plugin="maven-release-cascade"',
    'extra-tests-total': [
        ['jenkins.plugins.extracolumns.TestResultColumn',
         {'plugin': 'extra-columns'}],
        '<testResultFormat>2</testResultFormat>'],
    'extra-tests-failed': [
        ['jenkins.plugins.extracolumns.TestResultColumn',
         {'plugin': 'extra-columns'}],
        '<testResultFormat>3</testResultFormat>'],
    'extra-tests-passed': [
        ['jenkins.plugins.extracolumns.TestResultColumn',
         {'plugin': 'extra-columns'}],
        '<testResultFormat>4</testResultFormat>'],
    'extra-tests-skipped': [
        ['jenkins.plugins.extracolumns.TestResultColumn',
         {'plugin': 'extra-columns'}],
        '<testResultFormat>5</testResultFormat>'],
    'extra-tests-format-0': [
        ['jenkins.plugins.extracolumns.TestResultColumn',
         {'plugin': 'extra-columns'}],
        '<testResultFormat>0</testResultFormat>'],
    'extra-tests-format-1': [
        ['jenkins.plugins.extracolumns.TestResultColumn',
         {'plugin': 'extra-columns'}],
        '<testResultFormat>1</testResultFormat>'],
    'extra-build-description': [
        ['jenkins.plugins.extracolumns.BuildDescriptionColumn',
         {'plugin': 'extra-columns'}],
        '<columnWidth>3</columnWidth>', '<forceWidth>false</forceWidth>'],
    'extra-build-parameters': [
        ['jenkins.plugins.extracolumns.BuildParametersColumn',
         {'plugin': 'extra-columns'}],
        '<singlePara>false</singlePara>', '<parameterName/>'],
    'extra-last-user-name':
        'jenkins.plugins.extracolumns.UserNameColumn'
        ' plugin="extra-columns"',
    'extra-last-output':
        'jenkins.plugins.extracolumns.LastBuildConsoleColumn'
        ' plugin="extra-columns"',
    'extra-workspace-link':
        'jenkins.plugins.extracolumns.WorkspaceColumn '
        'plugin="extra-columns"',
    'extra-configure-button':
        'jenkins.plugins.extracolumns.ConfigureProjectColumn'
        ' plugin="extra-columns"',
}

# Columns rendered when the view definition does not list any explicitly.
DEFAULT_COLUMNS = ['status', 'weather', 'job', 'last-success', 'last-failure',
                   'last-duration', 'build-button']
class List(jenkins_jobs.modules.base.Base):
    """XML generator for a Jenkins ``ListView`` (``hudson.model.ListView``).

    Consumes the view definition dict parsed from YAML and emits the root
    XML element describing the view: scalar attributes, explicit job names,
    job filters, columns, and the optional regex/recurse/status filters.
    """

    sequence = 0

    def root_xml(self, data):
        """Build and return the ``hudson.model.ListView`` element tree.

        Args:
            data: dict of view settings parsed from YAML.

        Returns:
            xml.etree.ElementTree.Element: the view's root element.
        """
        root = XML.Element('hudson.model.ListView')

        # Required/simple scalar settings; 'name' is mandatory.
        mapping = [
            ('name', 'name', None),
            ('description', 'description', ''),
            ('filter-executors', 'filterExecutors', False),
            ('filter-queue', 'filterQueue', False),
        ]
        helpers.convert_mapping_to_xml(root, data, mapping, fail_required=True)

        XML.SubElement(root, 'properties',
                       {'class': 'hudson.model.View$PropertyList'})

        jn_xml = XML.SubElement(root, 'jobNames')
        jobnames = data.get('job-name', None)
        XML.SubElement(
            jn_xml,
            'comparator', {
                'class': 'hudson.util.CaseInsensitiveComparator'
            }
        )
        if jobnames is not None:
            # Job names must be sorted (case-insensitively) in the xml.
            jobnames = sorted(jobnames, key=str.lower)
            for jobname in jobnames:
                XML.SubElement(jn_xml, 'string').text = str(jobname)

        # Delegate each configured job filter to the matching helper in
        # view_jobfilters ('-' in the YAML name maps to '_' in the function
        # name).  NOTE: 'job-filters' is expected to be a mapping here
        # (iterated by key, then looked up via .get()).
        job_filter_xml = XML.SubElement(root, 'jobFilters')
        jobfilters = data.get('job-filters', [])
        for jobfilter in jobfilters:
            # Renamed from 'filter' so the builtin is not shadowed.
            filter_func = getattr(view_jobfilters, jobfilter.replace('-', '_'))
            filter_func(job_filter_xml, jobfilters.get(jobfilter))

        c_xml = XML.SubElement(root, 'columns')
        columns = data.get('columns', DEFAULT_COLUMNS)
        for column in columns:
            if isinstance(column, dict):
                # Parameterized column entry; only 'extra-build-parameter'
                # is currently understood, other dict entries are ignored.
                if 'extra-build-parameter' in column:
                    p_name = column['extra-build-parameter']
                    x = XML.SubElement(
                        c_xml,
                        'jenkins.plugins.extracolumns.BuildParametersColumn',
                        plugin='extra-columns'
                    )
                    x.append(XML.fromstring(
                        '<singlePara>true</singlePara>'))
                    x.append(XML.fromstring(
                        '<parameterName>%s</parameterName>' % p_name))
            else:
                if column in COLUMN_DICT:
                    if isinstance(COLUMN_DICT[column], list):
                        # [[tag, attrs], raw-child, ...] form: create the
                        # element, then append the fixed child fragments.
                        x = XML.SubElement(c_xml, COLUMN_DICT[column][0][0],
                                           **COLUMN_DICT[column][0][1])
                        for tag in COLUMN_DICT[column][1:]:
                            x.append(XML.fromstring(tag))
                    else:
                        XML.SubElement(c_xml, COLUMN_DICT[column])

        # Optional settings; missing keys are simply skipped.
        mapping = [
            ('regex', 'includeRegex', None),
            ('recurse', 'recurse', False),
            ('status-filter', 'statusFilter', None),
        ]
        helpers.convert_mapping_to_xml(
            root, data, mapping, fail_required=False)
        return root
| |
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UserProfileResponse(object):
    """Swagger model for the Cloudbreak user-profile response payload.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Attribute name -> swagger type of that attribute.
    swagger_types = {
        'credential': 'CredentialResponse',
        'image_catalog': 'ImageCatalogShortResponse',
        'owner': 'str',
        'account': 'str',
        'username': 'str',
        'ui_properties': 'dict(str, object)'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'credential': 'credential',
        'image_catalog': 'imageCatalog',
        'owner': 'owner',
        'account': 'account',
        'username': 'username',
        'ui_properties': 'uiProperties'
    }

    def __init__(self, credential=None, image_catalog=None, owner=None,
                 account=None, username=None, ui_properties=None):
        """UserProfileResponse - a model defined in Swagger."""
        supplied = {
            'credential': credential,
            'image_catalog': image_catalog,
            'owner': owner,
            'account': account,
            'username': username,
            'ui_properties': ui_properties,
        }
        # Backing fields start as None; the property setter runs only for
        # values that were actually supplied.
        for name, value in supplied.items():
            setattr(self, '_' + name, None)
            if value is not None:
                setattr(self, name, value)

    @property
    def credential(self):
        """The credential of this UserProfileResponse (CredentialResponse)."""
        return self._credential

    @credential.setter
    def credential(self, credential):
        """Set the credential of this UserProfileResponse."""
        self._credential = credential

    @property
    def image_catalog(self):
        """The image_catalog of this UserProfileResponse
        (ImageCatalogShortResponse)."""
        return self._image_catalog

    @image_catalog.setter
    def image_catalog(self, image_catalog):
        """Set the image_catalog of this UserProfileResponse."""
        self._image_catalog = image_catalog

    @property
    def owner(self):
        """The owner of this UserProfileResponse (str)."""
        return self._owner

    @owner.setter
    def owner(self, owner):
        """Set the owner of this UserProfileResponse."""
        self._owner = owner

    @property
    def account(self):
        """The account of this UserProfileResponse (str)."""
        return self._account

    @account.setter
    def account(self, account):
        """Set the account of this UserProfileResponse."""
        self._account = account

    @property
    def username(self):
        """The username of this UserProfileResponse (str)."""
        return self._username

    @username.setter
    def username(self, username):
        """Set the username of this UserProfileResponse."""
        self._username = username

    @property
    def ui_properties(self):
        """The ui_properties of this UserProfileResponse (dict(str, object))."""
        return self._ui_properties

    @ui_properties.setter
    def ui_properties(self, ui_properties):
        """Set the ui_properties of this UserProfileResponse."""
        self._ui_properties = ui_properties

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(value):
            # Recursively convert nested models via their own to_dict().
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v
                        for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (k, v.to_dict() if hasattr(v, 'to_dict') else v)
                    for k, v in value.items())
            return value

        return dict((attr, _plain(getattr(self, attr)))
                    for attr in self.swagger_types)

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        return (isinstance(other, UserProfileResponse) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """True when the two objects are not equal."""
        return not self == other
| |
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Generic SQL database access object.
"""
# Public API of this module.
__all__ = [
    "db_prefix",
    "DatabaseError",
    "AbstractSQLDatabase",
]

import os

# Prefer the stdlib sqlite3 binding; fall back to pysqlite2 on Pythons
# that lack it.
try:
    import sqlite3 as sqlite
except ImportError:
    from pysqlite2 import dbapi2 as sqlite
# NOTE(review): presumably an import-time smoke test so a broken sqlite
# install fails fast rather than at first database use — confirm.
sqlite.connect(":memory:")

from twext.python.log import Logger

log = Logger()

# Filename prefix used for database files.
db_prefix = ".db."

# Re-export the driver's error type so callers need not know which
# sqlite binding was actually imported.
DatabaseError = sqlite.DatabaseError
class AbstractSQLDatabase (object):
    """
    A generic SQL database.

    Lazily opens (and, when needed, creates or upgrades) a single sqlite
    database file on first access via L{_db}.  Subclasses supply the schema
    version/type and the concrete data tables.
    """

    def __init__(self, dbpath, persistent, autocommit=False):
        """
        @param dbpath: the path where the db file is stored.
        @type dbpath: str
        @param persistent: C{True} if the data in the DB must be preserved during upgrades,
            C{False} if the DB data can be re-created from an external source.
        @type persistent: bool
        @param autocommit: C{True} if auto-commit mode is desired, C{False} otherwise
        @type autocommit: bool
        """
        self.dbpath = dbpath
        self.persistent = persistent
        self.autocommit = autocommit

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.dbpath)

    def _db_version(self):
        """
        @return: the schema version assigned to this index.
        """
        raise NotImplementedError

    def _db_type(self):
        """
        @return: the collection type assigned to this index.
        """
        raise NotImplementedError

    def _db(self):
        """
        Access the underlying database, opening it on first use and
        re-creating or upgrading it when the stored schema version/type
        does not match this class's.

        @return: a DB-API connection object for this index's underlying data store.
        """
        if not hasattr(self, "_db_connection"):
            db_filename = self.dbpath
            try:
                if self.autocommit:
                    # isolation_level=None puts sqlite into autocommit mode.
                    self._db_connection = sqlite.connect(db_filename, isolation_level=None)
                else:
                    self._db_connection = sqlite.connect(db_filename)
            except DatabaseError:
                raise DatabaseError("Unable to open database %s" % (self.dbpath,))
            q = self._db_connection.cursor()
            #
            # Set Journal mode to PERSIST to avoid constant unlink calls
            #
            q.execute("PRAGMA journal_mode = PERSIST")
            #
            # Set up the schema
            #
            try:
                # Create CALDAV table if needed
                if self._test_schema_table(q):
                    version, dbtype = self._get_schema_version(q)
                    if (version != self._db_version()) or (dbtype != self._db_type()):
                        # Clean-up first: release the cursor and connection
                        # before deleting/upgrading the file; we then recurse
                        # into _db()/_db_upgrade() to reopen.
                        q.close()
                        q = None
                        self._db_connection.close()
                        del(self._db_connection)

                        if dbtype != self._db_type():
                            log.error(
                                "Database {f} has different type ({t1} vs. {t2})",
                                f=db_filename, t1=dbtype, t2=self._db_type(),
                            )

                            # Delete this index and start over
                            os.remove(db_filename)
                            return self._db()

                        if version != self._db_version():
                            log.error(
                                "Database {f} has different schema (v.{v1} vs. v.{v2})",
                                f=db_filename, v1=version, v2=self._db_version(),
                            )

                            # Upgrade the DB
                            return self._db_upgrade(version)

                else:
                    self._db_init(db_filename, q)
                self._db_connection.commit()
            finally:
                # q is None on the clean-up paths above.
                if q is not None:
                    q.close()
        return self._db_connection

    def _test_schema_table(self, q):
        # Truthy when the CALDAV bookkeeping table already exists.
        q.execute("""
            select (1) from SQLITE_MASTER
             where TYPE = 'table' and NAME = 'CALDAV'
            """)
        return q.fetchone()

    def _get_schema_version(self, q):
        # Read the stored schema version and collection type from the
        # CALDAV bookkeeping table; either may come back as None.
        q.execute(
            """
            select VALUE from CALDAV
             where KEY = 'SCHEMA_VERSION'
            """)
        version = q.fetchone()

        if version is not None:
            version = version[0]

        q.execute(
            """
            select VALUE from CALDAV
             where KEY = 'TYPE'
            """)
        dbtype = q.fetchone()

        if dbtype is not None:
            dbtype = dbtype[0]

        return version, dbtype

    def _db_init(self, db_filename, q):
        """
        Initialise the underlying database tables.
        @param db_filename: the file name of the index database.
        @param q: a database cursor to use.
        """
        log.info("Initializing database {f}", f=db_filename)

        # We need an exclusive lock here as we are making a big change to the database and we don't
        # want other processes to get stomped on or stomp on us.
        old_isolation = self._db_connection.isolation_level
        self._db_connection.isolation_level = None

        try:
            q.execute("begin exclusive transaction")

            # We re-check whether the schema table is present again AFTER we've got an exclusive
            # lock as some other server process may have snuck in and already created it
            # before we got the lock, or whilst we were waiting for it.
            if not self._test_schema_table(q):
                self._db_init_schema_table(q)
                self._db_init_data_tables(q)
                self._db_recreate(False)

            q.execute("commit")
        except DatabaseError:
            # NOTE(review): errors are deliberately swallowed here
            # (another process may have created the schema concurrently);
            # confirm this is intended before changing it.
            pass

        self._db_connection.isolation_level = old_isolation

    def _db_init_schema_table(self, q):
        """
        Initialise the underlying database tables.
        @param db_filename: the file name of the index database.
        @param q: a database cursor to use.
        """
        #
        # CALDAV table keeps track of our schema version and type
        #
        q.execute(
            """
            create table CALDAV (
                KEY text unique, VALUE text unique
            )
            """
        )
        q.execute(
            """
            insert into CALDAV (KEY, VALUE)
            values ('SCHEMA_VERSION', :1)
            """, [self._db_version()]
        )
        q.execute(
            """
            insert into CALDAV (KEY, VALUE)
            values ('TYPE', :1)
            """, [self._db_type()]
        )

    def _db_init_data_tables(self, q):
        """
        Initialise the underlying database tables.
        @param db_filename: the file name of the index database.
        @param q: a database cursor to use.
        """
        raise NotImplementedError

    def _db_recreate(self, do_commit=True):
        """
        Recreate the database tables.
        """
        # Always commit at the end of this method as we have an open transaction from previous methods.
        if do_commit:
            self._db_commit()

    def _db_can_upgrade(self, old_version):
        # Only persistent databases support in-place upgrades; others are
        # simply removed and re-created.
        return self.persistent

    def _db_upgrade(self, old_version):
        """
        Upgrade the database tables.
        """
        if self._db_can_upgrade(old_version):
            self._db_connection = sqlite.connect(self.dbpath, isolation_level=None)
            q = self._db_connection.cursor()
            q.execute("begin exclusive transaction")

            # We re-check whether the schema version again AFTER we've got an exclusive
            # lock as some other server process may have snuck in and already upgraded it
            # before we got the lock, or whilst we were waiting for it.
            version, _ignore_dbtype = self._get_schema_version(q)

            if version != self._db_version():
                self._db_upgrade_data_tables(q, old_version)
                self._db_upgrade_schema(q)

            q.execute("commit")
            self._db_close()
        else:
            # Non-persistent DB's by default can be removed and re-created. However, for simple
            # DB upgrades they SHOULD override this method and handle those for better performance.
            os.remove(self.dbpath)
        # Re-open (and, if removed, re-create) via the normal path.
        return self._db()

    def _db_upgrade_data_tables(self, q, old_version):
        """
        Upgrade the data from an older version of the DB.
        """
        # Persistent DB's MUST override this method and do a proper upgrade. Their data
        # cannot be thrown away.
        raise NotImplementedError("Persistent databases MUST support an upgrade method.")

    def _db_upgrade_schema(self, q):
        """
        Upgrade the stored schema version to the current one.
        """
        q.execute(
            """
            insert or replace into CALDAV (KEY, VALUE)
            values ('SCHEMA_VERSION', :1)
            """, [self._db_version()]
        )

    def _db_close(self):
        # Close and forget the cached connection, if any.
        if hasattr(self, "_db_connection"):
            self._db_connection.close()
            del self._db_connection

    def _db_values_for_sql(self, sql, *query_params):
        """
        Execute an SQL query and obtain the resulting values.
        @param sql: the SQL query to execute.
        @param query_params: parameters to C{sql}.
        @return: an iterable of values in the first column of each row
            resulting from executing C{sql} with C{query_params}.
        @raise AssertionError: if the query yields multiple columns.
        """
        return (row[0] for row in self._db_execute(sql, *query_params))

    def _db_value_for_sql(self, sql, *query_params):
        """
        Execute an SQL query and obtain a single value.
        @param sql: the SQL query to execute.
        @param query_params: parameters to C{sql}.
        @return: the value resulting from the executing C{sql} with
            C{query_params}.
        @raise AssertionError: if the query yields multiple rows or columns.
        """
        value = None
        for row in self._db_values_for_sql(sql, *query_params):
            assert value is None, "Multiple values in DB for %s %s" % (sql, query_params)
            value = row
        return value

    def _db_execute(self, sql, *query_params):
        """
        Execute an SQL query and obtain the resulting values.
        @param sql: the SQL query to execute.
        @param query_params: parameters to C{sql}.
        @return: an iterable of tuples for each row resulting from executing
            C{sql} with C{query_params}.
        """
        q = self._db().cursor()
        try:
            q.execute(sql, query_params)
            # Expose the last inserted rowid for callers doing inserts.
            self.lastrowid = q.lastrowid
            return q.fetchall()
        except DatabaseError:
            log.error("Exception while executing SQL on DB {s!r}: {q!r} {p!r}", s=self, q=sql, p=query_params)
            raise
        finally:
            q.close()

    def _db_commit(self):
        self._db_connection.commit()

    def _db_rollback(self):
        self._db_connection.rollback()
| |
from fluff.filters import ANDFilter, ORFilter, NOTFilter, CustomFilter
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.groups.models import Group
from corehq.fluff.calculators.xform import IN_MULTISELECT, IntegerPropertyReference
from couchforms.models import XFormInstance
from .filters import ALL_CVSU_GROUP
import fluff
from corehq.fluff.calculators import xform as xcalc
from corehq.fluff.calculators import case as ccalc
from fluff.models import SimpleCalculator
# XForm namespaces for the CVSU forms these indicators consume.
REPORT_INCIDENT_XMLNS = 'http://openrosa.org/formdesigner/A12E46B1-7ED8-4DE3-B7BB-358219CC6994'
FOLLOWUP_FORM_XMLNS = 'http://openrosa.org/formdesigner/9457DE46-E640-4F6E-AD9A-F9AC9FDA35E6'
IGA_FORM_XMLNS = 'http://openrosa.org/formdesigner/B4BAF20B-4337-409D-A446-FD4A0C8D5A9A'
OUTREACH_FORM_XMLNS = 'http://openrosa.org/formdesigner/B5C415BB-456B-49BE-A7AF-C5E7C9669E34'


def get_user_id(form):
    """Return the id of the user who submitted *form* (from its metadata)."""
    # def instead of a lambda assignment (PEP 8 E731); same callable name.
    return form.metadata.userID
@memoized
def get_group_id(form):
    """Return the submitting user's first group that is not the all-CVSU
    group, or None when there is no such group."""
    for candidate in Group.by_user(get_user_id(form), wrap=False):
        if candidate != ALL_CVSU_GROUP:
            return candidate
    return None
def date_reported(form):
    """Date the incident was reported, defaulting to the submission time."""
    fallback = form.received_on
    return form.form.get('date_reported', fallback)
def date_provided(form):
    """Date mediation was provided, defaulting to the submission time."""
    fallback = form.received_on
    return form.form.get('mediation_provided_date', fallback)
def date_mediated(form):
    """Mediation date, falling back to the submission time.

    Some forms carry an empty string for the date; treat that as missing.
    """
    mediation_date = form.form.get('mediation_date', form.received_on)
    return mediation_date if mediation_date else form.received_on
def date_reported_mediated(form):
    """Mediation date for follow-up forms, report date for everything else."""
    if form.xmlns == FOLLOWUP_FORM_XMLNS:
        return date_mediated(form)
    return date_reported(form)
def date_reported_provided_mediated(form):
    """Pick the relevant date for resolution indicators.

    Follow-up forms use the mediation date; incident reports that were
    immediately referred (or had 'other' actions) use the report date;
    everything else uses the date mediation was provided.
    """
    if form.xmlns == FOLLOWUP_FORM_XMLNS:
        return date_mediated(form)
    immediate_or_other = ORFilter(
        [filter_action('immediate_referral'), filter_action('actions_other')])
    if immediate_or_other.filter(form):
        return date_reported(form)
    return date_provided(form)
def get_age(form):
    """Victim age recorded on the form, or None when absent."""
    return form.form.get('victim_age')
def get_sex(form):
    """Victim sex recorded on the form, or None when absent."""
    return form.form.get('victim_sex')
@memoized
def filter_action(action):
    """Filter matching incident reports whose resolution action is *action*."""
    criteria = dict(
        xmlns=REPORT_INCIDENT_XMLNS,
        property_path='form/actions_to_resolve_case',
        property_value=action,
    )
    return xcalc.FormPropertyFilter(**criteria)
@memoized
def filter_service(service):
    """Filter matching incident reports whose immediate services include
    *service* (multi-select match)."""
    criteria = dict(
        xmlns=REPORT_INCIDENT_XMLNS,
        operator=IN_MULTISELECT,
        property_path='form/immediate_services',
        property_value=service,
    )
    return xcalc.FormPropertyFilter(**criteria)
@memoized
def filter_outcome(outcome, xmlns=None):
    """Filter matching forms whose mediation outcome is *outcome*.

    With an explicit *xmlns* only that form type is matched; otherwise
    both the incident-report and follow-up forms are considered.
    """
    def single(namespace):
        # One FormPropertyFilter for a specific form namespace.
        return xcalc.FormPropertyFilter(
            xmlns=namespace,
            property_path='form/mediation_outcome',
            property_value=outcome,
        )

    if xmlns:
        return single(xmlns)
    return ORFilter([single(REPORT_INCIDENT_XMLNS),
                     single(FOLLOWUP_FORM_XMLNS)])
@memoized
def filter_immediate_referral_org(org):
    """Filter matching incident reports immediately referred to *org*."""
    criteria = dict(
        xmlns=REPORT_INCIDENT_XMLNS,
        operator=IN_MULTISELECT,
        property_path='form/immediate_referral_organisation',
        property_value=org,
    )
    return xcalc.FormPropertyFilter(**criteria)
@memoized
def filter_referral_org(org):
    """Filter matching either form type whose mediation referral is *org*."""
    return ORFilter([
        xcalc.FormPropertyFilter(
            xmlns=namespace,
            operator=IN_MULTISELECT,
            property_path='form/mediation_referral',
            property_value=org,
        )
        for namespace in (REPORT_INCIDENT_XMLNS, FOLLOWUP_FORM_XMLNS)
    ])
@memoized
def filter_abuse(category):
    """Filter matching incident reports whose abuse categories include
    *category* (multi-select match)."""
    criteria = dict(
        xmlns=REPORT_INCIDENT_XMLNS,
        operator=IN_MULTISELECT,
        property_path='form/abuse_category',
        property_value=category,
    )
    return xcalc.FormPropertyFilter(**criteria)
class UnicefMalawiFluff(fluff.IndicatorDocument):
    """Fluff indicator document for the UNICEF Malawi CVSU project.

    Computes per-form indicators over XFormInstance submissions in the
    'cvsulive' domain, grouped by domain, user, group, victim age and sex.
    """

    document_class = XFormInstance
    # Skip registration and device-report forms, demo-user submissions, and
    # forms whose submitter has no CVSU group.
    document_filter = ANDFilter([
        NOTFilter(xcalc.FormPropertyFilter(xmlns='http://openrosa.org/user-registration')),
        NOTFilter(xcalc.FormPropertyFilter(xmlns='http://openrosa.org/user/registration')),
        NOTFilter(xcalc.FormPropertyFilter(xmlns='http://code.javarosa.org/devicereport')),
        CustomFilter(lambda f: get_user_id(f) != 'demo_user'),
        CustomFilter(lambda f: get_group_id(f)),
    ])

    domains = ('cvsulive',)
    group_by = (
        'domain',
        fluff.AttributeGetter('user_id', get_user_id),
        fluff.AttributeGetter('group_id', get_group_id),
        fluff.AttributeGetter('age', get_age),
        fluff.AttributeGetter('sex', get_sex),
    )
    group_by_type_map = {
        'age': fluff.TYPE_INTEGER
    }

    # ---------------------------------------------------------------------
    # incident resolution
    # ---------------------------------------------------------------------

    # Resolved either via mediation on the incident report itself, or on a
    # follow-up form.
    resolution_resolved_at_cvsu = SimpleCalculator(
        date_provider=date_provided,
        filter=ORFilter([
            ANDFilter([filter_action('mediation_provided'), filter_outcome('resolved', REPORT_INCIDENT_XMLNS)]),
            filter_outcome('resolved', FOLLOWUP_FORM_XMLNS)
        ])
    )
    resolution_unresolved = SimpleCalculator(
        date_provider=date_provided,
        filter=ORFilter([
            ANDFilter([filter_action('mediation_provided'), filter_outcome('unresolved', REPORT_INCIDENT_XMLNS)]),
            filter_outcome('unresolved', FOLLOWUP_FORM_XMLNS)
        ])
    )
    resolution_case_withdrawn = SimpleCalculator(
        date_provider=date_provided,
        filter=ORFilter([
            ANDFilter([filter_action('mediation_provided'), filter_outcome('case_withdrawn', REPORT_INCIDENT_XMLNS)]),
            filter_outcome('case_withdrawn', FOLLOWUP_FORM_XMLNS)
        ])
    )
    # Referrals: either an immediate referral on the incident report, or a
    # referral recorded as the mediation outcome.
    resolution_referred_ta = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            ANDFilter([filter_action('immediate_referral'), filter_immediate_referral_org('ta')]),
            ANDFilter([filter_outcome('mediation_outcome_referred'), filter_referral_org('ta')])
        ])
    )
    resolution_referral_ta_court = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            ANDFilter([filter_action('immediate_referral'), filter_immediate_referral_org('ta_court')]),
            ANDFilter([filter_outcome('mediation_outcome_referred'), filter_referral_org('ta_court')])
        ])
    )
    # NOTE(review): the immediate-referral org values ('police', ...) differ
    # from the mediation-referral values ('med_ref_police', ...) below —
    # presumably matching the two forms' answer sets; confirm against the
    # form definitions.
    resolution_referral_police = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            ANDFilter([filter_action('immediate_referral'), filter_immediate_referral_org('police')]),
            ANDFilter([filter_outcome('mediation_outcome_referred'), filter_referral_org('med_ref_police')])
        ])
    )
    resolution_referral_social_welfare = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            ANDFilter([filter_action('immediate_referral'), filter_immediate_referral_org('social_welfare')]),
            ANDFilter([filter_outcome('mediation_outcome_referred'), filter_referral_org('med_ref_social_welfare')])
        ])
    )
    resolution_referral_ngo = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            ANDFilter([filter_action('immediate_referral'), filter_immediate_referral_org('ngo')]),
            ANDFilter([filter_outcome('mediation_outcome_referred'), filter_referral_org('med_ref_ngo')])
        ])
    )
    resolution_referral_other = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            ANDFilter([filter_action('immediate_referral'), filter_immediate_referral_org('referral_other')]),
            ANDFilter([filter_outcome('mediation_outcome_referred'), filter_referral_org('med_ref_other')])
        ])
    )
    resolution_other = SimpleCalculator(
        date_provider=date_reported_provided_mediated,
        filter=ORFilter([
            filter_action('actions_other'),
            filter_outcome('other_mediation_outcome', REPORT_INCIDENT_XMLNS),
            filter_outcome('other', FOLLOWUP_FORM_XMLNS)
        ]),
    )
    # Union of all resolution indicators above (excluding case_withdrawn).
    resolution_total = xcalc.or_calc([
        resolution_resolved_at_cvsu,
        resolution_referred_ta,
        resolution_referral_ta_court,
        resolution_referral_police,
        resolution_referral_social_welfare,
        resolution_referral_ngo,
        resolution_referral_other,
        resolution_unresolved,
        resolution_other],
        date_provider=date_reported_provided_mediated,
    )

    # ---------------------------------------------------------------------
    # services
    # ---------------------------------------------------------------------
    service_referral = SimpleCalculator(
        date_provider=date_reported_mediated,
        filter=ORFilter([
            filter_action('immediate_referral'),
            # NOTE(review): 'referral_hostpital' is a misspelled answer
            # value — presumably matching the form definition; do not
            # "fix" without checking the form.
            filter_service('referral_hostpital'),
            filter_outcome('mediation_outcome_referred')
        ])
    )
    service_mediation = SimpleCalculator(
        date_provider=date_reported,
        filter=ORFilter([filter_action('mediation_scheduled'), filter_action('mediation_provided')])
    )
    service_counselling = SimpleCalculator(
        date_provider=date_reported,
        # Both spellings occur in submitted data.
        filter=ORFilter([filter_service('counselling'), filter_service('couselling')])
    )
    service_psychosocial_support = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_service('psychosocial_support')
    )
    service_first_aid = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_service('first_aid')
    )
    service_shelter = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_service('shelter')
    )
    service_other = SimpleCalculator(
        date_provider=date_reported,
        filter=ORFilter([filter_action('actions_other'), filter_service('services_other')])
    )
    # Union of all service indicators above.
    service_total = xcalc.or_calc([
        service_referral,
        service_mediation,
        service_counselling,
        service_psychosocial_support,
        service_first_aid,
        service_shelter,
        service_other],
        date_provider=date_reported_mediated,
    )

    # ---------------------------------------------------------------------
    # outreach
    # ---------------------------------------------------------------------
    incidents = SimpleCalculator(
        date_provider=date_reported,
        filter=xcalc.FormPropertyFilter(xmlns=REPORT_INCIDENT_XMLNS)
    )
    outreach = SimpleCalculator(
        date_provider=lambda form: form.form.get('date', form.received_on),
        filter=xcalc.FormPropertyFilter(xmlns=OUTREACH_FORM_XMLNS)
    )
    iga = SimpleCalculator(
        date_provider=lambda form: form.form.get('start_date', form.received_on),
        filter=xcalc.FormPropertyFilter(xmlns=IGA_FORM_XMLNS)
    )

    # ---------------------------------------------------------------------
    # abuse
    # ---------------------------------------------------------------------
    abuse_children_in_household = SimpleCalculator(
        date_provider=date_reported,
        filter=xcalc.FormPropertyFilter(xmlns=REPORT_INCIDENT_XMLNS),
        indicator_calculator=IntegerPropertyReference(
            'form/nr_children_in_household', transform=lambda x: 0 if x == 999 else x)  # unknown value = 999
    )
    abuse_children_abused = SimpleCalculator(
        date_provider=date_reported,
        filter=xcalc.FormPropertyFilter(xmlns=REPORT_INCIDENT_XMLNS),
        indicator_calculator=IntegerPropertyReference(
            'form/no_children_abused', transform=lambda x: 0 if x == 999 else x)  # unknown value = 999
    )
    abuse_category_physical = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_abuse('physical')
    )
    abuse_category_sexual = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_abuse('sexual')
    )
    abuse_category_psychological = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_abuse('psychological')
    )
    abuse_category_exploitation = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_abuse('exploitation')
    )
    abuse_category_neglect = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_abuse('neglect')
    )
    abuse_category_other = SimpleCalculator(
        date_provider=date_reported,
        filter=filter_abuse('abuse_other')
    )
    # Union of all abuse-category indicators above.
    abuse_category_total = xcalc.or_calc([
        abuse_category_physical,
        abuse_category_sexual,
        abuse_category_psychological,
        abuse_category_exploitation,
        abuse_category_neglect,
        abuse_category_other],
        date_provider=date_reported
    )

    class Meta:
        # Django app this fluff document belongs to.
        app_label = 'cvsu'
def case_date_reported(case):
    """Date a victim case was reported."""
    return case.date_reported
def filter_case_outcome(outcome):
    """Filter matching 'victim' cases whose mediation outcome is *outcome*."""
    criteria = dict(
        type='victim',
        property_name='mediation_outcome',
        property_value=outcome,
    )
    return ccalc.CasePropertyFilter(**criteria)
# Pillow that keeps the UnicefMalawiFluff indicators up to date.
UnicefMalawiFluffPillow = UnicefMalawiFluff.pillow()
| |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for test_dispatcher.py."""
# pylint: disable=R0201
# pylint: disable=W0212
import os
import sys
import unittest
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.utils import watchdog_timer
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_collection
from pylib.base import test_dispatcher
sys.path.append(
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=import-error
class TestException(Exception):
    """Synthetic failure raised by MockRunnerException to exercise dispatch."""
def _MockDevice(serial):
    """Build a MagicMock standing in for a DeviceUtils with the given serial."""
    device = mock.MagicMock(spec=device_utils.DeviceUtils)
    device.__str__.return_value = serial
    device.adb = mock.MagicMock(spec=adb_wrapper.AdbWrapper)
    device.adb.GetDeviceSerial = mock.MagicMock(return_value=serial)
    device.IsOnline = mock.MagicMock(return_value=True)
    return device
class MockRunner(object):
    """A mock TestRunner that passes every test and counts lifecycle calls."""

    def __init__(self, device=None, shard_index=0):
        self.device = device or _MockDevice('0')
        self.device_serial = self.device.adb.GetDeviceSerial()
        self.shard_index = shard_index
        self.setups = 0
        self.teardowns = 0

    def RunTest(self, test):
        """Report *test* as passing; no retry is requested."""
        run_results = base_test_result.TestRunResults()
        run_results.AddResult(base_test_result.BaseTestResult(
            test, base_test_result.ResultType.PASS))
        return (run_results, None)

    def SetUp(self):
        self.setups += 1

    def TearDown(self):
        self.teardowns += 1
class MockRunnerFail(MockRunner):
  """A mock TestRunner that fails every test and reports it for retry."""

  def RunTest(self, test):
    run_results = base_test_result.TestRunResults()
    failed = base_test_result.BaseTestResult(
        test, base_test_result.ResultType.FAIL)
    run_results.AddResult(failed)
    return (run_results, test)
class MockRunnerFailTwice(MockRunner):
  """A mock TestRunner that fails its first two RunTest calls, then passes."""

  def __init__(self, device=None, shard_index=0):
    super(MockRunnerFailTwice, self).__init__(device, shard_index)
    self._fails = 0

  def RunTest(self, test):
    self._fails += 1
    still_failing = self._fails <= 2
    result_type = (base_test_result.ResultType.FAIL if still_failing
                   else base_test_result.ResultType.PASS)
    run_results = base_test_result.TestRunResults()
    run_results.AddResult(base_test_result.BaseTestResult(test, result_type))
    # A failing run returns the test so the dispatcher can retry it.
    return (run_results, test if still_failing else None)
class MockRunnerException(MockRunner):
  """A mock TestRunner whose RunTest always raises TestException."""

  def RunTest(self, test):
    raise TestException
class TestFunctions(unittest.TestCase):
  """Tests test_dispatcher._RunTestsFromQueue."""

  @staticmethod
  def _RunTests(mock_runner, tests):
    """Runs |tests| through _RunTestsFromQueue and collates the results."""
    results = []
    tests = test_collection.TestCollection(
        [test_dispatcher._Test(t) for t in tests])
    test_dispatcher._RunTestsFromQueue(mock_runner, tests, results,
                                       watchdog_timer.WatchdogTimer(None), 2)
    run_results = base_test_result.TestRunResults()
    for r in results:
      run_results.AddTestRunResults(r)
    return run_results

  def testRunTestsFromQueue(self):
    results = TestFunctions._RunTests(MockRunner(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 2)
    self.assertEqual(len(results.GetNotPass()), 0)

  def testRunTestsFromQueueRetry(self):
    results = TestFunctions._RunTests(MockRunnerFail(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 2)

  def testRunTestsFromQueueFailTwice(self):
    results = TestFunctions._RunTests(MockRunnerFailTwice(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 2)
    self.assertEqual(len(results.GetNotPass()), 0)

  def testSetUp(self):
    runners = []
    counter = test_dispatcher._ThreadSafeCounter()
    test_dispatcher._SetUp(MockRunner, _MockDevice('0'), runners, counter)
    self.assertEqual(len(runners), 1)
    self.assertEqual(runners[0].setups, 1)

  def testThreadSafeCounter(self):
    counter = test_dispatcher._ThreadSafeCounter()
    # range() instead of xrange(): behaves identically for this loop on
    # Python 2 and keeps the file importable on Python 3, where xrange
    # was removed.
    for i in range(5):
      self.assertEqual(counter.GetAndIncrement(), i)

  def testApplyMaxPerRun(self):
    self.assertEqual(
        ['A:B', 'C:D', 'E', 'F:G', 'H:I'],
        test_dispatcher.ApplyMaxPerRun(['A:B', 'C:D:E', 'F:G:H:I'], 2))
class TestThreadGroupFunctions(unittest.TestCase):
  """Tests test_dispatcher._RunAllTests and test_dispatcher._CreateRunners."""

  def setUp(self):
    self.tests = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    # A single collection shared by every runner, as in a sharded run.
    shared_test_collection = test_collection.TestCollection(
        [test_dispatcher._Test(t) for t in self.tests])
    self.test_collection_factory = lambda: shared_test_collection

  def testCreate(self):
    """_CreateRunners makes one set-up runner per device, distinct shards."""
    runners = test_dispatcher._CreateRunners(
        MockRunner, [_MockDevice('0'), _MockDevice('1')])
    for runner in runners:
      self.assertEqual(runner.setups, 1)
    self.assertEqual(set([r.device_serial for r in runners]),
                     set(['0', '1']))
    self.assertEqual(set([r.shard_index for r in runners]),
                     set([0, 1]))

  def testRun(self):
    """All shared tests pass across two runners; exit code is 0."""
    runners = [MockRunner(_MockDevice('0')), MockRunner(_MockDevice('1'))]
    results, exit_code = test_dispatcher._RunAllTests(
        runners, self.test_collection_factory, 0)
    self.assertEqual(len(results.GetPass()), len(self.tests))
    self.assertEqual(exit_code, 0)

  def testTearDown(self):
    """_TearDownRunners tears each runner down exactly once."""
    runners = [MockRunner(_MockDevice('0')), MockRunner(_MockDevice('1'))]
    test_dispatcher._TearDownRunners(runners)
    for runner in runners:
      self.assertEqual(runner.teardowns, 1)

  def testRetry(self):
    """Persistent failures surface in results and yield the error exit code."""
    runners = test_dispatcher._CreateRunners(
        MockRunnerFail, [_MockDevice('0'), _MockDevice('1')])
    results, exit_code = test_dispatcher._RunAllTests(
        runners, self.test_collection_factory, 0)
    self.assertEqual(len(results.GetFail()), len(self.tests))
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

  def testReraise(self):
    """Exceptions raised inside a runner propagate out of _RunAllTests."""
    runners = test_dispatcher._CreateRunners(
        MockRunnerException, [_MockDevice('0'), _MockDevice('1')])
    with self.assertRaises(TestException):
      test_dispatcher._RunAllTests(runners, self.test_collection_factory, 0)
class TestShard(unittest.TestCase):
  """Tests test_dispatcher.RunTests with sharding."""

  @staticmethod
  def _RunShard(runner_factory):
    """Runs three tests sharded across two mock devices."""
    return test_dispatcher.RunTests(
        ['a', 'b', 'c'], runner_factory, [_MockDevice('0'), _MockDevice('1')],
        shard=True)

  def testShard(self):
    results, exit_code = TestShard._RunShard(MockRunner)
    self.assertEqual(len(results.GetPass()), 3)
    self.assertEqual(exit_code, 0)

  def testFailing(self):
    results, exit_code = TestShard._RunShard(MockRunnerFail)
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 3)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

  def testNoTests(self):
    # An empty test list is reported as an error by RunTests.
    results, exit_code = test_dispatcher.RunTests(
        [], MockRunner, [_MockDevice('0'), _MockDevice('1')], shard=True)
    self.assertEqual(len(results.GetAll()), 0)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
class TestReplicate(unittest.TestCase):
  """Tests test_dispatcher.RunTests with replication."""

  @staticmethod
  def _RunReplicate(runner_factory):
    """Runs three tests replicated onto each of two mock devices."""
    return test_dispatcher.RunTests(
        ['a', 'b', 'c'], runner_factory, [_MockDevice('0'), _MockDevice('1')],
        shard=False)

  def testReplicate(self):
    results, exit_code = TestReplicate._RunReplicate(MockRunner)
    # We expect 6 results since each test should have been run on every device
    self.assertEqual(len(results.GetPass()), 6)
    self.assertEqual(exit_code, 0)

  def testFailing(self):
    results, exit_code = TestReplicate._RunReplicate(MockRunnerFail)
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 6)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

  def testNoTests(self):
    # An empty test list is reported as an error by RunTests.
    results, exit_code = test_dispatcher.RunTests(
        [], MockRunner, [_MockDevice('0'), _MockDevice('1')], shard=False)
    self.assertEqual(len(results.GetAll()), 0)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
# Allows running this suite directly: `python test_dispatcher_unittest.py`.
if __name__ == '__main__':
  unittest.main()
| |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_configured_feature_column_info1014_all_of
except ImportError:
bt_configured_feature_column_info1014_all_of = sys.modules[
"onshape_client.oas.models.bt_configured_feature_column_info1014_all_of"
]
try:
from onshape_client.oas.models import btm_parameter_reference_blob3281
except ImportError:
btm_parameter_reference_blob3281 = sys.modules[
"onshape_client.oas.models.btm_parameter_reference_blob3281"
]
class BTMParameterReferenceImage2014(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes on this model.
    allowed_values = {}

    # No length/range/regex validations on this model.
    validations = {}

    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "import_microversion": (str,),  # noqa: E501
            "node_id": (str,),  # noqa: E501
            "parameter_id": (str,),  # noqa: E501
            "element_id": (str,),  # noqa: E501
            "feature_script_type": (str,),  # noqa: E501
            "namespace": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This composed model declares no discriminator field.
        return None

    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "import_microversion": "importMicroversion",  # noqa: E501
        "node_id": "nodeId",  # noqa: E501
        "parameter_id": "parameterId",  # noqa: E501
        "element_id": "elementId",  # noqa: E501
        "feature_script_type": "featureScriptType",  # noqa: E501
        "namespace": "namespace",  # noqa: E501
    }

    # Internal bookkeeping attribute names set directly on the instance.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """btm_parameter_reference_image2014.BTMParameterReferenceImage2014 - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            import_microversion (str): [optional]  # noqa: E501
            node_id (str): [optional]  # noqa: E501
            parameter_id (str): [optional]  # noqa: E501
            element_id (str): [optional]  # noqa: E501
            feature_script_type (str): [optional]  # noqa: E501
            namespace (str): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        # This model has no required properties of its own; the dict exists
        # so the generated composed-schema plumbing below stays uniform.
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the provided values onto the composed (allOf) instances.
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        # NOTE(review): the allOf members reference FeatureColumnInfo1014AllOf
        # and ReferenceBlob3281 — generated from the spec; confirm against the
        # OpenAPI document rather than hand-editing.
        return {
            "anyOf": [],
            "allOf": [
                bt_configured_feature_column_info1014_all_of.BTConfiguredFeatureColumnInfo1014AllOf,
                btm_parameter_reference_blob3281.BTMParameterReferenceBlob3281,
            ],
            "oneOf": [],
        }
| |
# sql/visitors.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
    """Metaclass which assigns a `_compiler_dispatch` method to classes
    having a `__visit_name__` attribute.

    The _compiler_dispatch attribute becomes an instance method which
    looks approximately like the following::

        def _compiler_dispatch (self, visitor, **kw):
            '''Look for an attribute named "visit_" + self.__visit_name__
            on the visitor, and call it with the same kw params.'''
            visit_attr = 'visit_%s' % self.__visit_name__
            return getattr(visitor, visit_attr)(self, **kw)

    Classes having no __visit_name__ attribute will remain unaffected.
    """

    def __init__(cls, clsname, bases, clsdict):
        # The 'Visitable' base itself (and any class without a
        # __visit_name__) gets no dispatch method generated.
        if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
            super(VisitableType, cls).__init__(clsname, bases, clsdict)
            return
        _generate_dispatch(cls)
        super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
    """Return an optimized visit dispatch function for the cls
    for use by the compiler.

    Installs the generated function on ``cls`` as ``_compiler_dispatch``.
    Only acts when ``__visit_name__`` is declared directly on ``cls``
    (not merely inherited).
    """
    if '__visit_name__' in cls.__dict__:
        visit_name = cls.__visit_name__
        if isinstance(visit_name, str):
            # There is an optimization opportunity here because the
            # the string name of the class's __visit_name__ is known at
            # this early stage (import time) so it can be pre-constructed.
            getter = operator.attrgetter("visit_%s" % visit_name)

            def _compiler_dispatch(self, visitor, **kw):
                try:
                    meth = getter(visitor)
                except AttributeError:
                    # the visitor has no visit_<name> method for this element
                    raise exc.UnsupportedCompilationError(visitor, cls)
                else:
                    return meth(self, **kw)
        else:
            # The optimization opportunity is lost for this case because the
            # __visit_name__ is not yet a string. As a result, the visit
            # string has to be recalculated with each compilation.
            def _compiler_dispatch(self, visitor, **kw):
                visit_attr = 'visit_%s' % self.__visit_name__
                try:
                    meth = getattr(visitor, visit_attr)
                except AttributeError:
                    raise exc.UnsupportedCompilationError(visitor, cls)
                else:
                    return meth(self, **kw)

        _compiler_dispatch.__doc__ = \
            """Look for an attribute named "visit_" + self.__visit_name__
            on the visitor, and call it with the same kw params.
            """
        cls._compiler_dispatch = _compiler_dispatch
class Visitable(object, metaclass=VisitableType):
    """Base class for visitable objects, applies the
    ``VisitableType`` metaclass.

    Subclasses that declare ``__visit_name__`` receive a generated
    ``_compiler_dispatch`` method from the metaclass.
    """
class ClauseVisitor(object):
    """Base class for visitor objects which can traverse using
    the traverse() function.

    """

    # Options passed through to each element's get_children() during
    # traversal (see iterate()).
    __traverse_options__ = {}

    def traverse_single(self, obj, **kw):
        """Visit ``obj`` only (no traversal), using the first visitor in the
        chain that implements ``visit_<obj.__visit_name__>``."""
        for v in self._visitor_iterator:
            meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
            if meth:
                return meth(obj, **kw)

    def iterate(self, obj):
        """traverse the given expression structure, returning an iterator
        of all elements.

        """
        return iterate(obj, self.__traverse_options__)

    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        return traverse(obj, self.__traverse_options__, self._visitor_dict)

    @util.memoized_property
    def _visitor_dict(self):
        # Map of visit name -> bound visit_* method, computed once per
        # instance and cached by memoized_property.
        visitors = {}

        for name in dir(self):
            if name.startswith('visit_'):
                visitors[name[6:]] = getattr(self, name)
        return visitors

    @property
    def _visitor_iterator(self):
        """iterate through this visitor and each 'chained' visitor."""

        v = self
        while v:
            yield v
            v = getattr(v, '_next', None)

    def chain(self, visitor):
        """'chain' an additional ClauseVisitor onto this ClauseVisitor.

        the chained visitor will receive all visit events after this one.

        """
        tail = list(self._visitor_iterator)[-1]
        tail._next = visitor
        return self
class CloningVisitor(ClauseVisitor):
    """Base class for visitor objects which can traverse using
    the cloned_traverse() function.

    """

    def copy_and_process(self, list_):
        """Apply cloned traversal to the given list of elements, and return
        the new list.

        """
        return list(map(self.traverse, list_))

    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        return cloned_traverse(obj, self.__traverse_options__,
                               self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
    """Base class for visitor objects which can traverse using
    the replacement_traverse() function.

    """

    def replace(self, elem):
        """receive pre-copied elements during a cloning traversal.

        If the method returns a new element, the element is used
        instead of creating a simple copy of the element.  Traversal
        will halt on the newly returned element if it is re-encountered.
        """
        return None

    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        def replace(elem):
            # Ask each visitor in the chain; the first non-None wins.
            for chained_visitor in self._visitor_iterator:
                replacement = chained_visitor.replace(elem)
                if replacement is not None:
                    return replacement
            return None
        return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be breadth-first; ``opts`` is passed as
    keyword arguments to each element's ``get_children()``.
    """
    queue = deque([obj])
    while queue:
        node = queue.popleft()
        yield node
        queue.extend(node.get_children(**opts))
def iterate_depthfirst(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be depth-first; ``opts`` is passed as
    keyword arguments to each element's ``get_children()``.
    """
    pending = deque([obj])
    ordering = deque()
    while pending:
        node = pending.pop()
        ordering.appendleft(node)
        pending.extend(node.get_children(**opts))
    return iter(ordering)
def traverse_using(iterator, obj, visitors):
    """visit the given expression structure using the given iterator of
    objects.

    ``visitors`` maps a ``__visit_name__`` string to a callable; elements
    with no matching entry are skipped.  Returns ``obj`` unchanged.
    """
    for target in iterator:
        callback = visitors.get(target.__visit_name__)
        if callback:
            callback(target)
    return obj
def traverse(obj, opts, visitors):
    """traverse and visit the given expression structure using the default
    iterator.

    The default iterator is breadth-first (see iterate()).
    """
    return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
    """traverse and visit the given expression structure using the
    depth-first iterator.

    """
    return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
    """clone the given expression structure, allowing
    modifications by visitors.

    Elements listed in ``opts['stop_on']`` are returned as-is rather
    than cloned.
    """

    cloned = util.column_dict()
    stop_on = util.column_set(opts.get('stop_on', []))

    def clone(elem):
        if elem in stop_on:
            return elem
        else:
            # Memoize on id() so each element is cloned exactly once, even
            # when it appears at multiple points in the structure.
            if id(elem) not in cloned:
                cloned[id(elem)] = newelem = elem._clone()
                newelem._copy_internals(clone=clone)
                meth = visitors.get(newelem.__visit_name__, None)
                if meth:
                    meth(newelem)
            return cloned[id(elem)]

    if obj is not None:
        obj = clone(obj)
    return obj
def replacement_traverse(obj, opts, replace):
    """clone the given expression structure, allowing element
    replacement by a given replacement function.

    A non-None return from ``replace(elem)`` substitutes the element and
    halts descent into it on re-encounter; otherwise the element is cloned
    as in cloned_traverse().
    """

    cloned = util.column_dict()
    stop_on = util.column_set([id(x) for x in opts.get('stop_on', [])])

    def clone(elem, **kw):
        if id(elem) in stop_on or \
                'no_replacement_traverse' in elem._annotations:
            return elem
        else:
            newelem = replace(elem)
            if newelem is not None:
                # Don't descend into a user-provided replacement.
                stop_on.add(id(newelem))
                return newelem
            else:
                # NOTE(review): this memo keys on the element itself
                # (equality/hash based) whereas cloned_traverse keys on
                # id() — confirm the asymmetry is intentional.
                if elem not in cloned:
                    cloned[elem] = newelem = elem._clone()
                    newelem._copy_internals(clone=clone, **kw)
                return cloned[elem]

    if obj is not None:
        obj = clone(obj, **opts)
    return obj
| |
from unittest.mock import call
import pytest
from proxybroker import Proxy
from proxybroker.errors import BadResponseError, BadStatusError
from proxybroker.negotiators import _CONNECT_request
from .utils import future_iter
@pytest.fixture
def proxy(mocker):
    """A Proxy whose network methods (send/recv/connect) are mocked out."""
    proxy = Proxy('127.0.0.1', '80', timeout=0.1)
    mocker.patch.multiple(
        proxy, send=mocker.DEFAULT, recv=mocker.DEFAULT, connect=mocker.DEFAULT
    )
    yield proxy
    mocker.stopall()
@pytest.mark.parametrize(
    'ngtr,check_anon_lvl,use_full_path',
    [
        ('SOCKS5', False, False),
        ('SOCKS4', False, False),
        ('CONNECT:80', False, False),
        ('CONNECT:25', False, False),
        ('HTTPS', False, False),
        ('HTTP', True, True),
    ],
)
def test_base_attrs(proxy, ngtr, check_anon_lvl, use_full_path):
    """Each negotiator exposes its name and the expected flag values."""
    proxy.ngtr = ngtr
    assert proxy.ngtr.name == ngtr
    assert proxy.ngtr.check_anon_lvl is check_anon_lvl
    assert proxy.ngtr.use_full_path is use_full_path
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'ngtr,port,recv,expected',
    [
        (
            'SOCKS5',
            80,
            future_iter(
                b'\x05\x00', b'\x05\x00\x00\x01\xc0\xa8\x00\x18\xce\xdf'
            ),
            [
                call(b'\x05\x01\x00'),
                call(b'\x05\x01\x00\x01\x7f\x00\x00\x01\x00P'),
            ],
        ),
        (
            'SOCKS5',
            443,
            future_iter(b'\x05\x00', b'\x05\x00'),
            [
                call(b'\x05\x01\x00'),
                call(b'\x05\x01\x00\x01\x7f\x00\x00\x01\x01\xbb'),
            ],
        ),  # noqa
        (
            'SOCKS4',
            80,
            future_iter(b'\x00Z\x00\x00\x00\x00\x00\x00'),
            [call(b'\x04\x01\x00P\x7f\x00\x00\x01\x00')],
        ),
        (
            'SOCKS4',
            443,
            future_iter(b'\x00Z\x00\x00\x00\x00\x00\x00'),
            [call(b'\x04\x01\x01\xbb\x7f\x00\x00\x01\x00')],
        ),
    ],
)
async def test_socks_negotiate(proxy, ngtr, port, recv, expected):
    """Successful SOCKS negotiation sends the expected handshake bytes."""
    proxy.ngtr = ngtr
    proxy.send.side_effect = future_iter(None, None)
    proxy.recv.side_effect = recv
    await proxy.ngtr.negotiate(ip='127.0.0.1', port=port)
    last_msg = proxy.get_log()[-1][1]
    assert last_msg == 'Request is granted'
    assert proxy.send.call_args_list == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'ngtr,recv,expected',
    [
        # wrong response:
        ('SOCKS5', future_iter(b'\x05\xff'), [call(b'\x05\x01\x00')]),
        (
            'SOCKS4',
            future_iter(b'HTTP/1.1 400 Bad Request'),
            [call(b'\x04\x01\x00P\x7f\x00\x00\x01\x00')],
        ),  # noqa
        # failed to connect:
        (
            'SOCKS5',
            future_iter(b'\x05\x00', b'\x05\x05'),
            [
                call(b'\x05\x01\x00'),
                call(b'\x05\x01\x00\x01\x7f\x00\x00\x01\x00P'),
            ],
        ),  # noqa
        (
            'SOCKS4',
            future_iter(b'\x00['),
            [call(b'\x04\x01\x00P\x7f\x00\x00\x01\x00')],
        ),  # noqa
    ],
)
async def test_socks_negotiate_error(proxy, ngtr, recv, expected):
    """Malformed or refusing SOCKS replies raise BadResponseError."""
    proxy.ngtr = ngtr
    proxy.send.side_effect = future_iter(None, None)
    proxy.recv.side_effect = recv
    with pytest.raises(BadResponseError):
        await proxy.ngtr.negotiate(ip='127.0.0.1')
    assert proxy.send.call_args_list == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'ngtr,port,recv',
    [
        (
            'CONNECT:80',
            80,
            future_iter(b'HTTP/1.1 200 Connection established\r\n\r\n'),
        ),  # noqa
        (
            'CONNECT:25',
            25,
            future_iter(
                b'HTTP/1.1 200 Connection established\r\n\r\n',
                b'220 smtp2.test.com',
            ),
        ),  # noqa
        (
            'HTTPS',
            443,
            future_iter(b'HTTP/1.1 200 Connection established\r\n\r\n'),
        ),  # noqa
    ],
)
async def test_connect_negotiate(proxy, ngtr, port, recv):
    """Successful CONNECT negotiation sends exactly one CONNECT request."""
    host = 'test.com'
    proxy.ngtr = ngtr
    proxy.send.side_effect = future_iter(None)
    proxy.recv.side_effect = recv
    proxy.connect.side_effect = future_iter(None)
    await proxy.ngtr.negotiate(host=host)
    req = _CONNECT_request(host=host, port=port)
    assert proxy.send.call_args_list == [call(req)]
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'ngtr,recv',
    [
        ('CONNECT:80', future_iter(b'HTTP/1.1 400 Bad Request\r\n\r\n')),
        (
            'CONNECT:80',
            future_iter(
                b'<html>\r\n<head><title>400 Bad Request</title></head>\r\n'
            ),
        ),  # noqa
        ('CONNECT:25', future_iter(b'HTTP/1.1 400 Bad Request\r\n\r\n')),
        (
            'CONNECT:25',
            future_iter(
                b'<html>\r\n<head><title>400 Bad Request</title></head>\r\n'
            ),
        ),  # noqa
        ('CONNECT:25', future_iter(b'HTTP/1.1 200 OK\r\n\r\n', b'')),
        ('HTTPS', future_iter(b'HTTP/1.1 400 Bad Request\r\n\r\n')),
        (
            'HTTPS',
            future_iter(
                b'<html>\r\n<head><title>400 Bad Request</title></head>\r\n'
            ),
        ),  # noqa
    ],
)
async def test_connect_negotiate_error(proxy, ngtr, recv):
    """Non-200 or empty CONNECT replies raise BadStatusError."""
    host = 'test.com'
    proxy.ngtr = ngtr
    proxy.send.side_effect = future_iter(None)
    proxy.recv.side_effect = recv
    proxy.connect.side_effect = future_iter(None)
    with pytest.raises(BadStatusError):
        await proxy.ngtr.negotiate(host=host)
| |
import numpy as np
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.histogram import (
_build_histogram_naive,
_build_histogram,
_build_histogram_no_hessian,
_build_histogram_root_no_hessian,
_build_histogram_root,
_subtract_histograms
)
from sklearn.ensemble._hist_gradient_boosting.types import HISTOGRAM_DTYPE
from sklearn.ensemble._hist_gradient_boosting.types import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.types import X_BINNED_DTYPE
@pytest.mark.parametrize(
    'build_func', [_build_histogram_naive, _build_histogram])
def test_build_histogram(build_func):
    """Check counts and gradient/hessian sums of a histogram on tiny data."""
    binned_feature = np.array([0, 2, 0, 1, 2, 0, 2, 1], dtype=X_BINNED_DTYPE)

    # Small sample_indices (below unrolling threshold)
    ordered_gradients = np.array([0, 1, 3], dtype=G_H_DTYPE)
    ordered_hessians = np.array([1, 1, 2], dtype=G_H_DTYPE)

    sample_indices = np.array([0, 2, 3], dtype=np.uint32)
    hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
    build_func(0, sample_indices, binned_feature, ordered_gradients,
               ordered_hessians, hist)
    hist = hist[0]
    assert_array_equal(hist['count'], [2, 1, 0])
    assert_allclose(hist['sum_gradients'], [1, 3, 0])
    assert_allclose(hist['sum_hessians'], [2, 2, 0])

    # Larger sample_indices (above unrolling threshold)
    sample_indices = np.array([0, 2, 3, 6, 7], dtype=np.uint32)
    ordered_gradients = np.array([0, 1, 3, 0, 1], dtype=G_H_DTYPE)
    ordered_hessians = np.array([1, 1, 2, 1, 0], dtype=G_H_DTYPE)

    hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
    build_func(0, sample_indices, binned_feature, ordered_gradients,
               ordered_hessians, hist)
    hist = hist[0]
    assert_array_equal(hist['count'], [2, 2, 1])
    assert_allclose(hist['sum_gradients'], [1, 4, 0])
    assert_allclose(hist['sum_hessians'], [2, 2, 1])
def test_histogram_sample_order_independence():
    # Make sure the order of the samples has no impact on the histogram
    # computations
    rng = np.random.RandomState(42)
    n_sub_samples = 100
    n_samples = 1000
    n_bins = 256

    binned_feature = rng.randint(0, n_bins - 1, size=n_samples,
                                 dtype=X_BINNED_DTYPE)
    sample_indices = rng.choice(np.arange(n_samples, dtype=np.uint32),
                                n_sub_samples, replace=False)
    ordered_gradients = rng.randn(n_sub_samples).astype(G_H_DTYPE)
    hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram_no_hessian(0, sample_indices, binned_feature,
                                ordered_gradients, hist_gc)

    ordered_hessians = rng.exponential(size=n_sub_samples).astype(G_H_DTYPE)
    hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram(0, sample_indices, binned_feature,
                     ordered_gradients, ordered_hessians, hist_ghc)

    # Build the same histograms from a permuted view of the same samples.
    permutation = rng.permutation(n_sub_samples)
    hist_gc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram_no_hessian(0, sample_indices[permutation],
                                binned_feature, ordered_gradients[permutation],
                                hist_gc_perm)

    hist_ghc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram(0, sample_indices[permutation], binned_feature,
                     ordered_gradients[permutation],
                     ordered_hessians[permutation], hist_ghc_perm)

    hist_gc = hist_gc[0]
    hist_ghc = hist_ghc[0]
    hist_gc_perm = hist_gc_perm[0]
    hist_ghc_perm = hist_ghc_perm[0]

    assert_allclose(hist_gc['sum_gradients'], hist_gc_perm['sum_gradients'])
    assert_array_equal(hist_gc['count'], hist_gc_perm['count'])

    assert_allclose(hist_ghc['sum_gradients'], hist_ghc_perm['sum_gradients'])
    assert_allclose(hist_ghc['sum_hessians'], hist_ghc_perm['sum_hessians'])
    assert_array_equal(hist_ghc['count'], hist_ghc_perm['count'])
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_unrolled_equivalent_to_naive(constant_hessian):
    # Make sure the different unrolled histogram computations give the same
    # results as the naive one.
    rng = np.random.RandomState(42)
    n_samples = 10
    n_bins = 5
    sample_indices = np.arange(n_samples).astype(np.uint32)
    binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
    ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
    if constant_hessian:
        ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
    else:
        ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)

    hist_gc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_ghc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_naive = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)

    _build_histogram_root_no_hessian(0, binned_feature,
                                     ordered_gradients, hist_gc_root)
    _build_histogram_root(0, binned_feature, ordered_gradients,
                          ordered_hessians, hist_ghc_root)
    _build_histogram_no_hessian(0, sample_indices, binned_feature,
                                ordered_gradients, hist_gc)
    _build_histogram(0, sample_indices, binned_feature,
                     ordered_gradients, ordered_hessians, hist_ghc)
    _build_histogram_naive(0, sample_indices, binned_feature,
                           ordered_gradients, ordered_hessians, hist_naive)

    hist_naive = hist_naive[0]
    hist_gc_root = hist_gc_root[0]
    hist_ghc_root = hist_ghc_root[0]
    hist_gc = hist_gc[0]
    hist_ghc = hist_ghc[0]
    for hist in (hist_gc_root, hist_ghc_root, hist_gc, hist_ghc):
        assert_array_equal(hist['count'], hist_naive['count'])
        assert_allclose(hist['sum_gradients'], hist_naive['sum_gradients'])
    for hist in (hist_ghc_root, hist_ghc):
        assert_allclose(hist['sum_hessians'], hist_naive['sum_hessians'])
    # The no-hessian variants leave the hessian column untouched (all zeros).
    for hist in (hist_gc_root, hist_gc):
        assert_array_equal(hist['sum_hessians'], np.zeros(n_bins))
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_hist_subtraction(constant_hessian):
    # Make sure the histogram subtraction trick gives the same result as the
    # classical method.
    rng = np.random.RandomState(42)
    n_samples = 10
    n_bins = 5
    sample_indices = np.arange(n_samples).astype(np.uint32)
    binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
    ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
    if constant_hessian:
        ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
    else:
        ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)

    hist_parent = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    if constant_hessian:
        _build_histogram_no_hessian(0, sample_indices, binned_feature,
                                    ordered_gradients, hist_parent)
    else:
        _build_histogram(0, sample_indices, binned_feature,
                         ordered_gradients, ordered_hessians, hist_parent)

    # Random left/right split of the parent's samples.
    # Use the builtin `bool`: the `np.bool` alias was deprecated in
    # NumPy 1.20 and removed in NumPy 1.24.
    mask = rng.randint(0, 2, n_samples).astype(bool)

    sample_indices_left = sample_indices[mask]
    ordered_gradients_left = ordered_gradients[mask]
    ordered_hessians_left = ordered_hessians[mask]
    hist_left = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    if constant_hessian:
        _build_histogram_no_hessian(0, sample_indices_left,
                                    binned_feature, ordered_gradients_left,
                                    hist_left)
    else:
        _build_histogram(0, sample_indices_left, binned_feature,
                         ordered_gradients_left, ordered_hessians_left,
                         hist_left)

    sample_indices_right = sample_indices[~mask]
    ordered_gradients_right = ordered_gradients[~mask]
    ordered_hessians_right = ordered_hessians[~mask]
    hist_right = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    if constant_hessian:
        _build_histogram_no_hessian(0, sample_indices_right,
                                    binned_feature, ordered_gradients_right,
                                    hist_right)
    else:
        _build_histogram(0, sample_indices_right, binned_feature,
                         ordered_gradients_right, ordered_hessians_right,
                         hist_right)

    # sibling = parent - other_sibling must match the directly built ones.
    hist_left_sub = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_right_sub = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _subtract_histograms(0, n_bins, hist_parent, hist_right, hist_left_sub)
    _subtract_histograms(0, n_bins, hist_parent, hist_left, hist_right_sub)

    for key in ('count', 'sum_hessians', 'sum_gradients'):
        assert_allclose(hist_left[key], hist_left_sub[key], rtol=1e-6)
        assert_allclose(hist_right[key], hist_right_sub[key], rtol=1e-6)
| |
import sys
import time
import json
import importlib
import requests
import logging
from threading import Lock, Thread
from switchboard.device import RESTDevice
from switchboard.module import SwitchboardModule
from switchboard.utils import load_attribute
logging.getLogger('requests').setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
def prints(fs, val):
    """Emit *val* twice: once to the module logger at INFO level, and once
    through the caller-supplied callable *fs* (e.g. ``print`` or ``sys.exit``)."""
    logger.info(val)
    fs(val)
class EngineError(Exception):
    """Engine-level failure.

    The message is logged as a warning at construction time so that raising
    an EngineError always leaves a trace in the log, even if it is caught
    and swallowed upstream.
    """

    def __init__(self, msg=''):
        self.msg = msg
        logger.warning(msg)

    def __str__(self):
        return self.msg
class SwitchboardEngine(object):
    """Core engine: polls Switchboard clients for device values, evaluates
    the user modules on those values and publishes IO state snapshots via
    the websocket controller.

    The polling loop (``run``/``switchboard_loop``) and configuration
    actions coming from other threads synchronise on ``self.lock``.
    """

    def __init__(self, config, ws_ctrl):
        # Determines if the SwitchboardEngine logic is running or not
        self.running = False
        # Set to true if SwitchboardEngine should terminate
        self.terminate = False
        # The switchboard config object
        self.config = config
        # Object used to encode and disseminate the consecutive IO state
        self._ws_ctrl = ws_ctrl
        # Map of client alias -> _ClientInfo object
        self.clients = {}
        # Map of module name -> _Module object
        self.modules = {}
        # Map of all the Switchboard devices (name -> device instance)
        self.devices = {}
        # Lock used to synchronise switchboard with its settings
        self.lock = Lock()
        # Let the engine know how long since the last cycle
        self.prev_cycle_time = 0.0

    def init_clients(self):
        ''' Initialise the switchboard clients according to the config file '''
        if not self.config.get('clients'):
            return
        logger.info("Initialising switchboard clients...")
        for alias, client_info in self.config.get('clients').items():
            try:
                poll_period = client_info['poll_period'] if 'poll_period' in client_info else None
                self.add_client(client_info['url'], alias, poll_period)
            except Exception as e:
                # Cannot start without the configured clients: report and exit
                prints(sys.exit, 'Error adding client {}({}): {}'.format(alias, client_info['url'], e))

    def init_modules(self):
        ''' Initialise the switchboard modules according to the config file '''
        if self.config.get('modules'):
            logger.info("Initialising switchboard modules...")
            for module, state in self.config.get('modules').items():
                try:
                    self.upsert_switchboard_module(module, state == 'enabled')
                except Exception as e:
                    prints(sys.exit, 'Error adding module {}: {}'.format(module, e))
        self.running = self.config.get('running')

    def add_client(self, client_url, client_alias, poll_period=None, log_prefix='', print_func=lambda s: None):
        """Register a brand-new client and import its devices.

        Raises:
            EngineError: if the alias or the URL is already registered.
        """
        polling = ' poll period={}'.format(poll_period) if poll_period else ''
        prints(print_func, '{}Adding client {}({}){}'.format(log_prefix, client_alias, client_url, polling))
        if client_alias in self.clients:
            raise EngineError('Client with alias "{}" already exists'.format(client_alias))
        for client in self.clients.values():
            if client.url == client_url:
                raise EngineError('Client with URL "{}" already exists with'
                        ' alias {}'.format(client_url, client.alias))
        self._upsert_client(client_url, client_alias, poll_period, log_prefix, print_func=print_func)

    def update_client(self, client_alias, poll_period=None, log_prefix='', print_func=lambda s: None):
        """Re-query an already-registered client and refresh its devices."""
        if client_alias not in self.clients:
            raise EngineError('Unknown client alias "{}"'.format(client_alias))
        client_url = self.clients[client_alias].url
        prints(print_func, '{}Updating client {}({})'.format(log_prefix, client_alias, client_url))
        # Fix: print_func must be forwarded -- _upsert_client has no default
        # for it, so the previous call raised a TypeError.
        self._upsert_client(client_url, client_alias, poll_period, log_prefix, print_func=print_func)

    def _upsert_client(self, client_url, client_alias, poll_period, log_prefix, print_func):
        ''' Insert or update a Switchboard client. This method throws
            an exception if any issues are encountered and complies to
            the strong exception guarantee (i.e., if an error is raised
            SwitchboardEngine will keep running without changing state) '''
        # Get the info of all the devices
        info_url = client_url + '/devices_info'
        try:
            req = requests.get(info_url, timeout=3).json()
        except Exception as e:
            raise EngineError('Unable to connect to {}: {}'.format(info_url, e))
        # TODO check formatting for client_url + '/devices_value'
        client_devices = req['devices']
        prints(print_func, '{}Adding devices:'.format(log_prefix))
        new_devices = {}
        for device in client_devices:
            # Prepend the client name to the device name so that identical
            # devices on different clients have different names
            name = '{}.{}'.format(client_alias, device['name'])
            device['name'] = name
            # Check we don't have duplicate devices on this client
            if name in new_devices:
                raise EngineError('Device "{}" exists twice on client {}'.format(name, client_url))
            # Make sure we don't add a device that already exists on a
            # different client
            if name in self.devices and self.devices[name].client_url != client_url:
                clashing_client = self.devices[name].client_url
                raise EngineError('Device "{}" already exists for client {}'.format(name, clashing_client))
            new_devices[name] = RESTDevice(device, client_url, self.set_remote_device_value)
            prints(print_func, '{}\t{}'.format(log_prefix, name))
        # In case we are updating a client we need to delete all its
        # known 'old' devices and remove it from the clients dict
        if client_alias in self.clients:
            # TODO cornercase: make sure that devices that no longer
            # exist aren't used by modules
            self.remove_client(client_alias)
        # And now add all the new/updated client information
        self.devices.update(new_devices)
        self.clients[client_alias] = _ClientInfo(client_url, client_alias, new_devices, poll_period)
        # Load the initial values
        self._update_devices_values()
        # Let ws_ctrl know we may have a new table structure
        self._ws_ctrl.reset_table()

    def get_modules_using_client(self, client_alias):
        ''' Returns a list of the modules using the given client '''
        client_obj = self.clients[client_alias]
        # Figure out which modules are using the IOs from this client
        modules_using_client = set()
        for mod_name, mod_obj in self.modules.items():
            ios = set(mod_obj.module_class.inputs) | set(mod_obj.module_class.outputs)
            for device in client_obj.devices:
                if device in ios:
                    modules_using_client.add(mod_name)
                    break
        return modules_using_client

    def remove_client(self, client_alias):
        ''' Remove the given client from the list of polled clients and
            delete the devices associated with this client '''
        for old_device in self.clients[client_alias].devices:
            del self.devices[old_device]
        del self.clients[client_alias]
        # Let ws_ctrl know we may have a new table structure
        self._ws_ctrl.reset_table()

    def upsert_switchboard_module(self, module_name, enabled=False):
        ''' Load (or reload) the given module and wire up its IOs '''
        # Instantiate the module and update data structures
        logger.info('Adding module {}'.format(module_name))
        swbmodule = load_attribute(module_name)
        swbmodule.module_class.enabled = enabled
        self.modules[module_name] = swbmodule
        # Make sure all the inputs and outputs line up correctly
        swbmodule.module_class.create_argument_list(self.devices)

    def remove_module(self, module_name):
        ''' Forget about the given module '''
        del self.modules[module_name]
        logger.info('Removed module {}'.format(module_name))

    def enable_switchboard_module(self, module_name):
        ''' Mark the module as enabled so the main loop evaluates it '''
        if module_name not in self.modules:
            raise EngineError('Unknown module {}'.format(module_name))
        module_class = self.modules[module_name].module_class
        if module_class.error:
            logger.warning('Module {} enabled but will not run due to error: {}'.format(
                module_name, module_class.error))
        module_class.enabled = True

    def disable_switchboard_module(self, module_name):
        ''' Mark the module as disabled; it stays loaded but is not run '''
        if module_name not in self.modules:
            raise EngineError('Unknown module {}'.format(module_name))
        self.modules[module_name].module_class.enabled = False

    def start(self):
        ''' Startup the Switchboard thread '''
        self._swb_thread = Thread(target=self.run)
        self._swb_thread.daemon = True
        self._swb_thread.start()

    def run(self):
        ''' Main loop: tick until self.terminate is set '''
        while not self.terminate:
            try:
                self.switchboard_loop()
            except KeyboardInterrupt:
                logger.info('Terminating due to keyboard interrupt')
                break

    def switchboard_loop(self):
        ''' Execute one loop/tick/clk of the Switchboard engine '''
        # Wait to complete the poll period
        poll_period = float(self.config.configs['poll_period'])
        time_diff = time.time() - self.prev_cycle_time
        sleep_time = max(0.0, poll_period - time_diff)
        time.sleep(sleep_time)
        self.prev_cycle_time = time.time()
        # Lock so that cli actions don't interfere
        with self.lock:
            # Get all the latest values
            self._update_devices_values()
            # Evaluate the modules if we're running
            if self.running:
                for module in self.modules.values():
                    module()
            # Update ws_ctrl agents
            self._ws_ctrl.take_snapshot(self.clients, self.devices)

    def set_remote_device_value(self, device, value):
        ''' Push a new output value to the remote client owning the device '''
        # Strip the client alias from the device name so that the remote
        # client recognises its local device
        local_device_name = device.name[device.name.find('.') + 1:]
        payload = json.dumps({'name': local_device_name, 'value': str(value)})
        try:
            r = requests.put(device.client_url + '/device_set', data=payload, timeout=1)
            response = r.json()
            if 'error' in response:
                logger.warning(response['error'])
        except Exception as e:
            logger.error('Exception "{}" when setting the output value of {} to {}'.format(
                e, device.name, value))

    def _update_devices_values(self):
        ''' Get updated values from the devices '''
        for client in self.clients.values():
            if not client.do_update():
                continue
            values_url = client.url + '/devices_value'
            try:
                values = requests.get(values_url, timeout=5)
                client.connected = True
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # still propagate.
                client.connected = False
                client.on_error('Unable to access client {}'.format(client.url))
                continue
            try:
                values_json = values.json()
            except Exception:
                # Fix: this previously referenced an undefined name `url`
                # (NameError); report the client's URL instead.
                client.on_error('Invalid json formatting for client {}'.format(client.url))
                continue
            error = self._check_values_json_formatting(client.url, values_json)
            if error:
                client.on_error(error)
            else:
                client.on_no_error()
                for device_json in values_json['devices']:
                    self._update_device_value(client.alias, device_json)

    def _check_values_json_formatting(self, url, values_json):
        ''' Check that the request body is correctly formatted.
            Returns an error string, or None when the body is well formed. '''
        if 'error' in values_json:
            return 'Error for client {}: {}'.format(url, values_json['error'])
        if not 'devices' in values_json:
            return 'Error for client {}: no "devices" field'.format(url)
        for device_json in values_json['devices']:
            if not 'name' in device_json:
                return 'Error for client {}: found device with no name'.format(url)
            if not 'value' in device_json and not 'error' in device_json:
                return 'Error for client {}: device {} has no value or error field'.format(
                    url, device_json['name'])

    def _update_device_value(self, client_alias, device_json):
        ''' Given a correctly formatted json encoded device value,
            update the local device object '''
        global_dev_name = '{}.{}'.format(client_alias, device_json['name'])
        device = self.devices[global_dev_name]
        if 'error' in device_json:
            # Only log on the error's first occurrence to avoid log spam
            if not device.error:
                logger.warning('Device {} has reported an error: {}'.format(
                    global_dev_name, device_json['error']))
            device.error = device_json['error']
        elif 'value' in device_json:
            if device.error:
                logger.warning('Device {} no longer reporting error'.format(
                    global_dev_name))
            device.error = None
            device.update_value(device_json['value'])
class _ClientInfo:
def __init__(self, url, alias, devices, poll_period):
self.url = url
self.alias = alias
self.connected = False
self.error = None
self.devices = devices
self.poll_period = poll_period # Poll every iteration if None
self.last_polled = 0.0
logger = logging.getLogger(__name__)
def do_update(self):
''' Determines if we should update this client or not '''
if self.poll_period == None:
return True
if time.time() - self.last_polled > float(self.poll_period):
self.last_polled = time.time()
return True
return False
def on_error(self, msg):
''' Sets the error state of the client and all its associated devices '''
if self.error != msg:
logger.warning('Encountered error for client {}: {}'.format(self.url, msg))
self.error = msg
for device in self.devices.values():
device.error = 'Client error "{}"'.format(msg)
def on_no_error(self):
if self.error:
logger.info('Client {} no longer in error state'.format(self.url))
self.error = None
for device in self.devices.values():
device.error = None
| |
# Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Meteogram
=========
Plots time series data as a meteogram.
"""
import datetime as dt
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import dewpoint_rh
from metpy.cbook import get_test_data
from metpy.units import units
def calc_mslp(t, p, h):
    """Reduce station pressure *p* (hPa) at elevation *h* (m) and temperature
    *t* (degC) to mean sea level pressure via the barometric formula."""
    height_term = 0.0065 * h
    correction = height_term / (t + height_term + 273.15)
    return p * (1 - correction) ** (-5.257)
# Make meteogram plot
class Meteogram(object):
    """ Plot a time series of meteorological data from a particular station as a
    meteogram with standard variables to visualize, including thermodynamic,
    kinematic, and pressure. The functions below control the plotting of each
    variable.
    TO DO: Make the subplot creation dynamic so the number of rows is not
    static as it is currently. """

    def __init__(self, fig, dates, probeid, time=None, axis=0):
        """
        Required input:
            fig: figure object
            dates: array of dates corresponding to the data
            probeid: ID of the station
        Optional Input:
            time: Time the data is to be plotted
            axis: number that controls the new axis to be plotted (FOR FUTURE)
        """
        if not time:
            time = dt.datetime.utcnow()
        self.start = dates[0]
        self.fig = fig
        self.end = dates[-1]
        self.axis_num = 0
        self.dates = mpl.dates.date2num(dates)
        self.time = time.strftime('%Y-%m-%d %H:%M UTC')
        self.title = 'Latest Ob Time: {0}\nProbe ID: {1}'.format(self.time, probeid)

    def plot_winds(self, ws, wd, wsmax, plot_range=None):
        """
        Required input:
            ws: Wind speeds (knots)
            wd: Wind direction (degrees)
            wsmax: Wind gust (knots)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT WIND SPEED AND WIND DIRECTION
        # Fix: use the figure stored on the instance rather than the
        # module-level global `fig` (the old code only worked because a
        # global of that name happened to exist).
        self.ax1 = self.fig.add_subplot(4, 1, 1)
        ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
        plt.fill_between(self.dates, ws, 0)
        self.ax1.set_xlim(self.start, self.end)
        if not plot_range:
            plot_range = [0, 20, 1]
        plt.ylabel('Wind Speed (knots)', multialignment='center')
        self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        ln2 = self.ax1.plot(self.dates,
                            wsmax,
                            '.r',
                            label='3-sec Wind Speed Max')
        plt.setp(self.ax1.get_xticklabels(), visible=True)
        # Wind direction shares the x axis but gets its own y axis
        ax7 = self.ax1.twinx()
        ln3 = ax7.plot(self.dates,
                       wd,
                       '.k',
                       linewidth=0.5,
                       label='Wind Direction')
        plt.ylabel('Wind\nDirection\n(degrees)', multialignment='center')
        plt.ylim(0, 360)
        plt.yticks(np.arange(45, 405, 90), ['NE', 'SE', 'SW', 'NW'])
        # Combine the three line handles into a single legend
        lns = ln1 + ln2 + ln3
        labs = [l.get_label() for l in lns]
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        ax7.legend(lns, labs, loc='upper center',
                   bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})

    def plot_thermo(self, t, td, plot_range=None):
        """
        Required input:
            T: Temperature (deg F)
            TD: Dewpoint (deg F)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT TEMPERATURE AND DEWPOINT
        if not plot_range:
            plot_range = [10, 90, 2]
        # Fix: self.fig instead of the global `fig`
        self.ax2 = self.fig.add_subplot(4, 1, 2, sharex=self.ax1)
        ln4 = self.ax2.plot(self.dates,
                            t,
                            'r-',
                            label='Temperature')
        plt.fill_between(self.dates,
                         t,
                         td,
                         color='r')
        plt.setp(self.ax2.get_xticklabels(), visible=True)
        plt.ylabel('Temperature\n(F)', multialignment='center')
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        ln5 = self.ax2.plot(self.dates,
                            td,
                            'g-',
                            label='Dewpoint')
        plt.fill_between(self.dates,
                         td,
                         plt.ylim()[0],
                         color='g')
        ax_twin = self.ax2.twinx()
        # ax_twin.set_ylim(20,90,2)
        ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        lns = ln4 + ln5
        labs = [l.get_label() for l in lns]
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        self.ax2.legend(lns, labs, loc='upper center',
                        bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12})

    def plot_rh(self, rh, plot_range=None):
        """
        Required input:
            RH: Relative humidity (%)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT RELATIVE HUMIDITY
        if not plot_range:
            plot_range = [0, 100, 4]
        # Fix: self.fig instead of the global `fig`
        self.ax3 = self.fig.add_subplot(4, 1, 3, sharex=self.ax1)
        self.ax3.plot(self.dates,
                      rh,
                      'g-',
                      label='Relative Humidity')
        self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
        plt.setp(self.ax3.get_xticklabels(), visible=True)
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        plt.fill_between(self.dates, rh, plt.ylim()[0], color='g')
        plt.ylabel('Relative Humidity\n(%)', multialignment='center')
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        axtwin = self.ax3.twinx()
        axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])

    def plot_pressure(self, p, plot_range=None):
        """
        Required input:
            P: Mean Sea Level Pressure (hPa)
        Optional Input:
            plot_range: Data range for making figure (list of (min,max,step))
        """
        # PLOT PRESSURE
        if not plot_range:
            plot_range = [970, 1030, 2]
        # Fix: self.fig instead of the global `fig`
        self.ax4 = self.fig.add_subplot(4, 1, 4, sharex=self.ax1)
        self.ax4.plot(self.dates,
                      p,
                      'm',
                      label='Mean Sea Level Pressure')
        plt.ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center')
        plt.ylim(plot_range[0], plot_range[1], plot_range[2])
        axtwin = self.ax4.twinx()
        axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
        plt.fill_between(self.dates, p, plt.ylim()[0], color='m')
        plt.gca().xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
        self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12})
        plt.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5)
        plt.setp(self.ax4.get_xticklabels(), visible=True)
# OTHER OPTIONAL AXES TO PLOT
# plot_irradiance
# plot_precipitation
# set the starttime and endtime for plotting, 24 hour range
# set the starttime and endtime for plotting, 24 hour range
endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0)
starttime = endtime - dt.timedelta(hours=24)
# Height of the station to calculate MSLP
# NOTE(review): presumably metres above sea level -- confirm against the
# station metadata for probe 0102A.
hgt_example = 292.
def parse_date(date):
    """Decode a byte-string CSV timestamp (e.g. b'2016-03-31 22:00:00')
    into a :class:`datetime.datetime`."""
    text = date.decode('ascii')
    return dt.datetime.strptime(text, '%Y-%m-%d %H:%M:%S')
# Load the sample time series, converting the DATE column to datetimes.
testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None,
                         usecols=list(range(1, 8)),
                         converters={'DATE': parse_date}, delimiter=',')
# Temporary variables for ease
temp = testdata['T']
pres = testdata['P']
rh = testdata['RH']
ws = testdata['WS']
wsmax = testdata['WSMAX']
wd = testdata['WD']
date = testdata['DATE']
# ID For Plotting on Meteogram
probe_id = '0102A'
# Attach physical units and convert to the display units used by the plots.
data = dict()
data['wind_speed'] = (np.array(ws) * units('m/s')).to(units('knots'))
data['wind_speed_max'] = (np.array(wsmax) * units('m/s')).to(units('knots'))
data['wind_direction'] = np.array(wd) * units('degrees')
# Dewpoint from temperature and RH; RH is given in percent, hence /100.
data['dewpoint'] = dewpoint_rh((np.array(temp) * units('degC')).to(units('K')),
                               np.array(rh) / 100.).to(units('degF'))
data['air_temperature'] = (np.array(temp) * units('degC')).to(units('degF'))
# Reduce station pressure to mean sea level using the station height above.
data['mean_slp'] = calc_mslp(np.array(temp), np.array(pres), hgt_example) * units('hPa')
data['relative_humidity'] = np.array(rh)
data['times'] = np.array(date)
# Build the four-panel meteogram and render it.
fig = plt.figure(figsize=(20, 16))
meteogram = Meteogram(fig, data['times'], probe_id)
meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max'])
meteogram.plot_thermo(data['air_temperature'], data['dewpoint'])
meteogram.plot_rh(data['relative_humidity'])
meteogram.plot_pressure(data['mean_slp'])
fig.subplots_adjust(hspace=0.5)
plt.show()
| |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_utils import versionutils
import nova.conf
from nova.db import api as db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
CONF = nova.conf.CONF
# TODO(berrange): Remove NovaObjectDictCompat
@obj_base.NovaObjectRegistry.register
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject,
              obj_base.NovaObjectDictCompat):
    """Versioned object wrapping a nova-network database record."""
    # Version 1.0: Initial version
    # Version 1.1: Added in_use_on_host()
    # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address
    VERSION = '1.2'
    # Field definitions; each entry maps to a column of the networks table.
    fields = {
        'id': fields.IntegerField(),
        'label': fields.StringField(),
        'injected': fields.BooleanField(),
        'cidr': fields.IPV4NetworkField(nullable=True),
        'cidr_v6': fields.IPV6NetworkField(nullable=True),
        'multi_host': fields.BooleanField(),
        'netmask': fields.IPV4AddressField(nullable=True),
        'gateway': fields.IPV4AddressField(nullable=True),
        'broadcast': fields.IPV4AddressField(nullable=True),
        'netmask_v6': fields.IPV6AddressField(nullable=True),
        'gateway_v6': fields.IPV6AddressField(nullable=True),
        'bridge': fields.StringField(nullable=True),
        'bridge_interface': fields.StringField(nullable=True),
        'dns1': fields.IPAddressField(nullable=True),
        'dns2': fields.IPAddressField(nullable=True),
        'vlan': fields.IntegerField(nullable=True),
        'vpn_public_address': fields.IPAddressField(nullable=True),
        'vpn_public_port': fields.IntegerField(nullable=True),
        'vpn_private_address': fields.IPAddressField(nullable=True),
        'dhcp_start': fields.IPV4AddressField(nullable=True),
        'rxtx_base': fields.IntegerField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'priority': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'mtu': fields.IntegerField(nullable=True),
        'dhcp_server': fields.IPAddressField(nullable=True),
        'enable_dhcp': fields.BooleanField(),
        'share_address': fields.BooleanField(),
    }
    @staticmethod
    def _convert_legacy_ipv6_netmask(netmask):
        """Handle netmask_v6 possibilities from the database.
        Historically, this was stored as just an integral CIDR prefix,
        but in the future it should be stored as an actual netmask.
        Be tolerant of either here.
        """
        try:
            # Integral prefix length, e.g. "64" -> ffff:ffff:ffff:ffff::
            prefix = int(netmask)
            return netaddr.IPNetwork('1::/%i' % prefix).netmask
        except ValueError:
            pass
        try:
            # Otherwise expect a full netmask / CIDR string
            return netaddr.IPNetwork(netmask).netmask
        except netaddr.AddrFormatError:
            raise ValueError(_('IPv6 netmask "%s" must be a netmask '
                               'or integral prefix') % netmask)
    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized form for peers older than version 1.2
        by stripping the fields those versions do not know about."""
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            if 'mtu' in primitive:
                del primitive['mtu']
            if 'enable_dhcp' in primitive:
                del primitive['enable_dhcp']
            if 'dhcp_server' in primitive:
                del primitive['dhcp_server']
            if 'share_address' in primitive:
                del primitive['share_address']
    @staticmethod
    def _from_db_object(context, network, db_network):
        """Populate a Network object from a raw database row."""
        for field in network.fields:
            db_value = db_network[field]
            if field == 'netmask_v6' and db_value is not None:
                # Legacy rows may store a bare prefix length; normalise it
                db_value = network._convert_legacy_ipv6_netmask(db_value)
            elif field == 'dhcp_server' and db_value is None:
                # Default the DHCP server address to the gateway
                db_value = db_network['gateway']
            network[field] = db_value
        network._context = context
        network.obj_reset_changes()
        return network
    @obj_base.remotable_classmethod
    def get_by_id(cls, context, network_id, project_only='allow_none'):
        """Fetch a network by database id."""
        db_network = db.network_get(context, network_id,
                                    project_only=project_only)
        return cls._from_db_object(context, cls(), db_network)
    @obj_base.remotable_classmethod
    def get_by_uuid(cls, context, network_uuid):
        """Fetch a network by UUID."""
        db_network = db.network_get_by_uuid(context, network_uuid)
        return cls._from_db_object(context, cls(), db_network)
    @obj_base.remotable_classmethod
    def get_by_cidr(cls, context, cidr):
        """Fetch a network by its IPv4 CIDR."""
        db_network = db.network_get_by_cidr(context, cidr)
        return cls._from_db_object(context, cls(), db_network)
    # TODO(stephenfin): This is no longer used and can be removed
    @obj_base.remotable_classmethod
    def associate(cls, context, project_id, network_id=None, force=False):
        db.network_associate(context, project_id, network_id=network_id,
                             force=force)
    # TODO(stephenfin): This is no longer used and can be removed
    @obj_base.remotable_classmethod
    def disassociate(cls, context, network_id, host=False, project=False):
        db.network_disassociate(context, network_id, host, project)
    @obj_base.remotable_classmethod
    def in_use_on_host(cls, context, network_id, host):
        """Return whether any instance on *host* uses this network."""
        return db.network_in_use_on_host(context, network_id, host)
    def _get_primitive_changes(self):
        """Return obj_get_changes() with IP address values stringified so
        they can be passed straight to the DB layer."""
        changes = {}
        for key, value in self.obj_get_changes().items():
            if isinstance(value, netaddr.IPAddress):
                changes[key] = str(value)
            else:
                changes[key] = value
        return changes
    @obj_base.remotable
    def create(self):
        """Insert this network into the database."""
        updates = self._get_primitive_changes()
        if 'id' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        db_network = db.network_create_safe(self._context, updates)
        self._from_db_object(self._context, self, db_network)
    @obj_base.remotable
    def destroy(self):
        """Delete this network from the database and mark it deleted."""
        db.network_delete_safe(self._context, self.id)
        self.deleted = True
        self.obj_reset_changes(['deleted'])
    @obj_base.remotable
    def save(self):
        """Persist pending changes; 'host' changes go through
        network_set_host rather than a plain update."""
        context = self._context
        updates = self._get_primitive_changes()
        if 'netmask_v6' in updates:
            # NOTE(danms): For some reason, historical code stores the
            # IPv6 netmask as just the CIDR mask length, so convert that
            # back here before saving for now.
            updates['netmask_v6'] = netaddr.IPNetwork(
                updates['netmask_v6']).netmask
        set_host = 'host' in updates
        if set_host:
            db.network_set_host(context, self.id, updates.pop('host'))
        if updates:
            db_network = db.network_update(context, self.id, updates)
        elif set_host:
            # Only the host changed: re-read the row to refresh the object
            db_network = db.network_get(context, self.id)
        else:
            db_network = None
        if db_network is not None:
            self._from_db_object(context, self, db_network)
@obj_base.NovaObjectRegistry.register
class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject):
    """Versioned list container for Network objects."""
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_project()
    # Version 1.2: Network <= version 1.2
    VERSION = '1.2'
    fields = {
        'objects': fields.ListOfObjectsField('Network'),
    }
    @obj_base.remotable_classmethod
    def get_all(cls, context, project_only='allow_none'):
        """Return every network visible under the project_only policy."""
        db_networks = db.network_get_all(context, project_only)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)
    @obj_base.remotable_classmethod
    def get_by_uuids(cls, context, network_uuids, project_only='allow_none'):
        """Return the networks matching the given list of UUIDs."""
        db_networks = db.network_get_all_by_uuids(context, network_uuids,
                                                  project_only)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)
    @obj_base.remotable_classmethod
    def get_by_host(cls, context, host):
        """Return the networks assigned to the given host."""
        db_networks = db.network_get_all_by_host(context, host)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)
    @obj_base.remotable_classmethod
    def get_by_project(cls, context, project_id, associate=True):
        """Return the project's networks, optionally associating one
        to the project if none is bound yet."""
        db_networks = db.project_get_networks(context, project_id,
                                              associate=associate)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)
| |
from __future__ import absolute_import, unicode_literals
import inspect
import json
import traceback
import pykka
from mopidy import compat
def private_method(f):
    """Decorator marking *f* as private so the JSON-RPC wrapper skips it
    when exporting methods. Returns *f* unchanged apart from the marker."""
    setattr(f, 'private_method', True)
    return f
class JsonRpcWrapper(object):
    """
    Wrap objects and make them accessible through JSON-RPC 2.0 messaging.
    This class takes responsibility of communicating with the objects and
    processing of JSON-RPC 2.0 messages. The transport of the messages over
    HTTP, WebSocket, TCP, or whatever is of no concern to this class.
    The wrapper supports exporting the methods of one or more objects. Either
    way, the objects must be exported with method name prefixes, called
    "mounts".
    To expose objects, add them all to the objects mapping. The key in the
    mapping is used as the object's mounting point in the exposed API::
        jrw = JsonRpcWrapper(objects={
            'foo': foo,
            'hello': lambda: 'Hello, world!',
        })
    This will export the Python callables on the left as the JSON-RPC 2.0
    method names on the right::
        foo.bar() -> foo.bar
        foo.baz() -> foo.baz
        lambda -> hello
    Only the public methods of the mounted objects, or functions/methods
    included directly in the mapping, will be exposed.
    If a method returns a :class:`pykka.Future`, the future will be completed
    and its value unwrapped before the JSON-RPC wrapper returns the response.
    For further details on the JSON-RPC 2.0 spec, see
    http://www.jsonrpc.org/specification
    :param objects: mapping between mounting points and exposed functions or
        class instances
    :type objects: dict
    :param decoders: object builders to be used by :func:`json.loads`
    :type decoders: list of functions taking a dict and returning a dict
    :param encoders: object serializers to be used by :func:`json.dumps`
    :type encoders: list of :class:`json.JSONEncoder` subclasses with the
        method :meth:`default` implemented
    """
    def __init__(self, objects, decoders=None, encoders=None):
        # An empty mount would make every method name ambiguous, so forbid it
        if '' in objects.keys():
            raise AttributeError(
                'The empty string is not allowed as an object mount')
        self.objects = objects
        self.decoder = get_combined_json_decoder(decoders or [])
        self.encoder = get_combined_json_encoder(encoders or [])
    def handle_json(self, request):
        """
        Handles an incoming request encoded as a JSON string.
        Returns a response as a JSON string for commands, and :class:`None` for
        notifications.
        :param request: the serialized JSON-RPC request
        :type request: string
        :rtype: string or :class:`None`
        """
        try:
            request = json.loads(request, object_hook=self.decoder)
        except ValueError:
            # Malformed JSON: spec-mandated Parse error (-32700)
            response = JsonRpcParseError().get_response()
        else:
            response = self.handle_data(request)
        if response is None:
            return None
        return json.dumps(response, cls=self.encoder)
    def handle_data(self, request):
        """
        Handles an incoming request in the form of a Python data structure.
        Returns a Python data structure for commands, or a :class:`None` for
        notifications.
        :param request: the unserialized JSON-RPC request
        :type request: dict
        :rtype: dict, list, or :class:`None`
        """
        # A list is a JSON-RPC batch; anything else is a single request
        if isinstance(request, list):
            return self._handle_batch(request)
        else:
            return self._handle_single_request(request)
    def _handle_batch(self, requests):
        """Process a batch; responses to notifications are omitted, and an
        all-notification batch yields None."""
        if not requests:
            return JsonRpcInvalidRequestError(
                data='Batch list cannot be empty').get_response()
        responses = []
        for request in requests:
            response = self._handle_single_request(request)
            if response:
                responses.append(response)
        return responses or None
    def _handle_single_request(self, request):
        """Validate, dispatch and answer one request; errors are converted
        to JSON-RPC error responses (or swallowed for notifications)."""
        try:
            self._validate_request(request)
            args, kwargs = self._get_params(request)
        except JsonRpcInvalidRequestError as error:
            # Invalid request: no usable id, so respond with id=None
            return error.get_response()
        try:
            method = self._get_method(request['method'])
            try:
                result = method(*args, **kwargs)
                if self._is_notification(request):
                    # Notifications never get a response, even on success
                    return None
                result = self._unwrap_result(result)
                return {
                    'jsonrpc': '2.0',
                    'id': request['id'],
                    'result': result,
                }
            except TypeError as error:
                # NOTE: a TypeError raised *inside* the method is
                # indistinguishable from a bad-arguments TypeError here,
                # so both map to Invalid params
                raise JsonRpcInvalidParamsError(data={
                    'type': error.__class__.__name__,
                    'message': compat.text_type(error),
                    'traceback': traceback.format_exc(),
                })
            except Exception as error:
                # Any other exception from the method is an application error
                raise JsonRpcApplicationError(data={
                    'type': error.__class__.__name__,
                    'message': compat.text_type(error),
                    'traceback': traceback.format_exc(),
                })
        except JsonRpcError as error:
            if self._is_notification(request):
                return None
            return error.get_response(request['id'])
    def _validate_request(self, request):
        """Raise JsonRpcInvalidRequestError unless *request* is a proper
        JSON-RPC 2.0 request object."""
        if not isinstance(request, dict):
            raise JsonRpcInvalidRequestError(
                data='Request must be an object')
        if 'jsonrpc' not in request:
            raise JsonRpcInvalidRequestError(
                data='"jsonrpc" member must be included')
        if request['jsonrpc'] != '2.0':
            raise JsonRpcInvalidRequestError(
                data='"jsonrpc" value must be "2.0"')
        if 'method' not in request:
            raise JsonRpcInvalidRequestError(
                data='"method" member must be included')
        if not isinstance(request['method'], compat.text_type):
            raise JsonRpcInvalidRequestError(
                data='"method" must be a string')
    def _get_params(self, request):
        """Split 'params' into (positional args, keyword args)."""
        if 'params' not in request:
            return [], {}
        params = request['params']
        if isinstance(params, list):
            return params, {}
        elif isinstance(params, dict):
            return [], params
        else:
            raise JsonRpcInvalidRequestError(
                data='"params", if given, must be an array or an object')
    def _get_method(self, method_path):
        """Resolve a dotted method path to a callable, honouring mounts and
        hiding underscore-prefixed members."""
        if callable(self.objects.get(method_path, None)):
            # The mounted object is the callable
            return self.objects[method_path]
        # The mounted object contains the callable
        if '.' not in method_path:
            raise JsonRpcMethodNotFoundError(
                data='Could not find object mount in method name "%s"' % (
                    method_path))
        mount, method_name = method_path.rsplit('.', 1)
        if method_name.startswith('_'):
            raise JsonRpcMethodNotFoundError(
                data='Private methods are not exported')
        try:
            obj = self.objects[mount]
        except KeyError:
            raise JsonRpcMethodNotFoundError(
                data='No object found at "%s"' % mount)
        try:
            return getattr(obj, method_name)
        except AttributeError:
            raise JsonRpcMethodNotFoundError(
                data='Object mounted at "%s" has no member "%s"' % (
                    mount, method_name))
    def _is_notification(self, request):
        # Per spec, a request without an "id" member is a notification
        return 'id' not in request
    def _unwrap_result(self, result):
        # Block on pykka futures so the response carries the real value
        if isinstance(result, pykka.Future):
            result = result.get()
        return result
class JsonRpcError(Exception):
    """Base class for JSON-RPC 2.0 errors.

    Subclasses override :attr:`code` and :attr:`message`; optional extra
    detail is carried in ``data``.
    """

    code = -32000
    message = 'Unspecified server error'

    def __init__(self, data=None):
        self.data = data

    def get_response(self, request_id=None):
        """Build the JSON-RPC 2.0 error-response object for this error."""
        error = {
            'code': self.code,
            'message': self.message,
        }
        if self.data:
            error['data'] = self.data
        return {
            'jsonrpc': '2.0',
            'id': request_id,
            'error': error,
        }
class JsonRpcParseError(JsonRpcError):
    """Invalid JSON was received by the server (JSON-RPC error -32700)."""
    code = -32700
    message = 'Parse error'
class JsonRpcInvalidRequestError(JsonRpcError):
    """The JSON sent is not a valid request object (JSON-RPC error -32600)."""
    code = -32600
    message = 'Invalid Request'
class JsonRpcMethodNotFoundError(JsonRpcError):
    """The method does not exist or is not exported (JSON-RPC error -32601)."""
    code = -32601
    message = 'Method not found'
class JsonRpcInvalidParamsError(JsonRpcError):
    """Invalid method parameter(s) were supplied (JSON-RPC error -32602)."""
    code = -32602
    message = 'Invalid params'
class JsonRpcApplicationError(JsonRpcError):
    """An exposed method raised an unexpected exception (application error)."""
    code = 0
    message = 'Application error'
def get_combined_json_decoder(decoders):
    """Combine several JSON object hooks into one decoder function.

    The returned function applies each hook in *decoders*, in order,
    feeding each one the previous hook's output.
    """
    def decode(dct):
        result = dct
        for decoder in decoders:
            result = decoder(result)
        return result
    return decode
def get_combined_json_encoder(encoders):
    """Build a JSONEncoder subclass that tries each encoder in *encoders*.

    Each encoder class is tried in order until one can serialize the
    object; otherwise the default encoder raises TypeError as usual.
    """
    class JsonRpcEncoder(json.JSONEncoder):
        def default(self, obj):
            for encoder_class in encoders:
                try:
                    return encoder_class().default(obj)
                except TypeError:
                    continue  # This encoder can't handle obj; try the next.
            return json.JSONEncoder.default(self, obj)
    return JsonRpcEncoder
class JsonRpcInspector(object):
    """
    Inspects a group of classes and functions to create a description of what
    methods they can expose over JSON-RPC 2.0.

    To inspect one or more classes, add them all to the objects mapping. The
    key in the mapping is used as the classes' mounting point in the exposed
    API::

        jri = JsonRpcInspector(objects={
            'foo': Foo,
            'hello': lambda: 'Hello, world!',
        })

    Since the inspector is based on inspecting classes and not instances, it
    will not include methods added dynamically. The wrapper works with
    instances, and it will thus export dynamically added methods as well.

    :param objects: mapping between mounts and exposed functions or classes
    :type objects: dict
    """

    def __init__(self, objects):
        if '' in objects.keys():
            raise AttributeError(
                'The empty string is not allowed as an object mount')
        self.objects = objects

    def describe(self):
        """
        Inspects the object and returns a data structure which describes the
        available properties and methods.
        """
        methods = {}
        for mount, obj in self.objects.items():
            if inspect.isroutine(obj):
                # A bare function/lambda is mounted directly at its key.
                methods[mount] = self._describe_method(obj)
            else:
                obj_methods = self._get_methods(obj)
                for name, description in obj_methods.items():
                    if mount:
                        name = '%s.%s' % (mount, name)
                    methods[name] = description
        return methods

    def _get_methods(self, obj):
        """Return descriptions for all public routines exposed by *obj*."""
        methods = {}
        for name, value in inspect.getmembers(obj):
            if name.startswith('_'):
                continue  # Underscore-prefixed members are never exported.
            if not inspect.isroutine(value):
                continue  # Only functions/methods are exported.
            if hasattr(value, 'private_method'):
                continue  # Explicitly marked as private.
            method = self._describe_method(value)
            if method:
                methods[name] = method
        return methods

    def _describe_method(self, method):
        """Return a dict with the docstring and parameters of *method*."""
        return {
            'description': inspect.getdoc(method),
            'params': self._describe_params(method),
        }

    def _describe_params(self, method):
        """Describe the parameters of *method*.

        Uses :func:`inspect.getfullargspec`; the previously used
        ``inspect.getargspec`` was removed in Python 3.11, so this keeps
        the inspector working on modern interpreters. Keyword-only
        parameters are ignored, matching the old behavior's scope.
        """
        argspec = inspect.getfullargspec(method)
        # 'self' is an implementation detail and is not part of the API.
        params = [{'name': arg} for arg in argspec.args if arg != 'self']
        if argspec.defaults:
            # Defaults always belong to the trailing positional parameters.
            for i, default in enumerate(reversed(argspec.defaults)):
                params[len(params) - i - 1]['default'] = default
        if argspec.varargs:
            params.append({
                'name': argspec.varargs,
                'varargs': True,
            })
        if argspec.varkw:
            params.append({
                'name': argspec.varkw,
                'kwargs': True,
            })
        return params
| |
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests as possible out of this module as soon as the C parser can accept
further arguments when parsing.
"""
import csv
from io import (
BytesIO,
StringIO,
)
import pytest
from pandas.errors import ParserError
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
def test_default_separator(python_parser_only):
# see gh-17333
#
# csv.Sniffer in Python treats "o" as separator.
data = "aob\n1o2\n3o4"
parser = python_parser_only
expected = DataFrame({"a": [1, 3], "b": [2, 4]})
result = parser.read_csv(StringIO(data), sep=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True])
def test_invalid_skipfooter_non_int(python_parser_only, skipfooter):
    # see gh-15925 (comment)
    #
    # Non-integer skipfooter values (str, float, and bool) must be
    # rejected with a ValueError before any parsing happens.
    data = "a\n1\n2"
    parser = python_parser_only
    msg = "skipfooter must be an integer"
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), skipfooter=skipfooter)
def test_invalid_skipfooter_negative(python_parser_only):
    # see gh-15925 (comment)
    #
    # A negative skipfooter must be rejected with a ValueError.
    data = "a\n1\n2"
    parser = python_parser_only
    msg = "skipfooter cannot be negative"
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), skipfooter=-1)
@pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}])
def test_sniff_delimiter(python_parser_only, kwargs):
    # Sniffing the delimiter (sep=None) and passing it explicitly
    # (delimiter="|") must produce the same result.
    data = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
    parser = python_parser_only
    result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        columns=["A", "B", "C"],
        index=Index(["foo", "bar", "baz"], name="index"),
    )
    tm.assert_frame_equal(result, expected)
def test_sniff_delimiter_comment(python_parser_only):
data = """# comment line
index|A|B|C
# comment line
foo|1|2|3 # ignore | this
bar|4|5|6
baz|7|8|9
"""
parser = python_parser_only
result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=["A", "B", "C"],
index=Index(["foo", "bar", "baz"], name="index"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_sniff_delimiter_encoding(python_parser_only, encoding):
    # Delimiter sniffing (sep=None) combined with skiprows must work on
    # both plain text buffers and encoded byte streams.
    parser = python_parser_only
    data = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
    if encoding is not None:
        from io import TextIOWrapper

        # Feed the parser a text wrapper over encoded bytes to exercise
        # the decoding path.
        data = data.encode(encoding)
        data = BytesIO(data)
        data = TextIOWrapper(data, encoding=encoding)
    else:
        data = StringIO(data)
    result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        columns=["A", "B", "C"],
        index=Index(["foo", "bar", "baz"], name="index"),
    )
    tm.assert_frame_equal(result, expected)
def test_single_line(python_parser_only):
# see gh-6607: sniff separator
parser = python_parser_only
result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}])
def test_skipfooter(python_parser_only, kwargs):
    # see gh-6607
    #
    # Dropping the trailing junk via skipfooter=2 must match reading
    # only the first three rows via nrows=3.
    data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
    parser = python_parser_only
    result = parser.read_csv(StringIO(data), **kwargs)
    expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
)
def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
    # see gh-6607
    #
    # Reading a compressed file with a multi-char (regex) separator must
    # match reading the uncompressed comma-separated original.
    parser = python_parser_only

    with open(csv1, "rb") as f:
        data = f.read()

    data = data.replace(b",", b"::")
    expected = parser.read_csv(csv1)

    module = pytest.importorskip(compression)
    klass = getattr(module, klass)

    with tm.ensure_clean() as path:
        # Use a context manager so the compressed handle is closed even if
        # the write fails (the old explicit close leaked it on error).
        with klass(path, mode="wb") as tmp:
            tmp.write(data)

        result = parser.read_csv(path, sep="::", compression=compression)
        tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index(python_parser_only):
# see gh-6607
data = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
parser = python_parser_only
expected = DataFrame(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],
],
columns=["A", "B", "C", "D", "E"],
index=MultiIndex.from_tuples(
[("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)],
names=["one", "two", "three", "four"],
),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_read_csv_buglet_4x_multi_index2(python_parser_only):
# see gh-6893
data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9"
parser = python_parser_only
expected = DataFrame.from_records(
[(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list("abcABC"),
index=list("abc"),
)
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("add_footer", [True, False])
def test_skipfooter_with_decimal(python_parser_only, add_footer):
    # see gh-6971
    #
    # A custom decimal marker must still be applied when skipfooter is
    # used to drop a trailing line.
    data = "1#2\n3#4"
    parser = python_parser_only
    expected = DataFrame({"a": [1.2, 3.4]})
    if add_footer:
        # The stray footer line should not mess with the
        # casting of the first two lines if we skip it.
        kwargs = {"skipfooter": 1}
        data += "\nFooter"
    else:
        kwargs = {}
    result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"]
)
@pytest.mark.parametrize(
    "encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"]
)
def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):
    # see gh-3404
    #
    # Multi-character separators must work with non-UTF-8 encodings,
    # including multi-byte ones such as UTF-16/UTF-32.
    expected = DataFrame({"a": [1], "b": [2]})
    parser = python_parser_only
    data = "1" + sep + "2"
    encoded_data = data.encode(encoding)
    result = parser.read_csv(
        BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
def test_multi_char_sep_quotes(python_parser_only, quoting):
    # see gh-13374
    #
    # Multi-char separators are incompatible with quoting: with quoting
    # disabled the quoted field splits into too many columns; with quoting
    # enabled the combination is rejected outright.
    kwargs = {"sep": ",,"}
    parser = python_parser_only
    data = 'a,,b\n1,,a\n2,,"2,,b"'
    if quoting == csv.QUOTE_NONE:
        msg = "Expected 2 fields in line 3, saw 3"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
    else:
        msg = "ignored when a multi-char delimiter is used"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), quoting=quoting, **kwargs)
def test_none_delimiter(python_parser_only, capsys):
    # see gh-13374 and gh-17465
    #
    # on_bad_lines="warn" must skip the malformed row and report it on
    # stderr instead of raising.
    parser = python_parser_only
    data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
    expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]})
    # We expect the third line in the data to be
    # skipped because it is malformed, but we do
    # not expect any errors to occur.
    result = parser.read_csv(StringIO(data), header=0, sep=None, on_bad_lines="warn")
    tm.assert_frame_equal(result, expected)
    captured = capsys.readouterr()
    assert "Skipping line 3" in captured.err
@pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz'])
@pytest.mark.parametrize("skipfooter", [0, 1])
def test_skipfooter_bad_row(python_parser_only, data, skipfooter):
    # see gh-13879 and gh-15910
    #
    # Malformed quoting must raise ParserError whether the bad row falls
    # inside the parsed data or inside the skipped footer rows; only the
    # error message differs.
    parser = python_parser_only
    if skipfooter:
        msg = "parsing errors in the skipped footer rows"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), skipfooter=skipfooter)
    else:
        msg = "unexpected end of data|expected after"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), skipfooter=skipfooter)
def test_malformed_skipfooter(python_parser_only):
    # A row with too many fields must still raise ParserError when
    # header, comment, and skipfooter are combined.
    parser = python_parser_only
    data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
    msg = "Expected 3 fields in line 4, saw 5"
    with pytest.raises(ParserError, match=msg):
        parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)
| |
"""Support for interacting with Spotify Connect."""
from __future__ import annotations
from asyncio import run_coroutine_threadsafe
import datetime as dt
from datetime import timedelta
import logging
import requests
from spotipy import Spotify, SpotifyException
from yarl import URL
from homeassistant.components.media_player import BrowseMedia, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_CLASS_ALBUM,
MEDIA_CLASS_ARTIST,
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_EPISODE,
MEDIA_CLASS_GENRE,
MEDIA_CLASS_PLAYLIST,
MEDIA_CLASS_PODCAST,
MEDIA_CLASS_TRACK,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TRACK,
REPEAT_MODE_ALL,
REPEAT_MODE_OFF,
REPEAT_MODE_ONE,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_VOLUME_SET,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ID,
CONF_NAME,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.config_entry_oauth2_flow import OAuth2Session
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.dt import utc_from_timestamp
from .const import (
DATA_SPOTIFY_CLIENT,
DATA_SPOTIFY_ME,
DATA_SPOTIFY_SESSION,
DOMAIN,
SPOTIFY_SCOPES,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
SUPPORT_SPOTIFY = (
SUPPORT_BROWSE_MEDIA
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_REPEAT_SET
| SUPPORT_SEEK
| SUPPORT_SELECT_SOURCE
| SUPPORT_SHUFFLE_SET
| SUPPORT_VOLUME_SET
)
REPEAT_MODE_MAPPING_TO_HA = {
"context": REPEAT_MODE_ALL,
"off": REPEAT_MODE_OFF,
"track": REPEAT_MODE_ONE,
}
REPEAT_MODE_MAPPING_TO_SPOTIFY = {
value: key for key, value in REPEAT_MODE_MAPPING_TO_HA.items()
}
BROWSE_LIMIT = 48
MEDIA_TYPE_SHOW = "show"
PLAYABLE_MEDIA_TYPES = [
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_ALBUM,
MEDIA_TYPE_ARTIST,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_SHOW,
MEDIA_TYPE_TRACK,
]
LIBRARY_MAP = {
"current_user_playlists": "Playlists",
"current_user_followed_artists": "Artists",
"current_user_saved_albums": "Albums",
"current_user_saved_tracks": "Tracks",
"current_user_saved_shows": "Podcasts",
"current_user_recently_played": "Recently played",
"current_user_top_artists": "Top Artists",
"current_user_top_tracks": "Top Tracks",
"categories": "Categories",
"featured_playlists": "Featured Playlists",
"new_releases": "New Releases",
}
CONTENT_TYPE_MEDIA_CLASS = {
"current_user_playlists": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_PLAYLIST,
},
"current_user_followed_artists": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_ARTIST,
},
"current_user_saved_albums": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_ALBUM,
},
"current_user_saved_tracks": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_TRACK,
},
"current_user_saved_shows": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_PODCAST,
},
"current_user_recently_played": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_TRACK,
},
"current_user_top_artists": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_ARTIST,
},
"current_user_top_tracks": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_TRACK,
},
"featured_playlists": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_PLAYLIST,
},
"categories": {"parent": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_GENRE},
"category_playlists": {
"parent": MEDIA_CLASS_DIRECTORY,
"children": MEDIA_CLASS_PLAYLIST,
},
"new_releases": {"parent": MEDIA_CLASS_DIRECTORY, "children": MEDIA_CLASS_ALBUM},
MEDIA_TYPE_PLAYLIST: {
"parent": MEDIA_CLASS_PLAYLIST,
"children": MEDIA_CLASS_TRACK,
},
MEDIA_TYPE_ALBUM: {"parent": MEDIA_CLASS_ALBUM, "children": MEDIA_CLASS_TRACK},
MEDIA_TYPE_ARTIST: {"parent": MEDIA_CLASS_ARTIST, "children": MEDIA_CLASS_ALBUM},
MEDIA_TYPE_EPISODE: {"parent": MEDIA_CLASS_EPISODE, "children": None},
MEDIA_TYPE_SHOW: {"parent": MEDIA_CLASS_PODCAST, "children": MEDIA_CLASS_EPISODE},
MEDIA_TYPE_TRACK: {"parent": MEDIA_CLASS_TRACK, "children": None},
}
class MissingMediaInformation(BrowseError):
    """Missing media required information."""
    # Raised by item_payload() when a Spotify item lacks the "type" or
    # "uri" keys needed to build a browse entry.
class UnknownMediaType(BrowseError):
    """Unknown media type."""
    # Raised by item_payload() when an item's type has no entry in
    # CONTENT_TYPE_MEDIA_CLASS.
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Spotify based on a config entry."""
    domain_data = hass.data[DOMAIN][entry.entry_id]
    media_player = SpotifyMediaPlayer(
        domain_data[DATA_SPOTIFY_SESSION],
        domain_data[DATA_SPOTIFY_CLIENT],
        domain_data[DATA_SPOTIFY_ME],
        entry.data[CONF_ID],
        entry.data[CONF_NAME],
    )
    # True -> trigger an immediate state update after adding.
    async_add_entities([media_player], True)
def spotify_exception_handler(func):
    """Decorate Spotify calls to handle Spotify exception.

    A decorator that wraps the passed in function, catches Spotify errors,
    aiohttp exceptions and handles the availability of the media player.
    """
    from functools import wraps

    @wraps(func)  # Preserve the wrapped method's name/docstring for debugging.
    def wrapper(self, *args, **kwargs):
        try:
            result = func(self, *args, **kwargs)
            self._attr_available = True
            return result
        except requests.RequestException:
            # Transport-level failure: mark unavailable, swallow the error.
            self._attr_available = False
        except SpotifyException as exc:
            self._attr_available = False
            if exc.reason == "NO_ACTIVE_DEVICE":
                raise HomeAssistantError("No active playback device found") from None

    return wrapper
class SpotifyMediaPlayer(MediaPlayerEntity):
    """Representation of a Spotify controller."""

    _attr_icon = "mdi:spotify"
    _attr_media_content_type = MEDIA_TYPE_MUSIC
    _attr_media_image_remotely_accessible = False

    def __init__(
        self,
        session: OAuth2Session,
        spotify: Spotify,
        me: dict,
        user_id: str,
        name: str,
    ) -> None:
        """Initialize."""
        self._id = user_id
        self._me = me
        self._name = f"Spotify {name}"
        self._session = session
        self._spotify = spotify
        # True when the OAuth token covers every scope this integration
        # needs; media browsing is refused otherwise (see async_browse_media).
        self._scope_ok = set(session.token["scope"].split(" ")).issuperset(
            SPOTIFY_SCOPES
        )
        # Last current_playback() payload; empty dict while nothing is known.
        self._currently_playing: dict | None = {}
        # Devices reported by the Spotify API; refreshed on every update().
        self._devices: list[dict] | None = []
        # Full playlist object for the current context, when playing a playlist.
        self._playlist: dict | None = None
        self._attr_name = self._name
        self._attr_unique_id = user_id

    @property
    def device_info(self) -> DeviceInfo:
        """Return device information about this entity."""
        model = "Spotify Free"
        if self._me is not None:
            product = self._me["product"]
            model = f"Spotify {product}"
        return {
            "identifiers": {(DOMAIN, self._id)},
            "manufacturer": "Spotify AB",
            "model": model,
            "name": self._name,
        }

    @property
    def state(self) -> str | None:
        """Return the playback state."""
        if not self._currently_playing:
            return STATE_IDLE
        if self._currently_playing["is_playing"]:
            return STATE_PLAYING
        return STATE_PAUSED

    @property
    def volume_level(self) -> float | None:
        """Return the device volume."""
        # Spotify reports percent (0-100); HA expects a 0.0-1.0 fraction.
        return self._currently_playing.get("device", {}).get("volume_percent", 0) / 100

    @property
    def media_content_id(self) -> str | None:
        """Return the media URL."""
        item = self._currently_playing.get("item") or {}
        return item.get("uri")

    @property
    def media_duration(self) -> float | None:
        """Duration of current playing media in seconds."""
        if self._currently_playing.get("item") is None:
            return None
        return self._currently_playing["item"]["duration_ms"] / 1000

    @property
    def media_position(self) -> float | None:
        """Position of current playing media in seconds."""
        if not self._currently_playing:
            return None
        return self._currently_playing["progress_ms"] / 1000

    @property
    def media_position_updated_at(self) -> dt.datetime | None:
        """When was the position of the current playing media valid."""
        if not self._currently_playing:
            return None
        # Spotify timestamps are in milliseconds since the epoch.
        return utc_from_timestamp(self._currently_playing["timestamp"] / 1000)

    @property
    def media_image_url(self) -> str | None:
        """Return the media image URL."""
        if (
            self._currently_playing.get("item") is None
            or not self._currently_playing["item"]["album"]["images"]
        ):
            return None
        return fetch_image_url(self._currently_playing["item"]["album"])

    @property
    def media_title(self) -> str | None:
        """Return the media title."""
        item = self._currently_playing.get("item") or {}
        return item.get("name")

    @property
    def media_artist(self) -> str | None:
        """Return the media artist."""
        if self._currently_playing.get("item") is None:
            return None
        return ", ".join(
            artist["name"] for artist in self._currently_playing["item"]["artists"]
        )

    @property
    def media_album_name(self) -> str | None:
        """Return the media album."""
        if self._currently_playing.get("item") is None:
            return None
        return self._currently_playing["item"]["album"]["name"]

    @property
    def media_track(self) -> int | None:
        """Track number of current playing media, music track only."""
        item = self._currently_playing.get("item") or {}
        return item.get("track_number")

    @property
    def media_playlist(self):
        """Title of Playlist currently playing."""
        if self._playlist is None:
            return None
        return self._playlist["name"]

    @property
    def source(self) -> str | None:
        """Return the current playback device."""
        return self._currently_playing.get("device", {}).get("name")

    @property
    def source_list(self) -> list[str] | None:
        """Return a list of source devices."""
        if not self._devices:
            return None
        return [device["name"] for device in self._devices]

    @property
    def shuffle(self) -> bool:
        """Shuffling state."""
        return bool(self._currently_playing.get("shuffle_state"))

    @property
    def repeat(self) -> str | None:
        """Return current repeat mode."""
        repeat_state = self._currently_playing.get("repeat_state")
        return REPEAT_MODE_MAPPING_TO_HA.get(repeat_state)

    @property
    def supported_features(self) -> int:
        """Return the media player features that are supported."""
        # Non-premium accounts get no playback controls at all.
        if self._me["product"] != "premium":
            return 0
        return SUPPORT_SPOTIFY

    @spotify_exception_handler
    def set_volume_level(self, volume: float) -> None:
        """Set the volume level."""
        # HA passes a 0.0-1.0 fraction; Spotify expects an int percent.
        self._spotify.volume(int(volume * 100))

    @spotify_exception_handler
    def media_play(self) -> None:
        """Start or resume playback."""
        self._spotify.start_playback()

    @spotify_exception_handler
    def media_pause(self) -> None:
        """Pause playback."""
        self._spotify.pause_playback()

    @spotify_exception_handler
    def media_previous_track(self) -> None:
        """Skip to previous track."""
        self._spotify.previous_track()

    @spotify_exception_handler
    def media_next_track(self) -> None:
        """Skip to next track."""
        self._spotify.next_track()

    @spotify_exception_handler
    def media_seek(self, position):
        """Send seek command."""
        # Position arrives in seconds; Spotify expects milliseconds.
        self._spotify.seek_track(int(position * 1000))

    @spotify_exception_handler
    def play_media(self, media_type: str, media_id: str, **kwargs) -> None:
        """Play media."""
        # Incoming service kwargs are discarded; the playback arguments
        # are rebuilt from scratch below.
        kwargs = {}
        # Spotify can't handle URI's with query strings or anchors
        # Yet, they do generate those types of URI in their official clients.
        media_id = str(URL(media_id).with_query(None).with_fragment(None))
        if media_type in (MEDIA_TYPE_TRACK, MEDIA_TYPE_EPISODE, MEDIA_TYPE_MUSIC):
            kwargs["uris"] = [media_id]
        elif media_type in PLAYABLE_MEDIA_TYPES:
            kwargs["context_uri"] = media_id
        else:
            _LOGGER.error("Media type %s is not supported", media_type)
            return
        # Without an active device, fall back to the first known device.
        if not self._currently_playing.get("device") and self._devices:
            kwargs["device_id"] = self._devices[0].get("id")
        self._spotify.start_playback(**kwargs)

    @spotify_exception_handler
    def select_source(self, source: str) -> None:
        """Select playback device."""
        for device in self._devices:
            if device["name"] == source:
                # Keep playing only if we are currently playing.
                self._spotify.transfer_playback(
                    device["id"], self.state == STATE_PLAYING
                )
                return

    @spotify_exception_handler
    def set_shuffle(self, shuffle: bool) -> None:
        """Enable/Disable shuffle mode."""
        self._spotify.shuffle(shuffle)

    @spotify_exception_handler
    def set_repeat(self, repeat: str) -> None:
        """Set repeat mode."""
        if repeat not in REPEAT_MODE_MAPPING_TO_SPOTIFY:
            raise ValueError(f"Unsupported repeat mode: {repeat}")
        self._spotify.repeat(REPEAT_MODE_MAPPING_TO_SPOTIFY[repeat])

    @spotify_exception_handler
    def update(self) -> None:
        """Update state and attributes."""
        if not self.enabled:
            return
        # Refresh the OAuth token (and rebuild the client) when needed.
        if not self._session.valid_token or self._spotify is None:
            run_coroutine_threadsafe(
                self._session.async_ensure_token_valid(), self.hass.loop
            ).result()
            self._spotify = Spotify(auth=self._session.token["access_token"])
        current = self._spotify.current_playback()
        self._currently_playing = current or {}
        # Resolve the full playlist object when playing a playlist context.
        self._playlist = None
        context = self._currently_playing.get("context")
        if context is not None and context["type"] == MEDIA_TYPE_PLAYLIST:
            self._playlist = self._spotify.playlist(current["context"]["uri"])
        devices = self._spotify.devices() or {}
        self._devices = devices.get("devices", [])

    async def async_browse_media(self, media_content_type=None, media_content_id=None):
        """Implement the websocket media browsing helper."""
        if not self._scope_ok:
            _LOGGER.debug(
                "Spotify scopes are not set correctly, this can impact features such as media browsing"
            )
            raise NotImplementedError
        if media_content_type in [None, "library"]:
            return await self.hass.async_add_executor_job(library_payload)
        payload = {
            "media_content_type": media_content_type,
            "media_content_id": media_content_id,
        }
        # Spotipy calls are blocking, so run them in the executor.
        response = await self.hass.async_add_executor_job(
            build_item_response, self._spotify, self._me, payload
        )
        if response is None:
            raise BrowseError(
                f"Media not found: {media_content_type} / {media_content_id}"
            )
        return response
def build_item_response(spotify, user, payload):  # noqa: C901
    """Create response payload for the provided media query.

    Returns a BrowseMedia tree for *payload*'s content type/id, or None
    when the type is unknown or the Spotify API returned nothing.
    """
    media_content_type = payload["media_content_type"]
    media_content_id = payload["media_content_id"]
    # title/image may be filled by type-specific branches below; otherwise
    # they are derived from the media object or LIBRARY_MAP at the end.
    title = None
    image = None
    if media_content_type == "current_user_playlists":
        media = spotify.current_user_playlists(limit=BROWSE_LIMIT)
        items = media.get("items", [])
    elif media_content_type == "current_user_followed_artists":
        media = spotify.current_user_followed_artists(limit=BROWSE_LIMIT)
        items = media.get("artists", {}).get("items", [])
    elif media_content_type == "current_user_saved_albums":
        media = spotify.current_user_saved_albums(limit=BROWSE_LIMIT)
        items = [item["album"] for item in media.get("items", [])]
    elif media_content_type == "current_user_saved_tracks":
        media = spotify.current_user_saved_tracks(limit=BROWSE_LIMIT)
        items = [item["track"] for item in media.get("items", [])]
    elif media_content_type == "current_user_saved_shows":
        media = spotify.current_user_saved_shows(limit=BROWSE_LIMIT)
        items = [item["show"] for item in media.get("items", [])]
    elif media_content_type == "current_user_recently_played":
        media = spotify.current_user_recently_played(limit=BROWSE_LIMIT)
        items = [item["track"] for item in media.get("items", [])]
    elif media_content_type == "current_user_top_artists":
        media = spotify.current_user_top_artists(limit=BROWSE_LIMIT)
        items = media.get("items", [])
    elif media_content_type == "current_user_top_tracks":
        media = spotify.current_user_top_tracks(limit=BROWSE_LIMIT)
        items = media.get("items", [])
    elif media_content_type == "featured_playlists":
        media = spotify.featured_playlists(country=user["country"], limit=BROWSE_LIMIT)
        items = media.get("playlists", {}).get("items", [])
    elif media_content_type == "categories":
        media = spotify.categories(country=user["country"], limit=BROWSE_LIMIT)
        items = media.get("categories", {}).get("items", [])
    elif media_content_type == "category_playlists":
        media = spotify.category_playlists(
            category_id=media_content_id,
            country=user["country"],
            limit=BROWSE_LIMIT,
        )
        # Fetch the category itself for its display name and icon.
        category = spotify.category(media_content_id, country=user["country"])
        title = category.get("name")
        image = fetch_image_url(category, key="icons")
        items = media.get("playlists", {}).get("items", [])
    elif media_content_type == "new_releases":
        media = spotify.new_releases(country=user["country"], limit=BROWSE_LIMIT)
        items = media.get("albums", {}).get("items", [])
    elif media_content_type == MEDIA_TYPE_PLAYLIST:
        media = spotify.playlist(media_content_id)
        items = [item["track"] for item in media.get("tracks", {}).get("items", [])]
    elif media_content_type == MEDIA_TYPE_ALBUM:
        media = spotify.album(media_content_id)
        items = media.get("tracks", {}).get("items", [])
    elif media_content_type == MEDIA_TYPE_ARTIST:
        media = spotify.artist_albums(media_content_id, limit=BROWSE_LIMIT)
        # The albums listing lacks the artist's name/image; fetch them.
        artist = spotify.artist(media_content_id)
        title = artist.get("name")
        image = fetch_image_url(artist)
        items = media.get("items", [])
    elif media_content_type == MEDIA_TYPE_SHOW:
        media = spotify.show_episodes(media_content_id, limit=BROWSE_LIMIT)
        # The episodes listing lacks the show's name/image; fetch them.
        show = spotify.show(media_content_id)
        title = show.get("name")
        image = fetch_image_url(show)
        items = media.get("items", [])
    else:
        media = None
        items = []
    if media is None:
        return None
    try:
        media_class = CONTENT_TYPE_MEDIA_CLASS[media_content_type]
    except KeyError:
        _LOGGER.debug("Unknown media type received: %s", media_content_type)
        return None
    if media_content_type == "categories":
        # Categories need special handling since the items are not media.
        media_item = BrowseMedia(
            title=LIBRARY_MAP.get(media_content_id),
            media_class=media_class["parent"],
            children_media_class=media_class["children"],
            media_content_id=media_content_id,
            media_content_type=media_content_type,
            can_play=False,
            can_expand=True,
            children=[],
        )
        for item in items:
            try:
                item_id = item["id"]
            except KeyError:
                _LOGGER.debug("Missing ID for media item: %s", item)
                continue
            media_item.children.append(
                BrowseMedia(
                    title=item.get("name"),
                    media_class=MEDIA_CLASS_PLAYLIST,
                    children_media_class=MEDIA_CLASS_TRACK,
                    media_content_id=item_id,
                    media_content_type="category_playlists",
                    thumbnail=fetch_image_url(item, key="icons"),
                    can_play=False,
                    can_expand=True,
                )
            )
        return media_item
    if title is None:
        # Fall back to the media object's own name, then the library label.
        if "name" in media:
            title = media.get("name")
        else:
            title = LIBRARY_MAP.get(payload["media_content_id"])
    params = {
        "title": title,
        "media_class": media_class["parent"],
        "children_media_class": media_class["children"],
        "media_content_id": media_content_id,
        "media_content_type": media_content_type,
        "can_play": media_content_type in PLAYABLE_MEDIA_TYPES,
        "children": [],
        "can_expand": True,
    }
    for item in items:
        try:
            params["children"].append(item_payload(item))
        except (MissingMediaInformation, UnknownMediaType):
            # Skip items that can't be represented; keep the rest.
            continue
    if "images" in media:
        params["thumbnail"] = fetch_image_url(media)
    elif image:
        params["thumbnail"] = image
    return BrowseMedia(**params)
def item_payload(item):
    """
    Create response payload for a single media item.

    Used by async_browse_media.
    """
    try:
        media_type, media_id = item["type"], item["uri"]
    except KeyError as err:
        _LOGGER.debug("Missing type or URI for media item: %s", item)
        raise MissingMediaInformation from err

    try:
        media_class = CONTENT_TYPE_MEDIA_CLASS[media_type]
    except KeyError as err:
        _LOGGER.debug("Unknown media type received: %s", media_type)
        raise UnknownMediaType from err

    payload = {
        "title": item.get("name"),
        "media_class": media_class["parent"],
        "children_media_class": media_class["children"],
        "media_content_id": media_id,
        "media_content_type": media_type,
        "can_play": media_type in PLAYABLE_MEDIA_TYPES,
        # Tracks and episodes are leaf nodes; everything else can expand.
        "can_expand": media_type not in [MEDIA_TYPE_TRACK, MEDIA_TYPE_EPISODE],
    }

    if "images" in item:
        payload["thumbnail"] = fetch_image_url(item)
    elif MEDIA_TYPE_ALBUM in item:
        # Tracks carry their artwork on the nested album object.
        payload["thumbnail"] = fetch_image_url(item[MEDIA_TYPE_ALBUM])

    return BrowseMedia(**payload)
def library_payload():
    """Build the top-level "Media Library" BrowseMedia node.

    Used by async_browse_media. Children are synthesized from LIBRARY_MAP,
    where each library entry uses its type string as its URI.
    """
    children = [
        item_payload({"name": name, "type": media_type, "uri": media_type})
        for media_type, name in LIBRARY_MAP.items()
    ]
    library = BrowseMedia(
        title="Media Library",
        media_class=MEDIA_CLASS_DIRECTORY,
        media_content_id="library",
        media_content_type="library",
        can_play=False,
        can_expand=True,
        children=children,
    )
    library.children_media_class = MEDIA_CLASS_DIRECTORY
    return library
def fetch_image_url(item, key="images"):
    """Return the URL of the first image under *key*, or None when empty.

    Missing key counts as an empty image list. A first entry without a
    "url" key yields None as well.
    """
    images = item.get(key, [])
    try:
        first = images[0]
    except IndexError:
        return None
    return first.get("url")
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/AuditEvent) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
# NOTE: machine-generated from the FHIR StructureDefinition (see header);
# keep manual edits minimal so regeneration stays cheap.
class AuditEvent(domainresource.DomainResource):
    """ Event record kept for security purposes.
    A record of an event made for purposes of maintaining a security log.
    Typical uses include detection of intrusion attempts and monitoring for
    inappropriate usage.
    """
    resource_name = "AuditEvent"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.event = None
        """ What was done.
        Type `AuditEventEvent` (represented as `dict` in JSON). """
        self.object = None
        """ Specific instances of data or objects that have been accessed.
        List of `AuditEventObject` items (represented as `dict` in JSON). """
        self.participant = None
        """ A person, a hardware device or software process.
        List of `AuditEventParticipant` items (represented as `dict` in JSON). """
        self.source = None
        """ Application systems and processes.
        Type `AuditEventSource` (represented as `dict` in JSON). """
        super(AuditEvent, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization.

        Each tuple appears to be (attribute name, JSON key, type, is-list,
        "one of many" group, required) -- confirm against
        FHIRAbstractBase.elementProperties.
        """
        js = super(AuditEvent, self).elementProperties()
        js.extend([
            ("event", "event", AuditEventEvent, False, None, True),
            ("object", "object", AuditEventObject, True, None, False),
            ("participant", "participant", AuditEventParticipant, True, None, True),
            ("source", "source", AuditEventSource, False, None, True),
        ])
        return js
from . import backboneelement
class AuditEventEvent(backboneelement.BackboneElement):
    """ What was done.
    Identifies the name, action type, time, and disposition of the audited
    event.
    """
    resource_name = "AuditEventEvent"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None
        """ Type of action performed during the event.
        Type `str`. """
        self.dateTime = None
        """ Time when the event occurred on source.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.outcome = None
        """ Whether the event succeeded or failed.
        Type `str`. """
        self.outcomeDesc = None
        """ Description of the event outcome.
        Type `str`. """
        self.purposeOfEvent = None
        """ The purposeOfUse of the event.
        List of `Coding` items (represented as `dict` in JSON). """
        self.subtype = None
        """ More specific type/id for the event.
        List of `Coding` items (represented as `dict` in JSON). """
        self.type = None
        """ Type/identifier of event.
        Type `Coding` (represented as `dict` in JSON). """
        super(AuditEventEvent, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization."""
        js = super(AuditEventEvent, self).elementProperties()
        js.extend([
            ("action", "action", str, False, None, False),
            ("dateTime", "dateTime", fhirdate.FHIRDate, False, None, True),
            ("outcome", "outcome", str, False, None, False),
            ("outcomeDesc", "outcomeDesc", str, False, None, False),
            ("purposeOfEvent", "purposeOfEvent", coding.Coding, True, None, False),
            ("subtype", "subtype", coding.Coding, True, None, False),
            ("type", "type", coding.Coding, False, None, True),
        ])
        return js
class AuditEventObject(backboneelement.BackboneElement):
    """ Specific instances of data or objects that have been accessed.
    """
    resource_name = "AuditEventObject"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.description = None
        """ Descriptive text.
        Type `str`. """
        self.detail = None
        """ Additional Information about the Object.
        List of `AuditEventObjectDetail` items (represented as `dict` in JSON). """
        self.identifier = None
        """ Specific instance of object (e.g. versioned).
        Type `Identifier` (represented as `dict` in JSON). """
        self.lifecycle = None
        """ Life-cycle stage for the object.
        Type `Coding` (represented as `dict` in JSON). """
        self.name = None
        """ Instance-specific descriptor for Object.
        Type `str`. """
        self.query = None
        """ Actual query for object.
        Type `str`. """
        self.reference = None
        """ Specific instance of resource (e.g. versioned).
        Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
        self.role = None
        """ What role the Object played.
        Type `Coding` (represented as `dict` in JSON). """
        self.securityLabel = None
        """ Security labels applied to the object.
        List of `Coding` items (represented as `dict` in JSON). """
        self.type = None
        """ Type of object involved.
        Type `Coding` (represented as `dict` in JSON). """
        super(AuditEventObject, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization."""
        js = super(AuditEventObject, self).elementProperties()
        js.extend([
            ("description", "description", str, False, None, False),
            ("detail", "detail", AuditEventObjectDetail, True, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("lifecycle", "lifecycle", coding.Coding, False, None, False),
            ("name", "name", str, False, None, False),
            ("query", "query", str, False, None, False),
            ("reference", "reference", fhirreference.FHIRReference, False, None, False),
            ("role", "role", coding.Coding, False, None, False),
            ("securityLabel", "securityLabel", coding.Coding, True, None, False),
            ("type", "type", coding.Coding, False, None, False),
        ])
        return js
class AuditEventObjectDetail(backboneelement.BackboneElement):
    """ Additional Information about the Object.
    """
    resource_name = "AuditEventObjectDetail"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.type = None
        """ Name of the property.
        Type `str`. """
        self.value = None
        """ Property value.
        Type `str`. """
        super(AuditEventObjectDetail, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization."""
        js = super(AuditEventObjectDetail, self).elementProperties()
        js.extend([
            ("type", "type", str, False, None, True),
            ("value", "value", str, False, None, True),
        ])
        return js
class AuditEventParticipant(backboneelement.BackboneElement):
    """ A person, a hardware device or software process.
    """
    resource_name = "AuditEventParticipant"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.altId = None
        """ Alternative User id e.g. authentication.
        Type `str`. """
        self.location = None
        """ Where.
        Type `FHIRReference` referencing `Location` (represented as `dict` in JSON). """
        self.media = None
        """ Type of media.
        Type `Coding` (represented as `dict` in JSON). """
        self.name = None
        """ Human-meaningful name for the user.
        Type `str`. """
        self.network = None
        """ Logical network location for application activity.
        Type `AuditEventParticipantNetwork` (represented as `dict` in JSON). """
        self.policy = None
        """ Policy that authorized event.
        List of `str` items. """
        self.purposeOfUse = None
        """ Reason given for this user.
        List of `Coding` items (represented as `dict` in JSON). """
        self.reference = None
        """ Direct reference to resource.
        Type `FHIRReference` referencing `Practitioner, Organization, Device, Patient, RelatedPerson` (represented as `dict` in JSON). """
        self.requestor = None
        """ Whether user is initiator.
        Type `bool`. """
        self.role = None
        """ User roles (e.g. local RBAC codes).
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.userId = None
        """ Unique identifier for the user.
        Type `Identifier` (represented as `dict` in JSON). """
        super(AuditEventParticipant, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization."""
        js = super(AuditEventParticipant, self).elementProperties()
        js.extend([
            ("altId", "altId", str, False, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, False),
            ("media", "media", coding.Coding, False, None, False),
            ("name", "name", str, False, None, False),
            ("network", "network", AuditEventParticipantNetwork, False, None, False),
            ("policy", "policy", str, True, None, False),
            ("purposeOfUse", "purposeOfUse", coding.Coding, True, None, False),
            ("reference", "reference", fhirreference.FHIRReference, False, None, False),
            ("requestor", "requestor", bool, False, None, True),
            ("role", "role", codeableconcept.CodeableConcept, True, None, False),
            ("userId", "userId", identifier.Identifier, False, None, False),
        ])
        return js
class AuditEventParticipantNetwork(backboneelement.BackboneElement):
    """ Logical network location for application activity.
    Logical network location for application activity, if the activity has a
    network location.
    """
    resource_name = "AuditEventParticipantNetwork"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.address = None
        """ Identifier for the network access point of the user device.
        Type `str`. """
        self.type = None
        """ The type of network access point.
        Type `str`. """
        super(AuditEventParticipantNetwork, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization."""
        js = super(AuditEventParticipantNetwork, self).elementProperties()
        js.extend([
            ("address", "address", str, False, None, False),
            ("type", "type", str, False, None, False),
        ])
        return js
class AuditEventSource(backboneelement.BackboneElement):
    """ Application systems and processes.
    """
    resource_name = "AuditEventSource"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.identifier = None
        """ The identity of source detecting the event.
        Type `Identifier` (represented as `dict` in JSON). """
        self.site = None
        """ Logical source location within the enterprise.
        Type `str`. """
        self.type = None
        """ The type of source where event originated.
        List of `Coding` items (represented as `dict` in JSON). """
        super(AuditEventSource, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property descriptors used for JSON (de)serialization."""
        js = super(AuditEventSource, self).elementProperties()
        js.extend([
            ("identifier", "identifier", identifier.Identifier, False, None, True),
            ("site", "site", str, False, None, False),
            ("type", "type", coding.Coding, True, None, False),
        ])
        return js
from . import codeableconcept
from . import coding
from . import fhirdate
from . import fhirreference
from . import identifier
| |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from os_win import exceptions as os_win_exc
from oslo_utils import units
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import migrationops
class MigrationOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V MigrationOps class."""
_FAKE_DISK = 'fake_disk'
_FAKE_TIMEOUT = 10
_FAKE_RETRY_INTERVAL = 5
def setUp(self):
super(MigrationOpsTestCase, self).setUp()
self.context = 'fake-context'
self._migrationops = migrationops.MigrationOps()
self._migrationops._hostutils = mock.MagicMock()
self._migrationops._vmops = mock.MagicMock()
self._migrationops._vmutils = mock.MagicMock()
self._migrationops._pathutils = mock.Mock()
self._migrationops._vhdutils = mock.MagicMock()
self._migrationops._pathutils = mock.MagicMock()
self._migrationops._volumeops = mock.MagicMock()
self._migrationops._imagecache = mock.MagicMock()
self._migrationops._block_dev_man = mock.MagicMock()
def _check_migrate_disk_files(self, shared_storage=False):
instance_path = 'fake/instance/path'
dest_instance_path = 'remote/instance/path'
self._migrationops._pathutils.get_instance_dir.side_effect = (
instance_path, dest_instance_path)
get_revert_dir = (
self._migrationops._pathutils.get_instance_migr_revert_dir)
check_shared_storage = (
self._migrationops._pathutils.check_dirs_shared_storage)
check_shared_storage.return_value = shared_storage
self._migrationops._pathutils.exists.return_value = True
fake_disk_files = [os.path.join(instance_path, disk_name)
for disk_name in
['root.vhd', 'configdrive.vhd', 'configdrive.iso',
'eph0.vhd', 'eph1.vhdx']]
expected_get_dir = [mock.call(mock.sentinel.instance_name),
mock.call(mock.sentinel.instance_name,
mock.sentinel.dest_path)]
expected_move_calls = [mock.call(instance_path,
get_revert_dir.return_value)]
self._migrationops._migrate_disk_files(
instance_name=mock.sentinel.instance_name,
disk_files=fake_disk_files,
dest=mock.sentinel.dest_path)
self._migrationops._pathutils.exists.assert_called_once_with(
dest_instance_path)
check_shared_storage.assert_called_once_with(
instance_path, dest_instance_path)
get_revert_dir.assert_called_with(mock.sentinel.instance_name,
remove_dir=True, create_dir=True)
if shared_storage:
fake_dest_path = '%s_tmp' % instance_path
expected_move_calls.append(mock.call(fake_dest_path,
instance_path))
self._migrationops._pathutils.rmtree.assert_called_once_with(
fake_dest_path)
else:
fake_dest_path = dest_instance_path
self._migrationops._pathutils.makedirs.assert_called_once_with(
fake_dest_path)
check_remove_dir = self._migrationops._pathutils.check_remove_dir
check_remove_dir.assert_called_once_with(fake_dest_path)
self._migrationops._pathutils.get_instance_dir.assert_has_calls(
expected_get_dir)
self._migrationops._pathutils.copy.assert_has_calls(
mock.call(fake_disk_file, fake_dest_path)
for fake_disk_file in fake_disk_files)
self.assertEqual(len(fake_disk_files),
self._migrationops._pathutils.copy.call_count)
self._migrationops._pathutils.move_folder_files.assert_has_calls(
expected_move_calls)
def test_migrate_disk_files(self):
self._check_migrate_disk_files()
def test_migrate_disk_files_same_host(self):
self._check_migrate_disk_files(shared_storage=True)
@mock.patch.object(migrationops.MigrationOps,
'_cleanup_failed_disk_migration')
def test_migrate_disk_files_exception(self, mock_cleanup):
instance_path = 'fake/instance/path'
fake_dest_path = '%s_tmp' % instance_path
self._migrationops._pathutils.get_instance_dir.return_value = (
instance_path)
get_revert_dir = (
self._migrationops._pathutils.get_instance_migr_revert_dir)
self._migrationops._hostutils.get_local_ips.return_value = [
mock.sentinel.dest_path]
self._migrationops._pathutils.copy.side_effect = IOError(
"Expected exception.")
self.assertRaises(IOError, self._migrationops._migrate_disk_files,
instance_name=mock.sentinel.instance_name,
disk_files=[self._FAKE_DISK],
dest=mock.sentinel.dest_path)
mock_cleanup.assert_called_once_with(instance_path,
get_revert_dir.return_value,
fake_dest_path)
def test_cleanup_failed_disk_migration(self):
self._migrationops._pathutils.exists.return_value = True
self._migrationops._cleanup_failed_disk_migration(
instance_path=mock.sentinel.instance_path,
revert_path=mock.sentinel.revert_path,
dest_path=mock.sentinel.dest_path)
expected = [mock.call(mock.sentinel.dest_path),
mock.call(mock.sentinel.revert_path)]
self._migrationops._pathutils.exists.assert_has_calls(expected)
move_folder_files = self._migrationops._pathutils.move_folder_files
move_folder_files.assert_called_once_with(
mock.sentinel.revert_path, mock.sentinel.instance_path)
self._migrationops._pathutils.rmtree.assert_has_calls([
mock.call(mock.sentinel.dest_path),
mock.call(mock.sentinel.revert_path)])
def test_check_target_flavor(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.flavor.root_gb = 1
mock_flavor = mock.MagicMock(root_gb=0)
self.assertRaises(exception.InstanceFaultRollback,
self._migrationops._check_target_flavor,
mock_instance, mock_flavor)
def test_check_and_attach_config_drive(self):
mock_instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
mock_instance.config_drive = 'True'
self._migrationops._check_and_attach_config_drive(
mock_instance, mock.sentinel.vm_gen)
self._migrationops._vmops.attach_config_drive.assert_called_once_with(
mock_instance,
self._migrationops._pathutils.lookup_configdrive_path.return_value,
mock.sentinel.vm_gen)
def test_check_and_attach_config_drive_unknown_path(self):
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
instance.config_drive = 'True'
self._migrationops._pathutils.lookup_configdrive_path.return_value = (
None)
self.assertRaises(exception.ConfigDriveNotFound,
self._migrationops._check_and_attach_config_drive,
instance,
mock.sentinel.FAKE_VM_GEN)
@mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files')
@mock.patch.object(migrationops.MigrationOps, '_check_target_flavor')
def test_migrate_disk_and_power_off(self, mock_check_flavor,
mock_migrate_disk_files):
instance = fake_instance.fake_instance_obj(self.context)
flavor = mock.MagicMock()
network_info = mock.MagicMock()
disk_files = [mock.MagicMock()]
volume_drives = [mock.MagicMock()]
mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths
mock_get_vm_st_path.return_value = (disk_files, volume_drives)
self._migrationops.migrate_disk_and_power_off(
self.context, instance, mock.sentinel.FAKE_DEST, flavor,
network_info, mock.sentinel.bdi,
self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
mock_check_flavor.assert_called_once_with(instance, flavor)
self._migrationops._vmops.power_off.assert_called_once_with(
instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
mock_get_vm_st_path.assert_called_once_with(instance.name)
mock_migrate_disk_files.assert_called_once_with(
instance.name, disk_files, mock.sentinel.FAKE_DEST)
self._migrationops._vmops.destroy.assert_called_once_with(
instance, network_info, mock.sentinel.bdi, destroy_disks=False)
def test_confirm_migration(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._migrationops.confirm_migration(
context=self.context,
migration=mock.sentinel.migration, instance=mock_instance,
network_info=mock.sentinel.network_info)
get_instance_migr_revert_dir = (
self._migrationops._pathutils.get_instance_migr_revert_dir)
get_instance_migr_revert_dir.assert_called_with(mock_instance.name,
remove_dir=True)
def test_revert_migration_files(self):
instance_path = (
self._migrationops._pathutils.get_instance_dir.return_value)
get_revert_dir = (
self._migrationops._pathutils.get_instance_migr_revert_dir)
self._migrationops._revert_migration_files(
instance_name=mock.sentinel.instance_name)
self._migrationops._pathutils.get_instance_dir.assert_called_once_with(
mock.sentinel.instance_name, create_dir=False, remove_dir=True)
get_revert_dir.assert_called_with(mock.sentinel.instance_name)
self._migrationops._pathutils.rename.assert_called_once_with(
get_revert_dir.return_value, instance_path)
@mock.patch.object(migrationops.MigrationOps,
'_check_and_attach_config_drive')
@mock.patch.object(migrationops.MigrationOps, '_revert_migration_files')
@mock.patch.object(migrationops.MigrationOps, '_check_ephemeral_disks')
@mock.patch.object(objects.ImageMeta, "from_instance")
def _check_finish_revert_migration(self, mock_image,
mock_check_eph_disks,
mock_revert_migration_files,
mock_check_attach_config_drive,
disk_type=constants.DISK):
mock_image.return_value = objects.ImageMeta.from_dict({})
mock_instance = fake_instance.fake_instance_obj(self.context)
root_device = {'type': disk_type}
block_device_info = {'root_disk': root_device, 'ephemerals': []}
self._migrationops.finish_revert_migration(
context=self.context, instance=mock_instance,
network_info=mock.sentinel.network_info,
block_device_info=block_device_info,
power_on=True)
mock_revert_migration_files.assert_called_once_with(
mock_instance.name)
if root_device['type'] == constants.DISK:
lookup_root_vhd = (
self._migrationops._pathutils.lookup_root_vhd_path)
lookup_root_vhd.assert_called_once_with(mock_instance.name)
self.assertEqual(lookup_root_vhd.return_value,
root_device['path'])
get_image_vm_gen = self._migrationops._vmops.get_image_vm_generation
get_image_vm_gen.assert_called_once_with(
mock_instance.uuid, test.MatchType(objects.ImageMeta))
self._migrationops._vmops.create_instance.assert_called_once_with(
mock_instance, mock.sentinel.network_info, root_device,
block_device_info, get_image_vm_gen.return_value,
mock_image.return_value)
mock_check_attach_config_drive.assert_called_once_with(
mock_instance, get_image_vm_gen.return_value)
self._migrationops._vmops.set_boot_order.assert_called_once_with(
mock_instance.name, get_image_vm_gen.return_value,
block_device_info)
self._migrationops._vmops.power_on.assert_called_once_with(
mock_instance, network_info=mock.sentinel.network_info)
def test_finish_revert_migration_boot_from_volume(self):
self._check_finish_revert_migration(disk_type=constants.VOLUME)
def test_finish_revert_migration_boot_from_disk(self):
self._check_finish_revert_migration(disk_type=constants.DISK)
@mock.patch.object(objects.ImageMeta, "from_instance")
def test_finish_revert_migration_no_root_vhd(self, mock_image):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._migrationops._pathutils.lookup_root_vhd_path.return_value = None
bdi = {'root_disk': {'type': constants.DISK},
'ephemerals': []}
self.assertRaises(
exception.DiskNotFound,
self._migrationops.finish_revert_migration, self.context,
mock_instance, mock.sentinel.network_info, bdi, True)
def test_merge_base_vhd(self):
fake_diff_vhd_path = 'fake/diff/path'
fake_base_vhd_path = 'fake/base/path'
base_vhd_copy_path = os.path.join(
os.path.dirname(fake_diff_vhd_path),
os.path.basename(fake_base_vhd_path))
self._migrationops._merge_base_vhd(diff_vhd_path=fake_diff_vhd_path,
base_vhd_path=fake_base_vhd_path)
self._migrationops._pathutils.copyfile.assert_called_once_with(
fake_base_vhd_path, base_vhd_copy_path)
recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd
recon_parent_vhd.assert_called_once_with(fake_diff_vhd_path,
base_vhd_copy_path)
self._migrationops._vhdutils.merge_vhd.assert_called_once_with(
fake_diff_vhd_path)
self._migrationops._pathutils.rename.assert_called_once_with(
base_vhd_copy_path, fake_diff_vhd_path)
def test_merge_base_vhd_exception(self):
fake_diff_vhd_path = 'fake/diff/path'
fake_base_vhd_path = 'fake/base/path'
base_vhd_copy_path = os.path.join(
os.path.dirname(fake_diff_vhd_path),
os.path.basename(fake_base_vhd_path))
self._migrationops._vhdutils.reconnect_parent_vhd.side_effect = (
os_win_exc.HyperVException)
self._migrationops._pathutils.exists.return_value = True
self.assertRaises(os_win_exc.HyperVException,
self._migrationops._merge_base_vhd,
fake_diff_vhd_path, fake_base_vhd_path)
self._migrationops._pathutils.exists.assert_called_once_with(
base_vhd_copy_path)
self._migrationops._pathutils.remove.assert_called_once_with(
base_vhd_copy_path)
@mock.patch.object(migrationops.MigrationOps, '_resize_vhd')
def test_check_resize_vhd(self, mock_resize_vhd):
self._migrationops._check_resize_vhd(
vhd_path=mock.sentinel.vhd_path, vhd_info={'VirtualSize': 1},
new_size=2)
mock_resize_vhd.assert_called_once_with(mock.sentinel.vhd_path, 2)
def test_check_resize_vhd_exception(self):
self.assertRaises(exception.CannotResizeDisk,
self._migrationops._check_resize_vhd,
mock.sentinel.vhd_path,
{'VirtualSize': 1}, 0)
@mock.patch.object(migrationops.MigrationOps, '_merge_base_vhd')
def test_resize_vhd(self, mock_merge_base_vhd):
fake_vhd_path = 'fake/path.vhd'
new_vhd_size = 2
self._migrationops._resize_vhd(vhd_path=fake_vhd_path,
new_size=new_vhd_size)
get_vhd_parent_path = self._migrationops._vhdutils.get_vhd_parent_path
get_vhd_parent_path.assert_called_once_with(fake_vhd_path)
mock_merge_base_vhd.assert_called_once_with(
fake_vhd_path,
self._migrationops._vhdutils.get_vhd_parent_path.return_value)
self._migrationops._vhdutils.resize_vhd.assert_called_once_with(
fake_vhd_path, new_vhd_size)
def test_check_base_disk(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
fake_src_vhd_path = 'fake/src/path'
fake_base_vhd = 'fake/vhd'
get_cached_image = self._migrationops._imagecache.get_cached_image
get_cached_image.return_value = fake_base_vhd
self._migrationops._check_base_disk(
context=self.context, instance=mock_instance,
diff_vhd_path=mock.sentinel.diff_vhd_path,
src_base_disk_path=fake_src_vhd_path)
get_cached_image.assert_called_once_with(self.context, mock_instance)
recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd
recon_parent_vhd.assert_called_once_with(
mock.sentinel.diff_vhd_path, fake_base_vhd)
@mock.patch.object(migrationops.MigrationOps,
'_check_and_attach_config_drive')
@mock.patch.object(migrationops.MigrationOps, '_check_base_disk')
@mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd')
@mock.patch.object(migrationops.MigrationOps, '_check_ephemeral_disks')
def _check_finish_migration(self, mock_check_eph_disks,
mock_check_resize_vhd,
mock_check_base_disk,
mock_check_attach_config_drive,
disk_type=constants.DISK):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.flavor.ephemeral_gb = 1
root_device = {'type': disk_type}
block_device_info = {'root_disk': root_device, 'ephemerals': []}
lookup_root_vhd = self._migrationops._pathutils.lookup_root_vhd_path
get_vhd_info = self._migrationops._vhdutils.get_vhd_info
mock_vhd_info = get_vhd_info.return_value
expected_check_resize = []
expected_get_info = []
self._migrationops.finish_migration(
context=self.context, migration=mock.sentinel.migration,
instance=mock_instance, disk_info=mock.sentinel.disk_info,
network_info=mock.sentinel.network_info,
image_meta=mock.sentinel.image_meta, resize_instance=True,
block_device_info=block_device_info)
if root_device['type'] == constants.DISK:
root_device_path = lookup_root_vhd.return_value
lookup_root_vhd.assert_called_with(mock_instance.name)
expected_get_info = [mock.call(root_device_path)]
mock_vhd_info.get.assert_called_once_with("ParentPath")
mock_check_base_disk.assert_called_once_with(
self.context, mock_instance, root_device_path,
mock_vhd_info.get.return_value)
expected_check_resize.append(
mock.call(root_device_path, mock_vhd_info,
mock_instance.flavor.root_gb * units.Gi))
ephemerals = block_device_info['ephemerals']
mock_check_eph_disks.assert_called_once_with(
mock_instance, ephemerals, True)
mock_check_resize_vhd.assert_has_calls(expected_check_resize)
self._migrationops._vhdutils.get_vhd_info.assert_has_calls(
expected_get_info)
get_image_vm_gen = self._migrationops._vmops.get_image_vm_generation
get_image_vm_gen.assert_called_once_with(mock_instance.uuid,
mock.sentinel.image_meta)
self._migrationops._vmops.create_instance.assert_called_once_with(
mock_instance, mock.sentinel.network_info, root_device,
block_device_info, get_image_vm_gen.return_value,
mock.sentinel.image_meta)
mock_check_attach_config_drive.assert_called_once_with(
mock_instance, get_image_vm_gen.return_value)
self._migrationops._vmops.set_boot_order.assert_called_once_with(
mock_instance.name, get_image_vm_gen.return_value,
block_device_info)
self._migrationops._vmops.power_on.assert_called_once_with(
mock_instance, network_info=mock.sentinel.network_info)
def test_finish_migration(self):
self._check_finish_migration(disk_type=constants.DISK)
def test_finish_migration_boot_from_volume(self):
self._check_finish_migration(disk_type=constants.VOLUME)
def test_finish_migration_no_root(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._migrationops._pathutils.lookup_root_vhd_path.return_value = None
bdi = {'root_disk': {'type': constants.DISK},
'ephemerals': []}
self.assertRaises(exception.DiskNotFound,
self._migrationops.finish_migration,
self.context, mock.sentinel.migration,
mock_instance, mock.sentinel.disk_info,
mock.sentinel.network_info,
mock.sentinel.image_meta, True, bdi, True)
@mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd')
@mock.patch.object(migrationops.LOG, 'warning')
def test_check_ephemeral_disks_multiple_eph_warn(self, mock_warn,
mock_check_resize_vhd):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.ephemeral_gb = 3
mock_ephemerals = [{'size': 1}, {'size': 1}]
self._migrationops._check_ephemeral_disks(mock_instance,
mock_ephemerals,
True)
mock_warn.assert_called_once_with(
"Cannot resize multiple ephemeral disks for instance.",
instance=mock_instance)
def test_check_ephemeral_disks_exception(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_ephemerals = [dict()]
lookup_eph_path = (
self._migrationops._pathutils.lookup_ephemeral_vhd_path)
lookup_eph_path.return_value = None
self.assertRaises(exception.DiskNotFound,
self._migrationops._check_ephemeral_disks,
mock_instance, mock_ephemerals)
@mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd')
def _test_check_ephemeral_disks(self, mock_check_resize_vhd,
existing_eph_path=None, new_eph_size=42):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.ephemeral_gb = new_eph_size
eph = {}
mock_ephemerals = [eph]
mock_pathutils = self._migrationops._pathutils
lookup_eph_path = mock_pathutils.lookup_ephemeral_vhd_path
lookup_eph_path.return_value = existing_eph_path
mock_get_eph_vhd_path = mock_pathutils.get_ephemeral_vhd_path
mock_get_eph_vhd_path.return_value = mock.sentinel.get_path
mock_vhdutils = self._migrationops._vhdutils
mock_get_vhd_format = mock_vhdutils.get_best_supported_vhd_format
mock_get_vhd_format.return_value = mock.sentinel.vhd_format
self._migrationops._check_ephemeral_disks(mock_instance,
mock_ephemerals,
True)
self.assertEqual(mock_instance.ephemeral_gb, eph['size'])
if not existing_eph_path:
mock_vmops = self._migrationops._vmops
mock_vmops.create_ephemeral_disk.assert_called_once_with(
mock_instance.name, eph)
self.assertEqual(mock.sentinel.vhd_format, eph['format'])
self.assertEqual(mock.sentinel.get_path, eph['path'])
elif new_eph_size:
mock_check_resize_vhd.assert_called_once_with(
existing_eph_path,
self._migrationops._vhdutils.get_vhd_info.return_value,
mock_instance.ephemeral_gb * units.Gi)
self.assertEqual(existing_eph_path, eph['path'])
else:
self._migrationops._pathutils.remove.assert_called_once_with(
existing_eph_path)
    def test_check_ephemeral_disks_create(self):
        # No existing ephemeral path: a new disk should be created.
        self._test_check_ephemeral_disks()
    def test_check_ephemeral_disks_resize(self):
        # Existing path and a non-zero new size: the disk should be resized.
        self._test_check_ephemeral_disks(existing_eph_path=mock.sentinel.path)
    def test_check_ephemeral_disks_remove(self):
        # Existing path but the new size is 0: the disk should be removed.
        self._test_check_ephemeral_disks(existing_eph_path=mock.sentinel.path,
                                         new_eph_size=0)
| |
r"""
Logbook
-------
An awesome logging implementation that is fun to use.
Quickstart
``````````
::
from logbook import Logger
log = Logger('A Fancy Name')
log.warn('Logbook is too awesome for most applications')
log.error("Can't touch this")
Works for web apps too
``````````````````````
::
from logbook import MailHandler, Processor
mailhandler = MailHandler(from_addr='servererror@example.com',
recipients=['admin@example.com'],
level='ERROR', format_string=u'''\
Subject: Application Error for {record.extra[path]} [{record.extra[method]}]
Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Time: {record.time:%Y-%m-%d %H:%M:%S}
Remote IP: {record.extra[ip]}
Request: {record.extra[path]} [{record.extra[method]}]
Message:
{record.message}
''')
def handle_request(request):
def inject_extra(record, handler):
record.extra['ip'] = request.remote_addr
record.extra['method'] = request.method
record.extra['path'] = request.path
with Processor(inject_extra):
with mailhandler:
# execute code that might fail in the context of the
# request.
"""
import os
import platform
import sys
from itertools import chain
from distutils.command.build_ext import build_ext
from distutils.errors import (
CCompilerError, DistutilsExecError, DistutilsPlatformError)
from setuptools import Distribution as _Distribution, Extension, setup
from setuptools.command.test import test as TestCommand
# Extra distutils/setuptools commands registered by this setup script.
cmdclass = {}
if sys.version_info < (2, 6):
    raise Exception('Logbook requires Python 2.6 or higher.')
# The C speedups are only attempted on CPython; other implementations
# (e.g. PyPy) always get the pure-Python code path.
cpython = platform.python_implementation() == 'CPython'
ext_modules = [Extension('logbook._speedups', sources=['logbook/_speedups.c'])]
# Errors that indicate the C extension could not be built on this machine.
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    ext_errors += (IOError,)
class BuildFailed(Exception):
    """Raised when compiling the optional C extension fails.

    The exception currently being handled is captured as ``cause`` so the
    caller can report it; reading it via ``sys.exc_info()`` is portable
    across the Python 2 and 3 ``except`` syntaxes.
    """

    def __init__(self):
        self.cause = sys.exc_info()[1]
class ve_build_ext(build_ext):
    """A build_ext variant that lets C extension building fail gracefully.

    Any compilation problem is converted into BuildFailed so the caller
    can retry a pure-Python installation instead of aborting setup().
    """

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
        except ValueError:
            # Windows 64-bit can raise a bogus ValueError mentioning 'path'
            # (see Python issue 7511); treat that as a build failure too.
            message = str(sys.exc_info()[1])  # portable across py2/py3
            if "'path'" in message:
                raise BuildFailed()
            raise
cmdclass['build_ext'] = ve_build_ext
class Distribution(_Distribution):
    """Distribution that always claims to contain extension modules.

    Claiming ext_modules unconditionally is harmless when nothing actually
    gets compiled (such as on PyPy), but it keeps bdist_wheel from emitting
    an overly broad pure-Python wheel for a build without C support: the
    wheel is tagged as platform specific instead.
    """

    def has_ext_modules(self):
        return True
class PyTest(TestCommand):
    """``setup.py test`` command that delegates to py.test."""
    # from https://pytest.org/latest/goodpractises.html\
    # #integration-with-setuptools-test-commands
    user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
    # Arguments always passed to py.test (the tests directory).
    default_options = ['tests']
    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = ''
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Run py.test ourselves in run_tests() instead of a test suite.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(
            ' '.join(self.default_options) + ' ' + self.pytest_args)
        sys.exit(errno)
cmdclass['test'] = PyTest
def status_msgs(*msgs):
    """Print *msgs* to stdout framed by 75-character asterisk rules."""
    rule = '*' * 75
    print(rule)
    for message in msgs:
        print(message)
    print(rule)
# Execute logbook/__version__.py to pick up __version__ without importing
# the package (which might require the not-yet-built C extension).
version_file_path = os.path.join(
    os.path.dirname(__file__), 'logbook', '__version__.py')
with open(version_file_path) as version_file:
    exec(version_file.read()) # pylint: disable=W0122
# Optional dependency groups; combined into the 'all' extra at the end.
extras_require = dict()
extras_require['test'] = set(['pytest', 'pytest-cov'])
if sys.version_info[:2] < (3, 3):
    # unittest.mock only entered the stdlib in Python 3.3.
    extras_require['test'] |= set(['mock'])
extras_require['dev'] = set(['cython']) | extras_require['test']
extras_require['execnet'] = set(['execnet>=1.0.9'])
extras_require['sqlalchemy'] = set(['sqlalchemy'])
extras_require['redis'] = set(['redis'])
extras_require['zmq'] = set(['pyzmq'])
extras_require['jinja'] = set(['Jinja2'])
extras_require['riemann'] = set(['riemann-client'])
extras_require['all'] = set(chain.from_iterable(extras_require.values()))
def run_setup(with_cext):
    """Invoke setup(), with or without the C speedups extension.

    Args:
        with_cext: when True, attempt to build logbook._speedups; when
            False, install the pure-Python implementation only.
    """
    kwargs = {}
    if with_cext:
        kwargs['ext_modules'] = ext_modules
    else:
        kwargs['ext_modules'] = []
    # __version__ comes from exec-ing logbook/__version__.py above;
    # __doc__ is the module docstring used as the long description.
    setup(
        name='Logbook',
        version=__version__,
        license='BSD',
        url='http://logbook.pocoo.org/',
        author='Armin Ronacher, Georg Brandl',
        author_email='armin.ronacher@active-4.com',
        description='A logging replacement for Python',
        long_description=__doc__,
        packages=['logbook'],
        zip_safe=False,
        platforms='any',
        cmdclass=cmdclass,
        tests_require=['pytest'],
        classifiers=[
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.2',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
        ],
        extras_require=extras_require,
        distclass=Distribution,
        **kwargs
    )
# Build dispatch: skip the C extension on non-CPython interpreters or when
# explicitly disabled; otherwise try it and fall back to pure Python on
# compile failure.
if not cpython:
    run_setup(False)
    status_msgs(
        'WARNING: C extensions are not supported on ' +
        'this Python platform, speedups are not enabled.',
        'Plain-Python build succeeded.'
    )
elif os.environ.get('DISABLE_LOGBOOK_CEXT'):
    run_setup(False)
    status_msgs(
        'DISABLE_LOGBOOK_CEXT is set; ' +
        'not attempting to build C extensions.',
        'Plain-Python build succeeded.'
    )
else:
    try:
        run_setup(True)
    except BuildFailed as exc:
        status_msgs(
            exc.cause,
            'WARNING: The C extension could not be compiled, ' +
            'speedups are not enabled.',
            'Failure information, if any, is above.',
            'Retrying the build without the C extension now.'
        )
        run_setup(False)
        status_msgs(
            'WARNING: The C extension could not be compiled, ' +
            'speedups are not enabled.',
            'Plain-Python build succeeded.'
        )
| |
#!/usr/bin/env python
import Queue
import cStringIO
import logging
import os.path
import time
import traceback
from PIL import Image
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.common import action_chains
import logenv
# Entry URL for the Kancolle game on DMM.
KANCOLLE_URL = 'http://www.dmm.com/netgame/social/-/gadgets/=/app_id=854854/'
# Browser manipulation commands consumed by setup_kancolle_browser().
COMMAND_CLICK = 'click'
COMMAND_CLICK_HOLD = 'click_hold'
COMMAND_CLICK_RELEASE = 'click_release'
COMMAND_MOVE_MOUSE = 'move_mouse'
COMMAND_COVER = 'cover'
COMMAND_TAKE_SCREENSHOT = 'take_screenshot'
logger = logging.getLogger('kcaa.browser')
def get_desired_capabilities(args):
    """Build Selenium desired capabilities that route traffic via args.proxy.

    HTTP and FTP transactions go through the local proxy; SSL and loopback
    addresses bypass it.
    """
    proxy_config = {
        'httpProxy': args.proxy,
        'ftpProxy': args.proxy,
        'sslProxy': None,
        'noProxy': '127.0.0.1,localhost',
        'proxyType': 'MANUAL',
        'class': 'org.openqa.selenium.Proxy',
        'autodetect': False,
    }
    return {'proxy': proxy_config}
def setup_chrome(name, args, desired_capabilities, is_chromium):
    """Start Chrome (or Chromium when is_chromium is True) via chromedriver.

    When --chrome_user_data_basedir is given, a per-browser user data
    directory keyed by *name* is used so profiles do not collide.
    """
    options = webdriver.ChromeOptions()
    options.binary_location = (
        args.chromium_binary if is_chromium else args.chrome_binary)
    if args.chrome_user_data_basedir:
        options.add_argument('--user-data-dir={}'.format(
            os.path.join(args.chrome_user_data_basedir, name)))
    # Do not ignore SSL certificate errors.
    # See also other Chrome-specific capabilities at
    # https://sites.google.com/a/chromium.org/chromedriver/capabilities
    options.add_experimental_option('excludeSwitches', [
        'ignore-certificate-errors'])
    return webdriver.Chrome(executable_path=args.chromedriver_binary,
                            chrome_options=options,
                            desired_capabilities=desired_capabilities)
def setup_firefox(name, args, desired_capabilities):
    """Start Firefox; *name* and *args* are unused (kept for API symmetry)."""
    return webdriver.Firefox(capabilities=desired_capabilities)
def setup_phantomjs(name, args, desired_capabilities):
    """Start PhantomJS with the KCAA proxy settings.

    *name* is unused (kept for API symmetry with the other setup_* helpers).
    """
    # Use PhantomJS with caution: it doesn't support proxying only HTTP
    # transactions (= bypassing HTTPS ones). This may reveal your username and
    # password to anyone who can access the proxy server, or anyone who can run
    # a malicious process on the machine where KCAA runs.
    # TODO: Support 'https' in --proxy-type in PhantomJS, and make it
    # distinguishable from 'http'
    service_args = [
        '--proxy={}'.format(args.proxy),
        '--proxy-type=http',
        '--ignore-ssl-errors=true',
    ]
    browser = webdriver.PhantomJS(args.phantomjs_binary,
                                  service_args=service_args,
                                  desired_capabilities=desired_capabilities)
    return browser
def open_browser(name, browser_type, args):
    """Open a browser of the given type, configured for KCAA.

    Args:
        name: identifier used for per-browser state (e.g. user data dir).
        browser_type: one of 'chrome', 'chromium', 'firefox', 'phantomjs'.
        args: parsed command-line flags.

    Raises:
        ValueError: if *browser_type* is not recognized.
    """
    desired_capabilities = get_desired_capabilities(args)
    if browser_type in ('chrome', 'chromium'):
        return setup_chrome(name, args, desired_capabilities,
                            browser_type == 'chromium')
    if browser_type == 'firefox':
        return setup_firefox(name, args, desired_capabilities)
    if browser_type == 'phantomjs':
        return setup_phantomjs(name, args, desired_capabilities)
    raise ValueError('Unrecognized browser: {browser}'.format(
        browser=browser_type))
def open_kancolle_browser(args):
    """Open a browser on the Kancolle game page and optionally sign in.

    When --credentials points to a file containing 'user:password', the
    login form is filled in and submitted, retrying up to 5 times against
    transient page-load failures. On login failure a screenshot is saved to
    screen.png and the exception is re-raised.
    """
    logger.info('Opening Kancolle browser...')
    browser = open_browser('kancolle', args.kancolle_browser, args)
    browser.set_window_size(980, 750)
    browser.set_window_position(0, 0)
    logger.info('Opening the Kancolle game URL...')
    browser.get(KANCOLLE_URL)
    if args.credentials and os.path.isfile(args.credentials):
        logger.info('Trying to sign in with the given credentials...')
        with open(args.credentials, 'r') as credentials_file:
            user, passwd = credentials_file.read().strip().split(':')
        try:
            login_id = browser.find_element_by_id('login_id')
            login_id.send_keys(user)
            password = browser.find_element_by_id('password')
            password.send_keys(passwd)
            last_exception = None
            for _ in xrange(5):
                logger.info('Login trial...')
                time.sleep(1.0)
                try:
                    login_button = browser.find_element_by_xpath(
                        '//div[@class="box-btn-login"]'
                        '//input[@type="submit"]')
                    login_button.click()
                    break
                except exceptions.NoSuchElementException:
                    # The login button is gone; assume login already went
                    # through and the page moved on.
                    logger.info('The page must have transitioned..')
                    break
                except exceptions.WebDriverException as e:
                    last_exception = e
                    logger.info(
                        'Seems like page loading failed. This may be just'
                        'a transient error in a browser like phantomjs. '
                        'Retrying.')
            else:
                # All 5 trials failed; surface the last WebDriver error.
                raise last_exception
        except Exception as e:
            browser.get_screenshot_as_file('screen.png')
            logger.error(str(e))
            logger.fatal(
                'Login failed. Check the generated screenshot '
                '(screen.png) to see if there is any visible error.')
            raise e
    logger.info('Kancolle browser is ready.')
    return browser
def get_game_frame(browser, debug):
    """Locate the Kancolle game frame and compute the playable area.

    Returns:
        (game_frame, dx, dy, game_area_rect) where dx/dy are offsets of the
        game area within the frame element and game_area_rect is
        (left, top, right, bottom) in page coordinates; all four are None
        when the frame is not present (e.g. the page is still loading).
    """
    # Is there a better way to get this? Currently these are read from the
    # iframe source.
    game_area_width = 800
    game_area_height = 480
    game_area_top = 16
    try:
        game_frame = browser.find_element_by_id('game_frame')
    except Exception:
        # The frame may simply not exist yet. Catch Exception rather than
        # using a bare except so KeyboardInterrupt/SystemExit still
        # propagate to the caller instead of being swallowed here.
        return None, None, None, None
    dx = (game_frame.size['width'] - game_area_width) / 2
    dy = game_area_top
    add_game_frame_cover(browser, game_area_width, game_area_height, dx, dy)
    # If in the debug mode, show the digitizer tools.
    if debug:
        add_digitizer(browser)
    location = game_frame.location
    left = int(location['x'] + dx)
    top = int(location['y'] + dy)
    return game_frame, dx, dy, (left, top, left + game_area_width,
                                top + game_area_height)
def add_game_frame_cover(browser, game_area_width, game_area_height, dx, dy):
    """Inject a hidden translucent cover <div> over the game area.

    The cover (id "game_frame_cover") is created with display:none and is
    toggled later by show_game_frame_cover() while KCAA manipulates the
    game, labeling the area "Automatically manipulated".
    """
    browser.execute_script('''
        var gameFrame = document.querySelector("#game_frame");
        var frameRect = gameFrame.getBoundingClientRect();
        var gameFrameCover = document.createElement("div");
        gameFrameCover.id = "game_frame_cover";
        gameFrameCover.style.boxShadow =
            "0 0 50px 50px hsla(240, 80%, 20%, 0.5) inset";
        gameFrameCover.style.boxSizing = "border-box";
        gameFrameCover.style.color = "white";
        gameFrameCover.style.display = "none";
        gameFrameCover.style.fontSize = "30px";
        gameFrameCover.style.height = ''' + str(game_area_height) + ''' + "px";
        gameFrameCover.style.left =
            Math.floor(frameRect.left + ''' + str(dx) + ''') + "px";
        gameFrameCover.style.padding = "20px";
        gameFrameCover.style.position = "absolute";
        gameFrameCover.style.textAlign = "right";
        gameFrameCover.style.textShadow = "0 0 5px black";
        gameFrameCover.style.top =
            Math.floor(frameRect.top + ''' + str(dy) + ''') + "px";
        gameFrameCover.style.width = ''' + str(game_area_width) + ''' + "px";
        gameFrameCover.style.zIndex = "1";
        document.body.appendChild(gameFrameCover);
        coverText = document.createElement("span");
        coverText.style.position = "relative";
        coverText.style.top = "410px";
        coverText.textContent = "Automatically manipulated";
        gameFrameCover.appendChild(coverText);
        ''')
def add_digitizer(browser):
    """Inject debug-only digitizer tools into the page.

    Adds a "Toggle Cover" button and a coordinate readout that tracks the
    mouse position relative to the game frame cover, which helps when
    digitizing in-game click targets.
    """
    browser.execute_script('''
        var gameFrameCover = document.querySelector("#game_frame_cover");
        var digitizerDisplay = document.createElement("div");
        digitizerDisplay.style.fontSize = "16px";
        digitizerDisplay.style.position = "absolute";
        digitizerDisplay.style.top = "42px";
        var toggleButton = document.createElement("button");
        toggleButton.textContent = "Toggle Cover";
        toggleButton.onclick = function (e) {
            var isCurrentlyShown = gameFrameCover.style.display != "none";
            gameFrameCover.style.display = isCurrentlyShown ? "none" : "block";
        }
        digitizerDisplay.appendChild(toggleButton);
        var coordinates = document.createElement("span");
        coordinates.style.marginLeft = "10px";
        digitizerDisplay.appendChild(coordinates);
        var w = document.querySelector("#w");
        w.insertBefore(digitizerDisplay, w.children[0]);
        gameFrameCover.onmousemove = function (e) {
            var frameRect = gameFrameCover.getBoundingClientRect();
            var x = e.clientX - frameRect.left;
            var y = e.clientY - frameRect.top;
            coordinates.textContent = "(" + x + "," + y + ")";
        }
        ''')
def show_game_frame_cover(browser, is_shown):
    """Show or hide the "Automatically manipulated" cover overlay.

    NOTE(review): this function currently returns True immediately; the
    script below is intentionally unreachable dead code. Toggling the
    cover kept dying with NoSuchWindowException in long-running
    environments, so the toggle is disabled as a workaround.
    """
    display = 'block' if is_shown else 'none'
    try:
        # Currently this doesn't work for some long-running environment.
        # It often dies with NoSuchWindowException.
        return True
        browser.execute_script('''
            var gameFrameCover = document.querySelector("#game_frame_cover");
            gameFrameCover.style.display = "''' + display + '''";
        ''')
        return True
    except exceptions.UnexpectedAlertPresentException as e:
        logger.error('Unexpected alert: {}'.format(e.alert_text))
        logger.debug(str(e))
        return False
def perform_actions(actions):
    """Execute a Selenium ActionChains, tolerating unexpected alerts.

    Returns:
        True when the actions were performed, False when an unexpected
        alert interrupted them (the alert text is logged).
    """
    try:
        actions.perform()
    except exceptions.UnexpectedAlertPresentException as e:
        logger.error('Unexpected alert: {}'.format(e.alert_text))
        logger.debug(str(e))
        return False
    return True
def setup_kancolle_browser(args, controller_queue_in, controller_queue_out,
                           to_exit, browser_broken):
    """Run the Kancolle browser process: open the game and serve commands.

    Opens the Kancolle browser, signals readiness by putting True on
    controller_queue_out, then loops consuming COMMAND_* tuples from
    controller_queue_in (clicks, mouse moves, cover toggles, screenshots)
    until to_exit is set or the browser dies. Sets browser_broken when the
    browser window is killed, and always sets to_exit on the way out.
    """
    monitor = None
    try:
        logenv.setup_logger(args.debug, args.log_file, args.log_level,
                            args.keep_timestamped_logs)
        monitor = BrowserMonitor(
            'Kancolle', open_kancolle_browser(args), 3)
        # Signals the browser is ready.
        controller_queue_out.put(True)
        game_frame, dx, dy, game_area_rect = None, None, None, None
        covered = False
        while True:
            browser = monitor.browser
            if to_exit.wait(0.0):
                logger.info('Browser Kancolle got an exit signal. Shutting '
                            'down.')
                break
            if not monitor.is_alive():
                # If a user closes the Kancolle browser, it should be a signal
                # that the user wants to exit the game.
                break
            if game_frame:
                try:
                    # Commands are (type, args) tuples; dx/dy translate game
                    # coordinates into frame-element offsets.
                    command_type, command_args = controller_queue_in.get(timeout=1.0)
                    if command_type == COMMAND_CLICK:
                        x, y = command_args
                        x += dx
                        y += dy
                        actions = action_chains.ActionChains(browser)
                        actions.move_to_element_with_offset(game_frame, x, y)
                        actions.click(None)
                        # Hide the cover around the click so it doesn't
                        # intercept the event, then restore it.
                        if covered:
                            show_game_frame_cover(browser, False)
                            time.sleep(0.1)
                        perform_actions(actions)
                        if covered:
                            time.sleep(0.1)
                            show_game_frame_cover(browser, True)
                    elif command_type == COMMAND_CLICK_HOLD:
                        logger.debug('click hold!')
                        x, y = command_args
                        x += dx
                        y += dy
                        actions = action_chains.ActionChains(browser)
                        actions.move_to_element_with_offset(game_frame, x, y)
                        actions.click_and_hold(None)
                        if covered:
                            show_game_frame_cover(browser, False)
                            time.sleep(0.1)
                        perform_actions(actions)
                    elif command_type == COMMAND_CLICK_RELEASE:
                        logger.debug('click release!')
                        x, y = command_args
                        x += dx
                        y += dy
                        actions = action_chains.ActionChains(browser)
                        actions.move_to_element_with_offset(game_frame, x, y)
                        actions.release(None)
                        perform_actions(actions)
                        if covered:
                            time.sleep(0.1)
                            show_game_frame_cover(browser, True)
                    elif command_type == COMMAND_MOVE_MOUSE:
                        logger.debug('mouse move!')
                        x, y = command_args
                        x += dx
                        y += dy
                        actions = action_chains.ActionChains(browser)
                        actions.move_to_element_with_offset(game_frame, x, y)
                        perform_actions(actions)
                    elif command_type == COMMAND_COVER:
                        is_shown = command_args[0]
                        if is_shown != covered:
                            show_game_frame_cover(browser, is_shown)
                            covered = is_shown
                    elif command_type == COMMAND_TAKE_SCREENSHOT:
                        # Crop the full-page screenshot to the game area,
                        # optionally thumbnail it, and reply with the bytes.
                        format, quality, width, height = command_args
                        im_buffer = None
                        response = ''
                        try:
                            im_buffer = cStringIO.StringIO(
                                browser.get_screenshot_as_png())
                            im = Image.open(im_buffer)
                            im.load()
                            im_buffer.close()
                            im = im.crop(game_area_rect)
                            if width != 0 and height != 0:
                                im.thumbnail((width, height), Image.NEAREST)
                            im_buffer = cStringIO.StringIO()
                            if format == 'jpeg':
                                im.save(im_buffer, format, quality=quality)
                            else:
                                im.save(im_buffer, format)
                            response = im_buffer.getvalue()
                        except exceptions.UnexpectedAlertPresentException as e:
                            logger.error('Unexpected alert: {}'.format(
                                e.alert_text))
                            logger.debug(str(e))
                        finally:
                            # Always reply, even with an empty response, so
                            # the controller never blocks waiting.
                            controller_queue_out.put(response)
                            if im_buffer:
                                im_buffer.close()
                    else:
                        raise ValueError(
                            'Unknown browser command: type = {}, args = {}'
                            .format(command_type, command_args))
                except Queue.Empty:
                    pass
            else:
                # The game frame is not up yet; poll for it once a second.
                game_frame, dx, dy, game_area_rect = get_game_frame(
                    browser, args.debug)
                time.sleep(1.0)
    except (KeyboardInterrupt, SystemExit):
        logger.info('SIGINT received in the Kancolle browser process. '
                    'Exiting...')
    except exceptions.NoSuchWindowException:
        logger.error('Kancolle window seems to have been killed.')
        browser_broken.set()
        return
    except:
        logger.error(traceback.format_exc())
    finally:
        controller_queue_in.close()
        controller_queue_out.close()
        if monitor:
            monitor.quit()
        to_exit.set()
def open_kcaa_browser(args, root_url):
    """Open a local browser on the KCAA Web UI, or return None if disabled.

    An empty --kcaa_browser flag means the user will open the UI manually;
    the root URL is logged so they can find it.
    """
    if not args.kcaa_browser:
        logger.info('Flag --kcaa_browser is set to be empty. No browser will '
                    'be up locally. You can still open a KCAA Web UI with {}.'
                    .format(root_url))
        return None
    logger.info('Opening a KCAA browser.')
    ui_browser = open_browser('kcaa', args.kcaa_browser, args)
    ui_browser.set_window_size(700, 1050)
    ui_browser.set_window_position(980, 0)
    logger.info('Opening the KCAA Web UI...')
    ui_browser.get(root_url)
    logger.info('KCAA browser is ready.')
    return ui_browser
def setup_kcaa_browser(args, root_url, to_exit):
    """Run the KCAA UI browser process until an exit is signaled.

    Opens the KCAA Web UI browser and monitors it, restarting it when the
    window dies (treated as an accident rather than an exit request).
    Always sets *to_exit* on the way out so sibling processes shut down.
    """
    monitor = None
    try:
        logenv.setup_logger(args.debug, args.log_file, args.log_level,
                            args.keep_timestamped_logs)
        kcaa_browser = open_kcaa_browser(args, root_url)
        if not kcaa_browser:
            return
        monitor = BrowserMonitor('KCAA', kcaa_browser, 3)
        while True:
            time.sleep(1.0)
            if to_exit.wait(0.0):
                logger.info('Browser KCAA got an exit signal. Shutting down.')
                break
            if not monitor.is_alive():
                # KCAA window is not vital for playing the game -- it is not
                # necessarily a signal for exiting. Rather, I would restart it
                # again, assuming that was an accident.
                monitor = BrowserMonitor(
                    'KCAA', open_kcaa_browser(args, root_url), 3)
    except (KeyboardInterrupt, SystemExit):
        logger.info('SIGINT received in the KCAA browser process. Exiting...')
    except Exception:
        # Narrowed from a bare except so only ordinary failures are
        # swallowed and logged here.
        logger.error(traceback.format_exc())
    to_exit.set()
    if monitor:
        monitor.quit()
class BrowserMonitor(object):
    """Watches a browser and tracks how long it has been unresponsive.

    The monitor keeps a 'credit' counter: every failed heartbeat decrements
    it, and any successful heartbeat restores it to max_credit. The browser
    is reported dead once the credit is exhausted.
    """

    def __init__(self, name, browser, max_credit):
        self.name = name
        self.browser = browser
        self.max_credit = max_credit
        self.credit = max_credit

    def quit(self):
        """Close the browser, logging (but otherwise ignoring) any error."""
        try:
            self.browser.quit()
        except:
            logger.error(traceback.format_exc())

    def is_alive(self):
        """Run one heartbeat check; return True while credit remains."""
        responding = True
        try:
            # Check window_handles as a heartbeat. This seems better than
            # current_url or title because those interfere with the Chrome
            # developer tools.
            if self.browser.window_handles is None:
                # Never actually happens (an exception is raised instead),
                # but forces the property above to be evaluated.
                raise RuntimeError()
        except Exception:
            # Browser exited, or didn't respond.
            logger.debug('Browser {} not responding.'.format(self.name))
            self.credit -= 1
            responding = False
        if responding and self.credit < self.max_credit:
            logger.info('Browser recovered.')
            self.credit = self.max_credit
        return self.credit > 0
| |
import codecs
import errno
import fcntl
import io
import os
import pty
# import resource
# as sublime text doesn't ship with resource module, use our mock version of `resource`
from . import resource
import signal
import struct
import sys
import termios
import time
try:
import builtins # Python 3
except ImportError:
import __builtin__ as builtins # Python 2
# Constants
from pty import (STDIN_FILENO, CHILD)
from .util import which
# Normalized platform string used for OS-specific branching below.
_platform = sys.platform.lower()
# Solaris uses internal __fork_pty(). All others use pty.fork().
_is_solaris = (
    _platform.startswith('solaris') or
    _platform.startswith('sunos'))
if _is_solaris:
    use_native_pty_fork = False
    from . import _fork_pty
else:
    use_native_pty_fork = True
PY3 = sys.version_info[0] >= 3
if PY3:
    def _byte(i):
        # Single byte from an int, e.g. _byte(3) == b'\x03'.
        return bytes([i])
else:
    def _byte(i):
        # On Python 2, chr() already produces a byte string.
        return chr(i)
# Python 2 backports of the Python 3 exception names (on Python 3 these
# shadow the builtins, which are already OSError subclasses).
class FileNotFoundError(OSError): pass
class TimeoutError(OSError): pass
# EOF and interrupt control characters; computed lazily by _make_eof_intr().
_EOF, _INTR = None, None
def _make_eof_intr():
    """Set constants _EOF and _INTR.

    This avoids doing potentially costly terminal queries on module load;
    the values are computed once and cached in the module globals.
    """
    global _EOF, _INTR
    if (_EOF is not None) and (_INTR is not None):
        return
    # inherit EOF and INTR definitions from controlling process.
    try:
        from termios import VEOF, VINTR
        try:
            fd = sys.__stdin__.fileno()
        except ValueError:
            # ValueError: I/O operation on closed file
            fd = sys.__stdout__.fileno()
        intr = ord(termios.tcgetattr(fd)[6][VINTR])
        eof = ord(termios.tcgetattr(fd)[6][VEOF])
    except (ImportError, OSError, IOError, ValueError, termios.error):
        # The controlling process may also not be a terminal (e.g. under
        # cron(1)), or stdin and stdout may both be closed.
        # Fall back to the CEOF and CINTR defaults.
        try:
            from termios import CEOF, CINTR
            (intr, eof) = (CINTR, CEOF)
        except ImportError:
            # ^C, ^D
            (intr, eof) = (3, 4)
    _INTR = _byte(intr)
    _EOF = _byte(eof)
class PtyProcessError(Exception):
    """Generic error raised by this package (e.g. failing to terminate)."""
# setecho and setwinsize are pulled out here because on some platforms, we need
# to do this from the child before we exec()
def _setecho(fd, state):
    """Enable (state=True) or disable terminal ECHO on *fd*.

    Raises IOError with a helpful message when the platform rejects the
    termios calls with EINVAL.
    """
    errmsg = 'setecho() may not be called on this platform'
    try:
        attr = termios.tcgetattr(fd)
    except termios.error as err:
        if err.args[0] == errno.EINVAL:
            raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
        raise
    # attr[3] is the lflag word; toggle only the ECHO bit.
    if state:
        attr[3] = attr[3] | termios.ECHO
    else:
        attr[3] = attr[3] & ~termios.ECHO
    try:
        # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and
        # blocked on some platforms. TCSADRAIN would probably be ideal.
        termios.tcsetattr(fd, termios.TCSANOW, attr)
    except IOError as err:
        if err.args[0] == errno.EINVAL:
            raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
        raise
def _setwinsize(fd, rows, cols):
# Some very old platforms have a bug that causes the value for
# termios.TIOCSWINSZ to be truncated. There was a hack here to work
# around this, but it caused problems with newer platforms so has been
# removed. For details see https://github.com/pexpect/pexpect/issues/39
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, TIOCSWINSZ, s)
class PtyProcess(object):
    '''This class represents a process running in a pseudoterminal.

    The main constructor is the :meth:`spawn` classmethod.
    '''
    # Data written to the child is bytes; this class has no text encoding.
    string_type = bytes
    if PY3:
        linesep = os.linesep.encode('ascii')
        crlf = '\r\n'.encode('ascii')
        @staticmethod
        def write_to_stdout(b):
            # Write raw bytes to the real stdout buffer when available.
            try:
                return sys.stdout.buffer.write(b)
            except AttributeError:
                # If stdout has been replaced, it may not have .buffer
                return sys.stdout.write(b.decode('ascii', 'replace'))
    else:
        linesep = os.linesep
        crlf = '\r\n'
        write_to_stdout = sys.stdout.write
    encoding = None
    # Informational attributes filled in by spawn(); None until then.
    argv = None
    env = None
    launch_dir = None
    def __init__(self, pid, fd):
        """Wrap an already-forked child process.

        Args:
            pid: process id of the child.
            fd: file descriptor of the controlling (master) end of the pty.
        """
        _make_eof_intr() # Ensure _EOF and _INTR are calculated
        self.pid = pid
        self.fd = fd
        # Separate unbuffered reader/writer over the same descriptor; only
        # the reader owns (closes) the underlying fd.
        readf = io.open(fd, 'rb', buffering=0)
        writef = io.open(fd, 'wb', buffering=0, closefd=False)
        self.fileobj = io.BufferedRWPair(readf, writef)
        self.terminated = False
        self.closed = False
        self.exitstatus = None
        self.signalstatus = None
        # status returned by os.waitpid
        self.status = None
        self.flag_eof = False
        # Used by close() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterclose = 0.1
        # Used by terminate() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterterminate = 0.1
    @classmethod
    def spawn(
            cls, argv, cwd=None, env=None, echo=True, preexec_fn=None,
            dimensions=(24, 80)):
        '''Start the given command in a child process in a pseudo terminal.

        This does all the fork/exec type of stuff for a pty, and returns an
        instance of PtyProcess.

        If preexec_fn is supplied, it will be called with no arguments in the
        child process before exec-ing the specified command.
        It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.

        Dimensions of the pseudoterminal used for the subprocess can be
        specified as a tuple (rows, cols), or the default (24, 80) will be used.
        '''
        # Note that it is difficult for this method to fail.
        # You cannot detect if the child process cannot start.
        # So the only way you can tell if the child process started
        # or not is to try to read from the file descriptor. If you get
        # EOF immediately then it means that the child is already dead.
        # That may not necessarily be bad because you may have spawned a child
        # that performs some task; creates no stdout output; and then dies.
        if not isinstance(argv, (list, tuple)):
            raise TypeError("Expected a list or tuple for argv, got %r" % argv)
        # Shallow copy of argv so we can modify it
        argv = argv[:]
        command = argv[0]
        command_with_path = which(command)
        if command_with_path is None:
            raise FileNotFoundError('The command was not found or was not ' +
                                    'executable: %s.' % command)
        command = command_with_path
        argv[0] = command
        # [issue #119] To prevent the case where exec fails and the user is
        # stuck interacting with a python child process instead of whatever
        # was expected, we implement the solution from
        # http://stackoverflow.com/a/3703179 to pass the exception to the
        # parent process
        # [issue #119] 1. Before forking, open a pipe in the parent process.
        exec_err_pipe_read, exec_err_pipe_write = os.pipe()
        if use_native_pty_fork:
            pid, fd = pty.fork()
        else:
            # Use internal fork_pty, for Solaris
            pid, fd = _fork_pty.fork_pty()
        # Some platforms must call setwinsize() and setecho() from the
        # child process, and others from the master process. We do both,
        # allowing IOError for either.
        if pid == CHILD:
            # set window size
            try:
                _setwinsize(STDIN_FILENO, *dimensions)
            except IOError as err:
                if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                    raise
            # disable echo if spawn argument echo was unset
            if not echo:
                try:
                    _setecho(STDIN_FILENO, False)
                except (IOError, termios.error) as err:
                    if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                        raise
            # [issue #119] 3. The child closes the reading end and sets the
            # close-on-exec flag for the writing end.
            os.close(exec_err_pipe_read)
            fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
            # Do not allow child to inherit open file descriptors from parent,
            # with the exception of the exec_err_pipe_write of the pipe
            max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
            os.closerange(3, exec_err_pipe_write)
            os.closerange(exec_err_pipe_write+1, max_fd)
            if cwd is not None:
                os.chdir(cwd)
            if preexec_fn is not None:
                try:
                    preexec_fn()
                except Exception as e:
                    # Report preexec_fn failures through the pipe with a
                    # fake errno of 0, then bail out of the child.
                    ename = type(e).__name__
                    tosend = '{}:0:{}'.format(ename, str(e))
                    if PY3:
                        tosend = tosend.encode('utf-8')
                    os.write(exec_err_pipe_write, tosend)
                    os.close(exec_err_pipe_write)
                    os._exit(1)
            try:
                if env is None:
                    os.execv(command, argv)
                else:
                    os.execvpe(command, argv, env)
            except OSError as err:
                # [issue #119] 5. If exec fails, the child writes the error
                # code back to the parent using the pipe, then exits.
                tosend = 'OSError:{}:{}'.format(err.errno, str(err))
                if PY3:
                    tosend = tosend.encode('utf-8')
                os.write(exec_err_pipe_write, tosend)
                os.close(exec_err_pipe_write)
                os._exit(os.EX_OSERR)
        # Parent
        inst = cls(pid, fd)
        # Set some informational attributes
        inst.argv = argv
        if env is not None:
            inst.env = env
        if cwd is not None:
            inst.launch_dir = cwd
        # [issue #119] 2. After forking, the parent closes the writing end
        # of the pipe and reads from the reading end.
        os.close(exec_err_pipe_write)
        exec_err_data = os.read(exec_err_pipe_read, 4096)
        os.close(exec_err_pipe_read)
        # [issue #119] 6. The parent reads eof (a zero-length read) if the
        # child successfully performed exec, since close-on-exec made
        # successful exec close the writing end of the pipe. Or, if exec
        # failed, the parent reads the error code and can proceed
        # accordingly. Either way, the parent blocks until the child calls
        # exec.
        if len(exec_err_data) != 0:
            try:
                # Error data is 'ExcName:errno:message'; rebuild the
                # original exception type in the parent.
                errclass, errno_s, errmsg = exec_err_data.split(b':', 2)
                exctype = getattr(builtins, errclass.decode('ascii'), Exception)
                exception = exctype(errmsg.decode('utf-8', 'replace'))
                if exctype is OSError:
                    exception.errno = int(errno_s)
            except:
                raise Exception('Subprocess failed, got bad error data: %r'
                                % exec_err_data)
            else:
                raise exception
        try:
            inst.setwinsize(*dimensions)
        except IOError as err:
            if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
                raise
        return inst
def __repr__(self):
clsname = type(self).__name__
if self.argv is not None:
args = [repr(self.argv)]
if self.env is not None:
args.append("env=%r" % self.env)
if self.launch_dir is not None:
args.append("cwd=%r" % self.launch_dir)
return "{}.spawn({})".format(clsname, ", ".join(args))
else:
return "{}(pid={}, fd={})".format(clsname, self.pid, self.fd)
@staticmethod
def _coerce_send_string(s):
if not isinstance(s, bytes):
return s.encode('utf-8')
return s
    @staticmethod
    def _coerce_read_string(s):
        # Return reads unchanged: this class deals in raw bytes.
        return s
    def __del__(self):
        '''This makes sure that no system resources are left open. Python only
        garbage collects Python objects. OS file descriptors are not Python
        objects, so they must be handled explicitly. If the child file
        descriptor was opened outside of this class (passed to the constructor)
        then this does not close it. '''
        if not self.closed:
            # It is possible for __del__ methods to execute during the
            # teardown of the Python VM itself. Thus self.close() may
            # trigger an exception because os.close may be None.
            try:
                self.close()
            # NOTE(review): the blanket except is deliberate -- nothing can
            # be reported safely during interpreter teardown.
            except:
                pass
    def fileno(self):
        '''Return the file descriptor of the pty for the child.'''
        return self.fd
    def close(self, force=True):
        '''This closes the connection with the child application. Note that
        calling close() more than once is valid. This emulates standard Python
        behavior with files. Set force to True if you want to make sure that
        the child is terminated (SIGKILL is sent if the child ignores SIGHUP
        and SIGINT). '''
        if not self.closed:
            self.flush()
            self.fileobj.close() # Closes the file descriptor
            # Give kernel time to update process status.
            time.sleep(self.delayafterclose)
            if self.isalive():
                if not self.terminate(force):
                    raise PtyProcessError('Could not terminate the child.')
            # Invalidate the descriptor; the pid is kept for bookkeeping.
            self.fd = -1
            self.closed = True
            #self.pid = None
    def flush(self):
        '''This does nothing. It is here to support the interface for a
        File-like object. '''
        # Intentionally a no-op: the pty streams are unbuffered (buffering=0).
        pass
    def isatty(self):
        '''This returns True if the file descriptor is open and connected to a
        tty(-like) device, else False.

        On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
        the child pty may not appear as a terminal device. This means
        methods such as setecho(), setwinsize(), getwinsize() may raise an
        IOError. '''
        return os.isatty(self.fd)
def waitnoecho(self, timeout=None):
    '''Wait until the terminal ECHO flag is set False.

    Returns True if echo mode turned off, or False if the ECHO flag was
    still set when *timeout* (seconds) expired. This can be used to
    detect when the child is waiting for a password, since applications
    usually turn off echo mode while reading one::

        p = pexpect.spawn('ssh user@example.com')
        p.waitnoecho()
        p.sendline(mypassword)

    If timeout is None this method blocks until the ECHO flag is False.
    '''
    if timeout is not None:
        end_time = time.time() + timeout
    while True:
        if not self.getecho():
            return True
        # Fix: test `is not None` BEFORE the comparison. The original
        # order (`timeout < 0 and timeout is not None`) evaluated
        # `None < 0` first, which raises TypeError on Python 3 whenever
        # timeout is None and echo is still on.
        if timeout is not None and timeout < 0:
            return False
        if timeout is not None:
            timeout = end_time - time.time()
        time.sleep(0.1)
def getecho(self):
    '''Return the terminal echo mode: True if echo is on, False if off.

    Child applications that are expecting you to enter a password often
    set ECHO False. See waitnoecho(). Not supported on platforms where
    ``isatty()`` returns False.

    Raises:
        IOError: if tcgetattr rejects the descriptor with EINVAL (the
            pty is not usable as a terminal on this platform).
    '''
    try:
        attr = termios.tcgetattr(self.fd)
    except termios.error as err:
        errmsg = 'getecho() may not be called on this platform'
        if err.args[0] == errno.EINVAL:
            raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
        raise
    # attr[3] is the lflag word of the tcgetattr result; cache the value.
    self.echo = bool(attr[3] & termios.ECHO)
    return self.echo
def setecho(self, state):
    '''Set the terminal echo mode on or off.

    Note that anything the child sent before the echo will be lost, so
    you should be sure that your input buffer is empty before you call
    setecho(). For example, the following will work as expected::

        p = pexpect.spawn('cat')  # Echo is on by default.
        p.sendline('1234')  # We expect see this twice from the child...
        p.expect(['1234'])  # ... once from the tty echo...
        p.expect(['1234'])  # ... and again from cat itself.
        p.setecho(False)  # Turn off tty echo
        p.sendline('abcd')  # We will set this only once (echoed by cat).
        p.sendline('wxyz')  # We will set this only once (echoed by cat)
        p.expect(['abcd'])
        p.expect(['wxyz'])

    The following WILL NOT WORK because the lines sent before the
    setecho will be lost::

        p = pexpect.spawn('cat')
        p.sendline('1234')
        p.setecho(False)  # Turn off tty echo
        p.sendline('abcd')  # We will set this only once (echoed by cat).
        p.sendline('wxyz')  # We will set this only once (echoed by cat)
        p.expect(['1234'])
        p.expect(['1234'])
        p.expect(['abcd'])
        p.expect(['wxyz'])

    Not supported on platforms where ``isatty()`` returns False.
    '''
    # _setecho is a module-level helper defined elsewhere in this file;
    # after delegating, cache the requested state.
    _setecho(self.fd, state)
    self.echo = state
def read(self, size=1024):
    """Read and return at most ``size`` bytes from the pty.

    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed.

    Unlike Pexpect's ``read_nonblocking`` method, this doesn't try to
    deal with the vagaries of EOF on platforms that do strange things,
    like IRIX or older Solaris systems. It handles the errno=EIO pattern
    used on Linux, and the empty-string return used on BSD platforms and
    (seemingly) on recent Solaris.
    """
    try:
        data = self.fileobj.read1(size)
    except (OSError, IOError) as err:
        if err.args[0] != errno.EIO:
            raise
        # Linux signals pty EOF with an EIO error.
        self.flag_eof = True
        raise EOFError('End Of File (EOF). Exception style platform.')
    if not data:
        # BSD (and apparently OpenIndiana) signal EOF with an empty read.
        self.flag_eof = True
        raise EOFError('End Of File (EOF). Empty string style platform.')
    return data
def readline(self):
    """Read one line from the pseudoterminal and return it as bytes.

    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed — either via an errno=EIO failure (Linux) or an
    empty read (BSD-style platforms, apparently including recent
    Solaris/OpenIndiana).
    """
    try:
        line = self.fileobj.readline()
    except (OSError, IOError) as err:
        if err.args[0] != errno.EIO:
            raise
        self.flag_eof = True
        raise EOFError('End Of File (EOF). Exception style platform.')
    if not line:
        self.flag_eof = True
        raise EOFError('End Of File (EOF). Empty string style platform.')
    return line
def _writeb(self, b, flush=True):
n = self.fileobj.write(b)
if flush:
self.fileobj.flush()
return n
def write(self, s, flush=True):
    """Write bytes *s* to the pseudoterminal; return the number of bytes written."""
    return self._writeb(s, flush=flush)
def sendcontrol(self, char):
    '''Send a control character to the child (such as Ctrl-C or Ctrl-D).

    For example, to send Ctrl-G (ASCII 7, bell, '\a')::

        child.sendcontrol('g')

    Returns a tuple of (bytes written, the byte sent); an unrecognized
    character yields ``(0, b'')``. See also sendintr() and sendeof().
    '''
    char = char.lower()
    code = ord(char)
    if ord('a') <= code <= ord('z'):
        # Letters map onto ASCII 1..26 (Ctrl-A .. Ctrl-Z).
        payload = _byte(code - ord('a') + 1)
        return self._writeb(payload), payload
    # Punctuation with a control-key equivalent.
    mapping = {'@': 0, '`': 0,
               '[': 27, '{': 27,
               '\\': 28, '|': 28,
               ']': 29, '}': 29,
               '^': 30, '~': 30,
               '_': 31,
               '?': 127}
    try:
        payload = _byte(mapping[char])
    except KeyError:
        return 0, b''
    return self._writeb(payload), payload
def sendeof(self):
    '''Send an EOF to the child.

    This sends a character which causes the pending parent output buffer
    to be sent to the waiting child program without waiting for
    end-of-line. If it is the first character of the line, the read() in
    the user program returns 0, which signifies end-of-file. This means
    to work as expected a sendeof() has to be called at the beginning of
    a line. This method does not send a newline; it is the
    responsibility of the caller to ensure the eof is sent at the
    beginning of a line.

    Returns a tuple of (bytes written, the byte sent).
    '''
    # _EOF is a module-level constant defined elsewhere in this file.
    return self._writeb(_EOF), _EOF
def sendintr(self):
    '''Send a SIGINT to the child. It does not require the SIGINT to be
    the first character on a line.

    Returns a tuple of (bytes written, the byte sent).
    '''
    # _INTR is a module-level constant defined elsewhere in this file.
    return self._writeb(_INTR), _INTR
def eof(self):
    """Return True if the EOF exception was ever raised for this child."""
    return self.flag_eof
def terminate(self, force=False):
    '''Force a child process to terminate.

    It starts nicely with SIGHUP and SIGINT. If *force* is True then it
    moves onto SIGKILL. Returns True if the child was terminated, False
    if the child could not be terminated.
    '''
    if not self.isalive():
        return True
    try:
        self.kill(signal.SIGHUP)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        # SIGCONT before SIGINT — presumably so a stopped child resumes
        # and can actually receive the subsequent signals (TODO confirm).
        self.kill(signal.SIGCONT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        self.kill(signal.SIGINT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        if force:
            self.kill(signal.SIGKILL)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            else:
                return False
        return False
    except OSError:
        # I think there are kernel timing issues that sometimes cause
        # this to happen. I think isalive() reports True, but the
        # process is dead to the kernel.
        # Make one last attempt to see if the kernel is up to date.
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        else:
            return False
def wait(self):
    '''Wait until the child exits and return its exit status.

    This is a blocking call. It will not read any data from the child,
    so this will block forever if the child has unread output and has
    terminated. In other words, the child may have printed output then
    called exit(), but, the child is technically still alive until its
    output is read by the parent.
    '''
    if self.isalive():
        pid, status = os.waitpid(self.pid, 0)
    else:
        # Child already reaped; return the cached status rather than
        # calling waitpid() on a nonexistent child.
        return self.exitstatus
    # Fix: os.WEXITSTATUS() is only meaningful when WIFEXITED() is true,
    # so decode the status fields inside the branches below. (The
    # original called WEXITSTATUS unconditionally before the checks,
    # spuriously setting exitstatus for signaled/stopped children.)
    if os.WIFEXITED(status):
        self.status = status
        self.exitstatus = os.WEXITSTATUS(status)
        self.signalstatus = None
        self.terminated = True
    elif os.WIFSIGNALED(status):
        self.status = status
        self.exitstatus = None
        self.signalstatus = os.WTERMSIG(status)
        self.terminated = True
    elif os.WIFSTOPPED(status):  # pragma: no cover
        # You can't call wait() on a child process in the stopped state.
        raise PtyProcessError('Called wait() on a stopped child ' +
                              'process. This is not supported. Is some other ' +
                              'process attempting job control with our child pid?')
    return self.exitstatus
def isalive(self):
    '''Return True if the child process appears to be running, else False.

    This is non-blocking. If the child was terminated then this reaps it
    and records the exitstatus or signalstatus of the child. It can take
    literally SECONDS for Solaris to return the right status.

    Raises:
        PtyProcessError: if there is no child to query, or the child is
            in the stopped state (job control is not supported).
    '''
    if self.terminated:
        return False

    if self.flag_eof:
        # This is for Linux, which requires the blocking form
        # of waitpid to get the status of a defunct process.
        # This is super-lame. The flag_eof would have been set
        # in read_nonblocking(), so this should be safe.
        waitpid_options = 0
    else:
        waitpid_options = os.WNOHANG

    try:
        pid, status = os.waitpid(self.pid, waitpid_options)
    except OSError as e:
        # No child processes
        if e.errno == errno.ECHILD:
            raise PtyProcessError('isalive() encountered condition ' +
                                  'where "terminated" is 0, but there was no child ' +
                                  'process. Did someone else call waitpid() ' +
                                  'on our process?')
        else:
            raise

    # I have to do this twice for Solaris.
    # I can't even believe that I figured this out...
    # If waitpid() returns 0 it means that no child process
    # wishes to report, and the value of status is undefined.
    if pid == 0:
        try:
            ### os.WNOHANG) # Solaris!
            pid, status = os.waitpid(self.pid, waitpid_options)
        except OSError as e:  # pragma: no cover
            # This should never happen...
            if e.errno == errno.ECHILD:
                raise PtyProcessError('isalive() encountered condition ' +
                                      'that should never happen. There was no child ' +
                                      'process. Did someone else call waitpid() ' +
                                      'on our process?')
            else:
                raise

        # If pid is still 0 after two calls to waitpid() then the process
        # really is alive. This seems to work on all platforms, except for
        # Irix which seems to require a blocking call on waitpid or select,
        # so I let read_nonblocking take care of this situation
        # (unfortunately, this requires waiting through the timeout).
        if pid == 0:
            return True
    # Fix: the original repeated `if pid == 0: return True` a second time
    # at this level; the duplicate was unreachable dead code and has been
    # removed.

    if os.WIFEXITED(status):
        self.status = status
        self.exitstatus = os.WEXITSTATUS(status)
        self.signalstatus = None
        self.terminated = True
    elif os.WIFSIGNALED(status):
        self.status = status
        self.exitstatus = None
        self.signalstatus = os.WTERMSIG(status)
        self.terminated = True
    elif os.WIFSTOPPED(status):
        raise PtyProcessError('isalive() encountered condition ' +
                              'where child process is stopped. This is not ' +
                              'supported. Is some other process attempting ' +
                              'job control with our child pid?')
    return False
def kill(self, sig):
    """Send the given signal to the child application.

    In keeping with UNIX tradition it has a misleading name: it does not
    necessarily kill the child unless you send the right signal. See the
    :mod:`signal` module for constants representing signal numbers.
    """
    # Same as os.kill, but supplies the child's pid and skips dead children.
    if self.isalive():
        os.kill(self.pid, sig)
def getwinsize(self):
    """Return the window size of the pseudoterminal as a tuple (rows, cols)."""
    # Fall back to a hard-coded ioctl number when the termios module
    # does not expose TIOCGWINSZ on this platform.
    TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
    # The kernel fills a struct winsize of 4 unsigned shorts
    # (rows, cols, xpixel, ypixel); only the first two are returned.
    s = struct.pack('HHHH', 0, 0, 0, 0)
    x = fcntl.ioctl(self.fd, TIOCGWINSZ, s)
    return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, rows, cols):
    """Set the terminal window size of the child tty.

    This will cause a SIGWINCH signal to be sent to the child. This does
    not change the physical window size. It changes the size reported to
    TTY-aware applications like vi or curses -- applications that
    respond to the SIGWINCH signal.
    """
    # Delegates to the module-level _setwinsize helper defined elsewhere
    # in this file.
    return _setwinsize(self.fd, rows, cols)
class PtyProcessUnicode(PtyProcess):
    """Unicode wrapper around a process running in a pseudoterminal.

    Exposes a similar interface to :class:`PtyProcess`, except that the
    read methods return unicode text and :meth:`write` accepts it.
    """

    # On Python 2 text is ``unicode``; on Python 3 it is ``str``.
    if PY3:
        string_type = str
    else:
        string_type = unicode  # analysis:ignore

    def __init__(self, pid, fd, encoding='utf-8', codec_errors='strict'):
        super(PtyProcessUnicode, self).__init__(pid, fd)
        self.encoding = encoding
        self.codec_errors = codec_errors
        # Incremental decoder so multi-byte sequences split across reads
        # still decode correctly.
        self.decoder = codecs.getincrementaldecoder(encoding)(errors=codec_errors)

    def read(self, size=1024):
        """Read at most ``size`` bytes from the pty; return them as unicode.

        Can block if there is nothing to read. Raises :exc:`EOFError` if
        the terminal was closed. The size argument still refers to
        bytes, not unicode code points.
        """
        raw = super(PtyProcessUnicode, self).read(size)
        return self.decoder.decode(raw, final=False)

    def readline(self):
        """Read one line from the pseudoterminal; return it as unicode.

        Can block if there is nothing to read. Raises :exc:`EOFError` if
        the terminal was closed.
        """
        raw = super(PtyProcessUnicode, self).readline()
        return self.decoder.decode(raw, final=False)

    def write(self, s):
        """Write the unicode string ``s``; return the number of bytes written."""
        return super(PtyProcessUnicode, self).write(s.encode(self.encoding))
| |
import itertools
import json
import time
from collections import defaultdict
from jsonschema import validate
from cloudshell.cp.aws.domain.common.CheckCancellationThread import CheckCancellationThread
from cloudshell.cp.aws.domain.conncetivity.operations.traffic_mirror_cleaner import TrafficMirrorCleaner
from cloudshell.cp.aws.models.traffic_mirror_fulfillment import TrafficMirrorFulfillment, create_results
from cloudshell.cp.core.models import RemoveTrafficMirroringResult
from cloudshell.cp.aws.domain.services.ec2.mirroring import TrafficMirrorService
flatten = itertools.chain.from_iterable
class TrafficMirrorOperation(object):
    """Orchestrates creation and removal of AWS EC2 traffic mirroring
    (targets, filters and sessions) on behalf of CloudShell requests.

    NOTE(review): this module uses Python-2-only constructs (`e.message`,
    `long`); on Python 3 those raise AttributeError/NameError — confirm
    the supported runtime before porting.
    """

    def __init__(self, tag_service, session_number_service, traffic_mirror_service, cancellation_service):
        """
        :param cloudshell.cp.aws.domain.services.ec2.tags.TagService tag_service:
        :param cloudshell.cp.aws.domain.services.cloudshell.traffic_mirror_pool_services.SessionNumberService session_number_service:
        :param cloudshell.cp.aws.domain.services.ec2.mirroring.TrafficMirrorService traffic_mirror_service:
        :param cloudshell.cp.aws.domain.common.cancellation_service.CommandCancellationService cancellation_service:
        """
        self._tag_service = tag_service
        self._session_number_service = session_number_service
        self._traffic_mirror_service = traffic_mirror_service  # type: TrafficMirrorService
        self._cancellation_service = cancellation_service

    def _handle_cancellation(self, cancellation_context):
        # Polls once a second and raises when the user cancels.
        # NOTE(review): appears unused within this class — create() uses
        # CheckCancellationThread instead; possibly kept for callers
        # elsewhere. Confirm before removing.
        while True:
            time.sleep(1)
            cancelled = self._cancellation_service.check_if_cancelled(cancellation_context)
            if cancelled:
                raise Exception('User cancelled traffic mirroring')

    def create(self,
               ec2_client,
               reservation,
               actions,
               cancellation_context,
               logger,
               cloudshell):
        """Fulfill CreateTrafficMirroring actions: check out session
        numbers, create (or reuse) mirror targets, then create filters
        and sessions. Rolls back partial work on failure.

        :param cloudshell.shell.core.driver_context.CancellationContext cancellation_context:
        :param cloudshell.api.cloudshell_api.CloudShellAPISession cloudshell:
        :param EC2.Client ec2_client:
        :param cloudshell.cp.aws.models.reservation_model.ReservationModel reservation:
        :param list[cloudshell.cp.core.models.CreateTrafficMirroring] actions: what traffic mirroring sessions,
               targets and filters to apply
        :param logging.Logger logger:
        :return: results produced by create_results(success, fulfillments, message)
        """
        success = False
        logger.info('Received request to deploy traffic mirroring. ')
        action_parameters_string = self._get_action_parameters_string(actions)
        logger.info(action_parameters_string)
        # session numbers: session number is the AWS priority of a traffic mirror session; the lower a session number,
        # the earlier it gets the opportunity to capture traffic from a source nic.
        # anyways, session numbers MUST be unique; to provide this behavior, we are using cloudshell pool checkout
        # to read more about session numbers, go here: https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-session.html
        # to read more about cloudshell pools, go here: https://help.quali.com/Online%20Help/0.0/TestShell-API/TestShell%20Python%20API.html#CheckoutFromPool
        self._checkout_session_numbers(actions, cloudshell, logger, reservation)
        fulfillments = [TrafficMirrorFulfillment(x, reservation) for x in actions]
        logger.info('Determined session numbers: ' + ', '.join(str(f.session_number) for f in fulfillments))
        # NOTE(review): `success` was already initialized above; this
        # second assignment is redundant.
        success = False
        try:
            # The thread raises into this block if the user cancels.
            with CheckCancellationThread(cancellation_context, self._cancellation_service):
                logger.info('Getting or creating traffic mirror targets...')
                self._get_or_create_targets(ec2_client,
                                            reservation,
                                            fulfillments)
                logger.info('Creating traffic mirror filters and sessions...')
                self._create_traffic_filters_and_sessions(ec2_client,
                                                          fulfillments)
                # self._get_or_create_sessions(ec2_client,
                #                              fulfillments)
            success = True
            message = 'Success'
            logger.info('Successfully fulfilled traffic mirror request')
        except Exception as e:
            # NOTE(review): e.message is Python 2 only; on Python 3 this
            # line itself raises AttributeError.
            logger.exception('Failed to fulfill traffic mirror request: ' + e.message)
            message = e.message
            logger.error('Rolling back partial traffic mirror request...')
            TrafficMirrorCleaner.rollback(ec2_client, fulfillments, logger, cloudshell, reservation,
                                          self._session_number_service)
        results = create_results(success, fulfillments, message)
        return results

    def _get_action_parameters_string(self, actions):
        # Human-readable dump of the requested actions, for the log.
        return 'Here are the params:\n' + '\n'.join(str(x) for x in actions)

    def _get_or_create_targets(self, ec2_client, reservation, fulfillments):
        """
        create traffic mirror targets for target nics OR find existing targets that correspond
        to target nic in fulfillment

        :param list[cloudshell.cp.aws.models.traffic_mirror_fulfillment.TrafficMirrorFulfillment] fulfillments:
        """
        # Deduplicate target nic ids before querying AWS.
        target_nics = list({f.target_nic_id for f in fulfillments})
        targets_found_nics_to_target_id = self._traffic_mirror_service.find_traffic_targets_by_nics(ec2_client,
                                                                                                    target_nics)
        self._create_targets_or_assign_existing_targets(ec2_client,
                                                        targets_found_nics_to_target_id,
                                                        self._tag_service,
                                                        reservation,
                                                        fulfillments)

    def _create_traffic_filters_and_sessions(self, ec2_client, fulfillments):
        """
        :param list[cloudshell.cp.aws.models.traffic_mirror_fulfillment.TrafficMirrorFulfillment] fulfillments:
        """
        for fulfillment in fulfillments:
            self._create_filter(ec2_client, fulfillment)
            fulfillment.mirror_session_id = self._create_session(ec2_client, fulfillment)

    def _get_or_create_sessions(self, ec2_client, fulfillments):
        """
        :param list[cloudshell.cp.aws.models.traffic_mirror_fulfillment.TrafficMirrorFulfillment] fulfillments:
        """
        # NOTE(review): currently only referenced from commented-out code
        # in create(); kept for possible reuse.
        session_names = [f.session_name for f in fulfillments]
        session_name_to_found_session = self._traffic_mirror_service.find_sessions_by_session_names(ec2_client,
                                                                                                    session_names)
        found_session_names = session_name_to_found_session.keys()
        for s in session_names:
            fulfillment = next((f for f in fulfillments if f.session_name == s))
            if s not in found_session_names:
                self._create_filter(ec2_client, fulfillment)
                fulfillment.mirror_session_id = self._create_session(ec2_client, fulfillment)
            else:
                fulfillment.mirror_session_id = session_name_to_found_session[s]['TrafficMirrorSessionId']

    def _create_targets_or_assign_existing_targets(self, ec2_client,
                                                   nics_to_found_target_ids,
                                                   tag_service,
                                                   reservation,
                                                   fulfillments):
        """
        :param list[cloudshell.cp.aws.models.traffic_mirror_fulfillment.TrafficMirrorFulfillment] fulfillments:
        :param cloudshell.cp.aws.domain.services.ec2.tags.TagService tag_service:
        """
        # Group the fulfillments by target nic so one target serves all
        # sessions that mirror to the same nic.
        # NOTE(review): list comprehension used purely for side effects.
        target_nics_to_fulfillments = defaultdict(list)
        [target_nics_to_fulfillments[f.target_nic_id].append(f) for f in fulfillments]
        for target_nic in target_nics_to_fulfillments.keys():
            if target_nic not in nics_to_found_target_ids:
                target_tags = tag_service.get_default_tags(target_nic, reservation)
                target = self._traffic_mirror_service.create_traffic_mirror_target_from_nic(ec2_client, target_nic,
                                                                                            target_tags)
                self._assign_target_to_fulfillments(target_nics_to_fulfillments[target_nic], target)
            else:
                self._assign_target_to_fulfillments(target_nics_to_fulfillments[target_nic],
                                                    nics_to_found_target_ids[target_nic])

    @staticmethod
    def _assign_target_to_fulfillments(fulfillments, traffic_target_id):
        # Stamp the resolved target id onto every fulfillment in the group.
        for fulfillment in fulfillments:
            fulfillment.traffic_mirror_target_id = traffic_target_id

    def _create_session(self, ec2_client, fulfillment):
        """
        :param cloudshell.cp.aws.models.traffic_mirror_fulfillment.TrafficMirrorFulfillment fulfillment:
        :return: the created traffic mirror session id
        """
        mirror_session_tags = self._tag_service.get_default_tags('session-' + fulfillment.session_name,
                                                                 fulfillment.reservation)
        return self._traffic_mirror_service.create_traffic_mirror_session(ec2_client, fulfillment, mirror_session_tags)

    def _create_filter(self, ec2_client, fulfillment):
        # Creates the filter, stores its id on the fulfillment, then adds
        # the requested filter rules to it.
        traffic_filter_tags = self._tag_service.get_default_tags('filter-' + fulfillment.session_name,
                                                                 fulfillment.reservation)
        fulfillment.traffic_mirror_filter_id = \
            self._traffic_mirror_service.create_filter(ec2_client, traffic_filter_tags)
        self._traffic_mirror_service.create_filter_rules(ec2_client, fulfillment)

    def validate_create_actions(self, actions, request, logger):
        """Validate parsed actions and the raw request JSON against
        CREATE_SCHEMA before any AWS calls are made.

        :param str request:
        :param list[cloudshell.cp.core.models.CreateTrafficMirroring] actions:
        """
        self._there_are_actions(actions)
        result = json.loads(request)
        for a in result['driverRequest']['actions']:
            self._validate_schema(CREATE_SCHEMA, a)
        self._there_are_source_and_target_nics(actions)
        self._session_numbers_are_valid(actions, logger)  # must be 1-32766 or NONE

    def _there_are_source_and_target_nics(self, actions):
        # Every action must name both ends of the mirror.
        for a in actions:
            if not a.actionParams.sourceNicId:
                raise Exception('Missing a source nic on actionId {0}'.format(a.actionId))
            if not a.actionParams.targetNicId:
                raise Exception('Missing a target nic on actionId {0}'.format(a.actionId))

    def _there_are_actions(self, actions):
        if len(actions) == 0:
            raise Exception('Invalid request')

    def _checkout_session_numbers(self, actions, cloudshell, logger, reservation):
        """
        session number must be between 1-32766 and unique per source nic id;
        every traffic mirror session must have a number assigned
        the number represents the priority of a target when pulling the traffic packets.

        :param list[cloudshell.cp.core.models.CreateTrafficMirroring] actions:
        """
        # Group actions by source nic; uniqueness of session numbers is
        # scoped per source nic.
        # NOTE(review): list comprehension used purely for side effects.
        source_nic_to_traffic_action = defaultdict(list)
        [source_nic_to_traffic_action[a.actionParams.sourceNicId].append(a) for a in actions]
        for source in source_nic_to_traffic_action.keys():
            self.get_unique_session_number_and_assign_to_mirror_session_request(cloudshell, logger, reservation, source,
                                                                                source_nic_to_traffic_action)

    def get_unique_session_number_and_assign_to_mirror_session_request(self, cloudshell, logger, reservation, source,
                                                                       source_nic_to_traffic_action):
        # Replaces each requested session number with one checked out from
        # the CloudShell pool (honoring the requested number when possible
        # — presumably; behavior depends on SessionNumberService.checkout).
        for action in source_nic_to_traffic_action[source]:
            session_number = action.actionParams.sessionNumber
            action.actionParams.sessionNumber = self._session_number_service.checkout(cloudshell,
                                                                                      logger,
                                                                                      reservation,
                                                                                      source,
                                                                                      session_number)

    @staticmethod
    def _session_numbers_are_valid(actions, logger):
        """
        :param list[cloudshell.cp.core.models.CreateTrafficMirroring] actions:
        :param logging.Logger logger:
        """
        error_msg = 'Session number must be either empty or an integer in the range 1-32766'
        # must be 1-32766 or NONE
        for a in actions:
            try:
                session_number = int(a.actionParams.sessionNumber)
                # NOTE(review): int() always returns an int (or long on
                # Py2), so the `not isinstance(...)` clause is always
                # False; with and/or precedence the condition effectively
                # reduces to the two range comparisons. `long` also makes
                # this Python-2-only. Worth simplifying.
                if session_number and \
                        not isinstance(session_number, (int, long)) \
                        or session_number > 32766 \
                        or session_number < 1:
                    logger.error(error_msg + '\nSession number is {0}'.format(session_number))
                    raise Exception(error_msg)
            except ValueError:
                # int() failed: an empty string means "no preference",
                # anything else is a hard error.
                if a.actionParams.sessionNumber.strip() == '':
                    a.actionParams.sessionNumber = None
                else:
                    raise ValueError(
                        'Session number must be an integer, or an empty string! Passed an invalid session number {0} in action {1}'
                        .format(a.actionParams.sessionNumber, a.actionId))

    def remove(self, ec2_client, reservation, actions, logger, cloudshell):
        """Remove the traffic mirror sessions (and their filters) named by
        the actions, releasing their session numbers back to the pool.

        :param cloudshell.api.cloudshell_api.CloudShellAPISession cloudshell:
        :param EC2.Client ec2_client:
        :param cloudshell.cp.aws.models.reservation_model.ReservationModel reservation:
        :param list[cloudshell.cp.core.models.RemoveTrafficMirroring] actions:
        :param logging.Logger logger:
        :return: list of RemoveTrafficMirroringResult, one per action
        """
        logger.info('Received request to remove traffic mirroring. ')
        # All results start as failures; flipped to success at the end of
        # the happy path.
        remove_results = [RemoveTrafficMirroringResult(
            actionId=a.actionId,
            success=False
        ) for a in actions]
        try:
            logger.info('Finding sessions to remove...')
            sessions = self._find_sessions_to_remove(ec2_client, actions)
            if not len(sessions) > 0:
                raise Exception('No sessions found to remove!')
            logger.info('Removing sessions and release...')
            session_ids = [s['TrafficMirrorSessionId'] for s in sessions]
            TrafficMirrorCleaner.delete_mirror_sessions(
                ec2_client,
                session_ids
            )
            self._releasing_session_numbers(cloudshell, reservation, ec2_client, logger, sessions)
            traffic_mirror_filter_ids = [s['TrafficMirrorFilterId'] for s in sessions]
            if len(traffic_mirror_filter_ids) > 0:
                logger.info('Removing filters...')
                TrafficMirrorCleaner.delete_mirror_filters(ec2_client, traffic_mirror_filter_ids)
            logger.info('Successfully removed traffic mirroring')
            for res in remove_results:
                res.success = True
                res.infoMessage = 'Found sessions: {0}.'.format(', '.join(session_ids))
        except Exception as e:
            # NOTE(review): e.message is Python 2 only (see create()).
            logger.exception('Failed to remove traffic mirroring: ' + e.message)
            for res in remove_results:
                res.errorMessage = 'Failed to remove traffic mirroring: ' + e.message
        return remove_results

    def _releasing_session_numbers(self, cloudshell, reservation, ec2_client, logger, sessions):
        # Return the checked-out session numbers to the CloudShell pool.
        # NOTE(review): only the first session's NetworkInterfaceId is
        # used for the release call — assumes all sessions share one
        # source nic; confirm against callers.
        session_numbers = [str(s['SessionNumber']) for s in sessions]
        traffic_mirror_session_network_interface_id = next((s['NetworkInterfaceId'] for s in sessions))
        TrafficMirrorCleaner.release_session_numbers_from_pool_by_session_ids_and_network_interface_id(
            cloudshell,
            self._session_number_service,
            logger,
            reservation,
            session_numbers,
            traffic_mirror_session_network_interface_id
        )

    def _find_sessions_to_remove(self, ec2_client, actions):
        # Collect sessions referenced directly by session id, plus those
        # reachable via the actions' target nic ids, then deduplicate by
        # TrafficMirrorSessionId.
        sessions = []
        session_ids_from_request = [a.sessionId for a in actions if a.sessionId]
        sessions.extend(
            self._traffic_mirror_service.find_sessions_by_session_ids(ec2_client, session_ids_from_request)
        )
        traffic_mirror_target_nic_ids = [a.targetNicId for a in actions if a.targetNicId]
        traffic_mirror_target_ids = self._traffic_mirror_service.find_traffic_mirror_target_ids_by_target_nic_ids(
            ec2_client, traffic_mirror_target_nic_ids)
        sessions.extend(
            self._traffic_mirror_service.find_sessions_by_traffic_mirror_target_ids(ec2_client,
                                                                                    traffic_mirror_target_ids)
        )
        # Last occurrence wins; on Python 3 .values() is a view, not a list.
        unique_sessions = {s['TrafficMirrorSessionId']: s for s in sessions}
        return unique_sessions.values()

    @staticmethod
    def validate_remove_request(request, logger):
        """Validate the raw remove-request JSON against REMOVE_SCHEMA.

        :param str request:
        """
        logger.info('Validating requested actions...')
        result = json.loads(request)
        actions = result['driverRequest']['actions']
        if len(actions) == 0:
            raise Exception('Invalid request, expected remove actions but none found')
        for a in actions:
            TrafficMirrorOperation._validate_schema(REMOVE_SCHEMA, a)
            # NOTE(review): `a` is a dict here, so `a.actionId` in the
            # error message below raises AttributeError instead of
            # formatting the message — should be a['actionId'].
            if not a['sessionId'] and not a['targetNicId']:
                raise Exception(
                    'Must have either sessionId or target_nic_id for actionId {0} but received empty values'.format(a.actionId))
        logger.info('Completed validation for Remove Traffic Mirroring request...')

    def find_traffic_mirror_target_nic_id_by_target_id(self, ec2_client, traffic_mirror_target_id):
        # Thin delegation to the traffic mirror service.
        return self._traffic_mirror_service.find_traffic_mirror_target_nic_id_by_target_id(ec2_client,
                                                                                           traffic_mirror_target_id)

    @staticmethod
    def _validate_schema(schema, action):
        """Raise jsonschema.ValidationError if *action* does not match *schema*.

        :param dict action: raw action dict from the driver request
        """
        validate(action, schema)
# JSON Schema used by TrafficMirrorOperation.validate_remove_request()
# to validate each raw RemoveTrafficMirroring action dict.
# NOTE(review): the "$id" URL is a leftover from an example schema.
REMOVE_SCHEMA = {
    "$id": "https://example.com/geographical-location.schema.json",
    "$schema": "http://json-schema.org/draft-07/schema#",
    "title": "RemoveTrafficMirroring",
    "required": ["actionId", "sessionId", "targetNicId"],
    "additionalProperties": False,
    "properties": {
        "type": {"type": "string"},
        "actionId": {"type": "string"},
        "sessionId": {"type": "string"},
        "targetNicId": {"type": "string"}
    }
}
# JSON Schema used by TrafficMirrorOperation.validate_create_actions()
# to validate each raw CreateTrafficMirroring action dict, including its
# nested actionParams and optional filterRules entries.
# NOTE(review): the "$id" URL is a leftover from an example schema.
CREATE_SCHEMA = {
    "$id": "https://example.com/geographical-location.schema.json",
    "$schema": "http://json-schema.org/draft-07/schema#",
    "title": "CreateTrafficMirroring",
    "type": "object",
    "additionalProperties": False,
    "required": ["actionId", "actionParams"],
    "definitions": {
        "CreateTrafficMirroringParams": {
            "title": "CreateTrafficMirroringParams",
            "type": "object",
            "additionalProperties": False,
            "required": ["sourceNicId", "targetNicId"],
            "properties": {
                "type": {"type": "string"},
                "sourceNicId": {
                    "type": "string"
                },
                "targetNicId": {
                    "type": "string"
                },
                "sessionNumber": {
                    "type": "string"
                },
                "filterRules": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/filterRule"
                    }
                }
            }},
        "filterRule": {
            "type": "object",
            "additionalProperties": False,
            "required": ["direction", "protocol"],
            "properties": {
                "type": {"type": "string"},
                "direction": {
                    "type": "string"
                },
                "destinationCidr": {
                    "type": "string"
                },
                "destinationPortRange": {
                    "type": ["object", "null"]
                },
                "sourceCidr": {
                    "type": "string"
                },
                "sourcePortRange": {
                    "type": ["object", "null"]
                },
                "protocol": {
                    "type": "string"
                }
            }
        }
    },
    "properties": {
        "actionId": {
            "type": "string",
        },
        "type": {"type": "string"},
        "actionParams": {"$ref": "#/definitions/CreateTrafficMirroringParams"}
    }
}
| |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-dataproc-metastore documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
# NOTE: kept empty in this autogenerated config, so `release` and
# `version` below render as empty strings.
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-dataproc-metastore"
# NOTE(review): year here ("2019") disagrees with the 2021 license
# header above — confirm which is intended before changing.
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx 5+ warns when language is None and treats it as
# "en" — confirm the Sphinx version pin before changing.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    "_build",
    "**/.nox/**/*",
    "samples/AUTHORING_GUIDE.md",
    "samples/CONTRIBUTING.md",
    "samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "description": "Google Cloud Client Libraries for google-cloud-dataproc-metastore",
    "github_user": "googleapis",
    "github_repo": "python-dataproc-metastore",
    "github_banner": True,
    "font_family": "'Roboto', Georgia, sans",
    "head_font_family": "'Roboto', Georgia, serif",
    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(review): Sphinx warns when "_static" is missing — confirm the docs
# tree actually ships this directory.
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-dataproc-metastore-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
    # Temporarily suppress this to avoid "more than one target found for
    # cross-reference" warning, which are intractable for us to avoid while in
    # a mono-repo.
    # See https://github.com/sphinx-doc/sphinx/blob
    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
    "ref.python"
]
# -- Options for LaTeX output ---------------------------------------------

# Every LaTeX knob (papersize, pointsize, preamble, figure_align) is left at
# its Sphinx default.
latex_elements = {}

# One LaTeX manual for the whole tree:
# (start doc, output name, title, author, document class).
latex_documents = [
    (root_doc, "google-cloud-dataproc-metastore.tex",
     "google-cloud-dataproc-metastore Documentation", author, "manual"),
]

# Defaults kept for: latex_logo, latex_use_parts, latex_show_pagerefs,
# latex_show_urls, latex_appendices, latex_domain_indices.

# -- Options for manual page output ---------------------------------------

# One man page entry: (start doc, name, description, authors, section).
man_pages = [
    (root_doc, "google-cloud-dataproc-metastore",
     "google-cloud-dataproc-metastore Documentation", [author], 1),
]

# man_show_urls keeps its default (no URLs after external links).

# -- Options for Texinfo output -------------------------------------------

# One Texinfo document: (start doc, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    (root_doc, "google-cloud-dataproc-metastore",
     "google-cloud-dataproc-metastore Documentation", author,
     "google-cloud-dataproc-metastore",
     "google-cloud-dataproc-metastore Library", "APIs"),
]

# texinfo_appendices, texinfo_domain_indices, texinfo_show_urls and
# texinfo_no_detailmenu all keep their defaults.

# Cross-project references resolved by sphinx.ext.intersphinx; the Python
# standard library plus the client-library dependency stack.
intersphinx_mapping = {
    "python": ("https://python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
    "grpc": ("https://grpc.github.io/grpc/python/", None),
    "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# -- Napoleon settings ----------------------------------------------------

# Accept both Google-style and NumPy-style docstrings.
napoleon_google_docstring = True
napoleon_numpy_docstring = True

# Skip documented private members, but include documented special members.
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True

# Render Example/Note/References sections as plain sections rather than
# admonition boxes.
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False

# Emit :param:/:rtype: fields instead of :ivar:-style attribute docs.
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| |
from __future__ import absolute_import
# Python 2/3 compatibility: on Python 2 ``urlparse`` is a top-level module;
# on Python 3 the same functions live in ``urllib.parse``.
try:
    import urlparse
except:
    import urllib.parse as urlparse
import urllib
import json
import re
import xmltodict
from bs4 import BeautifulSoup
from external.amazon_scraper import (
    get,
    product_url,
    extract_asin,
    reviews_url,
    strip_html_tags,
    dict_acceptable,
    retry,
    html_parser,
    amazon_base,
)
# Python 3 has no ``unicode`` builtin; alias it to ``str`` there so the rest
# of the module can call ``unicode(...)`` unconditionally.
if 'unicode' not in dir(globals()['__builtins__']):
    unicode = str
class Product(object):
    """Wrap a product returned by the Amazon product API.

    Unknown attributes are delegated to the wrapped ``product`` object,
    while several properties augment it with data scraped from the
    product's HTML page (fetched lazily and cached in :attr:`soup`).
    """

    def __init__(self, api, product):
        self.api = api
        self.product = product
        # Parsed product page; populated on first access of ``soup``.
        self._soup = None

    def __getattr__(self, name):
        """
        Allow direct access to the product object
        """
        return getattr(self.product, name)

    @property
    @retry()
    def soup(self):
        """Lazily fetch and parse the product page.

        The fetch is deferred so simple operations that never touch the
        page do not pay for an HTTP round trip.
        """
        # ``is None`` (not truthiness): a parsed-but-empty document is falsy
        # and would otherwise be re-fetched on every access.
        if self._soup is None:
            r = get(self.url, self.api)
            self._soup = BeautifulSoup(r.text, html_parser)
        return self._soup

    @property
    def url(self):
        """Canonical product page URL derived from the ASIN."""
        return product_url(self.asin)

    def _linked_asins(self, tag):
        """Return ASINs linked from *tag*, excluding this product's own."""
        asins = set(
            extract_asin(anchor['href'])
            for anchor in tag.find_all('a', href=re.compile(r'/dp/'))
        )
        asins.discard(self.asin)
        return list(asins)

    @property
    def alternatives(self):
        """ASINs of alternate editions/formats linked from the page.

        TODO: there are FAR more versions hidden behind API calls;
        it would be nice to get them all.
        """
        # kindle
        tag = self.soup.find('table', class_='twisterMediaMatrix')
        if tag:
            return self._linked_asins(tag)
        # paperback
        tag = self.soup.find('div', id='MediaMatrix')
        if tag:
            return self._linked_asins(tag)
        return []

    @property
    def reviews_url(self):
        """URL of the product's reviews page.

        We could use the ASIN to directly make a review URL, but some
        products actually use the ISBN for the review URL (the ASIN
        version would fail), so extract the identifier from the review
        iframe URL Amazon hands us and only fall back to our own ASIN.
        """
        try:
            # the attribute name has changed across versions of the
            # python simple product api, sigh
            item = getattr(self.product, 'item', None)
            if not item:
                item = getattr(self.product, 'parsed_response', None)
            url = unicode(item['CustomerReviews']['IFrameURL'])
            query = urlparse.parse_qs(urlparse.urlparse(url).query)
            asin = query['asin'][0]
        except Exception:
            asin = self.asin
        return reviews_url(asin)

    def reviews(self):
        """Fetch this product's reviews through the API."""
        return self.api.reviews(ItemId=self.asin)

    @property
    def author_bio(self):
        """Author biography text scraped from the page, or None."""
        tag = self.soup.find('div', class_='mainContent')
        if tag:
            text = strip_html_tags(unicode(tag))
            if text:
                return text
        return None

    @property
    def author_page_url(self):
        """Absolute URL of the author's Amazon page, or None."""
        tag = self.soup.find('div', class_='author_page_link')
        if tag:
            a = tag.find('a', href=re.compile(r'/e/', flags=re.I))
            if a:
                link = unicode(a['href'])
                return urlparse.urljoin(amazon_base, link)
        return None

    @staticmethod
    def _parse_count(text):
        """Parse a review count that may contain thousands separators."""
        return int(text.replace(',', ''))

    @property
    def ratings(self):
        """Review counts per star, indexed 0 (one star) .. 4 (five stars)."""
        ratings = [0, 0, 0, 0, 0]
        # older page layout
        reviews_div = self.soup.find('div', class_='reviews')
        if reviews_div:
            for rating, rating_class in [
                (4, 'histoRowfive'),
                (3, 'histoRowfour'),
                (2, 'histoRowthree'),
                (1, 'histoRowtwo'),
                (0, 'histoRowone'),
            ]:
                rating_div = reviews_div.find('div', class_=rating_class)
                if rating_div:
                    # no ratings means this won't exist
                    tag = rating_div.find('div', class_='histoCount')
                    if tag:
                        ratings[rating] = self._parse_count(tag.string)
            return ratings
        # newer page layout
        table = self.soup.find('table', id='histogramTable')
        if table:
            for rating, row in zip([4, 3, 2, 1, 0], table.find_all('tr', class_='a-histogram-row')):
                # the count lives inside the third td tag
                children = row.find_all('td', recursive=False)
                data = children[2].find('span', class_=False)
                if data:
                    ratings[rating] = self._parse_count(data.string)
            return ratings
        return ratings

    @property
    def supplemental_text(self):
        """All known description/feature text blobs found on the page."""
        # get all the known text blobs
        # remove any found in editorial reviews
        result = []
        # kindle
        # http://www.amazon.com/dp/1593080050
        tag = self.soup.find('div', id='postBodyPS')
        if tag:
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        # paperbacks
        # http://www.amazon.com/dp/1568822812
        tag = self.soup.find('div', id='bookDescription_feature_div')
        if tag:
            tag = tag.find('div', class_=None)
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        # extract from the javascript code that updates the iframe
        # http://www.amazon.com/dp/1491268727
        tag = self.soup.find('script', text=re.compile(r'bookDescEncodedData', flags=re.I))
        if tag:
            match = re.search(r'bookDescEncodedData\s=\s"(?P<description>[^",]+)', tag.text)
            if match:
                # BUGFIX: ``urllib.unquote`` only exists on Python 2; on
                # Python 3 it moved to ``urllib.parse.unquote`` (which is
                # what ``urlparse`` aliases there — see imports).
                unquote = getattr(urllib, 'unquote', None) or urlparse.unquote
                text = unquote(match.group('description'))
                text = strip_html_tags(text)
                if text:
                    result.append(text)
        # http://www.amazon.com/dp/1616611359
        for tag in self.soup.find_all('div', class_='productDescriptionWrapper'):
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        # android apps
        # http://www.amazon.com/dp/B008A1I0SU
        tag = self.soup.find('div', class_='mas-product-description-wrapper')
        if tag:
            sub_tag = tag.find('div', class_='content')
            if sub_tag:
                tag = sub_tag
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        # amazon instant video
        # http://www.amazon.com/dp/B004C0YS5C
        # older method
        tag = self.soup.find('div', class_='prod-synopsis')
        if tag:
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        # newer method
        tag = self.soup.find('div', class_='dv-simple-synopsis')
        if tag:
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        # http://www.amazon.com/dp/B0006FUAD6
        tag = self.soup.find('div', id=re.compile('feature-bullets', flags=re.I))
        if tag:
            tags = map(unicode, tag.find_all('span'))
            text = strip_html_tags(u''.join(tags))
            if text:
                result.append(text)
        # http://www.amazon.com/dp/B00DHF39KS
        tag = self.soup.find('div', class_='aplus')
        if tag:
            text = strip_html_tags(unicode(tag))
            if text:
                result.append(text)
        return result

    def to_dict(self):
        """Serialize the product (API payload plus scraped properties) to a dict."""
        # print the object as an xml string, parse the string to a dict
        # good times!
        # this hack brought to you by the letters: X, M, L and by the
        # words: Bad, and Design
        d = xmltodict.parse(self.product.to_string())
        d = json.loads(json.dumps(d))
        # filter out the top level crap which includes AWS keys etc
        d = {'Item': d['Item']}
        # add the python properties
        d.update({
            k: getattr(self.product, k)
            for k in dir(self.product)
            if dict_acceptable(self.product, k, blacklist=['browse_nodes', 'api'])
        })
        # add our own properties
        d.update({
            k: getattr(self, k)
            for k in dir(self)
            if dict_acceptable(self, k, blacklist=['soup', 'api', 'ratings', 'reviews'])
        })
        return d
| |
from mock import mock
from sqlalchemy import and_
from importlib import reload
from flask import g
from tests.base import BaseTestCase, MockRequests
from mod_auth.models import Role
from mod_test.models import Fork, Test, TestPlatform
from mod_regression.models import RegressionTest
from mod_customized.models import TestFork, CustomizedTest
def return_gituser():
    """Return the fixed GitHub username used by the mocked token lookup."""
    username = "test"
    return username
@mock.patch('requests.get', side_effect=MockRequests)
@mock.patch('github.GitHub')
@mock.patch('mod_auth.controllers.fetch_username_from_token', side_effect=return_gituser)
class TestControllers(BaseTestCase):
    """Tests for the customized-test (/custom/) views.

    All GitHub traffic is mocked at the class level: ``requests.get``
    answers via MockRequests, ``github.GitHub`` is replaced wholesale, and
    token lookups always resolve to the fixed username "test".
    """

    def test_customize_test_page_fails_with_no_permission(self, mock_user, mock_git, mock_requests):
        """A plain user (no tester role) gets 403 on /custom/."""
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.user)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.get('/custom/')
            self.assertEqual(response.status_code, 403)

    def test_customize_test_page_loads_with_permission(self, mock_user, mock_git, mock_requests):
        """A tester can load /custom/ and sees the submit form."""
        import mod_customized.controllers
        # reload so the controllers pick up the patched dependencies
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.get('/custom/')
            self.assertEqual(response.status_code, 200)
            self.assert_template_used('custom/index.html')
            self.assertIn('submit', str(response.data))

    def test_customize_test_fails_with_wrong_commit_hash(self, mock_user, mock_git, mock_requests):
        """Empty or invalid commit hashes are rejected with a form error."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.post(
                '/custom/', data=self.create_customize_form('', ['linux']), follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            self.assert_template_used('custom/index.html')
            self.assertIn('Commit hash is not filled in', str(response.data))
            response = c.post(
                '/custom/', data=self.create_customize_form('test-url', ['linux']))
            self.assertIn('Wrong Commit Hash', str(response.data))

    def test_customize_test_creates_with_right_test_commit(self, mock_user, mock_git, mock_requests):
        """A valid commit creates a TestFork row and verifies it via GitHub."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.post(
                '/custom/', data=self.create_customize_form('abcdef', ['linux']), follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            self.assert_template_used('custom/index.html')
            custom_test = TestFork.query.filter(
                TestFork.user_id == g.user.id).first()
            # the controller must have checked the commit against GitHub
            mock_requests.assert_called_with(('https://api.github.com/repos/{user}'
                                              '/{repo}/commits/{hash}').format(user=self.user.name,
                                                                               repo=g.github['repository'],
                                                                               hash='abcdef'))
            self.assertNotEqual(custom_test, None)

    def test_customize_test_creates_fork_if_not_exists(self, mock_user, mock_git, mock_requests):
        """Submitting a custom test creates the user's Fork row when absent."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.post(
                '/custom/', data=self.create_customize_form('abcdef', ['linux']), follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            self.assert_template_used('custom/index.html')
            fork = Fork.query.filter(Fork.github.like(
                "%/test/test_repo.git")).first()
            self.assertNotEqual(fork, None)

    def test_customize_test_creates_with_multiple_platforms(self, mock_user, mock_git, mock_requests):
        """Selecting several platforms creates one Test per platform."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post('/account/login', data=self.create_login_form_data(
                self.user.email, self.user.password))
            response = c.post('/custom/', data=self.create_customize_form(
                'abcdef', ['linux', 'windows']), follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            self.assert_template_used('custom/index.html')
            test_linux = g.db.query(Test.id).filter(
                and_(TestFork.test_id == Test.id, Test.platform == TestPlatform.linux)).first()
            test_windows = g.db.query(Test.id).filter(
                and_(TestFork.test_id == Test.id, Test.platform == TestPlatform.windows)).first()
            self.assertNotEqual(test_linux, None)
            self.assertNotEqual(test_windows, None)

    def test_customize_test_ccreates_with_select_arr(self, mock_user, mock_git, mock_requests):
        """The /custom/ page lists the user's recent commits for selection."""
        from flask import g
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        commits = []
        num_commits = 4
        # fabricate commits for the mocked GitHub repo listing
        for i in range(num_commits):
            commit_hash = self.create_random_string()
            url = ('https://github.com/{user}/{repo}/commit/{hash}').format(user=return_gituser(),
                                                                            repo=g.github['repository'],
                                                                            hash=commit_hash)
            commits.append({'html_url': url, 'sha': commit_hash})
        with self.app.test_client() as c:
            response = c.post('/account/login', data=self.create_login_form_data(
                self.user.email, self.user.password))
            repo = mock_git().repos()()
            repo.commits().get.return_value = commits
            response = c.get('/custom/')
            for commit in commits:
                self.assertIn(commit['sha'], str(response.data))

    def test_customize_regression_tests_load(self, mock_user, mock_git, mock_requests):
        """Every regression test's command is rendered on /custom/."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.get('/custom/')
            self.assertEqual(response.status_code, 200)
            regression_tests = RegressionTest.query.all()
            for regression_test in regression_tests:
                self.assertIn(regression_test.command, str(response.data))

    def test_error_on_no_regression_test(self, mock_user, mock_git, mock_requests):
        """Submitting with an empty regression-test selection is rejected."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.post(
                '/custom/', data=self.create_customize_form('abcdef', ['linux'],
                                                            regression_test=[]), follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            self.assertIn('Please add one or more Regression Tests', str(response.data))

    def test_customize_test_creates_with_customize_regression_tests(self, mock_user, mock_git, mock_requests):
        """Only the selected regression tests are attached to the new Test."""
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response = c.post(
                '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.post(
                '/custom/', data=self.create_customize_form('abcdef', ['linux'],
                                                            regression_test=[2]), follow_redirects=True)
            self.assertEqual(response.status_code, 200)
            test = Test.query.filter(Test.id == 3).first()
            regression_tests = test.get_customized_regressiontests()
            self.assertIn(2, regression_tests)
            self.assertNotIn(1, regression_tests)

    def test_customize_test_github_server_error(self, mock_user, mock_git, mock_requests):
        """
        Test in case github ever returns a 500 error
        """
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response_login = c.post('/account/login',
                                    data=self.create_login_form_data(self.user.email, self.user.password))
            # Base.py: MockRequests
            response = c.post('/custom/',
                              data=self.create_customize_form('mockWillReturn500', ['linux'],
                                                              regression_test=[2]), follow_redirects=True)
            # Validate if View Works
            self.assertEqual(response.status_code, 200)
            self.assertIn("Error contacting Github", str(response.data))

    def test_customize_test_wrong_commit_hash(self, mock_user, mock_git, mock_requests):
        """
        Test in case if a wrong hash is submitted
        """
        import mod_customized.controllers
        reload(mod_customized.controllers)
        self.create_user_with_role(
            self.user.name, self.user.email, self.user.password, Role.tester)
        with self.app.test_client() as c:
            response_login = c.post('/account/login',
                                    data=self.create_login_form_data(self.user.email, self.user.password))
            response = c.post('/custom/',
                              data=self.create_customize_form('SomeoneSendMeCleanAirPlease', ['linux'],
                                                              regression_test=[2]), follow_redirects=True)
            # Validate if View Works
            self.assertEqual(response.status_code, 200)
            self.assertIn("Wrong Commit Hash", str(response.data))
| |
"""The tests for the MQTT light platform.
Configuration for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for XY Version with brightness:
light:
platform: mqtt
name: "Office Light XY"
state_topic: "office/xy1/light/status"
command_topic: "office/xy1/light/switch"
brightness_state_topic: "office/xy1/brightness/status"
brightness_command_topic: "office/xy1/brightness/set"
xy_state_topic: "office/xy1/xy/status"
xy_command_topic: "office/xy1/xy/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with brightness and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and color temp
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
color_temp_state_topic: "office/rgb1/color_temp/status"
color_temp_command_topic: "office/rgb1/color_temp/set"
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and effect
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
effect_state_topic: "office/rgb1/effect/status"
effect_command_topic: "office/rgb1/effect/set"
effect_list:
- rainbow
- colorloop
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with white value and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
white_value_state_topic: "office/rgb1/white_value/status"
white_value_command_topic: "office/rgb1/white_value/set"
white_value_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with RGB command template:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_command_template: "{{ '#%02x%02x%02x' | format(red, green, blue)}}"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for HS Version with brightness:
light:
platform: mqtt
name: "Office Light HS"
state_topic: "office/hs1/light/status"
command_topic: "office/hs1/light/switch"
brightness_state_topic: "office/hs1/brightness/status"
brightness_command_topic: "office/hs1/brightness/set"
hs_state_topic: "office/hs1/hs/status"
hs_command_topic: "office/hs1/hs/set"
qos: 0
payload_on: "on"
payload_off: "off"
"""
import json
from os import path
from unittest.mock import call, patch
import pytest
from homeassistant import config as hass_config
from homeassistant.components import light
from homeassistant.const import ATTR_ASSUMED_STATE, SERVICE_RELOAD, STATE_OFF, STATE_ON
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.light import common
# Minimal MQTT light configuration shared by the help_test_* helpers.
DEFAULT_CONFIG = {
    light.DOMAIN: {
        "platform": "mqtt",
        "name": "test",
        "command_topic": "test-topic",
    }
}
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
    """Test that setup fails when no command topic is configured."""
    config = {light.DOMAIN: {"platform": "mqtt", "name": "test"}}
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    # the entity must not have been created
    assert hass.states.get("light.test") is None
async def test_no_color_brightness_color_temp_hs_white_xy_if_no_topics(hass, mqtt_mock):
    """Test if there is no color and brightness if no topic."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "test_light_rgb/status",
                "command_topic": "test_light_rgb/set",
            }
        },
    )
    await hass.async_block_till_done()

    # attributes that must stay unset with no color/brightness topics
    unset_attrs = (
        "rgb_color",
        "brightness",
        "color_temp",
        "hs_color",
        "white_value",
        "xy_color",
    )

    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    for attr in unset_attrs:
        assert state.attributes.get(attr) is None

    async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")

    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    for attr in unset_attrs:
        assert state.attributes.get(attr) is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
    """Test the controlling of the state via topic."""
    # Configure every supported state/command topic pair so each attribute
    # can be driven independently via MQTT.
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test_light_rgb/status",
            "command_topic": "test_light_rgb/set",
            "brightness_state_topic": "test_light_rgb/brightness/status",
            "brightness_command_topic": "test_light_rgb/brightness/set",
            "rgb_state_topic": "test_light_rgb/rgb/status",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_state_topic": "test_light_rgb/color_temp/status",
            "color_temp_command_topic": "test_light_rgb/color_temp/set",
            "effect_state_topic": "test_light_rgb/effect/status",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_state_topic": "test_light_rgb/hs/status",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_state_topic": "test_light_rgb/white_value/status",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_state_topic": "test_light_rgb/xy/status",
            "xy_command_topic": "test_light_rgb/xy/set",
            "qos": "0",
            # Numeric on/off payloads exercise non-string payload handling.
            "payload_on": 1,
            "payload_off": 0,
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    # Initially off with no attributes, and not an assumed (optimistic) state
    # because a state_topic is configured.
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("effect") is None
    assert state.attributes.get("hs_color") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("xy_color") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Turn on/off via the numeric payloads.
    async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("effect") is None
    assert state.attributes.get("hs_color") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("xy_color") is None
    async_fire_mqtt_message(hass, "test_light_rgb/status", "0")
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
    async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "100")
    light_state = hass.states.get("light.test")
    assert light_state.attributes["brightness"] == 100
    # color_temp is received but not exposed yet (no white value active);
    # it appears once white_value becomes non-zero below.
    async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "300")
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("color_temp") is None
    async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "100")
    light_state = hass.states.get("light.test")
    assert light_state.attributes["white_value"] == 100
    assert light_state.attributes["color_temp"] == 300
    async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "rainbow")
    light_state = hass.states.get("light.test")
    assert light_state.attributes["effect"] == "rainbow"
    # Conversely, rgb is received but hidden while white_value is non-zero.
    async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
    async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "125,125,125")
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("rgb_color") is None
    # Zeroing white_value switches back to color mode; 125,125,125 is
    # normalized to full-scale white.
    async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "0")
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("rgb_color") == (255, 255, 255)
    async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "200,50")
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("hs_color") == (200, 50)
    # xy is round-tripped through the color pipeline, hence the slight shift.
    async_fire_mqtt_message(hass, "test_light_rgb/xy/status", "0.675,0.322")
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("xy_color") == (0.672, 0.324)
async def test_invalid_state_via_topic(hass, mqtt_mock, caplog):
    """Test handling of empty data via topic.

    Each empty payload must be ignored with a log message, leaving the
    previously-received value untouched.
    """
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test_light_rgb/status",
            "command_topic": "test_light_rgb/set",
            "brightness_state_topic": "test_light_rgb/brightness/status",
            "brightness_command_topic": "test_light_rgb/brightness/set",
            "rgb_state_topic": "test_light_rgb/rgb/status",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_state_topic": "test_light_rgb/color_temp/status",
            "color_temp_command_topic": "test_light_rgb/color_temp/set",
            "effect_state_topic": "test_light_rgb/effect/status",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_state_topic": "test_light_rgb/hs/status",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_state_topic": "test_light_rgb/white_value/status",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_state_topic": "test_light_rgb/xy/status",
            "xy_command_topic": "test_light_rgb/xy/set",
            "qos": "0",
            "payload_on": 1,
            "payload_off": 0,
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    # Initially off with no attributes.
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("effect") is None
    assert state.attributes.get("hs_color") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("xy_color") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Seed valid values so the "ignored" cases below have something to keep.
    async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
    async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "255,255,255")
    async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "255")
    async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "none")
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") == (255, 255, 255)
    assert state.attributes.get("brightness") == 255
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("effect") == "none"
    assert state.attributes.get("hs_color") == (0, 0)
    assert state.attributes.get("white_value") is None
    assert state.attributes.get("xy_color") == (0.323, 0.329)
    # Empty payloads must be ignored, preserving the previous value.
    async_fire_mqtt_message(hass, "test_light_rgb/status", "")
    assert "Ignoring empty state message" in caplog.text
    light_state = hass.states.get("light.test")
    # Fixed: assert the freshly fetched state, not the stale `state` object.
    assert light_state.state == STATE_ON
    async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "")
    assert "Ignoring empty brightness message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes["brightness"] == 255
    async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "")
    assert "Ignoring empty effect message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes["effect"] == "none"
    async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "")
    assert "Ignoring empty rgb message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("rgb_color") == (255, 255, 255)
    async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "")
    assert "Ignoring empty hs message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("hs_color") == (0, 0)
    # Unparseable (non-numeric) hs payloads are also rejected.
    async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "bad,bad")
    assert "Failed to parse hs state update" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("hs_color") == (0, 0)
    async_fire_mqtt_message(hass, "test_light_rgb/xy/status", "")
    assert "Ignoring empty xy-color message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes.get("xy_color") == (0.323, 0.329)
    # Switch into white mode; color attributes are hidden while white is active.
    async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "153")
    async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "255")
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("brightness") == 255
    assert state.attributes.get("color_temp") == 153
    assert state.attributes.get("effect") == "none"
    assert state.attributes.get("hs_color") is None
    assert state.attributes.get("white_value") == 255
    assert state.attributes.get("xy_color") is None
    # Empty color_temp / white_value payloads are ignored the same way.
    async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "")
    assert "Ignoring empty color temp message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes["color_temp"] == 153
    async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "")
    assert "Ignoring empty white value message" in caplog.text
    light_state = hass.states.get("light.test")
    assert light_state.attributes["white_value"] == 255
async def test_brightness_controlling_scale(hass, mqtt_mock):
    """Test the brightness controlling scale."""
    with assert_setup_component(1, light.DOMAIN):
        assert await async_setup_component(
            hass,
            light.DOMAIN,
            {
                light.DOMAIN: {
                    "platform": "mqtt",
                    "name": "test",
                    "state_topic": "test_scale/status",
                    "command_topic": "test_scale/set",
                    "brightness_state_topic": "test_scale/brightness/status",
                    "brightness_command_topic": "test_scale/brightness/set",
                    # Device reports brightness in 0..99 instead of 0..255.
                    "brightness_scale": "99",
                    "qos": 0,
                    "payload_on": "on",
                    "payload_off": "off",
                }
            },
        )
        await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("brightness") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "test_scale/status", "on")
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") is None
    async_fire_mqtt_message(hass, "test_scale/status", "off")
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    # Device maximum (99) must map to Home Assistant maximum (255).
    async_fire_mqtt_message(hass, "test_scale/status", "on")
    async_fire_mqtt_message(hass, "test_scale/brightness/status", "99")
    light_state = hass.states.get("light.test")
    assert light_state.attributes["brightness"] == 255
async def test_brightness_from_rgb_controlling_scale(hass, mqtt_mock):
    """Test the brightness derived from the RGB state payload."""
    with assert_setup_component(1, light.DOMAIN):
        assert await async_setup_component(
            hass,
            light.DOMAIN,
            {
                light.DOMAIN: {
                    "platform": "mqtt",
                    "name": "test",
                    "state_topic": "test_scale_rgb/status",
                    "command_topic": "test_scale_rgb/set",
                    # No brightness topic: brightness comes from the RGB value.
                    "rgb_state_topic": "test_scale_rgb/rgb/status",
                    "rgb_command_topic": "test_scale_rgb/rgb/set",
                    "qos": 0,
                    "payload_on": "on",
                    "payload_off": "off",
                }
            },
        )
        await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("brightness") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Brightness equals the largest RGB channel value.
    async_fire_mqtt_message(hass, "test_scale_rgb/status", "on")
    async_fire_mqtt_message(hass, "test_scale_rgb/rgb/status", "255,0,0")
    state = hass.states.get("light.test")
    assert state.attributes.get("brightness") == 255
    async_fire_mqtt_message(hass, "test_scale_rgb/rgb/status", "127,0,0")
    state = hass.states.get("light.test")
    assert state.attributes.get("brightness") == 127
async def test_white_value_controlling_scale(hass, mqtt_mock):
    """Test the white_value controlling scale."""
    with assert_setup_component(1, light.DOMAIN):
        assert await async_setup_component(
            hass,
            light.DOMAIN,
            {
                light.DOMAIN: {
                    "platform": "mqtt",
                    "name": "test",
                    "state_topic": "test_scale/status",
                    "command_topic": "test_scale/set",
                    "white_value_state_topic": "test_scale/white_value/status",
                    "white_value_command_topic": "test_scale/white_value/set",
                    # Device reports white value in 0..99 instead of 0..255.
                    "white_value_scale": "99",
                    "qos": 0,
                    "payload_on": "on",
                    "payload_off": "off",
                }
            },
        )
        await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("white_value") is None
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "test_scale/status", "on")
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("white_value") is None
    async_fire_mqtt_message(hass, "test_scale/status", "off")
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    # Device maximum (99) must map to Home Assistant maximum (255).
    async_fire_mqtt_message(hass, "test_scale/status", "on")
    async_fire_mqtt_message(hass, "test_scale/white_value/status", "99")
    light_state = hass.states.get("light.test")
    assert light_state.attributes["white_value"] == 255
async def test_controlling_state_via_topic_with_templates(hass, mqtt_mock):
    """Test the setting of the state with a template."""
    # Every state topic gets a value template that extracts the actual value
    # from a JSON payload under the "hello" key.
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test_light_rgb/status",
            "command_topic": "test_light_rgb/set",
            "brightness_command_topic": "test_light_rgb/brightness/set",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_command_topic": "test_light_rgb/color_temp/set",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_command_topic": "test_light_rgb/xy/set",
            "brightness_state_topic": "test_light_rgb/brightness/status",
            "color_temp_state_topic": "test_light_rgb/color_temp/status",
            "effect_state_topic": "test_light_rgb/effect/status",
            "hs_state_topic": "test_light_rgb/hs/status",
            "rgb_state_topic": "test_light_rgb/rgb/status",
            "white_value_state_topic": "test_light_rgb/white_value/status",
            "xy_state_topic": "test_light_rgb/xy/status",
            "state_value_template": "{{ value_json.hello }}",
            "brightness_value_template": "{{ value_json.hello }}",
            "color_temp_value_template": "{{ value_json.hello }}",
            "effect_value_template": "{{ value_json.hello }}",
            "hs_value_template": '{{ value_json.hello | join(",") }}',
            "rgb_value_template": '{{ value_json.hello | join(",") }}',
            "white_value_template": "{{ value_json.hello }}",
            "xy_value_template": '{{ value_json.hello | join(",") }}',
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    assert state.attributes.get("brightness") is None
    assert state.attributes.get("rgb_color") is None
    # Feed JSON payloads; the templates extract the "hello" value from each.
    async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", '{"hello": [1, 2, 3]}')
    async_fire_mqtt_message(hass, "test_light_rgb/status", '{"hello": "ON"}')
    async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", '{"hello": "50"}')
    async_fire_mqtt_message(
        hass, "test_light_rgb/color_temp/status", '{"hello": "300"}'
    )
    async_fire_mqtt_message(
        hass, "test_light_rgb/effect/status", '{"hello": "rainbow"}'
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 50
    # 1,2,3 is normalized to full brightness, hence the scaled rgb value.
    assert state.attributes.get("rgb_color") == (84, 169, 255)
    # color_temp stays hidden until a white value is active (see below).
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("effect") == "rainbow"
    assert state.attributes.get("white_value") is None
    # A non-zero white value switches to white mode: rgb hides, color_temp shows.
    async_fire_mqtt_message(
        hass, "test_light_rgb/white_value/status", '{"hello": "75"}'
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 50
    assert state.attributes.get("rgb_color") is None
    assert state.attributes.get("color_temp") == 300
    assert state.attributes.get("effect") == "rainbow"
    assert state.attributes.get("white_value") == 75
    # Zeroing white value switches back to color mode.
    async_fire_mqtt_message(hass, "test_light_rgb/hs/status", '{"hello": [100,50]}')
    async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", '{"hello": "0"}')
    state = hass.states.get("light.test")
    assert state.attributes.get("hs_color") == (100, 50)
    async_fire_mqtt_message(
        hass, "test_light_rgb/xy/status", '{"hello": [0.123,0.123]}'
    )
    state = hass.states.get("light.test")
    # xy is round-tripped through the color pipeline, hence the slight shift.
    assert state.attributes.get("xy_color") == (0.14, 0.131)
async def test_controlling_state_via_topic_with_value_template(hass, mqtt_mock):
    """Test the setting of the state with undocumented value_template."""
    assert await async_setup_component(
        hass,
        light.DOMAIN,
        {
            light.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "test_light_rgb/status",
                "command_topic": "test_light_rgb/set",
                # Legacy alias for state_value_template.
                "value_template": "{{ value_json.hello }}",
            }
        },
    )
    await hass.async_block_till_done()
    # Starts off; each JSON payload is run through the template first.
    assert hass.states.get("light.test").state == STATE_OFF
    for payload, expected in (
        ('{"hello": "ON"}', STATE_ON),
        ('{"hello": "OFF"}', STATE_OFF),
    ):
        async_fire_mqtt_message(hass, "test_light_rgb/status", payload)
        assert hass.states.get("light.test").state == expected
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
    """Test the sending of command in optimistic mode."""
    # No state topics at all, so the light runs optimistically (assumed state).
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light_rgb/set",
            "brightness_command_topic": "test_light_rgb/brightness/set",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_command_topic": "test_light_rgb/color_temp/set",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_command_topic": "test_light_rgb/xy/set",
            "effect_list": ["colorloop", "random"],
            "qos": 2,
            "payload_on": "on",
            "payload_off": "off",
        }
    }
    # Previously persisted state restored via RestoreEntity on startup.
    fake_state = ha.State(
        "light.test",
        "on",
        {
            "brightness": 95,
            "hs_color": [100, 100],
            "effect": "random",
            "color_temp": 100,
            # TODO: Test restoring state with white_value
            "white_value": 0,
        },
    )
    with patch(
        "homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
        return_value=fake_state,
    ):
        with assert_setup_component(1, light.DOMAIN):
            assert await async_setup_component(hass, light.DOMAIN, config)
            await hass.async_block_till_done()
    # Restored attributes are visible; color_temp/white_value stay hidden
    # because the restored white_value is 0 (color mode).
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("brightness") == 95
    assert state.attributes.get("hs_color") == (100, 100)
    assert state.attributes.get("effect") == "random"
    assert state.attributes.get("color_temp") is None
    assert state.attributes.get("white_value") is None
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    # Simple on/off commands publish the configured payloads at qos 2.
    await common.async_turn_on(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", "on", 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light_rgb/set", "off", 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    mqtt_mock.reset_mock()
    # Three turn_on calls with different color parameters; the last rgb_color
    # wins optimistically, but all intermediate publishes must have happened.
    await common.async_turn_on(
        hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
    )
    await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
    await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light_rgb/set", "on", 2, False),
            call("test_light_rgb/rgb/set", "255,128,0", 2, False),
            call("test_light_rgb/brightness/set", "50", 2, False),
            call("test_light_rgb/hs/set", "359.0,78.0", 2, False),
            call("test_light_rgb/xy/set", "0.14,0.131", 2, False),
        ],
        any_order=True,
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes["rgb_color"] == (255, 128, 0)
    assert state.attributes["brightness"] == 50
    assert state.attributes["hs_color"] == (30.118, 100)
    assert state.attributes.get("white_value") is None
    assert state.attributes["xy_color"] == (0.611, 0.375)
    assert state.attributes.get("color_temp") is None
    # Setting a white value switches to white mode and hides color attributes.
    await common.async_turn_on(hass, "light.test", white_value=80, color_temp=125)
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light_rgb/white_value/set", "80", 2, False),
            call("test_light_rgb/color_temp/set", "125", 2, False),
        ],
        any_order=True,
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes.get("rgb_color") is None
    assert state.attributes["brightness"] == 50
    assert state.attributes.get("hs_color") is None
    assert state.attributes["white_value"] == 80
    assert state.attributes.get("xy_color") is None
    assert state.attributes["color_temp"] == 125
async def test_sending_mqtt_rgb_command_with_template(hass, mqtt_mock):
    """Test the sending of RGB command with template."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light_rgb/set",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            # Template formats the channels as a hex color string.
            "rgb_command_template": '{{ "#%02x%02x%02x" | '
            "format(red, green, blue)}}",
            "payload_on": "on",
            "payload_off": "off",
            "qos": 0,
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 64])
    # 64 is scaled to 63 by the color pipeline, so the hex payload is #ff803f.
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light_rgb/set", "on", 0, False),
            call("test_light_rgb/rgb/set", "#ff803f", 0, False),
        ],
        any_order=True,
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    assert state.attributes["rgb_color"] == (255, 128, 63)
async def test_sending_mqtt_color_temp_command_with_template(hass, mqtt_mock):
    """Test the sending of Color Temp command with template."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light_color_temp/set",
            "color_temp_command_topic": "test_light_color_temp/color_temp/set",
            # Template converts mireds to the device scale: 100 -> 10.
            "color_temp_command_template": "{{ (1000 / value) | round(0) }}",
            "payload_on": "on",
            "payload_off": "off",
            "qos": 0,
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", color_temp=100)
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light_color_temp/set", "on", 0, False),
            call("test_light_color_temp/color_temp/set", "10", 0, False),
        ],
        any_order=True,
    )
    state = hass.states.get("light.test")
    assert state.state == STATE_ON
    # The state keeps the untemplated value.
    assert state.attributes["color_temp"] == 100
async def test_on_command_first(hass, mqtt_mock):
    """Test on command being sent before brightness."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "brightness_command_topic": "test_light/bright",
            # "first": publish ON before any attribute commands.
            "on_command_type": "first",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", brightness=50)
    # Should get the following MQTT messages.
    # test_light/set: 'ON'
    # test_light/bright: 50
    # No any_order here: the ordering is exactly what is under test.
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/set", "ON", 0, False),
            call("test_light/bright", "50", 0, False),
        ],
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_on_command_last(hass, mqtt_mock):
    """Test on command being sent after brightness."""
    # Default on_command_type is "last": attributes first, then ON.
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "brightness_command_topic": "test_light/bright",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", brightness=50)
    # Should get the following MQTT messages.
    # test_light/bright: 50
    # test_light/set: 'ON'
    # No any_order here: the ordering is exactly what is under test.
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/bright", "50", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_on_command_brightness(hass, mqtt_mock):
    """Test on command being sent as only brightness."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "brightness_command_topic": "test_light/bright",
            "rgb_command_topic": "test_light/rgb",
            # "brightness": turning on publishes brightness only, no ON payload.
            "on_command_type": "brightness",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    # Turn on w/ no brightness - should set to max
    await common.async_turn_on(hass, "light.test")
    # Should get the following MQTT messages.
    # test_light/bright: 255
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light/bright", "255", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
    mqtt_mock.async_publish.reset_mock()
    # Turn on w/ brightness
    await common.async_turn_on(hass, "light.test", brightness=50)
    mqtt_mock.async_publish.assert_called_once_with("test_light/bright", "50", 0, False)
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    # Turn on w/ just a color to ensure brightness gets
    # added and sent.
    await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "255,128,0", 0, False),
            # Last-known brightness (50) is re-sent alongside the color.
            call("test_light/bright", "50", 0, False),
        ],
        any_order=True,
    )
async def test_on_command_brightness_scaled(hass, mqtt_mock):
    """Test brightness scale."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "brightness_command_topic": "test_light/bright",
            # Device expects brightness in 0..100 instead of 0..255.
            "brightness_scale": 100,
            "rgb_command_topic": "test_light/rgb",
            "on_command_type": "brightness",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    # Turn on w/ no brightness - should set to max
    await common.async_turn_on(hass, "light.test")
    # Should get the following MQTT messages.
    # test_light/bright: 100
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light/bright", "100", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
    mqtt_mock.async_publish.reset_mock()
    # Turn on w/ brightness: 50/255 scales to 20/100.
    await common.async_turn_on(hass, "light.test", brightness=50)
    mqtt_mock.async_publish.assert_called_once_with("test_light/bright", "20", 0, False)
    mqtt_mock.async_publish.reset_mock()
    # Turn on w/ max brightness
    await common.async_turn_on(hass, "light.test", brightness=255)
    mqtt_mock.async_publish.assert_called_once_with(
        "test_light/bright", "100", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # Turn on w/ min brightness: must not round down to 0 (that would be off).
    await common.async_turn_on(hass, "light.test", brightness=1)
    mqtt_mock.async_publish.assert_called_once_with("test_light/bright", "1", 0, False)
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    # Turn on w/ just a color to ensure brightness gets
    # added and sent.
    await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "255,128,0", 0, False),
            # Last-known (minimum) brightness is re-sent alongside the color.
            call("test_light/bright", "1", 0, False),
        ],
        any_order=True,
    )
async def test_on_command_rgb(hass, mqtt_mock):
    """Test on command in RGB brightness mode."""
    # No brightness topic: brightness is encoded by scaling the RGB channels.
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "rgb_command_topic": "test_light/rgb",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", brightness=127)
    # Should get the following MQTT messages.
    # test_light/rgb: '127,127,127'
    # test_light/set: 'ON'
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "127,127,127", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_on(hass, "light.test", brightness=255)
    # Should get the following MQTT messages.
    # test_light/rgb: '255,255,255'
    # test_light/set: 'ON'
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "255,255,255", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_on(hass, "light.test", brightness=1)
    # Should get the following MQTT messages.
    # test_light/rgb: '1,1,1'
    # test_light/set: 'ON'
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "1,1,1", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
    # Ensure color gets scaled with brightness.
    # Brightness is still 1 from above, so 255,128,0 scales down to 1,0,0.
    await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "1,0,0", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_on(hass, "light.test", brightness=255)
    # Should get the following MQTT messages.
    # test_light/rgb: '255,128,0'
    # test_light/set: 'ON'
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "255,128,0", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
async def test_on_command_rgb_template(hass, mqtt_mock):
    """Test on command in RGB brightness mode with RGB template."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "rgb_command_topic": "test_light/rgb",
            # Template renders the channels slash-separated instead of commas.
            "rgb_command_template": "{{ red }}/{{ green }}/{{ blue }}",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", brightness=127)
    # Should get the following MQTT messages.
    # test_light/rgb: '127,127,127'
    # test_light/set: 'ON'
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/rgb", "127/127/127", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_effect(hass, mqtt_mock):
    """Test effect."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_light/set",
            "effect_command_topic": "test_light/effect/set",
            "effect_list": ["rainbow", "colorloop"],
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("light.test")
    assert state.state == STATE_OFF
    await common.async_turn_on(hass, "light.test", effect="rainbow")
    # Should get the following MQTT messages.
    # test_light/effect/set: 'rainbow'
    # test_light/set: 'ON'
    mqtt_mock.async_publish.assert_has_calls(
        [
            call("test_light/effect/set", "rainbow", 0, False),
            call("test_light/set", "ON", 0, False),
        ],
        any_order=True,
    )
    mqtt_mock.async_publish.reset_mock()
    await common.async_turn_off(hass, "light.test")
    mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_availability_when_connection_lost(hass, mqtt_mock):
    """Verify the light becomes unavailable after an MQTT disconnection."""
    # Delegate to the shared MQTT availability helper with this platform's config.
    await help_test_availability_when_connection_lost(hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG)
async def test_availability_without_topic(hass, mqtt_mock):
    """Verify availability behavior when no availability topic is configured."""
    # Delegate to the shared MQTT availability helper with this platform's config.
    await help_test_availability_without_topic(hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG)
async def test_default_availability_payload(hass, mqtt_mock):
    """Verify availability with the default payload on a configured topic."""
    # Delegate to the shared MQTT availability helper with this platform's config.
    await help_test_default_availability_payload(hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG)
async def test_custom_availability_payload(hass, mqtt_mock):
    """Verify availability with a custom payload on a configured topic."""
    # Delegate to the shared MQTT availability helper with this platform's config.
    await help_test_custom_availability_payload(hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
    """Test unique id option only creates one light per unique_id."""
    # Two lights sharing one unique_id; only a single entity may result.
    shared_settings = {
        "platform": "mqtt",
        "state_topic": "test-topic",
        "command_topic": "test_topic",
        "unique_id": "TOTALLY_UNIQUE",
    }
    config = {
        light.DOMAIN: [
            dict(shared_settings, name="Test 1"),
            dict(shared_settings, name="Test 2"),
        ]
    }
    await help_test_unique_id(hass, mqtt_mock, light.DOMAIN, config)
async def test_discovery_removal_light(hass, mqtt_mock, caplog):
    """Test removal of discovered light."""
    # Minimal valid discovery payload for a basic MQTT light.
    payload = '{ "name": "test", "state_topic": "test_topic", "command_topic": "test_topic" }'
    await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, payload)
async def test_discovery_deprecated(hass, mqtt_mock, caplog):
    """Test discovery of mqtt light with deprecated platform option."""
    payload = '{ "name": "Beer", "platform": "mqtt", "command_topic": "test_topic"}'
    async_fire_mqtt_message(hass, "homeassistant/light/bla/config", payload)
    await hass.async_block_till_done()
    # The entity must still be created despite the deprecated option.
    state = hass.states.get("light.beer")
    assert state is not None
    assert state.name == "Beer"
async def test_discovery_update_light_topic_and_template(hass, mqtt_mock, caplog):
    """Test update of discovered light.

    data1 subscribes every feature to .../state1 using state1.* value
    templates; data2 moves both the topics and the templates to state2.
    Each state_data entry is (messages, expected_state, expected_attributes):
    the messages are published, then the entity state/attributes are checked.
    """
    data1 = json.dumps(
        {
            "name": "Beer",
            "state_topic": "test_light_rgb/state1",
            "command_topic": "test_light_rgb/set",
            "brightness_command_topic": "test_light_rgb/state1",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_command_topic": "test_light_rgb/state1",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_command_topic": "test_light_rgb/xy/set",
            "brightness_state_topic": "test_light_rgb/state1",
            "color_temp_state_topic": "test_light_rgb/state1",
            "effect_state_topic": "test_light_rgb/state1",
            "hs_state_topic": "test_light_rgb/state1",
            "rgb_state_topic": "test_light_rgb/state1",
            "white_value_state_topic": "test_light_rgb/state1",
            "xy_state_topic": "test_light_rgb/state1",
            "state_value_template": "{{ value_json.state1.state }}",
            "brightness_value_template": "{{ value_json.state1.brightness }}",
            "color_temp_value_template": "{{ value_json.state1.ct }}",
            "effect_value_template": "{{ value_json.state1.fx }}",
            "hs_value_template": "{{ value_json.state1.hs }}",
            "rgb_value_template": "{{ value_json.state1.rgb }}",
            "white_value_template": "{{ value_json.state1.white }}",
            "xy_value_template": "{{ value_json.state1.xy }}",
        }
    )
    # Updated discovery config: both state topics and templates switch to state2.
    data2 = json.dumps(
        {
            "name": "Milk",
            "state_topic": "test_light_rgb/state2",
            "command_topic": "test_light_rgb/set",
            "brightness_command_topic": "test_light_rgb/state2",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_command_topic": "test_light_rgb/state2",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_command_topic": "test_light_rgb/xy/set",
            "brightness_state_topic": "test_light_rgb/state2",
            "color_temp_state_topic": "test_light_rgb/state2",
            "effect_state_topic": "test_light_rgb/state2",
            "hs_state_topic": "test_light_rgb/state2",
            "rgb_state_topic": "test_light_rgb/state2",
            "white_value_state_topic": "test_light_rgb/state2",
            "xy_state_topic": "test_light_rgb/state2",
            "state_value_template": "{{ value_json.state2.state }}",
            "brightness_value_template": "{{ value_json.state2.brightness }}",
            "color_temp_value_template": "{{ value_json.state2.ct }}",
            "effect_value_template": "{{ value_json.state2.fx }}",
            "hs_value_template": "{{ value_json.state2.hs }}",
            "rgb_value_template": "{{ value_json.state2.rgb }}",
            "white_value_template": "{{ value_json.state2.white }}",
            "xy_value_template": "{{ value_json.state2.xy }}",
        }
    )
    # Expected behavior while the original (state1) config is active.
    state_data1 = [
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "brightness":100, "ct":123, "white":100, "fx":"cycle"}}',
                )
            ],
            "on",
            [
                ("brightness", 100),
                ("color_temp", 123),
                ("white_value", 100),
                ("effect", "cycle"),
            ],
        ),
        (
            [("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
            "off",
            None,
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "hs":"1,2", "white":0}}',
                )
            ],
            "on",
            [("hs_color", (1, 2)), ("white_value", None)],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"rgb":"255,127,63"}}',
                )
            ],
            "on",
            [("rgb_color", (255, 127, 63))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"xy":"0.3, 0.4"}}',
                )
            ],
            "on",
            # 0.401 rather than 0.4 — presumably rounding from the internal
            # color conversion; TODO confirm against the light color utils.
            [("xy_color", (0.3, 0.401))],
        ),
    ]
    # Expected behavior after the discovery update: only state2-topic messages
    # with state2 template keys may affect the entity; all mismatched
    # topic/template combinations below must be ignored (state unchanged).
    state_data2 = [
        (
            [
                (
                    "test_light_rgb/state2",
                    '{"state2":{"state":"ON", "brightness":50, "ct":200, "white":50, "fx":"loop"}}',
                )
            ],
            "on",
            [
                ("brightness", 50),
                ("color_temp", 200),
                ("white_value", 50),
                ("effect", "loop"),
            ],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
                ),
                (
                    "test_light_rgb/state1",
                    '{"state2":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
                ),
                (
                    "test_light_rgb/state2",
                    '{"state1":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
                ),
            ],
            "on",
            [("brightness", 50), ("color_temp", 200), ("effect", "loop")],
        ),
        (
            [("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
            "on",
            None,
        ),
        (
            [("test_light_rgb/state1", '{"state2":{"state":"OFF"}}')],
            "on",
            None,
        ),
        (
            [("test_light_rgb/state2", '{"state1":{"state":"OFF"}}')],
            "on",
            None,
        ),
        (
            # Only the matching topic + template turns the light off.
            [("test_light_rgb/state2", '{"state2":{"state":"OFF"}}')],
            "off",
            None,
        ),
        (
            [
                (
                    "test_light_rgb/state2",
                    '{"state2":{"state":"ON", "hs":"1.2,2.2", "white":0}}',
                )
            ],
            "on",
            [("hs_color", (1.2, 2.2)), ("white_value", None)],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "hs":"1,2"}}',
                ),
                (
                    "test_light_rgb/state1",
                    '{"state2":{"state":"ON", "hs":"1,2"}}',
                ),
                (
                    "test_light_rgb/state2",
                    '{"state1":{"state":"ON", "hs":"1,2"}}',
                ),
            ],
            "on",
            [("hs_color", (1.2, 2.2))],
        ),
        (
            [
                (
                    "test_light_rgb/state2",
                    '{"state2":{"rgb":"63,127,255"}}',
                )
            ],
            "on",
            [("rgb_color", (63, 127, 255))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"rgb":"255,127,63"}}',
                ),
                (
                    "test_light_rgb/state1",
                    '{"state2":{"rgb":"255,127,63"}}',
                ),
                (
                    "test_light_rgb/state2",
                    '{"state1":{"rgb":"255,127,63"}}',
                ),
            ],
            "on",
            [("rgb_color", (63, 127, 255))],
        ),
        (
            [
                (
                    "test_light_rgb/state2",
                    '{"state2":{"xy":"0.4, 0.3"}}',
                )
            ],
            "on",
            [("xy_color", (0.4, 0.3))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"white":50, "xy":"0.3, 0.4"}}',
                ),
                (
                    "test_light_rgb/state1",
                    '{"state2":{"white":50, "xy":"0.3, 0.4"}}',
                ),
                (
                    "test_light_rgb/state2",
                    '{"state1":{"white":50, "xy":"0.3, 0.4"}}',
                ),
            ],
            "on",
            [("xy_color", (0.4, 0.3))],
        ),
    ]
    await help_test_discovery_update(
        hass,
        mqtt_mock,
        caplog,
        light.DOMAIN,
        data1,
        data2,
        state_data1=state_data1,
        state_data2=state_data2,
    )
async def test_discovery_update_light_template(hass, mqtt_mock, caplog):
    """Test update of discovered light.

    Unlike the topic-and-template variant above this one keeps every state
    topic on .../state1 and only switches the value templates from state1.*
    to state2.* in the updated discovery payload. Each state_data entry is
    (messages, expected_state, expected_attributes).
    """
    data1 = json.dumps(
        {
            "name": "Beer",
            "state_topic": "test_light_rgb/state1",
            "command_topic": "test_light_rgb/set",
            "brightness_command_topic": "test_light_rgb/state1",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_command_topic": "test_light_rgb/state1",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_command_topic": "test_light_rgb/xy/set",
            "brightness_state_topic": "test_light_rgb/state1",
            "color_temp_state_topic": "test_light_rgb/state1",
            "effect_state_topic": "test_light_rgb/state1",
            "hs_state_topic": "test_light_rgb/state1",
            "rgb_state_topic": "test_light_rgb/state1",
            "white_value_state_topic": "test_light_rgb/state1",
            "xy_state_topic": "test_light_rgb/state1",
            "state_value_template": "{{ value_json.state1.state }}",
            "brightness_value_template": "{{ value_json.state1.brightness }}",
            "color_temp_value_template": "{{ value_json.state1.ct }}",
            "effect_value_template": "{{ value_json.state1.fx }}",
            "hs_value_template": "{{ value_json.state1.hs }}",
            "rgb_value_template": "{{ value_json.state1.rgb }}",
            "white_value_template": "{{ value_json.state1.white }}",
            "xy_value_template": "{{ value_json.state1.xy }}",
        }
    )
    # Updated config: same topics, templates now read the state2 key.
    data2 = json.dumps(
        {
            "name": "Milk",
            "state_topic": "test_light_rgb/state1",
            "command_topic": "test_light_rgb/set",
            "brightness_command_topic": "test_light_rgb/state1",
            "rgb_command_topic": "test_light_rgb/rgb/set",
            "color_temp_command_topic": "test_light_rgb/state1",
            "effect_command_topic": "test_light_rgb/effect/set",
            "hs_command_topic": "test_light_rgb/hs/set",
            "white_value_command_topic": "test_light_rgb/white_value/set",
            "xy_command_topic": "test_light_rgb/xy/set",
            "brightness_state_topic": "test_light_rgb/state1",
            "color_temp_state_topic": "test_light_rgb/state1",
            "effect_state_topic": "test_light_rgb/state1",
            "hs_state_topic": "test_light_rgb/state1",
            "rgb_state_topic": "test_light_rgb/state1",
            "white_value_state_topic": "test_light_rgb/state1",
            "xy_state_topic": "test_light_rgb/state1",
            "state_value_template": "{{ value_json.state2.state }}",
            "brightness_value_template": "{{ value_json.state2.brightness }}",
            "color_temp_value_template": "{{ value_json.state2.ct }}",
            "effect_value_template": "{{ value_json.state2.fx }}",
            "hs_value_template": "{{ value_json.state2.hs }}",
            "rgb_value_template": "{{ value_json.state2.rgb }}",
            "white_value_template": "{{ value_json.state2.white }}",
            "xy_value_template": "{{ value_json.state2.xy }}",
        }
    )
    # Expected behavior while the state1.* templates are active.
    state_data1 = [
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "brightness":100, "ct":123, "white":100, "fx":"cycle"}}',
                )
            ],
            "on",
            [
                ("brightness", 100),
                ("color_temp", 123),
                ("white_value", 100),
                ("effect", "cycle"),
            ],
        ),
        (
            [("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
            "off",
            None,
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "hs":"1,2", "white":0}}',
                )
            ],
            "on",
            [("hs_color", (1, 2))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"rgb":"255,127,63"}}',
                )
            ],
            "on",
            [("rgb_color", (255, 127, 63))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"white":0, "xy":"0.3, 0.4"}}',
                )
            ],
            "on",
            # 0.401 rather than 0.4 — presumably rounding from the internal
            # color conversion; TODO confirm against the light color utils.
            [("white_value", None), ("xy_color", (0.3, 0.401))],
        ),
    ]
    # Expected behavior after the update: only state2-keyed payloads apply;
    # payloads still using the state1 key must be ignored (state unchanged).
    state_data2 = [
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state2":{"state":"ON", "brightness":50, "ct":200, "white":50, "fx":"loop"}}',
                )
            ],
            "on",
            [
                ("brightness", 50),
                ("color_temp", 200),
                ("white_value", 50),
                ("effect", "loop"),
            ],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "brightness":100, "ct":123, "fx":"cycle"}}',
                ),
            ],
            "on",
            [("brightness", 50), ("color_temp", 200), ("effect", "loop")],
        ),
        (
            [("test_light_rgb/state1", '{"state1":{"state":"OFF"}}')],
            "on",
            None,
        ),
        (
            [("test_light_rgb/state1", '{"state2":{"state":"OFF"}}')],
            "off",
            None,
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state2":{"state":"ON", "hs":"1.2,2.2", "white":0}}',
                )
            ],
            "on",
            [("hs_color", (1.2, 2.2))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"state":"ON", "hs":"1,2"}}',
                )
            ],
            "on",
            [("hs_color", (1.2, 2.2))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state2":{"rgb":"63,127,255"}}',
                )
            ],
            "on",
            [("rgb_color", (63, 127, 255))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"rgb":"255,127,63"}}',
                )
            ],
            "on",
            [("rgb_color", (63, 127, 255))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state2":{"xy":"0.4, 0.3"}}',
                )
            ],
            "on",
            [("white_value", None), ("xy_color", (0.4, 0.3))],
        ),
        (
            [
                (
                    "test_light_rgb/state1",
                    '{"state1":{"white":50, "xy":"0.3, 0.4"}}',
                )
            ],
            "on",
            [("white_value", None), ("xy_color", (0.4, 0.3))],
        ),
    ]
    await help_test_discovery_update(
        hass,
        mqtt_mock,
        caplog,
        light.DOMAIN,
        data1,
        data2,
        state_data1=state_data1,
        state_data2=state_data2,
    )
async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog):
    """Test update of discovered light when the payload is unchanged."""
    payload = '{ "name": "Beer", "state_topic": "test_topic", "command_topic": "test_topic" }'
    with patch(
        "homeassistant.components.mqtt.light.schema_basic.MqttLight.discovery_update"
    ) as update_mock:
        await help_test_discovery_update_unchanged(
            hass, mqtt_mock, caplog, light.DOMAIN, payload, update_mock
        )
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # First payload lacks required topics; the second is a valid replacement.
    broken = '{ "name": "Beer" }'
    working = '{ "name": "Milk", "state_topic": "test_topic", "command_topic": "test_topic" }'
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, light.DOMAIN, broken, working
    )
# Thin wrappers delegating to the shared MQTT entity/device-registry test
# helpers, each run against this platform's DEFAULT_CONFIG.
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT light device registry integration."""
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT light device registry integration."""
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    await help_test_entity_device_info_update(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    await help_test_entity_debug_info_message(
        hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
    )
async def test_max_mireds(hass, mqtt_mock):
    """Test that a configured max_mireds is applied while min_mireds keeps its default."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test_max_mireds/set",
            "color_temp_command_topic": "test_max_mireds/color_temp/set",
            "max_mireds": 370,
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    attributes = hass.states.get("light.test").attributes
    assert attributes.get("min_mireds") == 153
    assert attributes.get("max_mireds") == 370
async def test_reloadable(hass, mqtt_mock):
    """Test reloading an mqtt light."""
    config = {
        light.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test/set",
        }
    }
    assert await async_setup_component(hass, light.DOMAIN, config)
    await hass.async_block_till_done()
    # Sanity check: exactly the one configured light exists before reloading.
    assert hass.states.get("light.test")
    assert len(hass.states.async_all()) == 1
    # Point the YAML config at a fixture file — presumably it defines
    # "light.reload" instead of "light.test"; verify against the fixture.
    yaml_path = path.join(
        _get_fixtures_base_path(),
        "fixtures",
        "mqtt/configuration.yaml",
    )
    with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
        await hass.services.async_call(
            "mqtt",
            SERVICE_RELOAD,
            {},
            blocking=True,
        )
        await hass.async_block_till_done()
    # After reload the old entity is gone and the fixture's entity exists.
    assert len(hass.states.async_all()) == 1
    assert hass.states.get("light.test") is None
    assert hass.states.get("light.reload")
def _get_fixtures_base_path():
    """Return the tests base directory, three levels above this file."""
    base = __file__
    for _ in range(3):
        base = path.dirname(base)
    return base
# --- end of MQTT light tests; PyOWM Weather tests follow ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import unittest
from datetime import datetime
from pyowm.commons.exceptions import APIResponseError, ParseAPIResponseError
from pyowm.weatherapi25.uris import ICONS_BASE_URI
from pyowm.weatherapi25.weather import Weather
from tests.unit.weatherapi25.json_test_responses import (CITY_WEATHER_HISTORY_JSON,
CITY_WEATHER_HISTORY_NO_RESULTS_JSON,
CITY_WEATHER_HISTORY_NOT_FOUND_JSON,
INTERNAL_SERVER_ERROR_JSON)
class TestWeather(unittest.TestCase):
    """Unit tests for the Weather entity: construction, validation,
    dict (de)serialization and time-format accessors."""

    # Reference timestamps as unix epoch / ISO-8601 string / aware datetime.
    __test_reference_time = 1378459200
    __test_iso_reference_time = "2013-09-06 09:20:00+00:00"
    __test_date_reference_time = datetime.fromisoformat(__test_iso_reference_time)
    __test_sunset_time = 1378496400
    __test_iso_sunset_time = "2013-09-06 19:40:00+00:00"
    __test_date_sunset_time = datetime.fromisoformat(__test_iso_sunset_time)
    __test_sunrise_time = 1378449600
    __test_iso_sunrise_time = "2013-09-06 06:40:00+00:00"
    __test_date_sunrise_time = datetime.fromisoformat(__test_iso_sunrise_time)
    __test_clouds = 67
    __test_rain = {"all": 20}
    __test_snow = {"all": 0}
    # Wind fixtures: the base dict plus its expected conversions to other units.
    __test_wind = {"deg": 252.002, "speed": 1.100, "gust": 2.09}
    __test_imperial_wind = {"deg": 252.002, "speed": 2.460634, "gust": 4.6752046}
    __test_knots_wind = {'deg': 252.002, 'speed': 2.138224, 'gust': 4.0626256}
    __test_beaufort_wind = {"deg": 252.002, "speed": 1, "gust": 2}
    __test_kmh_wind = {'deg': 252.002, 'speed': 3.9600000000000004, 'gust': 7.524}
    __test_humidity = 57
    # Pressure fixtures in hPa and the expected inHg conversion.
    __test_pressure = {"press": 1030.119, "sea_level": 1038.589, "grnd_level": 1038.773}
    __test_inhg_pressure = {'press': 30.42, 'sea_level': 30.67, 'grnd_level': 30.67}
    # Temperature fixtures in Kelvin and the expected Celsius/Fahrenheit conversions.
    __test_temperature = {"temp": 294.199, "temp_kf": -1.899,
                          "temp_max": 296.098, "temp_min": 294.199,
                          "feels_like": 298.0}
    __test_celsius_temperature = {"temp": 21.049, "temp_kf": -1.899,
                                  "temp_max": 22.948, "temp_min": 21.049,
                                  "feels_like": 24.85}
    __test_fahrenheit_temperature = {"temp": 69.888, "temp_kf": -1.899,
                                     "temp_max": 73.306, "temp_min": 69.888,
                                     "feels_like": 76.73}
    __test_status = "Clouds"
    __test_detailed_status = "Overcast clouds"
    __test_weather_code = 804
    __test_weather_icon_name = "04d"
    __test_visibility_distance = 1000
    __test_visibility_in_kms = 1
    __test_visibility_in_miles = .62
    __test_dewpoint = 300.0
    __test_humidex = 298.0
    __test_heat_index = 40.0
    __test_precipitation_probability = 0.5
    # Fully-populated reference instance shared by most tests below.
    __test_instance = Weather(__test_reference_time, __test_sunset_time,
                              __test_sunrise_time, __test_clouds, __test_rain,
                              __test_snow, __test_wind, __test_humidity,
                              __test_pressure, __test_temperature,
                              __test_status, __test_detailed_status,
                              __test_weather_code, __test_weather_icon_name,
                              __test_visibility_distance, __test_dewpoint,
                              __test_humidex, __test_heat_index,
                              precipitation_probability=__test_precipitation_probability)
    # Malformed / empty API payloads used by the parse-failure tests.
    __bad_json = '{"a": "test", "b": 1.234, "c": [ "hello", "world"] }'
    __bad_json_2 = '{"list": [{"test":"fake"}] }'
    __no_items_json = '{"cnt": "0"}'
    # Expected to_dict() output of __test_instance, as a JSON string.
    WEATHER_JSON_DUMP = '{"status": "Clouds", "visibility_distance": 1000, ' \
                        '"clouds": 67, "temperature": {"temp_kf": -1.899, ' \
                        '"temp_min": 294.199, "temp": 294.199, "temp_max": 296.098, "feels_like": 298.0},' \
                        ' "dewpoint": 300.0, "humidex": 298.0, "detailed_status": ' \
                        '"Overcast clouds", "reference_time": 1378459200, ' \
                        '"weather_code": 804, "sunset_time": 1378496400, "rain": ' \
                        '{"all": 20}, "snow": {"all": 0}, "pressure": ' \
                        '{"press": 1030.119, "sea_level": 1038.589, "grnd_level": 1038.773}, ' \
                        '"sunrise_time": 1378449600, "heat_index": 40.0, ' \
                        '"weather_icon_name": "04d", "humidity": 57, "wind": ' \
                        '{"speed": 1.1, "deg": 252.002, "gust": 2.09}, "utc_offset": null, "uvi": null, ' \
                        '"precipitation_probability": 0.5}'
def test_init_fails_when_wrong_data_provided(self):
self.assertRaises(ValueError, Weather, -9876543210,
self.__test_sunset_time, self.__test_sunrise_time, self.__test_clouds,
self.__test_rain, self.__test_snow, self.__test_wind,
self.__test_humidity, self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time, -45,
self.__test_rain, self.__test_snow, self.__test_wind,
self.__test_humidity, self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, -16, self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
-12, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
-10.0, self.__test_heat_index)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, -10.0)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index, uvi=-1)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index,
precipitation_probability=-1.0)
self.assertRaises(ValueError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index,
precipitation_probability=2.0)
def test_init_when_wind_is_none(self):
instance = Weather(self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds,
self.__test_rain, self.__test_snow,
None,
self.__test_humidity, self.__test_pressure,
self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code,
self.__test_weather_icon_name,
self.__test_visibility_distance,
self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertIsNone(instance.wind())
def test_init_stores_negative_sunset_time_as_none(self):
instance = Weather(self.__test_reference_time,
-9876543210, self.__test_sunrise_time,
self.__test_clouds,
self.__test_rain, self.__test_snow, self.__test_wind,
self.__test_humidity, self.__test_pressure,
self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code,
self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertIsNone(instance.sunset_time())
def test_init_stores_negative_sunrise_time_as_none(self):
instance = Weather(self.__test_reference_time,
self.__test_sunset_time, -9876543210, self.__test_clouds,
self.__test_rain, self.__test_snow, self.__test_wind,
self.__test_humidity, self.__test_pressure,
self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index)
self.assertIsNone(instance.sunrise_time())
def test_init_fails_with_non_integer_utc_offset(self):
self.assertRaises(AssertionError, Weather, self.__test_reference_time,
self.__test_sunset_time, self.__test_sunrise_time,
self.__test_clouds, self.__test_rain, self.__test_snow,
self.__test_wind, self.__test_humidity,
self.__test_pressure, self.__test_temperature,
self.__test_status, self.__test_detailed_status,
self.__test_weather_code, self.__test_weather_icon_name,
self.__test_visibility_distance, self.__test_dewpoint,
self.__test_humidex, self.__test_heat_index,
'non_string_utc_offset')
def test_from_dict_fails_when_dict_is_none(self):
self.assertRaises(ParseAPIResponseError, Weather.from_dict, None)
    def test_from_dict(self):
        """Weather.from_dict must accept every observed API payload shape.

        Each dict below mimics a different endpoint/payload variant; all of
        them must parse into a Weather instance without raising.
        """
        # Full "current observation" payload with sys/weather/main/wind/
        # visibility/calc sections and a top-level 'dt'.
        dict1 = {'clouds': {'all': 92}, 'name': 'London',
                 'coord': {'lat': 51.50853, 'lon': -0.12574},
                 'sys': {'country': 'GB', 'sunset': 1378923812,
                         'sunrise': 1378877413
                         },
                 'weather': [
                     {'main': 'Clouds', 'id': 804, 'icon': '04d',
                      'description': 'overcastclouds'}
                 ],
                 'cod': 200, 'base': 'gdpsstations', 'dt': 1378895177,
                 'main': {
                     'pressure': 1022,
                     'humidity': 75,
                     'temp_max': 289.82,
                     'temp': 288.44,
                     'temp_min': 287.59
                 },
                 'id': 2643743,
                 'wind': {'gust': 2.57, 'speed': 1.54, 'deg': 31},
                 'visibility': {'distance': 1000},
                 'calc': {
                     'dewpoint': 300.0,
                     'humidex': 298.0,
                     'heatindex': 296.0
                 }
                 }
        # Forecast-style payload: 'temp' is a dict of day parts, wind/rain
        # values appear as flat top-level keys.
        dict2 = {"dt": 1378897200,
                 "temp": {"day": 289.37, "min": 284.88, "max": 289.37,
                          "night": 284.88, "eve": 287.53, "morn": 289.37
                          },
                 "pressure": 1025.35,
                 "humidity": 71,
                 "weather": [
                     {"id": 500, "main": "Rain", "description": "light rain",
                      "icon": "u10d"}
                 ], "speed": 3.76, "deg": 338, "clouds": 48, "rain": 3
                 }
        # Weather-station payload: data nested under 'station'/'last',
        # including a 'timezone' field.
        dict3 = {"station": {
                     "name": "KPPQ",
                     "type": 1,
                     "status": 50,
                     "id": 1000,
                     "coord": {"lon": -90.47, "lat": 39.38}
                 },
                 "last": {
                     "main": {
                         "temp": 276.15,
                         "pressure": 1031},
                     "wind": {
                         "speed": 3.1,
                         "deg": 140
                     },
                     "visibility": {
                         "distance": 11265,
                         "prefix": 0
                     },
                     "calc": {
                         "dewpoint": 273.15,
                         "humidex": 57.8,
                         "heatindex": 1.2
                     },
                     "clouds": [
                         {"distance": 427,
                          "condition": "SCT"}
                     ],
                     "dt": 1417977300
                 },
                 "params": ["temp", "pressure", "wind", "visibility"],
                 "timezone": 1234567
                 }
        # No 'dt', empty 'calc'/'last', and snow reported as {'tot': ...}.
        dict4 = {'clouds': {'all': 92}, 'name': 'London',
                 'coord': {'lat': 51.50853, 'lon': -0.12574},
                 'sys': {'country': 'GB', 'sunset': 1378923812,
                         'sunrise': 1378877413
                         },
                 'weather': [
                     {'main': 'Clouds', 'id': 804, 'icon': '04d',
                      'description': 'overcastclouds'}
                 ],
                 'cod': 200, 'base': 'gdpsstations',
                 'main': {
                     'pressure': 1022,
                     'humidity': 75,
                     'temp_max': 289.82,
                     'temp': 288.44,
                     'temp_min': 287.59
                 },
                 'id': 2643743,
                 'wind': {'gust': 2.57, 'speed': 1.54, 'deg': 31},
                 'calc': {},
                 'last': {},
                 'snow': {'tot': 76.3}
                 }
        # Observation payload with an empty 'last' section and no 'calc' data.
        dict5 = {'clouds': {'all': 92}, 'name': 'London',
                 'coord': {'lat': 51.50853, 'lon': -0.12574},
                 'sys': {'country': 'GB', 'sunset': 1378923812,
                         'sunrise': 1378877413
                         },
                 'weather': [
                     {'main': 'Clouds', 'id': 804, 'icon': '04d',
                      'description': 'overcastclouds'}
                 ],
                 'cod': 200, 'base': 'gdpsstations',
                 'main': {
                     'pressure': 1022,
                     'humidity': 75,
                     'temp_max': 289.82,
                     'temp': 288.44,
                     'temp_min': 287.59
                 },
                 'id': 2643743,
                 'wind': {'gust': 2.57, 'speed': 1.54, 'deg': 31},
                 'visibility': {'distance': 1000},
                 "last": {}
                 }
        # Humidity missing from 'main' (present inside 'last'), scalar
        # visibility/snow values, nested 'last' carrying its own 'dt'.
        dict6 = {'clouds': {'all': 92}, 'name': 'London',
                 'coord': {'lat': 51.50853, 'lon': -0.12574},
                 'sys': {'country': 'GB', 'sunset': 1378923812,
                         'sunrise': 1378877413
                         },
                 'weather': [
                     {'main': 'Clouds', 'id': 804, 'icon': '04d',
                      'description': 'overcastclouds'}
                 ],
                 'cod': 200, 'base': 'gdpsstations', 'dt': 1378895177,
                 'main': {
                     'pressure': 1022,
                     'temp_max': 289.82,
                     'temp': 288.44,
                     'temp_min': 287.59
                 },
                 'id': 2643743,
                 'wind': {'gust': 2.57, 'speed': 1.54, 'deg': 31},
                 'last': {
                     "dt": 1417977300,
                     "calc": {},
                     'visibility': 2.34,
                     'main': {
                         "humidity": 77.2
                     }
                 },
                 'snow': 66.1
                 }
        result1 = Weather.from_dict(dict1)
        self.assertTrue(isinstance(result1, Weather))
        result2 = Weather.from_dict(dict2)
        self.assertTrue(isinstance(result2, Weather))
        result3 = Weather.from_dict(dict3)
        self.assertTrue(isinstance(result3, Weather))
        result4 = Weather.from_dict(dict4)
        self.assertTrue(isinstance(result4, Weather))
        result5 = Weather.from_dict(dict5)
        self.assertTrue(isinstance(result5, Weather))
        result6 = Weather.from_dict(dict6)
        self.assertTrue(isinstance(result6, Weather))
    def test_from_dict_when_data_fields_are_none(self):
        """Explicit None values for wind/rain/snow must parse to empty data,
        not raise."""
        # Observation payload with wind, rain and snow all set to None.
        dict1 = {'clouds': {'all': 92}, 'name': 'London',
                 'coord': {'lat': 51.50853, 'lon': -0.12574},
                 'sys': {'country': 'GB', 'sunset': 1378923812,
                         'sunrise': 1378877413
                         },
                 'weather': [
                     {'main': 'Clouds', 'id': 804, 'icon': '04d',
                      'description': 'overcastclouds'}
                 ],
                 'cod': 200, 'base': 'gdpsstations', 'dt': 1378895177,
                 'main': {
                     'pressure': 1022,
                     'humidity': 75,
                     'temp_max': 289.82,
                     'temp': 288.44,
                     'temp_min': 287.59
                 },
                 'id': 2643743,
                 'wind': None,
                 'visibility': {'distance': 1000},
                 'calc': {
                     'dewpoint': 300.0,
                     'humidex': 298.0,
                     'heatindex': 296.0
                 },
                 'rain': None,
                 'snow': None
                 }
        result1 = Weather.from_dict(dict1)
        self.assertTrue(isinstance(result1, Weather))
        # None fields come back as empty containers rather than None.
        self.assertEqual(0, len(result1.wind()))
        self.assertEqual(0, len(result1.rain))
        self.assertEqual(0, len(result1.snow))
        # Station-style payload with 'wind' set to None inside 'last'.
        dict2 = {"station": {
                     "name": "KPPQ",
                     "type": 1,
                     "status": 50,
                     "id": 1000,
                     "coord": {"lon": -90.47, "lat": 39.38}
                 },
                 "last": {
                     "main": {
                         "temp": 276.15,
                         "pressure": 1031},
                     "wind": None,
                     "visibility": {
                         "distance": 11265,
                         "prefix": 0
                     },
                     "calc": {
                         "dewpoint": 273.15
                     },
                     "clouds": [
                         {"distance": 427,
                          "condition": "SCT"}
                     ],
                     "dt": 1417977300
                 },
                 "params": ["temp", "pressure", "wind", "visibility"]
                 }
        result2 = Weather.from_dict(dict2)
        self.assertTrue(isinstance(result2, Weather))
        self.assertEqual(0, len(result2.wind()))
def test_to_dict(self):
expected = json.loads(self.WEATHER_JSON_DUMP)
result = self.__test_instance.to_dict()
self.assertEqual(expected, result)
def test_from_dict_of_lists(self):
result = Weather.from_dict_of_lists(json.loads(CITY_WEATHER_HISTORY_JSON))
self.assertTrue(result)
self.assertTrue(isinstance(result, list))
for weather in result:
self.assertTrue(weather is not None)
def test_from_dict_of_lists_fails_when_JSON_data_is_None(self):
self.assertRaises(ParseAPIResponseError, Weather.from_dict_of_lists, None)
def test_from_dict_of_lists_with_malformed_JSON_data(self):
self.assertRaises(ParseAPIResponseError, Weather.from_dict_of_lists, json.loads(self.__bad_json))
self.assertRaises(ParseAPIResponseError, Weather.from_dict_of_lists, json.loads(self.__bad_json_2))
def test_from_dict_of_lists_when_no_results(self):
result = Weather.from_dict_of_lists(json.loads(CITY_WEATHER_HISTORY_NO_RESULTS_JSON))
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
result = Weather.from_dict_of_lists(json.loads(self.__no_items_json))
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def test_parse_JSON_when_location_not_found(self):
self.assertFalse(Weather.from_dict_of_lists(json.loads(CITY_WEATHER_HISTORY_NOT_FOUND_JSON)))
def test_parse_JSON_when_server_error(self):
self.assertRaises(APIResponseError, Weather.from_dict_of_lists, json.loads(INTERNAL_SERVER_ERROR_JSON))
def test_reference_time_returning_different_formats(self):
self.assertEqual(self.__test_instance.reference_time(timeformat='iso'),
self.__test_iso_reference_time)
self.assertEqual(self.__test_instance.reference_time(timeformat='unix'),
self.__test_reference_time)
self.assertEqual(self.__test_instance.reference_time(timeformat='date'),
self.__test_date_reference_time)
def test_sunset_time_returning_different_formats(self):
self.assertEqual(self.__test_instance.sunset_time(timeformat='iso'),
self.__test_iso_sunset_time)
self.assertEqual(self.__test_instance.sunset_time(timeformat='unix'),
self.__test_sunset_time)
self.assertEqual(self.__test_instance.sunset_time(timeformat='date'),
self.__test_date_sunset_time)
def test_sunrise_time_returning_different_formats(self):
self.assertEqual(self.__test_instance.sunrise_time(timeformat='iso'),
self.__test_iso_sunrise_time)
self.assertEqual(self.__test_instance.sunrise_time(timeformat='unix'),
self.__test_sunrise_time)
self.assertEqual(self.__test_instance.sunrise_time(timeformat='date'),
self.__test_date_sunrise_time)
def test_get_reference_time_fails_with_unknown_timeformat(self):
self.assertRaises(ValueError, Weather.reference_time,
self.__test_instance, 'xyz')
def test_sunset_time_fails_with_unknown_timeformat(self):
self.assertRaises(ValueError, Weather.sunset_time,
self.__test_instance, 'xyz')
def test_sunrise_time_fails_with_unknown_timeformat(self):
self.assertRaises(ValueError, Weather.sunrise_time,
self.__test_instance, 'xyz')
def test_returning_different_units_for_temperatures(self):
result_kelvin = self.__test_instance.temperature(unit='kelvin')
result_celsius = self.__test_instance.temperature(unit='celsius')
result_fahrenheit = self.__test_instance.temperature(
unit='fahrenheit')
for item in self.__test_temperature:
self.assertAlmostEqual(result_kelvin[item],
self.__test_temperature[item], delta=0.1)
self.assertAlmostEqual(result_celsius[item],
self.__test_celsius_temperature[item],
delta=0.1)
self.assertAlmostEqual(result_fahrenheit[item],
self.__test_fahrenheit_temperature[item],
delta=0.1)
def test_get_temperature_fails_with_unknown_units(self):
self.assertRaises(ValueError, Weather.temperature,
self.__test_instance, 'xyz')
def test_returning_different_units_for_wind_values(self):
result_imperial = self.__test_instance.wind(unit='miles_hour')
result_metric_ms = self.__test_instance.wind(unit='meters_sec')
result_metric_kmh = self.__test_instance.wind(unit='km_hour')
result_knots = self.__test_instance.wind(unit='knots')
result_beaufort = self.__test_instance.wind(unit='beaufort')
result_unspecified = self.__test_instance.wind()
self.assertEqual(result_unspecified, result_metric_ms)
self.assertDictEqual(result_metric_kmh, self.__test_kmh_wind)
for item in self.__test_wind:
self.assertEqual(result_metric_ms[item],
self.__test_wind[item])
self.assertEqual(result_imperial[item],
self.__test_imperial_wind[item])
self.assertEqual(result_knots[item],
self.__test_knots_wind[item])
self.assertEqual(result_beaufort[item],
self.__test_beaufort_wind[item])
def test_get_wind_fails_with_unknown_units(self):
self.assertRaises(ValueError, Weather.wind, self.__test_instance, 'xyz')
def test_barometric_pressure_returning_different_units_for_pressure_values(self):
result_imperial_inhg = self.__test_instance.barometric_pressure(unit='inHg')
result_metric_hpa = self.__test_instance.barometric_pressure(unit='hPa')
result_unspecified = self.__test_instance.barometric_pressure()
a = result_metric_hpa == result_unspecified
b = result_imperial_inhg == self.__test_inhg_pressure
self.assertTrue(a and b)
def test_barometric_pressure_fails_with_unknown_units(self):
self.assertRaises(ValueError, Weather.barometric_pressure, self.__test_instance, 'xyz')
def test_returning_different_units_for_visibility(self):
result_metric_kms = self.__test_instance.visibility(unit='kilometers')
result_imperial_miles = self.__test_instance.visibility(unit='miles')
result_unspecified = self.__test_instance.visibility()
a = self.__test_visibility_distance == result_unspecified
b = self.__test_visibility_in_kms == result_metric_kms
c = self.__test_visibility_in_miles == result_imperial_miles
self.assertTrue(a and b and c)
def test_visibility_fails_with_unknown_units(self):
self.assertRaises(ValueError, Weather.visibility, self.__test_instance, 'xyz')
def test_weather_icon_url(self):
expected_unspecified = ICONS_BASE_URI % (self.__test_instance.weather_icon_name, "")
expected_2x = ICONS_BASE_URI % (self.__test_instance.weather_icon_name, "@2x")
expected_4x = ICONS_BASE_URI % (self.__test_instance.weather_icon_name, "@4x")
result_unspecified = self.__test_instance.weather_icon_url()
result_2x = self.__test_instance.weather_icon_url(size="2x")
result_4x = self.__test_instance.weather_icon_url(size="4x")
self.assertEqual(expected_unspecified, result_unspecified)
self.assertEqual(expected_2x, result_2x)
self.assertEqual(expected_4x, result_4x)
def test_repr(self):
print(self.__test_instance)
    def test_one_call_current_from_dic(self):
        """One Call 'current' payloads must parse into Weather objects.

        Covers a rainy observation (uvi, two weather conditions, rain
        volume) and a clear one, where the absent "rain" key must parse
        to an empty mapping.
        """
        # Rainy observation with two stacked weather conditions.
        current1 = {
            "dt": 1586001851,
            "sunrise": 1586003020,
            "sunset": 1586048382,
            "temp": 280.15,
            "feels_like": 277.75,
            "pressure": 1017,
            "humidity": 93,
            "uvi": 9.63,
            "clouds": 90,
            "visibility": 6437,
            "wind_speed": 2.1,
            "wind_deg": 70,
            "weather": [
                {
                    "id": 501,
                    "main": "Rain",
                    "description": "moderate rain",
                    "icon": "10n"
                },
                {
                    "id": 701,
                    "main": "Mist",
                    "description": "mist",
                    "icon": "50n"
                }
            ],
            "rain": {
                "1h": 1.02
            }
        }
        result1 = Weather.from_dict(current1)
        self.assertTrue(isinstance(result1, Weather))
        self.assertEqual(9.63, result1.uvi)
        # weather_code is taken from the first element of "weather".
        self.assertEqual(501, result1.weather_code)
        self.assertEqual(1.02, result1.rain["1h"])
        self.assertEqual(280.15, result1.temperature()["temp"])
        self.assertEqual(277.75, result1.temperature()["feels_like"])
        # Clear observation: no "rain" key at all.
        current2 = {
            "dt": 1587678355,
            "sunrise": 1587615127,
            "sunset": 1587665513,
            "temp": 281.78,
            "feels_like": 277.4,
            "pressure": 1017,
            "humidity": 39,
            "dew_point": 269.13,
            "uvi": 7.52,
            "clouds": 2,
            "visibility": 10000,
            "wind_speed": 2.6,
            "wind_deg": 170,
            "weather": [
                {
                    "id": 800,
                    "main": "Clear",
                    "description": "clear sky",
                    "icon": "01n"
                }
            ]
        }
        result2 = Weather.from_dict(current2)
        self.assertTrue(isinstance(result2, Weather))
        self.assertEqual(7.52, result2.uvi)
        self.assertEqual(800, result2.weather_code)
        self.assertEqual(170, result2.wind()["deg"])
        # Missing rain data must surface as an empty mapping.
        self.assertEqual(0, len(result2.rain))
        self.assertEqual(281.78, result2.temperature()["temp"])
        self.assertEqual(277.4, result2.temperature()["feels_like"])
    def test_one_call_hourly_from_dic(self):
        """One Call 'hourly' payloads must parse into Weather objects.

        Same shape as 'current' but without sunrise/sunset/uvi; again a
        rainy and a clear sample are covered.
        """
        # Rainy hourly forecast.
        hourly1 = {
            "dt": 1587675600,
            "temp": 294.16,
            "feels_like": 292.47,
            "pressure": 1009,
            "humidity": 88,
            "dew_point": 292.1,
            "clouds": 90,
            "wind_speed": 7,
            "wind_deg": 189,
            "weather": [
                {
                    "id": 501,
                    "main": "Rain",
                    "description": "moderate rain",
                    "icon": "10d"
                }
            ],
            "rain": {
                "1h": 2.28
            }
        }
        result1 = Weather.from_dict(hourly1)
        self.assertTrue(isinstance(result1, Weather))
        self.assertEqual(292.47, result1.temperature()["feels_like"])
        self.assertEqual(501, result1.weather_code)
        self.assertEqual(2.28, result1.rain["1h"])
        self.assertEqual(294.16, result1.temperature()["temp"])
        self.assertEqual(292.47, result1.temperature()["feels_like"])
        # Clear hourly forecast: no "rain" key.
        hourly2 = {
            "dt": 1587682800,
            "temp": 279.64,
            "feels_like": 276.77,
            "pressure": 1020,
            "humidity": 54,
            "dew_point": 271.26,
            "clouds": 3,
            "wind_speed": 0.84,
            "wind_deg": 119,
            "weather": [
                {
                    "id": 800,
                    "main": "Clear",
                    "description": "clear sky",
                    "icon": "01n"
                }
            ]
        }
        result2 = Weather.from_dict(hourly2)
        self.assertTrue(isinstance(result2, Weather))
        self.assertEqual(3, result2.clouds)
        self.assertEqual(800, result2.weather_code)
        self.assertEqual(119, result2.wind()["deg"])
        # Missing rain data must surface as an empty mapping.
        self.assertEqual(0, len(result2.rain))
        self.assertEqual(279.64, result2.temperature()["temp"])
        self.assertEqual(276.77, result2.temperature()["feels_like"])
    def test_one_call_daily_from_dic(self):
        """One Call 'daily' payloads must parse into Weather objects.

        Daily entries carry dict-valued "temp"/"feels_like" instead of
        scalars: temperature() must expose day/min/max/night/eve/morn
        plus flattened feels_like_* keys, and must NOT expose the
        scalar "temp"/"feels_like" keys.  A scalar "rain" value must
        land under rain["all"].
        """
        # Rainy daily forecast with full temp/feels_like breakdown.
        daily1 = {
            "dt": 1587747600,
            "sunrise": 1587725080,
            "sunset": 1587772792,
            "temp": {
                "day": 300.75,
                "min": 290.76,
                "max": 300.75,
                "night": 290.76,
                "eve": 295.22,
                "morn": 291.44
            },
            "feels_like": {
                "day": 300.69,
                "night": 291.63,
                "eve": 296.8,
                "morn": 292.73
            },
            "pressure": 1009,
            "humidity": 55,
            "dew_point": 291.24,
            "wind_speed": 3.91,
            "wind_deg": 262,
            "weather": [
                {
                    "id": 500,
                    "main": "Rain",
                    "description": "light rain",
                    "icon": "10d"
                }
            ],
            "clouds": 95,
            "rain": 0.82,
            "uvi": 9.46
        }
        result1 = Weather.from_dict(daily1)
        self.assertTrue(isinstance(result1, Weather))
        self.assertEqual(9.46, result1.uvi)
        self.assertEqual(500, result1.weather_code)
        self.assertEqual(262, result1.wind()["deg"])
        # Scalar rain volume is stored under the "all" key.
        self.assertEqual(0.82, result1.rain["all"])
        # Scalar keys must be absent for daily data.
        self.assertRaises(KeyError, lambda: result1.temperature()["temp"])
        self.assertRaises(KeyError, lambda: result1.temperature()["feels_like"])
        self.assertEqual(300.75, result1.temperature()["day"])
        self.assertEqual(290.76, result1.temperature()["min"])
        self.assertEqual(300.75, result1.temperature()["max"])
        self.assertEqual(290.76, result1.temperature()["night"])
        self.assertEqual(295.22, result1.temperature()["eve"])
        self.assertEqual(291.44, result1.temperature()["morn"])
        self.assertEqual(300.69, result1.temperature()["feels_like_day"])
        self.assertEqual(291.63, result1.temperature()["feels_like_night"])
        self.assertEqual(296.8, result1.temperature()["feels_like_eve"])
        self.assertEqual(292.73, result1.temperature()["feels_like_morn"])
        # Clear daily forecast: no "rain" key at all.
        daily2 = {
            "dt": 1587639600,
            "sunrise": 1587615127,
            "sunset": 1587665513,
            "temp": {
                "day": 281.78,
                "min": 279.88,
                "max": 281.78,
                "night": 279.88,
                "eve": 281.78,
                "morn": 281.78
            },
            "feels_like": {
                "day": 278.55,
                "night": 276.84,
                "eve": 278.55,
                "morn": 278.55
            },
            "pressure": 1017,
            "humidity": 39,
            "dew_point": 269.13,
            "wind_speed": 0.96,
            "wind_deg": 116,
            "weather": [
                {
                    "id": 800,
                    "main": "Clear",
                    "description": "clear sky",
                    "icon": "01n"
                }
            ],
            "clouds": 2,
            "uvi": 7.52
        }
        result2 = Weather.from_dict(daily2)
        self.assertTrue(isinstance(result2, Weather))
        self.assertEqual(7.52, result2.uvi)
        self.assertEqual(800, result2.weather_code)
        self.assertEqual(116, result2.wind()["deg"])
        self.assertEqual(0, len(result2.rain))
        self.assertRaises(KeyError, lambda: result2.temperature()["temp"])
        self.assertRaises(KeyError, lambda: result2.temperature()["feels_like"])
        self.assertEqual(281.78, result2.temperature()["day"])
        self.assertEqual(279.88, result2.temperature()["min"])
        self.assertEqual(281.78, result2.temperature()["max"])
        self.assertEqual(279.88, result2.temperature()["night"])
        self.assertEqual(281.78, result2.temperature()["eve"])
        self.assertEqual(281.78, result2.temperature()["morn"])
        self.assertEqual(278.55, result2.temperature()["feels_like_day"])
        self.assertEqual(276.84, result2.temperature()["feels_like_night"])
        self.assertEqual(278.55, result2.temperature()["feels_like_eve"])
        self.assertEqual(278.55, result2.temperature()["feels_like_morn"])
| |
from rsock import ReliableSocket
from packet import Packet, Header, Payload
from collections import namedtuple
import struct
# Lightweight protocol records returned by the ADBI client.
Symbol = namedtuple('Symbol', ['address', 'name'])
Tracepoint = namedtuple('Tracepoint', ['address', 'handler'])
Injectable = namedtuple('Injectable',
                        ['id', 'filename', 'refs', 'type', 'name', 'comment'])
class ADBIException(Exception):
    """Raised for connection and protocol failures in the ADBI client."""
class ADBI(object):
    """Synchronous client for the ADBI debugging daemon.

    Wraps a ReliableSocket and exposes one method per protocol request
    type.  Every request sends one packet and waits for the matching
    reply, raising ADBIException on FAIL/USUP/MALF responses.
    """
    def __init__(self):
        def seqgen():
            # Endless generator of 32-bit wrap-around sequence numbers.
            while True:
                for seq in xrange(2 ** 32):
                    yield seq
        self.connection = None  # ReliableSocket once connect() succeeds
        self.seqgen = seqgen()
    def __check_connection(self):
        # Fail fast when no connection is established.
        if not self.connection:
            raise ADBIException('Not connected.')
    def connect(self, address):
        """Open the ReliableSocket connection to *address*."""
        if self.connection:
            raise ADBIException('Already connected.')
        self.connection = ReliableSocket(address)
    def disconnect(self):
        """Close and forget the current connection."""
        self.__check_connection()
        self.connection.close()
        self.connection = None
    def __recv(self):
        # Read the fixed-size header, then exactly header.length payload
        # bytes, and reassemble them into a Packet.
        header = Header.unpack_from(self.connection.recv(Header.size))
        payload = self.connection.recv(header.length)
        payload = Payload.unpack_from(payload)
        return Packet(header, payload)
    def __send(self, packet):
        self.connection.send(packet.pack())
    def request(self, type, payload=None):
        """Send one request of the given *type* and return the response.

        Note: *type* mirrors the protocol field name and shadows the
        builtin.  Raises ADBIException for FAIL (request failed), USUP
        (unsupported) and MALF (malformed) replies.
        """
        self.__check_connection()
        if payload is None:
            payload = Payload()
        header = Header(type,
                        next(self.seqgen),
                        len(payload.pack()))
        packet = Packet(header, payload)
        self.__send(packet)
        response = self.__recv()
        if response.type == 'FAIL':
            raise ADBIException(response.get('msg', 'Request failed.'))
        if response.type == 'USUP':
            raise ADBIException(response.get('msg', 'Not supported.'))
        if response.type == 'MALF':
            raise ADBIException('Protocol error: {:}'.format(response.get('msg', '?')))
        return response
    def ping(self):
        return self.request('PING')
    def quit(self):
        return self.request('QUIT')
    @property
    def executables(self):
        # NOTE(review): get_text is not defined in this class as shown
        # here -- presumably provided by a subclass or mixin; confirm.
        return self.get_text(0)
    def get_memory(self, pid):
        """Return the memory segments of *pid*, sorted by address.

        Each element is a (lo, hi, type, file, off) tuple taken from
        the MAPS response arrays.
        """
        payload = Payload()
        payload.put_u32('pid', pid)
        response = self.request('MAPS', payload)
        def iter_segments():
            for i in xrange(response.get('segc', 0)):
                def get(what):
                    return response.get('seg{:}[{:}]'.format(what, i))
                yield get('lo'), get('hi'), get('type'), get('file'), get('off')
        return sorted(iter_segments())
    def explain_address(self, pid, address):
        """Ask the daemon to describe *address* inside process *pid*."""
        payload = Payload()
        payload.put_u32('pid', pid)
        payload.put_u64('address', address)
        return self.request('ADDR', payload)
    def dump(self, pid, address, size):
        """Read memory from *pid* and return it as a raw byte string.

        The MEMD reply delivers 32-bit words; each word is serialized
        LSB-first (little-endian) into the result.
        """
        payload = Payload()
        payload.put_u32('pid', pid)
        payload.put_u64('address', address)
        payload.put_u32('size', size)
        response = self.request('MEMD', payload)
        words = (response.get('word[%i]' % x) for x in xrange(response.get('size', 0)))
        def tobytes(word):
            a = (word >> 24) & 0xff
            b = (word >> 16) & 0xff
            c = (word >> 8) & 0xff
            d = (word) & 0xff
            return ''.join(chr(x) for x in (d, c, b, a))
        bytes = (tobytes(x) for x in words)  # shadows the builtin name
        return ''.join(bytes)
    @property
    def processes(self):
        """Set of process identifiers currently reported by the daemon."""
        response = self.request('PROC')
        return set([response.get('procv[{:}]'.format(i)) for i in
                    xrange(response.get('procc', 0))])
    def start(self):
        return self.request('STRT')
    def stop(self):
        return self.request('STOP')
    def ls(self, path):
        """List *path* on the device; directory entries get a '/' suffix."""
        payload = Payload()
        payload.put_str('path', path)
        response = self.request('LDIR', payload)
        return set([response.get('entv[{:}]'.format(i))
                    + ('/' if response.get('entd[{:}]'.format(i)) else '')
                    for i in xrange(response.get('entc', 0))])
    def loglevel(self, loglevel):
        """Set the daemon's log verbosity."""
        payload = Payload()
        payload.put_u32('loglevel', loglevel)
        return self.request('LLEV', payload)
    def attach(self, pid):
        """Attach the daemon to process *pid*."""
        payload = Payload()
        payload.put_u32('pid', pid)
        return self.request('ATTC', payload)
    def detach(self, pid):
        """Detach the daemon from process *pid*."""
        payload = Payload()
        payload.put_u32('pid', pid)
        return self.request('DETC', payload)
    def spawn(self, args):
        """Spawn a new traced process with argument vector *args*."""
        payload = Payload()
        payload.put_u32('argc', len(args))
        for i, v in enumerate(args):
            payload.put_str('argv[{:}]'.format(i), v)
        return self.request('SPWN', payload)
    def iter_injectable_symbols(self, iid, which):
        """Yield Symbol tuples of one kind for injectable *iid*.

        *which* selects Exports, Imports or Adbi symbols.
        NOTE(review): the substring test also admits values like 'EI';
        presumably only single characters are intended -- confirm.
        """
        if which not in 'EIA':
            raise ValueError
        payload = Payload()
        payload.put_u32('iid', iid)
        response = self.request('INJ' + which, payload)
        for i in xrange(response.get('symc', 0)):
            postfix = '[%i]' % i
            yield Symbol(response.get('symad' + postfix), response.get('symnm' + postfix))
    def get_injectable_imports(self, iid):
        return self.iter_injectable_symbols(iid, 'I')
    def get_injectable_exports(self, iid):
        return self.iter_injectable_symbols(iid, 'E')
    def get_injectable_adbi(self, iid):
        return self.iter_injectable_symbols(iid, 'A')
    def get_injectable_tracepoints(self, iid):
        """Yield Tracepoint tuples defined by injectable *iid*."""
        payload = Payload()
        payload.put_u32('iid', iid)
        response = self.request('INJT', payload)
        for i in xrange(response.get('tptc', 0)):
            postfix = '[%i]' % i
            yield Tracepoint(response.get('tpta' + postfix), response.get('tpth' + postfix))
    def iter_injectables(self):
        """Yield an Injectable record for every loaded injectable."""
        response = self.request('INJQ')
        for i in xrange(response.get('injc', 0)):
            postfix = '[%i]' % i
            yield Injectable(response.get('injid' + postfix),
                             response.get('injfn' + postfix),
                             response.get('injrc' + postfix),
                             response.get('injtp' + postfix),
                             response.get('injnm' + postfix),
                             response.get('injcm' + postfix))
    @property
    def injectables(self):
        return sorted(self.iter_injectables())
    def injectable_load(self, path):
        """Load an injectable from *path* on the device."""
        payload = Payload()
        payload.put_str('path', path)
        return self.request('INJL', payload)
    def injectable_unload(self, iid):
        """Unload injectable *iid*."""
        payload = Payload()
        payload.put_u32('iid', iid)
        return self.request('INJU', payload)
    def kill(self, pid):
        """Terminate process *pid*."""
        payload = Payload()
        payload.put_u32('pid', pid)
        return self.request('KILL', payload)
| |
import os
from rpython.rlib.rsre import rsre_core
from rpython.rlib.rstring import StringBuilder
from topaz.objects.fileobject import FNM_NOESCAPE, FNM_DOTMATCH
from topaz.utils import regexp
from topaz.utils.ll_file import isdir
from topaz.utils.ordereddict import OrderedDict
def regexp_match(cache, re, string):
    """Compile *re* and return an rsre match context over all of *string*."""
    code, flags, _, _, _, _ = regexp.compile(cache, re)
    return rsre_core.StrMatchContext(code, string, 0, len(string), flags)
def path_split(string):
    """Split a path into alternating "/" separators and components.

    Empty components (from leading or doubled slashes) contribute their
    separator only; the very first separator is dropped.
    """
    if not string:
        return [""]
    pieces = []
    for component in string.split("/"):
        if component:
            pieces.extend(["/", component])
        else:
            pieces.append("/")
    return pieces[1:]
def combine_segments(old_segments, suffix, new_segments=[""]):
segments = []
for old_seg in old_segments:
for new_seg in new_segments:
segments.append(old_seg + suffix + new_seg)
return segments
class Glob(object):
    """Compiles glob patterns (with brace expansion) into Node chains
    and accumulates the resulting filesystem matches."""

    def __init__(self, cache, matches=None):
        self.cache = cache
        # OrderedDict used as an insertion-ordered set of matched paths.
        self._matches = OrderedDict()
        for match in (matches or []):
            self.append_match(match)

    def matches(self):
        """Return the matched paths in discovery order."""
        return self._matches.keys()

    def append_match(self, match):
        self._matches[match] = None

    def is_constant(self, part, flags):
        """True if *part* contains no glob metacharacters under *flags*."""
        special_chars = "?*["
        if not (flags & FNM_NOESCAPE):
            special_chars += "\\"
        for ch in part:
            if ch in special_chars:
                return False
        return True

    def single_compile(self, glob, flags=0):
        """Compile one brace-free pattern into a chain of Node objects,
        built back-to-front starting from the last path component."""
        parts = path_split(glob)
        if parts[-1] == "/":
            last = DirectoriesOnly(None, flags)
        else:
            file = parts.pop()
            if self.is_constant(file, flags):
                last = ConstantEntry(None, flags, file)
            else:
                last = EntryMatch(None, flags, file)
        while parts:
            # Collapse a run of separators into the next node's separator.
            sep_parts = []
            while parts and parts[-1] == "/":
                sep_parts.append(parts.pop())
            last.separator = "".join(sep_parts)
            if not parts:
                last = RootDirectory(last, flags)
            else:
                dir = parts.pop()
                if dir == "**":
                    if parts:
                        last = RecursiveDirectories(last, flags)
                    else:
                        last = StartRecursiveDirectories(last, flags)
                elif self.is_constant(dir, flags):
                    last = ConstantDirectory(last, flags, dir)
                else:
                    last = DirectoryMatch(last, flags, dir)
        return last

    def run(self, node):
        node.call(self, None)

    def glob(self, pattern, flags):
        """Expand braces if present, then compile and run every pattern."""
        if "{" in pattern:
            patterns = self.compile(pattern, flags)
            for node in patterns:
                self.run(node)
        else:
            node = self.single_compile(pattern, flags)
            if node:
                self.run(node)

    def process_braces(self, pattern, flags, i=0):
        """Recursively expand one {a,b,...} group starting at index *i*.

        Returns (index of the closing brace or end of string, list of
        expanded patterns).
        """
        should_escape = flags & FNM_NOESCAPE == 0
        patterns = []
        escaped = False
        pattern_start = i
        segments = [""]
        while i < len(pattern):
            ch = pattern[i]
            if ch == "\\" and should_escape and not escaped:
                escaped = True
            elif ch == ",":
                if escaped:
                    escaped = False
                else:
                    # End of one alternative at this nesting level.
                    suffix = pattern[pattern_start:i]
                    patterns.extend(combine_segments(segments, suffix))
                    segments = [""]
                    pattern_start = i + 1
            elif ch == "}":
                if escaped:
                    escaped = False
                else:
                    suffix = pattern[pattern_start:i]
                    patterns.extend(combine_segments(segments, suffix))
                    return i, patterns
            elif ch == "{":
                if escaped:
                    escaped = False
                else:
                    # Recurse into the nested group; resume after its "}".
                    suffix = pattern[pattern_start:i]
                    i, new_segs = self.process_braces(pattern, flags, i + 1)
                    segments = combine_segments(segments, suffix, new_segs)
                    pattern_start = i + 1
            else:
                escaped = False
            i += 1
        suffix = pattern[pattern_start:]
        patterns.extend(combine_segments(segments, suffix))
        return i, patterns

    def compile(self, pattern, flags=0):
        """Expand braces and compile every alternative pattern.

        Fix: *flags* are now forwarded to single_compile; previously
        they were dropped for brace patterns, so FNM_NOESCAPE and
        FNM_DOTMATCH were silently ignored whenever the pattern
        contained "{...}" -- inconsistent with the non-brace path in
        glob().
        """
        i, patterns = self.process_braces(pattern, flags)
        return [self.single_compile(p, flags) for p in patterns]
class Node(object):
    """Base node of a compiled glob chain.

    Each node handles one path component and forwards candidate paths
    to self.next.
    """

    def __init__(self, nxt, flags):
        self.flags = flags
        self.next = nxt
        self.separator = "/"

    def allow_dots(self):
        """Whether dotfiles should be matched (FNM_DOTMATCH set)."""
        return self.flags & FNM_DOTMATCH != 0

    def path_join(self, parent, ent):
        """Join *ent* onto *parent* using this node's separator."""
        if not parent:
            return ent
        if parent == "/":
            return "/" + ent
        return parent + self.separator + ent
class ConstantDirectory(Node):
    """Chain node for a literal (non-wildcard) directory component."""

    def __init__(self, nxt, flags, dir):
        Node.__init__(self, nxt, flags)
        self.dir = dir

    def call(self, glob, path):
        # No filesystem check here; later nodes validate existence.
        self.next.call(glob, self.path_join(path, self.dir))
class ConstantEntry(Node):
    """Terminal node for a literal file name: record it if it exists."""

    def __init__(self, nxt, flags, name):
        Node.__init__(self, nxt, flags)
        self.name = name

    def call(self, glob, parent):
        candidate = self.path_join(parent, self.name)
        if os.path.exists(candidate):
            glob.append_match(candidate)
class RootDirectory(Node):
    """Chain node representing the filesystem root."""

    def call(self, glob, path):
        # Everything below the root starts from "/" regardless of *path*.
        self.next.call(glob, "/")
class RecursiveDirectories(Node):
    """Chain node for '**' with a parent: visit *start* and every
    directory below it."""
    def call(self, glob, start):
        if not (start and os.path.exists(start)):
            return
        self.call_with_stack(glob, start, [start])
    def call_with_stack(self, glob, start, stack):
        """Invoke the next node on *start* and on each directory under
        every path on *stack* (depth-first, iterative).

        The next node's separator is temporarily replaced by this
        node's separator for the direct call on *start*, so joined
        paths keep the separator run from the original pattern.
        """
        old_sep = self.next.separator
        self.next.separator = self.separator
        self.next.call(glob, start)
        self.next.separator = old_sep
        while stack:
            path = stack.pop()
            try:
                entries = os.listdir(path)
            except OSError:
                # Unreadable directory: skip it and keep walking.
                continue
            for ent in entries:
                full = self.path_join(path, ent)
                if isdir(full) and (self.allow_dots() or ent[0] != "."):
                    stack.append(full)
                    self.next.call(glob, full)
class StartRecursiveDirectories(RecursiveDirectories):
    """'**' at the start of a pattern: recurse from the current directory."""

    def call(self, glob, start):
        pending = []
        for entry in os.listdir("."):
            if isdir(entry) and (self.allow_dots() or entry[0] != "."):
                pending.append(entry)
                self.next.call(glob, entry)
        self.call_with_stack(glob, None, pending)
class Match(Node):
    """Base for nodes that match names against a translated glob regexp."""
    def __init__(self, nxt, flags, glob_pattern):
        Node.__init__(self, nxt, flags)
        # Dotfiles match only under FNM_DOTMATCH or when the pattern
        # itself begins with a dot.
        self.match_dotfiles = self.allow_dots() or glob_pattern[0] == "."
        self.regexp = self.translate(glob_pattern, flags)
    def translate(self, pattern, flags):
        """Translate a glob *pattern* into an anchored regexp string.

        Handles backslash escaping (unless FNM_NOESCAPE), "*", "?" and
        "[...]" character classes; all other non-alphanumerics are
        escaped literally.
        """
        pattern = os.path.normcase(pattern)
        should_escape = flags & FNM_NOESCAPE == 0
        escaped = False
        i = 0
        n = len(pattern)
        res = StringBuilder(n)
        res.append("^")
        while i < n:
            c = pattern[i]
            i += 1
            if c == "\\":
                if should_escape and not escaped:
                    escaped = True
                else:
                    res.append("\\\\")
                    escaped = False
            elif c == "*":
                if escaped:
                    escaped = False
                    res.append("\\*")
                else:
                    res.append(".*")
                    # skip second `*' in directory wildcards
                    if i < n and pattern[i] == "*":
                        i += 1
            elif c == "?":
                if escaped:
                    escaped = False
                    res.append("\\?")
                else:
                    res.append(".")
            elif c == "[":
                if escaped:
                    escaped = False
                    res.append("\\[")
                else:
                    # Scan ahead for the matching "]"; a "]" directly
                    # after "[" (or "[^") counts as a literal member.
                    j = i
                    if j < n and pattern[j] == "^":
                        j += 1
                    if j < n and pattern[j] == "]":
                        j += 1
                    while j < n and pattern[j] != "]":
                        j += 1
                    if j >= n:
                        # Unterminated class: emit a literal "[".
                        res.append("\\[")
                    else:
                        res.append("[")
                        if pattern[i] == "^":
                            res.append("^")
                            i += 1
                        # NOTE(review): this elif repeats the previous
                        # condition and is unreachable -- possibly "!"
                        # (fnmatch-style negation) was intended; confirm.
                        elif pattern[i] == "^":
                            res.append("\\^")
                            i += 1
                        for ch in pattern[i:j]:
                            if ch == "\\":
                                res.append("\\\\")
                            else:
                                res.append(ch)
                        res.append("]")
                        i = j + 1
            else:
                escaped = False
                # Escape every other non-alphanumeric character.
                if not c.isalnum():
                    res.append("\\")
                res.append(c)
        res.append("$")
        return res.build()
    def ismatch(self, cache, string):
        """True when *string* matches, honouring the dotfile rule."""
        string = os.path.normcase(string)
        if string.startswith(".") and not self.match_dotfiles:
            return False
        ctx = regexp_match(cache, self.regexp, string)
        return rsre_core.search_context(ctx)
class DirectoryMatch(Match):
    """Chain node matching directory names against a glob pattern."""

    def call(self, glob, path):
        if path and not os.path.exists(path):
            return
        # "." and ".." are always offered as candidates.
        for entry in [".", ".."] + os.listdir(path if path else "."):
            if not self.ismatch(glob.cache, entry):
                continue
            candidate = self.path_join(path, entry)
            if isdir(candidate):
                self.next.call(glob, candidate)
class EntryMatch(Match):
    """Terminal node: record every entry of *path* matching the pattern."""

    def call(self, glob, path):
        # "path/." exists only when path is a traversable directory.
        if path and not os.path.exists(path + "/."):
            return
        try:
            entries = [".", ".."] + os.listdir(path if path else ".")
        except OSError:
            return
        for entry in entries:
            if not self.ismatch(glob.cache, entry):
                continue
            glob.append_match(self.path_join(path, entry))
class DirectoriesOnly(Node):
    """Terminal node for patterns ending in '/': record directories only."""

    def call(self, glob, path):
        # "path/." exists only when path is a traversable directory.
        if not (path and os.path.exists(path + "/.")):
            return
        glob.append_match("/" if path == "/" else path + "/")
| |
"""Tests for blobstore.py."""
import cgi
import cStringIO
import datetime
import pickle
import unittest
from .google_imports import namespace_manager
from .google_imports import datastore_types
from . import blobstore
from . import model
from . import tasklets
from . import test_utils
class BlobstoreTests(test_utils.NDBTest):
def setUp(self):
super(BlobstoreTests, self).setUp()
self.testbed.init_blobstore_stub()
the_module = blobstore
def testConstants(self):
# This intentionally hardcodes the values. I'd like to know when
# they change.
self.assertEqual(blobstore.BLOB_INFO_KIND, '__BlobInfo__')
self.assertEqual(blobstore.BLOB_MIGRATION_KIND, '__BlobMigration__')
self.assertEqual(blobstore.BLOB_KEY_HEADER, 'X-AppEngine-BlobKey')
self.assertEqual(blobstore.BLOB_RANGE_HEADER, 'X-AppEngine-BlobRange')
self.assertEqual(blobstore.UPLOAD_INFO_CREATION_HEADER,
'X-AppEngine-Upload-Creation')
self.assertEqual(blobstore.MAX_BLOB_FETCH_SIZE, 1015808)
def testExceptions(self):
self.assertTrue(issubclass(blobstore.Error, Exception))
self.assertTrue(issubclass(blobstore.InternalError, blobstore.Error))
self.assertTrue(issubclass(blobstore.BlobFetchSizeTooLargeError,
blobstore.Error))
self.assertTrue(issubclass(blobstore.BlobNotFoundError, blobstore.Error))
self.assertTrue(issubclass(blobstore.DataIndexOutOfRangeError,
blobstore.Error))
self.assertTrue(issubclass(blobstore.PermissionDeniedError,
blobstore.Error))
self.assertTrue(issubclass(blobstore.BlobInfoParseError, blobstore.Error))
def create_blobinfo(self, blobkey):
"""Handcraft a dummy BlobInfo."""
b = blobstore.BlobInfo(key=model.Key(blobstore.BLOB_INFO_KIND, blobkey),
content_type='text/plain',
creation=datetime.datetime(2012, 1, 24, 8, 15, 0),
filename='hello.txt',
size=42,
md5_hash='xxx')
model.Model._put_async(b).check_success()
return b
def testBlobInfo(self):
b = self.create_blobinfo('dummy')
self.assertEqual(b._get_kind(), blobstore.BLOB_INFO_KIND)
self.assertEqual(b.key(), blobstore.BlobKey('dummy'))
self.assertEqual(b.content_type, 'text/plain')
self.assertEqual(b.creation, datetime.datetime(2012, 1, 24, 8, 15, 0))
self.assertEqual(b.filename, 'hello.txt')
self.assertEqual(b.md5_hash, 'xxx')
def testBlobInfo_PutErrors(self):
b = self.create_blobinfo('dummy')
self.assertRaises(Exception, b.put)
self.assertRaises(Exception, b.put_async)
self.assertRaises(Exception, model.put_multi, [b])
self.assertRaises(Exception, model.put_multi_async, [b])
def testBlobInfo_Get(self):
b = self.create_blobinfo('dummy')
c = blobstore.BlobInfo.get(b.key())
self.assertEqual(c, b)
self.assertTrue(c is not b)
c = blobstore.BlobInfo.get('dummy')
self.assertEqual(c, b)
self.assertTrue(c is not b)
def testBlobInfo_GetAsync(self):
b = self.create_blobinfo('dummy')
cf = blobstore.BlobInfo.get_async(b.key())
self.assertTrue(isinstance(cf, tasklets.Future))
c = cf.get_result()
self.assertEqual(c, b)
self.assertTrue(c is not b)
df = blobstore.BlobInfo.get_async(str(b.key()))
self.assertTrue(isinstance(df, tasklets.Future))
d = df.get_result()
self.assertEqual(d, b)
self.assertTrue(d is not b)
def testBlobInfo_GetMulti(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
d, e = blobstore.BlobInfo.get_multi([b.key(), str(c.key())])
self.assertEqual(d, b)
self.assertEqual(e, c)
def testBlobInfo_GetMultiAsync(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
df, ef = blobstore.BlobInfo.get_multi_async([str(b.key()), c.key()])
self.assertTrue(isinstance(df, tasklets.Future))
self.assertTrue(isinstance(ef, tasklets.Future))
d, e = df.get_result(), ef.get_result()
self.assertEqual(d, b)
self.assertEqual(e, c)
def testBlobInfo_Delete(self):
b = self.create_blobinfo('dummy')
c = blobstore.get(b._key.id())
self.assertEqual(c, b)
b.delete()
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobInfo_DeleteAsync(self):
b = self.create_blobinfo('dummy')
df = b.delete_async()
self.assertTrue(isinstance(df, tasklets.Future), df)
df.get_result()
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobstore_Get(self):
b = self.create_blobinfo('dummy')
c = blobstore.get(b.key())
self.assertEqual(c, b)
self.assertTrue(c is not b)
c = blobstore.get('dummy')
self.assertEqual(c, b)
self.assertTrue(c is not b)
def testBlobstore_GetAsync(self):
b = self.create_blobinfo('dummy')
cf = blobstore.get_async(b.key())
self.assertTrue(isinstance(cf, tasklets.Future))
c = cf.get_result()
self.assertEqual(c, b)
self.assertTrue(c is not b)
cf = blobstore.get_async('dummy')
c = cf.get_result()
self.assertEqual(c, b)
self.assertTrue(c is not b)
def testBlobstore_Delete(self):
b = self.create_blobinfo('dummy')
blobstore.delete(b.key())
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobstore_DeleteAsync(self):
b = self.create_blobinfo('dummy')
df = blobstore.delete_async(b.key())
self.assertTrue(isinstance(df, tasklets.Future), df)
df.get_result()
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobstore_DeleteMulti(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
blobstore.delete_multi([b.key(), str(c.key())])
d, e = blobstore.get_multi([b.key(), str(c.key())])
self.assertEqual(d, None)
self.assertEqual(e, None)
def testBlobstore_DeleteMultiAsync(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
f = blobstore.delete_multi_async([b.key(), str(c.key())])
self.assertTrue(isinstance(f, tasklets.Future), f)
f.get_result()
d, e = blobstore.get_multi([b.key(), str(c.key())])
self.assertEqual(d, None)
self.assertEqual(e, None)
def testBlobstore_CreateUploadUrl(self):
url = blobstore.create_upload_url('/foo')
self.assertTrue('/_ah/upload/' in url, url)
def testBlobstore_CreateUploadUrlAsync(self):
urlf = blobstore.create_upload_url_async('/foo')
self.assertTrue(isinstance(urlf, tasklets.Future), urlf)
url = urlf.get_result()
self.assertTrue('/_ah/upload/' in url, url)
def testBlobstore_ParseBlobInfo_Errors(self):
nope = blobstore.parse_blob_info(None)
self.assertEqual(nope, None)
env = {'REQUEST_METHOD': 'POST'}
hdrs = {'content-disposition': 'blah; filename=hello.txt; name=hello',
'content-type': 'text/plain; blob-key=xxx'}
fd = cStringIO.StringIO(
'Content-length: 42\n'
'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
self.assertRaises(blobstore.BlobInfoParseError,
blobstore.parse_blob_info, fs)
fd = cStringIO.StringIO(
'Content-type: image/jpeg\n'
'Content-length: hello\n'
'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
self.assertRaises(blobstore.BlobInfoParseError,
blobstore.parse_blob_info, fs)
fd = cStringIO.StringIO(
'Content-type: image/jpeg\n'
'Content-length: 42\n'
'X-AppEngine-Upload-Creation: BLAH-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
self.assertRaises(blobstore.BlobInfoParseError,
blobstore.parse_blob_info, fs)
def testBlobstore_ParseBlobInfo(self):
env = {'REQUEST_METHOD': 'POST'}
hdrs = {'content-disposition': 'blah; filename=hello.txt; name=hello',
'content-type': 'text/plain; blob-key=xxx'}
fd = cStringIO.StringIO(
'Content-type: image/jpeg\n'
'Content-length: 42\n'
'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
bi = blobstore.parse_blob_info(fs)
self.assertTrue(isinstance(bi, blobstore.BlobInfo))
self.assertEqual(
bi,
blobstore.BlobInfo(key=model.Key(blobstore.BlobInfo, 'xxx'),
content_type='image/jpeg',
creation=datetime.datetime(2012, 1, 24, 17, 35),
filename='hello.txt',
md5_hash='xxx',
size=42))
def testBlobstore_FetchData(self):
self.create_blobinfo('xxx')
stub = self.testbed.get_stub('blobstore')
storage = stub.storage
storage._blobs['xxx'] = 'abcde'
result = blobstore.fetch_data('xxx', 0, 3) # Range is inclusive!
self.assertEqual(result, 'abcd')
def testBlobstore_FetchDataAsync(self):
    """fetch_data_async returns a Future resolving to the byte range."""
    blob = self.create_blobinfo('xxx')
    self.testbed.get_stub('blobstore').storage._blobs['xxx'] = 'abcde'
    future = blobstore.fetch_data_async(blob, 0, 2)
    self.assertTrue(isinstance(future, tasklets.Future), future)
    # Inclusive range 0..2 -> first three bytes.
    self.assertEqual(future.get_result(), 'abc')
def testBlobInfo_Open(self):
    """BlobInfo.open() yields a sequential reader over the stored bytes."""
    info = self.create_blobinfo('xxx')
    self.testbed.get_stub('blobstore').storage._blobs['xxx'] = 'abcde'
    reader = info.open()
    # Consecutive reads advance through the blob; the last read is short.
    self.assertEqual(reader.read(3), 'abc')
    self.assertEqual(reader.read(3), 'de')
    self.assertEqual(reader.blob_info, info)
def testBlobReader(self):
    """BlobReader reads the whole blob and exposes its BlobInfo."""
    info = self.create_blobinfo('xxx')
    self.testbed.get_stub('blobstore').storage._blobs['xxx'] = 'abcde'
    reader = blobstore.BlobReader('xxx')
    self.assertEqual(reader.read(), 'abcde')
    self.assertEqual(reader.blob_info, info)
def main():
    """Entry point: run all tests in this module via unittest."""
    unittest.main()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 TrilioData, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Raksha base exception handling.
Includes decorator for re-raising Raksha-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools

from oslo.config import cfg
import webob.exc

from raksha import flags
from raksha.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Controls whether a malformed exception format string (a message whose
# %-placeholders do not match the kwargs passed to RakshaException) aborts
# with the original error, or is merely logged (the default).
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='make exception message format errors fatal'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI HTTP error whose status/title/explanation are set explicitly.

    Presumably used to turn internal Raksha exceptions into HTTP error
    responses — TODO confirm against callers (not visible here).
    """

    def __init__(self, code=0, title="", explanation=""):
        # Assign before calling the base constructor so webob renders the
        # response from these values.
        self.code = code
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
    """Raised when an external command fails.

    Keeps the command line, exit code and captured stdout/stderr as
    attributes (possibly None); the formatted message substitutes
    human-readable defaults.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        # Defaults below are applied only to the local variables used for
        # message formatting; the attributes above keep the raw values.
        if description is None:
            description = _('Unexpected error while running command.')
        if exit_code is None:
            exit_code = '-'
        # NOTE: formatting with locals() relies on the local variable names
        # matching the %(...)s placeholders — do not rename them.
        message = _('%(description)s\nCommand: %(cmd)s\n'
                    'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % locals()
        IOError.__init__(self, message)
class Error(Exception):
    """Root of the DB-layer error hierarchy in this module."""
    pass
class DBError(Error):
    """Wraps an implementation specific exception."""

    def __init__(self, inner_exception=None):
        # Keep the original exception so callers can inspect the cause.
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))
def wrap_db_error(f):
    """Decorator translating database-layer errors into module exceptions.

    UnicodeEncodeError is surfaced as InvalidUnicodeParameter (the backend
    cannot store the parameter); any other exception is logged and re-raised
    wrapped in DBError so callers handle a single exception type.
    """
    # functools.wraps copies __name__ (func_name in Py2), __doc__ and
    # __module__, which the previous manual `func_name` copy did not.
    @functools.wraps(f)
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UnicodeEncodeError:
            raise InvalidUnicodeParameter()
        except Exception as e:
            LOG.exception(_('DB exception wrapped.'))
            raise DBError(e)
    return _wrap
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    def inner(f):
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(exception=e)
                        call_dict = safe_utils.getcallargs(f, *args, **kw)
                        cleansed = _cleanse_dict(call_dict)
                        payload.update({'args': cleansed})

                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR

                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__

                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)

        # BUG FIX: inner previously fell off the end without returning the
        # wrapper, so every decorated method was silently replaced by None.
        # functools.wraps also propagates f's name/docstring.
        return functools.wraps(f)(wrapped)
    return inner
class RakshaException(Exception):
    """Base Raksha Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    # Default HTTP status code used when the exception is converted to a
    # web response; subclasses override it (e.g. NotFound -> 404).
    code = 500
    # Extra HTTP headers for converted responses.
    headers = {}
    # Whether the formatted message is safe to expose to end users.
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        # Make the (class-level) code available to the format kwargs, e.g.
        # for messages containing %(code)s.
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                if FLAGS.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message

        super(RakshaException, self).__init__(message)
class GlanceConnectionFailed(RakshaException):
    message = _("Connection to glance failed") + ": %(reason)s"


# -- Authorization errors (HTTP 403) ----------------------------------------

class NotAuthorized(RakshaException):
    message = _("Not authorized.")
    code = 403


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


class ImageNotAuthorized(RakshaException):
    message = _("Not authorized for image %(image_id)s.")


# -- Bad-input errors (HTTP 400) --------------------------------------------

class Invalid(RakshaException):
    message = _("Unacceptable parameters.")
    code = 400


class InvalidSnapshot(Invalid):
    message = _("Invalid snapshot") + ": %(reason)s"


class VolumeAttached(Invalid):
    message = _("Volume %(volume_id)s is still attached, detach volume first.")


class SfJsonEncodeFailure(RakshaException):
    message = _("Failed to load data into json format")


class InvalidRequest(Invalid):
    message = _("The request is invalid.")


class InvalidResults(Invalid):
    message = _("The results are invalid.")


class InvalidInput(Invalid):
    message = _("Invalid input received") + ": %(reason)s"


class InvalidVolumeType(Invalid):
    message = _("Invalid volume type") + ": %(reason)s"


class InvalidVolume(Invalid):
    message = _("Invalid volume") + ": %(reason)s"


class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")


class InvalidUnicodeParameter(Invalid):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class ServiceUnavailable(Invalid):
    message = _("Service is unavailable at this time.")


class ImageUnacceptable(Invalid):
    message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InvalidUUID(Invalid):
    # BUG FIX: the original format string was "%(uuid)." — missing the 's'
    # conversion type — which makes %-formatting raise ValueError and the
    # message degrade to the generic "Unacceptable parameters." fallback.
    message = _("Expected a uuid but received %(uuid)s.")
class NotFound(RakshaException):
    """Base class for missing-resource errors (HTTP 404)."""
    message = _("Resource could not be found.")
    code = 404
    # 404 messages carry no sensitive detail, so they may be shown to users.
    safe = True
# -- Volume / snapshot lookup and metadata errors ----------------------------

class PersistentVolumeFileNotFound(NotFound):
    message = _("Volume %(volume_id)s persistence file could not be found.")


class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")


class SfAccountNotFound(NotFound):
    message = _("Unable to locate account %(account_name)s on "
                "Solidfire device")


class VolumeNotFoundForInstance(VolumeNotFound):
    message = _("Volume not found for instance %(instance_id)s.")


class VolumeMetadataNotFound(NotFound):
    message = _("Volume %(volume_id)s has no metadata with "
                "key %(metadata_key)s.")


class InvalidVolumeMetadata(Invalid):
    message = _("Invalid metadata") + ": %(reason)s"


class InvalidVolumeMetadataSize(Invalid):
    message = _("Invalid metadata size") + ": %(reason)s"


class SnapshotMetadataNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s has no metadata with "
                "key %(metadata_key)s.")


class InvalidSnapshotMetadata(Invalid):
    message = _("Invalid metadata") + ": %(reason)s"


class InvalidSnapshotMetadataSize(Invalid):
    message = _("Invalid metadata size") + ": %(reason)s"


class VolumeTypeNotFound(NotFound):
    message = _("Volume type %(volume_type_id)s could not be found.")


class VolumeTypeNotFoundByName(VolumeTypeNotFound):
    message = _("Volume type with name %(volume_type_name)s "
                "could not be found.")


class VolumeTypeExtraSpecsNotFound(NotFound):
    message = _("Volume Type %(volume_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")


class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")


class VolumeIsBusy(RakshaException):
    message = _("deleting volume %(volume_name)s that has snapshot")


class SnapshotIsBusy(RakshaException):
    message = _("deleting snapshot %(snapshot_name)s that has "
                "dependent volumes")


# -- iSCSI target errors ------------------------------------------------------

class ISCSITargetNotFoundForVolume(NotFound):
    message = _("No target id found for volume %(volume_id)s.")


class ISCSITargetCreateFailed(RakshaException):
    message = _("Failed to create iscsi target for volume %(volume_id)s.")


class ISCSITargetAttachFailed(RakshaException):
    message = _("Failed to attach iSCSI target for volume %(volume_id)s.")


class ISCSITargetRemoveFailed(RakshaException):
    message = _("Failed to remove iscsi target for volume %(volume_id)s.")


class DiskNotFound(NotFound):
    message = _("No disk at %(location)s")


class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")


class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")


# -- Service / host / scheduler lookup errors --------------------------------

class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")


class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")


class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")


class SchedulerHostWeigherNotFound(NotFound):
    message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")


class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")


# -- Quota errors -------------------------------------------------------------

class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")


class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class QuotaNotFound(NotFound):
    message = _("Quota could not be found")


class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")


class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")


class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")


class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")


class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")


class OverQuota(RakshaException):
    message = _("Quota exceeded for resources: %(overs)s")


class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")


class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")


class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")


class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")


class NotAllowed(RakshaException):
    message = _("Action not allowed.")
#TODO(bcwaldon): EOL this exception!
class Duplicate(RakshaException):
    """Base class for already-exists errors."""
    pass
class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")


class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(id)s already exists.")


class MigrationError(RakshaException):
    message = _("Migration error") + ": %(reason)s"


class MalformedRequestBody(RakshaException):
    message = _("Malformed message body: %(reason)s")


class ConfigNotFound(NotFound):
    message = _("Could not find config at %(path)s")


class PasteAppNotFound(NotFound):
    message = _("Could not load paste app '%(name)s' from %(path)s")


class NoValidHost(RakshaException):
    message = _("No valid host was found. %(reason)s")


class WillNotSchedule(RakshaException):
    message = _("Host %(host)s is not up or doesn't exist.")


class QuotaError(RakshaException):
    """Base class for quota-exceeded errors (HTTP 413)."""
    message = _("Quota exceeded") + ": code=%(code)s"
    code = 413
    # Advise clients they may retry; message is safe to expose.
    headers = {'Retry-After': 0}
    safe = True
class VolumeSizeExceedsAvailableQuota(QuotaError):
    message = _("Requested volume or snapshot exceeds "
                "allowed Gigabytes quota")


class VolumeSizeExceedsQuota(QuotaError):
    message = _("Maximum volume/snapshot size exceeded")


class VolumeLimitExceeded(QuotaError):
    message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")


class SnapshotLimitExceeded(QuotaError):
    message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")


# -- Backend/driver-specific errors ------------------------------------------

class DuplicateSfVolumeNames(Duplicate):
    message = _("Detected more than one volume with name %(vol_name)s")


class Duplicate3PARHost(RakshaException):
    message = _("3PAR Host already exists: %(err)s. %(info)s")


class Invalid3PARDomain(RakshaException):
    message = _("Invalid 3PAR Domain: %(err)s")


class VolumeTypeCreateFailed(RakshaException):
    message = _("Cannot create volume_type with "
                "name %(name)s and specs %(extra_specs)s")


class SolidFireAPIException(RakshaException):
    message = _("Bad response from SolidFire API")


class SolidFireAPIDataException(SolidFireAPIException):
    message = _("Error in SolidFire API response: data=%(data)s")


class UnknownCmd(Invalid):
    message = _("Unknown or unsupported command %(cmd)s")


class MalformedResponse(Invalid):
    message = _("Malformed response to command %(cmd)s: %(reason)s")


class BadHTTPResponseStatus(RakshaException):
    message = _("Bad HTTP response status %(status)s")


class FailedCmdWithDump(RakshaException):
    message = _("Operation failed with status=%(status)s. Full dump: %(data)s")


class ZadaraServerCreateFailure(RakshaException):
    message = _("Unable to create server object for initiator %(name)s")


class ZadaraServerNotFound(NotFound):
    message = _("Unable to find server object for initiator %(name)s")


class ZadaraVPSANoActiveController(RakshaException):
    message = _("Unable to find any active VPSA controller")


class ZadaraAttachmentsNotFound(NotFound):
    message = _("Failed to retrieve attachments for volume %(name)s")


class ZadaraInvalidAttachmentInfo(Invalid):
    message = _("Invalid attachment info for volume %(name)s: %(reason)s")


class InstanceNotFound(NotFound):
    message = _("Instance %(instance_id)s could not be found.")


class VolumeBackendAPIException(RakshaException):
    message = _("Bad or unexpected response from the storage volume "
                "backend API: %(data)s")


class NfsException(RakshaException):
    message = _("Unknown NFS exception")


class NfsNoSharesMounted(NotFound):
    message = _("No mounted NFS shares found")


class NfsNoSuitableShareFound(NotFound):
    message = _("There is no share which can host %(volume_size)sG")


class GlusterfsException(RakshaException):
    message = _("Unknown Gluster exception")


class GlusterfsNoSharesMounted(NotFound):
    message = _("No mounted Gluster shares found")


class GlusterfsNoSuitableShareFound(NotFound):
    message = _("There is no share which can host %(volume_size)sG")


class GlanceMetadataExists(Invalid):
    message = _("Glance metadata cannot be updated, key %(key)s"
                " exists for volume id %(volume_id)s")


class ImageCopyFailure(Invalid):
    message = _("Failed to copy image to volume")


# -- Backup-job errors --------------------------------------------------------

class BackupJobNotFound(NotFound):
    message = _("BackupJob %(backupjob_id)s could not be found.")


class BackupJobRunNotFound(NotFound):
    message = _("BackupJobRun %(backupjobrun_id)s could not be found.")


class SwiftObjectNotFound(NotFound):
    message = _("SwiftObject %(object_id)s could not be found.")


class InvalidBackupJob(Invalid):
    message = _("Invalid backupjob: %(reason)s")


class SwiftConnectionFailed(RakshaException):
    message = _("Connection to swift failed") + ": %(reason)s"


class VMsofBackupJobNotFound(NotFound):
    message = _("VMs for BackupJob %(backupjob_id)s could not be found.")
# BUG FIX: these classes declared no base class at all, so they were plain
# (old-style, under Python 2) classes rather than RakshaExceptions: raising
# them would bypass message %-formatting, the 404 `code`, and the `safe`
# flag.  They follow the same not-found pattern as VMsofBackupJobNotFound
# above, so they inherit from NotFound.

class VMsOfBackupJobRunNotFound(NotFound):
    message = _("VMs for BackupJobRun %(backupjobrun_id)s could not be found.")


class VMRecentBackupJobRunNotFound(NotFound):
    message = _("Recent successful BackupJobRun for VM %(vm_id)s could not be found.")


class BackupJobRunVMResourcesNotFound(NotFound):
    message = _("BackupJobRunVMResources of VM %(vm_id)s BackupJobRun %(backupjobrun_id)s could not be found.")


class BackupJobRunVMResourcesWithNameNotFound(NotFound):
    message = _("BackupJobRunVMResource of VM %(vm_id)s BackupJobRun %(backupjobrun_id)s Resource %(resource_name)s could not be found.")


class BackupJobRunVMResourcesWithIdNotFound(NotFound):
    message = _("BackupJobRunVMResource with Id %(id)s could not be found.")


class VaultServiceNotFound(NotFound):
    message = _("Vault Service %(id)s could not be found.")


class VMResourceBackupsNotFound(NotFound):
    message = _("VM Resource backups for backupjobrun_vm_resource_id %(backupjobrun_vm_resource_id)s could not be found.")
| |
# Generated from T.g4 by ANTLR 4.7.1
from antlr4 import *
from io import StringIO
from typing import TextIO
import sys
def serializedATN():
    """Return the serialized ATN (lexer state machine) for TLexer.

    ANTLR-generated data (from T.g4, ANTLR 4.7.1): do not edit by hand —
    regenerate from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2+")
        buf.write("\u014f\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
        buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
        buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
        buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
        buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
        buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
        buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\3\2\3\2\3\2\3\2")
        buf.write("\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3")
        buf.write("\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6")
        buf.write("\3\6\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\n\3")
        buf.write("\n\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3")
        buf.write("\r\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17")
        buf.write("\3\17\3\17\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\22\3\22")
        buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
        buf.write("\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26")
        buf.write("\3\26\3\27\3\27\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32")
        buf.write("\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33")
        buf.write("\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\36")
        buf.write("\3\36\5\36\u00e0\n\36\3\37\3\37\3\37\7\37\u00e5\n\37\f")
        buf.write("\37\16\37\u00e8\13\37\3\37\3\37\3 \3 \3 \7 \u00ef\n \f")
        buf.write(" \16 \u00f2\13 \3 \3 \3!\3!\7!\u00f8\n!\f!\16!\u00fb\13")
        buf.write("!\3\"\5\"\u00fe\n\"\3\"\6\"\u0101\n\"\r\"\16\"\u0102\3")
        buf.write("#\3#\3#\3#\6#\u0109\n#\r#\16#\u010a\3$\3$\3$\7$\u0110")
        buf.write("\n$\f$\16$\u0113\13$\3%\3%\7%\u0117\n%\f%\16%\u011a\13")
        buf.write("%\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\7\'\u0125\n\'\f\'\16")
        buf.write("\'\u0128\13\'\3\'\3\'\3\'\3(\6(\u012e\n(\r(\16(\u012f")
        buf.write("\3(\3(\3)\3)\3)\3)\7)\u0138\n)\f)\16)\u013b\13)\3)\3)")
        buf.write("\3*\3*\3*\3*\7*\u0143\n*\f*\16*\u0146\13*\3*\3*\3*\3*")
        buf.write("\3*\3+\3+\3+\4\u0126\u0144\2,\3\3\5\4\7\5\t\6\13\7\r\b")
        buf.write("\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22")
        buf.write("#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\35")
        buf.write("9\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U\2\3\2\f\4\2$$^^\4")
        buf.write("\2))^^\4\2\f\f\17\17\4\2--//\5\2\62;CHch\5\2C\\aac|\7")
        buf.write("\2\60\60\62;C\\aac|\3\2\62;\5\2\13\f\17\17\"\"\n\2$$\61")
        buf.write("\61^^ddhhppttvv\2\u015c\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3")
        buf.write("\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2")
        buf.write("\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2")
        buf.write("\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2")
        buf.write("!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2")
        buf.write("\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3")
        buf.write("\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2")
        buf.write("\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2")
        buf.write("\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2")
        buf.write("\2\2\2Q\3\2\2\2\2S\3\2\2\2\3W\3\2\2\2\5^\3\2\2\2\7`\3")
        buf.write("\2\2\2\tg\3\2\2\2\13q\3\2\2\2\ry\3\2\2\2\17{\3\2\2\2\21")
        buf.write("}\3\2\2\2\23\u0082\3\2\2\2\25\u0084\3\2\2\2\27\u0086\3")
        buf.write("\2\2\2\31\u008c\3\2\2\2\33\u0093\3\2\2\2\35\u0095\3\2")
        buf.write("\2\2\37\u009e\3\2\2\2!\u00a0\3\2\2\2#\u00a5\3\2\2\2%\u00a9")
        buf.write("\3\2\2\2\'\u00ae\3\2\2\2)\u00b5\3\2\2\2+\u00b9\3\2\2\2")
        buf.write("-\u00be\3\2\2\2/\u00c0\3\2\2\2\61\u00c2\3\2\2\2\63\u00c6")
        buf.write("\3\2\2\2\65\u00cc\3\2\2\2\67\u00d3\3\2\2\29\u00d8\3\2")
        buf.write("\2\2;\u00df\3\2\2\2=\u00e1\3\2\2\2?\u00eb\3\2\2\2A\u00f5")
        buf.write("\3\2\2\2C\u00fd\3\2\2\2E\u0104\3\2\2\2G\u010c\3\2\2\2")
        buf.write("I\u0114\3\2\2\2K\u011b\3\2\2\2M\u011f\3\2\2\2O\u012d\3")
        buf.write("\2\2\2Q\u0133\3\2\2\2S\u013e\3\2\2\2U\u014c\3\2\2\2WX")
        buf.write("\7k\2\2XY\7o\2\2YZ\7r\2\2Z[\7q\2\2[\\\7t\2\2\\]\7v\2\2")
        buf.write("]\4\3\2\2\2^_\7=\2\2_\6\3\2\2\2`a\7o\2\2ab\7q\2\2bc\7")
        buf.write("f\2\2cd\7w\2\2de\7n\2\2ef\7g\2\2f\b\3\2\2\2gh\7k\2\2h")
        buf.write("i\7p\2\2ij\7v\2\2jk\7g\2\2kl\7t\2\2lm\7h\2\2mn\7c\2\2")
        buf.write("no\7e\2\2op\7g\2\2p\n\3\2\2\2qr\7g\2\2rs\7z\2\2st\7v\2")
        buf.write("\2tu\7g\2\2uv\7p\2\2vw\7f\2\2wx\7u\2\2x\f\3\2\2\2yz\7")
        buf.write("}\2\2z\16\3\2\2\2{|\7\177\2\2|\20\3\2\2\2}~\7x\2\2~\177")
        buf.write("\7q\2\2\177\u0080\7k\2\2\u0080\u0081\7f\2\2\u0081\22\3")
        buf.write("\2\2\2\u0082\u0083\7*\2\2\u0083\24\3\2\2\2\u0084\u0085")
        buf.write("\7+\2\2\u0085\26\3\2\2\2\u0086\u0087\7e\2\2\u0087\u0088")
        buf.write("\7q\2\2\u0088\u0089\7p\2\2\u0089\u008a\7u\2\2\u008a\u008b")
        buf.write("\7v\2\2\u008b\30\3\2\2\2\u008c\u008d\7u\2\2\u008d\u008e")
        buf.write("\7k\2\2\u008e\u008f\7i\2\2\u008f\u0090\7p\2\2\u0090\u0091")
        buf.write("\7c\2\2\u0091\u0092\7n\2\2\u0092\32\3\2\2\2\u0093\u0094")
        buf.write("\7?\2\2\u0094\34\3\2\2\2\u0095\u0096\7t\2\2\u0096\u0097")
        buf.write("\7g\2\2\u0097\u0098\7c\2\2\u0098\u0099\7f\2\2\u0099\u009a")
        buf.write("\7q\2\2\u009a\u009b\7p\2\2\u009b\u009c\7n\2\2\u009c\u009d")
        buf.write("\7{\2\2\u009d\36\3\2\2\2\u009e\u009f\7.\2\2\u009f \3\2")
        buf.write("\2\2\u00a0\u00a1\7d\2\2\u00a1\u00a2\7q\2\2\u00a2\u00a3")
        buf.write("\7q\2\2\u00a3\u00a4\7n\2\2\u00a4\"\3\2\2\2\u00a5\u00a6")
        buf.write("\7k\2\2\u00a6\u00a7\7p\2\2\u00a7\u00a8\7v\2\2\u00a8$\3")
        buf.write("\2\2\2\u00a9\u00aa\7t\2\2\u00aa\u00ab\7g\2\2\u00ab\u00ac")
        buf.write("\7c\2\2\u00ac\u00ad\7n\2\2\u00ad&\3\2\2\2\u00ae\u00af")
        buf.write("\7u\2\2\u00af\u00b0\7v\2\2\u00b0\u00b1\7t\2\2\u00b1\u00b2")
        buf.write("\7k\2\2\u00b2\u00b3\7p\2\2\u00b3\u00b4\7i\2\2\u00b4(\3")
        buf.write("\2\2\2\u00b5\u00b6\7x\2\2\u00b6\u00b7\7c\2\2\u00b7\u00b8")
        buf.write("\7t\2\2\u00b8*\3\2\2\2\u00b9\u00ba\7n\2\2\u00ba\u00bb")
        buf.write("\7k\2\2\u00bb\u00bc\7u\2\2\u00bc\u00bd\7v\2\2\u00bd,\3")
        buf.write("\2\2\2\u00be\u00bf\7>\2\2\u00bf.\3\2\2\2\u00c0\u00c1\7")
        buf.write("@\2\2\u00c1\60\3\2\2\2\u00c2\u00c3\7o\2\2\u00c3\u00c4")
        buf.write("\7c\2\2\u00c4\u00c5\7r\2\2\u00c5\62\3\2\2\2\u00c6\u00c7")
        buf.write("\7o\2\2\u00c7\u00c8\7q\2\2\u00c8\u00c9\7f\2\2\u00c9\u00ca")
        buf.write("\7g\2\2\u00ca\u00cb\7n\2\2\u00cb\64\3\2\2\2\u00cc\u00cd")
        buf.write("\7u\2\2\u00cd\u00ce\7v\2\2\u00ce\u00cf\7t\2\2\u00cf\u00d0")
        buf.write("\7w\2\2\u00d0\u00d1\7e\2\2\u00d1\u00d2\7v\2\2\u00d2\66")
        buf.write("\3\2\2\2\u00d3\u00d4\7g\2\2\u00d4\u00d5\7p\2\2\u00d5\u00d6")
        buf.write("\7w\2\2\u00d6\u00d7\7o\2\2\u00d78\3\2\2\2\u00d8\u00d9")
        buf.write("\7h\2\2\u00d9\u00da\7n\2\2\u00da\u00db\7c\2\2\u00db\u00dc")
        buf.write("\7i\2\2\u00dc:\3\2\2\2\u00dd\u00e0\5=\37\2\u00de\u00e0")
        buf.write("\5? \2\u00df\u00dd\3\2\2\2\u00df\u00de\3\2\2\2\u00e0<")
        buf.write("\3\2\2\2\u00e1\u00e6\7$\2\2\u00e2\u00e5\5U+\2\u00e3\u00e5")
        buf.write("\n\2\2\2\u00e4\u00e2\3\2\2\2\u00e4\u00e3\3\2\2\2\u00e5")
        buf.write("\u00e8\3\2\2\2\u00e6\u00e4\3\2\2\2\u00e6\u00e7\3\2\2\2")
        buf.write("\u00e7\u00e9\3\2\2\2\u00e8\u00e6\3\2\2\2\u00e9\u00ea\7")
        buf.write("$\2\2\u00ea>\3\2\2\2\u00eb\u00f0\7)\2\2\u00ec\u00ef\5")
        buf.write("U+\2\u00ed\u00ef\n\3\2\2\u00ee\u00ec\3\2\2\2\u00ee\u00ed")
        buf.write("\3\2\2\2\u00ef\u00f2\3\2\2\2\u00f0\u00ee\3\2\2\2\u00f0")
        buf.write("\u00f1\3\2\2\2\u00f1\u00f3\3\2\2\2\u00f2\u00f0\3\2\2\2")
        buf.write("\u00f3\u00f4\7)\2\2\u00f4@\3\2\2\2\u00f5\u00f9\7B\2\2")
        buf.write("\u00f6\u00f8\n\4\2\2\u00f7\u00f6\3\2\2\2\u00f8\u00fb\3")
        buf.write("\2\2\2\u00f9\u00f7\3\2\2\2\u00f9\u00fa\3\2\2\2\u00faB")
        buf.write("\3\2\2\2\u00fb\u00f9\3\2\2\2\u00fc\u00fe\t\5\2\2\u00fd")
        buf.write("\u00fc\3\2\2\2\u00fd\u00fe\3\2\2\2\u00fe\u0100\3\2\2\2")
        buf.write("\u00ff\u0101\4\62;\2\u0100\u00ff\3\2\2\2\u0101\u0102\3")
        buf.write("\2\2\2\u0102\u0100\3\2\2\2\u0102\u0103\3\2\2\2\u0103D")
        buf.write("\3\2\2\2\u0104\u0105\7\62\2\2\u0105\u0106\7z\2\2\u0106")
        buf.write("\u0108\3\2\2\2\u0107\u0109\t\6\2\2\u0108\u0107\3\2\2\2")
        buf.write("\u0109\u010a\3\2\2\2\u010a\u0108\3\2\2\2\u010a\u010b\3")
        buf.write("\2\2\2\u010bF\3\2\2\2\u010c\u010d\7B\2\2\u010d\u0111\t")
        buf.write("\7\2\2\u010e\u0110\t\b\2\2\u010f\u010e\3\2\2\2\u0110\u0113")
        buf.write("\3\2\2\2\u0111\u010f\3\2\2\2\u0111\u0112\3\2\2\2\u0112")
        buf.write("H\3\2\2\2\u0113\u0111\3\2\2\2\u0114\u0118\t\7\2\2\u0115")
        buf.write("\u0117\t\b\2\2\u0116\u0115\3\2\2\2\u0117\u011a\3\2\2\2")
        buf.write("\u0118\u0116\3\2\2\2\u0118\u0119\3\2\2\2\u0119J\3\2\2")
        buf.write("\2\u011a\u0118\3\2\2\2\u011b\u011c\t\t\2\2\u011c\u011d")
        buf.write("\7\60\2\2\u011d\u011e\t\t\2\2\u011eL\3\2\2\2\u011f\u0120")
        buf.write("\7\61\2\2\u0120\u0121\7,\2\2\u0121\u0122\7,\2\2\u0122")
        buf.write("\u0126\3\2\2\2\u0123\u0125\13\2\2\2\u0124\u0123\3\2\2")
        buf.write("\2\u0125\u0128\3\2\2\2\u0126\u0127\3\2\2\2\u0126\u0124")
        buf.write("\3\2\2\2\u0127\u0129\3\2\2\2\u0128\u0126\3\2\2\2\u0129")
        buf.write("\u012a\7,\2\2\u012a\u012b\7\61\2\2\u012bN\3\2\2\2\u012c")
        buf.write("\u012e\t\n\2\2\u012d\u012c\3\2\2\2\u012e\u012f\3\2\2\2")
        buf.write("\u012f\u012d\3\2\2\2\u012f\u0130\3\2\2\2\u0130\u0131\3")
        buf.write("\2\2\2\u0131\u0132\b(\2\2\u0132P\3\2\2\2\u0133\u0134\7")
        buf.write("\61\2\2\u0134\u0135\7\61\2\2\u0135\u0139\3\2\2\2\u0136")
        buf.write("\u0138\n\4\2\2\u0137\u0136\3\2\2\2\u0138\u013b\3\2\2\2")
        buf.write("\u0139\u0137\3\2\2\2\u0139\u013a\3\2\2\2\u013a\u013c\3")
        buf.write("\2\2\2\u013b\u0139\3\2\2\2\u013c\u013d\b)\2\2\u013dR\3")
        buf.write("\2\2\2\u013e\u013f\7\61\2\2\u013f\u0140\7,\2\2\u0140\u0144")
        buf.write("\3\2\2\2\u0141\u0143\13\2\2\2\u0142\u0141\3\2\2\2\u0143")
        buf.write("\u0146\3\2\2\2\u0144\u0145\3\2\2\2\u0144\u0142\3\2\2\2")
        buf.write("\u0145\u0147\3\2\2\2\u0146\u0144\3\2\2\2\u0147\u0148\7")
        buf.write(",\2\2\u0148\u0149\7\61\2\2\u0149\u014a\3\2\2\2\u014a\u014b")
        buf.write("\b*\2\2\u014bT\3\2\2\2\u014c\u014d\7^\2\2\u014d\u014e")
        buf.write("\t\13\2\2\u014eV\3\2\2\2\22\2\u00df\u00e4\u00e6\u00ee")
        buf.write("\u00f0\u00f9\u00fd\u0102\u010a\u0111\u0118\u0126\u012f")
        buf.write("\u0139\u0144\3\b\2\2")
        return buf.getvalue()
class TLexer(Lexer):
    """ANTLR-generated lexer for the T grammar (generated from T.g4).

    Do not edit by hand; regenerate from the grammar with ANTLR instead.
    """

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants — one per grammar literal/lexer rule.
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    T__8 = 9
    T__9 = 10
    T__10 = 11
    T__11 = 12
    T__12 = 13
    T__13 = 14
    T__14 = 15
    T__15 = 16
    T__16 = 17
    T__17 = 18
    T__18 = 19
    T__19 = 20
    T__20 = 21
    T__21 = 22
    T__22 = 23
    T__23 = 24
    T__24 = 25
    T__25 = 26
    T__26 = 27
    T__27 = 28
    STRING = 29
    DOUBLE_STRING = 30
    SINGLE_STRING = 31
    TAGLINE = 32
    INTCONSTANT = 33
    HEXCONSTANT = 34
    TAGIDENTIFIER = 35
    IDENTIFIER = 36
    VERSION = 37
    DOCCOMMENT = 38
    WHITESPACE = 39
    COMMENT = 40
    MULTICOMM = 41

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'import'", "';'", "'module'", "'interface'", "'extends'", "'{'",
            "'}'", "'void'", "'('", "')'", "'const'", "'signal'", "'='",
            "'readonly'", "','", "'bool'", "'int'", "'real'", "'string'",
            "'var'", "'list'", "'<'", "'>'", "'map'", "'model'", "'struct'",
            "'enum'", "'flag'" ]

    symbolicNames = [ "<INVALID>",
            "STRING", "DOUBLE_STRING", "SINGLE_STRING", "TAGLINE", "INTCONSTANT",
            "HEXCONSTANT", "TAGIDENTIFIER", "IDENTIFIER", "VERSION", "DOCCOMMENT",
            "WHITESPACE", "COMMENT", "MULTICOMM" ]

    ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
                  "T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
                  "T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
                  "T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
                  "T__26", "T__27", "STRING", "DOUBLE_STRING", "SINGLE_STRING",
                  "TAGLINE", "INTCONSTANT", "HEXCONSTANT", "TAGIDENTIFIER",
                  "IDENTIFIER", "VERSION", "DOCCOMMENT", "WHITESPACE", "COMMENT",
                  "MULTICOMM", "ESC" ]

    grammarFileName = "T.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        # Disable the version check for now as although there is a mismatch the Lexer seems to work fine.
        # Rely on the weekly CI to make sure this keeps working also with later antlr versions.
        # self.checkVersion("4.7.1")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Trains (a partial) implementation of the DrQa Document Reader from:
Danqi Chen, Adam Fisch, Jason Weston, Antoine Bordes. 2017.
Reading Wikipedia to Answer Open-Domain Questions.
In Association for Computational Linguistics (ACL).
Link: https://arxiv.org/abs/1704.00051
Note:
To use pretrained word embeddings, set the --embeddings_file path argument.
GloVe is recommended, see http://nlp.stanford.edu/data/glove.840B.300d.zip.
"""
try:
import torch
except ModuleNotFoundError:
raise ModuleNotFoundError('Need to install pytorch: go to pytorch.org')
import numpy as np
import logging
import copy
import sys
import random
import os.path
import pickle
from parlai.agents.drqa.agents import SimpleDictionaryAgent
from parlai.agents.drqa.agents import SimpleCharDictionaryAgent
from parlai.agents.drqa.agents import DocReaderAgent
from parlai.agents.drqa.utils import Timer
from parlai.core.worlds import DialogPartnerWorld
from parlai.core.params import ParlaiParser
from parlai.core.worlds import create_task
import pdb
import gc # garbage collector
def build_dict(opt):
    """Build the word dictionary from the (ordered) training set.

    If opt['vocab_size'] > 0 the dictionary is truncated to that many
    entries; otherwise the full vocabulary is kept.  As a side effect,
    opt['vocab_size'] is updated in place to len(dictionary) + 1
    (presumably reserving one extra index — TODO confirm against the model).

    Returns:
        The populated SimpleDictionaryAgent.
    """
    opt = copy.deepcopy(opt)
    opt['batchsize'] = 1
    dictionary = SimpleDictionaryAgent(opt)
    # We use the train set to build the dictionary.
    logger.info('[ Building word dictionary... ]')
    opt['datatype'] = 'train:ordered'
    world = create_task(opt, dictionary)
    for _ in world:
        world.parley()
    if opt['vocab_size'] > 0:
        dictionary.sort_and_keep(opt['vocab_size'])
        # BUG FIX: the old message claimed "(full size)" even when the
        # vocabulary was just truncated.
        logger.info('[ Dictionary built (truncated). ]')
    else:
        dictionary.sort()
        logger.info('[ Dictionary built (full size). ]')
    opt['vocab_size'] = len(dictionary) + 1
    logger.info('[ Num words = %d ]' % len(dictionary))
    return dictionary
def build_dict_char(opt):
    """Build the character dictionary (capped at opt['vocab_size_char'])."""
    opt = copy.deepcopy(opt)
    opt['batchsize'] = 1
    char_dict = SimpleCharDictionaryAgent(opt)
    # The dictionary is populated by streaming the ordered training set.
    logger.info('[ Building character dictionary... ]')
    opt['datatype'] = 'train:ordered'
    world = create_task(opt, char_dict)
    for _ in world:
        world.parley()
    char_dict.sort_and_keep(opt['vocab_size_char'], True)
    logger.info('[ Dictionary built. ]')
    logger.info('[ Num chars = %d ]' % len(char_dict))
    return char_dict
def validate(opt, agent, n_iter):
    """Run one full pass over the validation set and report metrics.

    Args:
        opt: ParlAI option dict ('datatype' is overridden on a deep copy).
        agent: The agent to evaluate.
        n_iter: Current training iteration, used only for log messages.

    Returns:
        The metric named by opt['valid_metric'] from the world's report.
    """
    eval_opt = copy.deepcopy(opt)
    eval_opt['datatype'] = 'valid'
    world = create_task(eval_opt, agent)
    logger.info('[ Running validation... ]')
    timer = Timer()
    for _ in world:
        world.parley()
    metrics = world.report()
    if 'tasks' not in metrics:
        # Single-task run: one summary line.
        logger.info(
            '[valid] iter = %d | EM = %.4f | F1 = %.4f | exs = %d' %
            (n_iter, metrics['accuracy'], metrics['f1'], metrics['total']))
    else:
        # Multi-task run: one line per task, then the overall score.
        for task, t_metrics in metrics['tasks'].items():
            logger.info(
                '[valid] task = %s | iter = %d | exs = %d | '
                'EM = %.4f | F1 = %.4f' %
                (task, n_iter, t_metrics['total'],
                 t_metrics['accuracy'], t_metrics['f1']))
        logger.info('[valid] iter = %d | overall EM = %.4f | exs = %d' %
                    (n_iter, metrics['accuracy'], metrics['total']))
    logger.info('[ Done. Time = %.2f (s) ]' % timer.time())
    return metrics[eval_opt['valid_metric']]
def main(opt):
    """Train a DocReader agent with periodic validation and early stopping.

    Builds (or loads from an on-disk pickle cache) the word, generator and
    optional character dictionaries, constructs the reader and training
    world, then alternates opt['train_interval'] training updates with a
    validation pass until opt['patience'] validation rounds pass without
    improvement (or learning-rate decay bottoms out).

    Args:
        opt: Fully-populated ParlAI option dict (mutated in place: e.g.
            'datatype', 'kernels', 'embedding_dim_TDNN', 'learning_rate').
    """
    # --- Word dictionary, cached on disk keyed by vocabulary size. ---
    word_dict_file = "data/MSmarco/dict.word." + str(opt['vocab_size']) + ".pkl"
    if os.path.isfile(word_dict_file):
        with open(word_dict_file, "rb") as f:
            dictionary = pickle.load(f)
        logger.info('successfully load word dictionary')
    else:
        if 'pretrained_model' in opt:
            # The pretrained model carries its own dictionary.
            dictionary = None
        else:
            dictionary = build_dict(opt)
            # Only cache when we actually built one (never pickle None).
            with open(word_dict_file, "wb") as f:
                pickle.dump(dictionary, f)

    # --- Generator dictionary, cached under its own vocabulary size. ---
    # BUG FIX: this used to dump the *word* dictionary under the
    # non-existent key opt['vocab_size_gen']; it now dumps dictionary_gen
    # under the same 'vocab_size_generator' path used for loading.
    gen_dict_file = ("data/MSmarco/dict.word."
                     + str(opt['vocab_size_generator']) + ".pkl")
    if os.path.isfile(gen_dict_file):
        with open(gen_dict_file, "rb") as f:
            dictionary_gen = pickle.load(f)
        logger.info('successfully load word dictionary')
    else:
        dictionary_gen = build_dict(opt)
        with open(gen_dict_file, "wb") as f:
            pickle.dump(dictionary_gen, f)

    # --- Character dictionary (only when char-to-word features are on). ---
    dictionary_char = None
    if opt['add_char2word']:
        # The last character id is reserved for the NULL word marker.
        opt['NULLWORD_Idx_in_char'] = opt['vocab_size_char'] - 1
        char_dict_file = ("data/MSmarco/dict.char."
                          + str(opt['vocab_size_char']) + ".pkl")
        if os.path.isfile(char_dict_file):
            with open(char_dict_file, "rb") as f:
                dictionary_char = pickle.load(f)
            logger.info('successfully load char dictionary')
        else:
            dictionary_char = build_dict_char(opt)
            with open(char_dict_file, "wb") as f:
                pickle.dump(dictionary_char, f)

    # max_word_len is a plain hyperparameter from config; deriving it from
    # the word dictionary was abandoned (it always came out as 25).
    logger.info('maximum word len = %d' % (opt['max_word_len']))

    # Calculate the TDNN embedding dim (sum of feature-map counts across
    # the char-CNN kernels) after parsing the kernel spec.
    opt['kernels'] = ''.join(opt['kernels'])
    if isinstance(opt['kernels'], str):
        # SECURITY NOTE: eval() of a config-supplied string ("[(1, 25),
        # ...]"). Acceptable for trusted local configs only; never feed
        # untrusted input through this option.
        opt['kernels'] = eval(opt['kernels'])
    opt['embedding_dim_TDNN'] = sum(kernel[1] for kernel in opt['kernels'])
    logger.info('TDNN embedding dim = %d' % (opt['embedding_dim_TDNN']))

    # Build document reader.
    doc_reader = DocReaderAgent(opt, word_dict=dictionary,
                                gen_dict=dictionary_gen,
                                char_dict=dictionary_char)
    # Log params.
    logger.info('[ Created with options: ] %s' %
                ''.join(['\n{}\t{}'.format(k, v)
                         for k, v in doc_reader.opt.items()]))

    # Build training world once.
    opt['datatype'] = 'train'
    train_world = create_task(opt, doc_reader)
    train_time = Timer()

    # Keep track of best model + how long since the last improvement.
    best_valid = 0
    impatience = 0
    lrate_decay = 0
    logger.info("[ Ok, let's go... ]")
    iteration = 0
    while impatience < opt['patience']:
        # Train...
        logger.info('[ Training for %d iters... ]' % opt['train_interval'])
        train_time.reset()
        for n_updates in range(1, opt['train_interval'] + 1):
            train_world.parley()
            # Periodic GC keeps long training runs from accumulating
            # reference cycles (tensors held by parley bookkeeping).
            if n_updates % 100 == 0:
                gc.collect()
        # ...validate!
        print('start validation')
        valid_metric = validate(opt, doc_reader, iteration)
        if valid_metric > best_valid:
            logger.info(
                '[ Best eval %d: %s = %.4f (old = %.4f) ]' %
                (iteration, opt['valid_metric'], valid_metric, best_valid)
            )
            best_valid = valid_metric
            impatience = 0
            if 'model_file' in opt:
                doc_reader.save(opt['model_file'])
            if valid_metric == 1:
                logger.info('[ Task solved! Stopping. ]')
                break
        else:
            if opt['lrate_decay']:
                # No improvement: decay the learning rate instead of
                # counting impatience; give up after 10 decays.
                opt['learning_rate'] *= opt['lrate_decay_factor']
                doc_reader.model.set_lrate(opt['learning_rate'])
                logger.info('[ Decrease learning_rate %.2e]'
                            % opt['learning_rate'])
                lrate_decay += 1
                if lrate_decay > 10:
                    break
            else:
                impatience += 1
                logger.info('[ Increase impatience %d ]' % impatience)
        iteration += 1
    logger.info('[ >> Best eval : %s = %.4f ]'
                % (opt['valid_metric'], best_valid))
if __name__ == '__main__':
    # Get command line arguments.
    argparser = ParlaiParser()
    argparser.add_arg(
        '--train_interval', type=int, default=1000,
        help='Validate after every N train updates',
    )
    argparser.add_arg(
        '--patience', type=int, default=16,
        help='Number of intervals to continue without improvement'
    )
    SimpleDictionaryAgent.add_cmdline_args(argparser)
    DocReaderAgent.add_cmdline_args(argparser)
    # parse_args returns a dict-like options object.
    opt = argparser.parse_args()
    # Set logging: console handler always, plus an optional file handler
    # when --log_file is given. NOTE: `logger` is a module-level name that
    # build_dict / build_dict_char / validate / main all rely on.
    logger = logging.getLogger('DrQA')
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)
    if 'log_file' in opt:
        logfile = logging.FileHandler(opt['log_file'], 'w')
        logfile.setFormatter(fmt)
        logger.addHandler(logfile)
    logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv))
    # Set cuda: only use the GPU when requested and actually available.
    opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
    if opt['cuda']:
        logger.info('[ Using CUDA (GPU %d) ]' % opt['gpu'])
        torch.cuda.set_device(opt['gpu'])
    # Set random state: seed every RNG in play (numpy, stdlib random,
    # torch, and CUDA when enabled) for reproducibility.
    np.random.seed(opt['random_seed'])
    random.seed(opt['random_seed'])
    torch.manual_seed(opt['random_seed'])
    if opt['cuda']:
        torch.cuda.manual_seed(opt['random_seed'])
    # Run!
    main(opt)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.util import compat
class Dimension(object):
  """Represents the value of one dimension in a TensorShape.

  A Dimension either holds a known non-negative integer size, or is
  unknown (value None). Arithmetic propagates unknownness (anything
  combined with an unknown Dimension is unknown), and comparisons
  deliberately return None -- not False -- when either side is unknown.
  """

  def __init__(self, value):
    """Creates a new Dimension with the given value.

    Args:
      value: A non-negative int, None (unknown), or anything int() accepts.

    Raises:
      ValueError: If int(value) loses information (e.g. a non-integral
        float) or the value is negative.
    """
    if value is None:
      self._value = None
    else:
      self._value = int(value)
      # Reject values that int() silently truncated (e.g. 1.5 -> 1);
      # bytes/str inputs are exempt because int("3") != "3" by design.
      if (not isinstance(value, compat.bytes_or_text_types) and
          self._value != value):
        raise ValueError("Ambiguous dimension: %s" % value)
      if self._value < 0:
        raise ValueError("Dimension %d must be >= 0" % self._value)

  def __repr__(self):
    return "Dimension(%s)" % repr(self._value)

  def __str__(self):
    # An unknown dimension prints as "?".
    value = self._value
    return "?" if value is None else str(value)

  def __eq__(self, other):
    """Returns true if `other` has the same known value as this Dimension.

    NOTE: returns None (not False) when either value is unknown --
    three-valued logic, intentionally diverging from the usual __eq__
    contract.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return None
    return self._value == other.value

  def __ne__(self, other):
    """Returns true if `other` has a different known value from `self`.

    NOTE: returns None (not False) when either value is unknown.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return None
    return self._value != other.value

  def __int__(self):
    # NOTE(review): returns None for an unknown dimension, which makes
    # int(dim) raise TypeError on Python 3 -- confirm callers only convert
    # known dimensions.
    return self._value

  # This is needed for Windows.
  # See https://github.com/tensorflow/tensorflow/pull/9780
  def __long__(self):
    return self._value

  def __index__(self):
    # Allow use in Python 3 range
    return self._value

  @property
  def value(self):
    """The value of this dimension, or None if it is unknown."""
    return self._value

  def is_compatible_with(self, other):
    """Returns true if `other` is compatible with this Dimension.

    Two known Dimensions are compatible if they have the same value.
    An unknown Dimension is compatible with all other Dimensions.

    Args:
      other: Another Dimension.

    Returns:
      True if this Dimension and `other` are compatible.
    """
    other = as_dimension(other)
    return (self._value is None or other.value is None or
            self._value == other.value)

  def assert_is_compatible_with(self, other):
    """Raises an exception if `other` is not compatible with this Dimension.

    Args:
      other: Another Dimension.

    Raises:
      ValueError: If `self` and `other` are not compatible (see
        is_compatible_with).
    """
    if not self.is_compatible_with(other):
      raise ValueError("Dimensions %s and %s are not compatible" % (self,
                                                                    other))

  def merge_with(self, other):
    """Returns a Dimension that combines the information in `self` and `other`.

    Dimensions are combined as follows:

    ```python
    Dimension(n)   .merge_with(Dimension(n))     == Dimension(n)
    Dimension(n)   .merge_with(Dimension(None))  == Dimension(n)
    Dimension(None).merge_with(Dimension(n))     == Dimension(n)
    Dimension(None).merge_with(Dimension(None))  == Dimension(None)
    Dimension(n)   .merge_with(Dimension(m))  raises ValueError for n != m
    ```

    Args:
      other: Another Dimension.

    Returns:
      A Dimension containing the combined information of `self` and
      `other`.

    Raises:
      ValueError: If `self` and `other` are not compatible (see
        is_compatible_with).
    """
    other = as_dimension(other)
    self.assert_is_compatible_with(other)
    if self._value is None:
      # A known value on either side wins over an unknown one.
      return Dimension(other.value)
    else:
      return Dimension(self._value)

  def __add__(self, other):
    """Returns the sum of `self` and `other`.

    Dimensions are summed as follows:

      Dimension(m)    + Dimension(n)    == Dimension(m + n)
      Dimension(m)    + Dimension(None) == Dimension(None)
      Dimension(None) + Dimension(n)    == Dimension(None)
      Dimension(None) + Dimension(None) == Dimension(None)

    Args:
      other: Another Dimension.

    Returns:
      A Dimension whose value is the sum of `self` and `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value + other.value)

  def __sub__(self, other):
    """Returns the subtraction of `other` from `self`.

    Dimensions are subtracted as follows:

      Dimension(m)    - Dimension(n)    == Dimension(m - n)
      Dimension(m)    - Dimension(None) == Dimension(None)
      Dimension(None) - Dimension(n)    == Dimension(None)
      Dimension(None) - Dimension(None) == Dimension(None)

    Args:
      other: Another Dimension.

    Returns:
      A Dimension whose value is the subtraction of sum of `other` from `self`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value - other.value)

  def __mul__(self, other):
    """Returns the product of `self` and `other`.

    Dimensions are summed as follows:

    ```
      Dimension(m)    * Dimension(n)    == Dimension(m * n)
      Dimension(m)    * Dimension(None) == Dimension(None)
      Dimension(None) * Dimension(n)    == Dimension(None)
      Dimension(None) * Dimension(None) == Dimension(None)
    ```

    Args:
      other: Another Dimension.

    Returns:
      A Dimension whose value is the product of `self` and `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value * other.value)

  def __floordiv__(self, other):
    """Returns the quotient of `self` and `other` rounded down.

    Dimensions are divided as follows:

      Dimension(m)    // Dimension(n)    == Dimension(m // n)
      Dimension(m)    // Dimension(None) == Dimension(None)
      Dimension(None) // Dimension(n)    == Dimension(None)
      Dimension(None) // Dimension(None) == Dimension(None)

    Args:
      other: Another `Dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value // other.value)

  def __div__(self, other):
    """DEPRECATED: Use `__floordiv__` via `x // y` instead.

    This function exists only for backwards compatibility purposes; new code
    should use `__floordiv__` via the syntax `x // y`. Using `x // y`
    communicates clearly that the result rounds down, and is forward compatible
    to Python 3.

    Args:
      other: Another `Dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    return self // other

  def __mod__(self, other):
    """Returns `self` modulo `other.

    Dimension moduli are computed as follows:

      Dimension(m)    % Dimension(n)    == Dimension(m % n)
      Dimension(m)    % Dimension(None) == Dimension(None)
      Dimension(None) % Dimension(n)    == Dimension(None)
      Dimension(None) % Dimension(None) == Dimension(None)

    Args:
      other: Another Dimension.

    Returns:
      A Dimension whose value is `self` modulo `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value % other.value)

  def __lt__(self, other):
    """Returns True if `self` is known to be less than `other`.

    Dimensions are compared as follows:

      Dimension(m)    < Dimension(n)    == m < n
      Dimension(m)    < Dimension(None) == None
      Dimension(None) < Dimension(n)    == None
      Dimension(None) < Dimension(None) == None

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value < other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value < other.value

  def __le__(self, other):
    """Returns True if `self` is known to be less than or equal to `other`.

    Dimensions are compared as follows:

      Dimension(m)    <= Dimension(n)    == m <= n
      Dimension(m)    <= Dimension(None) == None
      Dimension(None) <= Dimension(n)    == None
      Dimension(None) <= Dimension(None) == None

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value <= other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value <= other.value

  def __gt__(self, other):
    """Returns True if `self` is known to be greater than `other`.

    Dimensions are compared as follows:

      Dimension(m)    > Dimension(n)    == m > n
      Dimension(m)    > Dimension(None) == None
      Dimension(None) > Dimension(n)    == None
      Dimension(None) > Dimension(None) == None

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value > other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value > other.value

  def __ge__(self, other):
    """Returns True if `self` is known to be greater than or equal to `other`.

    Dimensions are compared as follows:

      Dimension(m)    >= Dimension(n)    == m >= n
      Dimension(m)    >= Dimension(None) == None
      Dimension(None) >= Dimension(n)    == None
      Dimension(None) >= Dimension(None) == None

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value >= other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value >= other.value
def as_dimension(value):
  """Coerces the given value to a Dimension.

  An existing Dimension is passed through unchanged; `None` becomes an
  unknown Dimension; an integer becomes a Dimension with that value.

  Args:
    value: The value to be converted.

  Returns:
    A Dimension corresponding to the given value.
  """
  return value if isinstance(value, Dimension) else Dimension(value)
class TensorShape(object):
  """Represents the shape of a `Tensor`.

  A `TensorShape` represents a possibly-partial shape specification for a
  `Tensor`. It may be one of the following:

  * *Fully-known shape:* has a known number of dimensions and a known size
    for each dimension. e.g. `TensorShape([16, 256])`
  * *Partially-known shape:* has a known number of dimensions, and an unknown
    size for one or more dimension. e.g. `TensorShape([None, 256])`
  * *Unknown shape:* has an unknown number of dimensions, and an unknown
    size in all dimensions. e.g. `TensorShape(None)`

  If a tensor is produced by an operation of type `"Foo"`, its shape
  may be inferred if there is a registered shape function for
  `"Foo"`. See @{$adding_an_op#shape-functions-in-c$`Shape functions in C++`}
  for details of shape functions and how to register them. Alternatively,
  the shape may be set explicitly using @{tf.Tensor.set_shape}.
  """

  def __init__(self, dims):
    """Creates a new TensorShape with the given dimensions.

    Args:
      dims: A list of Dimensions, or None if the shape is unspecified.
        DEPRECATED: A single integer is treated as a singleton list.

    Raises:
      TypeError: If dims cannot be converted to a list of dimensions.
    """
    # Internal invariant: self._dims is either None (unknown rank) or a
    # list of Dimension objects.
    # TODO(irving): Eliminate the single integer special case.
    if dims is None:
      self._dims = None
    elif isinstance(dims, compat.bytes_or_text_types):
      raise TypeError("A string has ambiguous TensorShape, please wrap in a "
                      "list or convert to an int: %s" % dims)
    elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
      if dims.unknown_rank:
        self._dims = None
      else:
        self._dims = [
            # Protos store variable-size dimensions as -1
            as_dimension(dim.size if dim.size != -1 else None)
            for dim in dims.dim
        ]
    elif isinstance(dims, TensorShape):
      self._dims = dims.dims
    else:
      try:
        dims_iter = iter(dims)
      except TypeError:
        # Treat as a singleton dimension
        self._dims = [as_dimension(dims)]
      else:
        # Got a list of dimensions
        self._dims = [as_dimension(d) for d in dims_iter]

  def __repr__(self):
    return "TensorShape(%r)" % self._dims

  def __str__(self):
    if self.ndims is None:
      return "<unknown>"
    elif self.ndims == 1:
      # Keep the trailing comma so a rank-1 shape reads as a tuple.
      return "(%s,)" % self._dims[0]
    else:
      return "(%s)" % ", ".join(str(d) for d in self._dims)

  @property
  def dims(self):
    """Returns a list of Dimensions, or None if the shape is unspecified."""
    return self._dims

  @property
  def ndims(self):
    """Returns the rank of this shape, or None if it is unspecified."""
    if self._dims is None:
      return None
    else:
      return len(self._dims)

  def __len__(self):
    """Returns the rank of this shape, or raises ValueError if unspecified."""
    if self._dims is None:
      raise ValueError("Cannot take the length of Shape with unknown rank.")
    return len(self._dims)

  def __bool__(self):
    """Returns True if this shape contains non-zero information."""
    # Note: a known rank-0 (scalar) shape is truthy; only unknown rank is
    # falsy.
    return self._dims is not None

  # Python 3 wants __bool__, Python 2.7 wants __nonzero__
  __nonzero__ = __bool__

  def __iter__(self):
    """Returns `self.dims` if the rank is known, otherwise raises ValueError."""
    if self._dims is None:
      raise ValueError("Cannot iterate over a shape with unknown rank.")
    else:
      return iter(self._dims)

  def __getitem__(self, key):
    """Returns the value of a dimension or a shape, depending on the key.

    Args:
      key: If `key` is an integer, returns the dimension at that index;
        otherwise if `key` is a slice, returns a TensorShape whose
        dimensions are those selected by the slice from `self`.

    Returns:
      A dimension if `key` is an integer, or a `TensorShape` if `key` is a
      slice.

    Raises:
      ValueError: If `key` is a slice, and any of its elements are negative, or
        if `self` is completely unknown and the step is set.
    """
    if self._dims is not None:
      if isinstance(key, slice):
        return TensorShape(self._dims[key])
      else:
        return self._dims[key]
    else:
      # Unknown rank: any single dimension is unknown, and a slice can at
      # best tell us the result's rank.
      if isinstance(key, slice):
        start = key.start if key.start is not None else 0
        stop = key.stop
        if key.step is not None:
          # TODO(mrry): Handle these maybe.
          raise ValueError("Steps are not yet handled")
        if stop is None:
          # NOTE(mrry): This implies that TensorShape(None) is compatible with
          # TensorShape(None)[1:], which is obviously not true. It would be
          # possible to track the number of dimensions symbolically,
          # and perhaps we should do that.
          return unknown_shape()
        elif start < 0 or stop < 0:
          # TODO(mrry): Handle this better, as it will be useful for handling
          # suffixes of otherwise unknown shapes.
          return unknown_shape()
        else:
          return unknown_shape(ndims=stop - start)
      else:
        return Dimension(None)

  def num_elements(self):
    """Returns the total number of elements, or none for incomplete shapes."""
    if self.is_fully_defined():
      size = 1
      for dim in self._dims:
        size *= dim.value
      return size
    else:
      return None

  def merge_with(self, other):
    """Returns a `TensorShape` combining the information in `self` and `other`.

    The dimensions in `self` and `other` are merged elementwise,
    according to the rules defined for `Dimension.merge_with()`.

    Args:
      other: Another `TensorShape`.

    Returns:
      A `TensorShape` containing the combined information of `self` and
      `other`.

    Raises:
      ValueError: If `self` and `other` are not compatible.
    """
    other = as_shape(other)
    if self._dims is None:
      # Unknown rank contributes no information; `other` wins wholesale.
      return other
    else:
      try:
        self.assert_same_rank(other)
        new_dims = []
        for i, dim in enumerate(self._dims):
          new_dims.append(dim.merge_with(other[i]))
        return TensorShape(new_dims)
      except ValueError:
        raise ValueError("Shapes %s and %s are not compatible" % (self, other))

  def concatenate(self, other):
    """Returns the concatenation of the dimension in `self` and `other`.

    *N.B.* If either `self` or `other` is completely unknown,
    concatenation will discard information about the other shape. In
    future, we might support concatenation that preserves this
    information for use with slicing.

    Args:
      other: Another `TensorShape`.

    Returns:
      A `TensorShape` whose dimensions are the concatenation of the
      dimensions in `self` and `other`.
    """
    # TODO(mrry): Handle the case where we concatenate a known shape with a
    # completely unknown shape, so that we can use the partial information.
    other = as_shape(other)
    if self._dims is None or other.dims is None:
      return unknown_shape()
    else:
      return TensorShape(self._dims + other.dims)

  def assert_same_rank(self, other):
    """Raises an exception if `self` and `other` do not have compatible ranks.

    Args:
      other: Another `TensorShape`.

    Raises:
      ValueError: If `self` and `other` do not represent shapes with the
        same rank.
    """
    other = as_shape(other)
    # If either rank is unknown, any rank is acceptable.
    if self.ndims is not None and other.ndims is not None:
      if self.ndims != other.ndims:
        raise ValueError("Shapes %s and %s must have the same rank" % (self,
                                                                       other))

  def assert_has_rank(self, rank):
    """Raises an exception if `self` is not compatible with the given `rank`.

    Args:
      rank: An integer.

    Raises:
      ValueError: If `self` does not represent a shape with the given `rank`.
    """
    if self.ndims not in (None, rank):
      raise ValueError("Shape %s must have rank %d" % (self, rank))

  def with_rank(self, rank):
    """Returns a shape based on `self` with the given rank.

    This method promotes a completely unknown shape to one with a
    known rank.

    Args:
      rank: An integer.

    Returns:
      A shape that is at least as specific as `self` with the given rank.

    Raises:
      ValueError: If `self` does not represent a shape with the given `rank`.
    """
    try:
      return self.merge_with(unknown_shape(ndims=rank))
    except ValueError:
      raise ValueError("Shape %s must have rank %d" % (self, rank))

  def with_rank_at_least(self, rank):
    """Returns a shape based on `self` with at least the given rank.

    Args:
      rank: An integer.

    Returns:
      A shape that is at least as specific as `self` with at least the given
      rank.

    Raises:
      ValueError: If `self` does not represent a shape with at least the given
        `rank`.
    """
    if self.ndims is not None and self.ndims < rank:
      raise ValueError("Shape %s must have rank at least %d" % (self, rank))
    else:
      return self

  def with_rank_at_most(self, rank):
    """Returns a shape based on `self` with at most the given rank.

    Args:
      rank: An integer.

    Returns:
      A shape that is at least as specific as `self` with at most the given
      rank.

    Raises:
      ValueError: If `self` does not represent a shape with at most the given
        `rank`.
    """
    if self.ndims is not None and self.ndims > rank:
      raise ValueError("Shape %s must have rank at most %d" % (self, rank))
    else:
      return self

  def is_compatible_with(self, other):
    """Returns True iff `self` is compatible with `other`.

    Two possibly-partially-defined shapes are compatible if there
    exists a fully-defined shape that both shapes can represent. Thus,
    compatibility allows the shape inference code to reason about
    partially-defined shapes. For example:

    * TensorShape(None) is compatible with all shapes.
    * TensorShape([None, None]) is compatible with all two-dimensional
      shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
      not compatible with, for example, TensorShape([None]) or
      TensorShape([None, None, None]).
    * TensorShape([32, None]) is compatible with all two-dimensional shapes
      with size 32 in the 0th dimension, and also TensorShape([None, None])
      and TensorShape(None). It is not compatible with, for example,
      TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
    * TensorShape([32, 784]) is compatible with itself, and also
      TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
      None]) and TensorShape(None). It is not compatible with, for example,
      TensorShape([32, 1, 784]) or TensorShape([None]).

    The compatibility relation is reflexive and symmetric, but not
    transitive. For example, TensorShape([32, 784]) is compatible with
    TensorShape(None), and TensorShape(None) is compatible with
    TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
    TensorShape([4, 4]).

    Args:
      other: Another TensorShape.

    Returns:
      True iff `self` is compatible with `other`.
    """
    other = as_shape(other)
    if self._dims is not None and other.dims is not None:
      if self.ndims != other.ndims:
        return False
      for x_dim, y_dim in zip(self._dims, other.dims):
        if not x_dim.is_compatible_with(y_dim):
          return False
    # Falls through to True when either rank is unknown: an unknown shape
    # is compatible with everything.
    return True

  def assert_is_compatible_with(self, other):
    """Raises exception if `self` and `other` do not represent the same shape.

    This method can be used to assert that there exists a shape that both
    `self` and `other` represent.

    Args:
      other: Another TensorShape.

    Raises:
      ValueError: If `self` and `other` do not represent the same shape.
    """
    if not self.is_compatible_with(other):
      raise ValueError("Shapes %s and %s are incompatible" % (self, other))

  def is_fully_defined(self):
    """Returns True iff `self` is fully defined in every dimension."""
    return (self._dims is not None and all(dim.value is not None
                                           for dim in self._dims))

  def assert_is_fully_defined(self):
    """Raises an exception if `self` is not fully defined in every dimension.

    Raises:
      ValueError: If `self` does not have a known value for every dimension.
    """
    if not self.is_fully_defined():
      raise ValueError("Shape %s is not fully defined" % self)

  def as_list(self):
    """Returns a list of integers or `None` for each dimension.

    Returns:
      A list of integers or `None` for each dimension.

    Raises:
      ValueError: If `self` is an unknown shape with an unknown rank.
    """
    if self._dims is None:
      raise ValueError("as_list() is not defined on an unknown TensorShape.")
    return [dim.value for dim in self._dims]

  def as_proto(self):
    """Returns this shape as a `TensorShapeProto`."""
    if self._dims is None:
      return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
    else:
      # Unknown dimensions are encoded as size -1 in the proto.
      return tensor_shape_pb2.TensorShapeProto(dim=[
          tensor_shape_pb2.TensorShapeProto.Dim(size=-1
                                                if d.value is None else d.value)
          for d in self._dims
      ])

  def __eq__(self, other):
    """Returns True if `self` is equivalent to `other`."""
    try:
      other = as_shape(other)
    except TypeError:
      return NotImplemented
    return self._dims == other.dims

  def __ne__(self, other):
    """Returns True if `self` is known to be different from `other`.

    Raises:
      ValueError: If either shape has unknown rank, since the inequality
        cannot be decided.
    """
    try:
      other = as_shape(other)
    except TypeError:
      return NotImplemented
    if self.ndims is None or other.ndims is None:
      raise ValueError("The inequality of unknown TensorShapes is undefined.")
    if self.ndims != other.ndims:
      return True
    return self._dims != other.dims
def as_shape(shape):
  """Converts the given object to a TensorShape.

  An existing `TensorShape` is passed through unchanged; anything else is
  handed to the `TensorShape` constructor.
  """
  return shape if isinstance(shape, TensorShape) else TensorShape(shape)
def unknown_shape(ndims=None):
  """Returns an unknown TensorShape, optionally with a known rank.

  Args:
    ndims: (Optional) If specified, the number of dimensions in the shape.

  Returns:
    An unknown TensorShape.
  """
  dims = None if ndims is None else [Dimension(None)] * ndims
  return TensorShape(dims)
def scalar():
  """Returns a shape representing a scalar (rank 0, no dimensions)."""
  return TensorShape(dims=[])
def vector(length):
  """Returns a shape representing a vector.

  Args:
    length: The length of the vector, which may be None if unknown.

  Returns:
    A TensorShape representing a vector of the given length.
  """
  return TensorShape(dims=[length])
def matrix(rows, cols):
  """Returns a shape representing a matrix.

  Args:
    rows: The number of rows in the matrix, which may be None if unknown.
    cols: The number of columns in the matrix, which may be None if unknown.

  Returns:
    A TensorShape representing a matrix of the given size.
  """
  return TensorShape(dims=[rows, cols])
| |
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains utilities for analyzers."""
from __future__ import unicode_literals
from six.moves import urllib_parse as urlparse
from timesketch.lib.analyzers import interface
# Title and header text of a story that is common among browser
# based analyzers. Every browser analyzer contributes its own section
# under this single shared story.
BROWSER_STORY_TITLE = 'Browser Artifacts'
BROWSER_STORY_HEADER = """
This is an automatically generated story that browser history
based analyzers contribute to. Each section in this story
is generated by a separate analyzer.
"""
# Title and header text of a story that is common among Sigma
# based analyzers.
SIGMA_STORY_TITLE = 'Sigma Artifacts'
SIGMA_STORY_HEADER = """
This is an automatically generated story that Sigma
based analyzers contribute to.
"""
# CDN domain list based on:
# https://github.com/WPO-Foundation/webpagetest/blob/master/agent/wpthook/cdn.h
# Last updated: 2019-01-11
# Maps a domain suffix (leading dot included) to the human readable name
# of the CDN provider that owns it. Consumed by get_cdn_provider(), which
# matches with str.endswith().
KNOWN_CDN_DOMAINS = {
    '.att-dsa.net': 'AT&T',
    '.pix-cdn.org': 'Advanced Hosters CDN',
    '.akamai.net': 'Akamai',
    '.akamaiedge.net': 'Akamai',
    '.akamaihd.net': 'Akamai',
    '.akamaitechnologies.com': 'Akamai',
    '.akamaitechnologies.fr': 'Akamai',
    '.akamaized.net': 'Akamai',
    '.edgekey.net': 'Akamai',
    '.edgesuite.net': 'Akamai',
    '.srip.net': 'Akamai',
    '.tl88.net': 'Akamai China CDN',
    '.gslb.tbcache.com': 'Alimama',
    '.cloudfront.net': 'Amazon CloudFront',
    '.aads-cn.net': 'Aryaka',
    '.aads-cng.net': 'Aryaka',
    '.aads1.net': 'Aryaka',
    '.azion.net': 'Azion',
    '.azioncdn.com': 'Azion',
    '.azioncdn.net': 'Azion',
    '.bo.lt': 'BO.LT',
    '.bisongrid.net': 'Bison Grid',
    '.bitgravity.com': 'BitGravity',
    '.bluehatnetwork.com': 'Blue Hat Network',
    '.b-cdn.net': 'BunnyCDN',
    '.cdn77.net': 'CDN77',
    '.cdn77.org': 'CDN77',
    '.cdngc.net': 'CDNetworks',
    '.gccdn.net': 'CDNetworks',
    '.panthercdn.com': 'CDNetworks',
    '.cdnsun.net': 'CDNsun',
    '.cdnvideo.net': 'CDNvideo',
    '.cdnvideo.ru': 'CDNvideo',
    '.cachefly.net': 'Cachefly',
    '.caspowa.com': 'Caspowa',
    '.cedexis.net': 'Cedexis',
    '.ccgslb.com': 'ChinaCache',
    '.lxdns.com': 'ChinaNetCenter',
    '.ourwebpic.com': 'ChinaNetCenter',
    '.wscdns.com': 'ChinaNetCenter',
    '.wscloudcdn.com': 'ChinaNetCenter',
    '.cloudflare.com': 'Cloudflare',
    '.cotcdn.net': 'Cotendo CDN',
    '.systemcdn.net': 'Edgecast',
    '.transactcdn.net': 'Edgecast',
    '.v1cdn.net': 'Edgecast',
    '.v2cdn.net': 'Edgecast',
    '.v3cdn.net': 'Edgecast',
    '.v4cdn.net': 'Edgecast',
    '.v5cdn.net': 'Edgecast',
    '.edgecastcdn.net': 'Edgecast',
    '.cdninstagram.com': 'Facebook',
    '.fbcdn.net': 'Facebook',
    '.fastly.net': 'Fastly',
    '.fastlylb.net': 'Fastly',
    '.nocookie.net': 'Fastly',
    '.cdn.gocache.net': 'GoCache',
    '.doubleclick.net': 'Google',
    '.googleusercontent.com': 'Google',
    '.gstatic.com': 'Google',
    '.googlehosted.com': 'Google',
    # NOTE: intentionally a partial suffix (no TLD) so any
    # googlesyndication.* domain matches.
    '.googlesyndication.': 'Google',
    '.hiberniacdn.com': 'HiberniaCDN',
    '.hwcdn.net': 'Highwinds',
    '.hosting4cdn.com': 'Hosting4CDN',
    '.incapdns.net': 'Incapsula',
    '.inscname.net': 'Instart Logic',
    '.insnw.net': 'Instart Logic',
    '.internapcdn.net': 'Internap',
    '.kinxcdn.com': 'KINX CDN',
    '.kinxcdn.net': 'KINX CDN',
    '.kxcdn.com': 'KeyCDN',
    '.lswcdn.eu': 'LeaseWeb CDN',
    '.lswcdn.net': 'LeaseWeb CDN',
    '.footprint.net': 'Level 3',
    '.fpbns.net': 'Level 3',
    '.llnwd.net': 'Limelight',
    '.cdncloud.net.au': 'MediaCloud',
    '.mncdn.com': 'Medianova',
    '.mncdn.net': 'Medianova',
    '.mncdn.org': 'Medianova',
    '.azure.microsoft.com': 'Microsoft Azure',
    '.azureedge.net': 'Microsoft Azure',
    '.vo.msecnd.net': 'Microsoft Azure',
    '.instacontent.net': 'Mirror Image',
    '.mirror-image.net': 'Mirror Image',
    '.ngenix.net': 'NGENIX',
    '.nyiftw.com': 'NYI FTW',
    '.nyiftw.net': 'NYI FTW',
    '.netdna-cdn.com': 'NetDNA',
    '.netdna-ssl.com': 'NetDNA',
    '.netdna.com': 'NetDNA',
    '.netlify.com': 'Netlify',
    '.r.worldcdn.net': 'OnApp',
    '.r.worldssl.net': 'OnApp',
    '.optimalcdn.com': 'Optimal CDN',
    '.pagerain.net': 'PageRain',
    '.raxcdn.com': 'Rackspace',
    '.resrc.it': 'ReSRC.it',
    '.rlcdn.com': 'Reapleaf',
    '.rncdn1.com': 'Reflected Networks',
    '.rncdn7.com': 'Reflected Networks',
    '.revcn.net': 'Rev Software',
    '.revdn.net': 'Rev Software',
    '.roast.io': 'Roast.io',
    '.streamprovider.net': 'Rocket CDN',
    '.cdn.sfr.net': 'SFR',
    '.simplecdn.net': 'Simple CDN',
    '.singularcdn.net.br': 'Singular CDN',
    '.stackpathdns.com': 'StackPath',
    '.swiftcdn1.com': 'SwiftCDN',
    '.swiftserve.com': 'SwiftCDN',
    '.trbcdn.ru': 'TRBCDN',
    '.gslb.taobao.com': 'Taobao',
    '.taobaocdn.com': 'Taobao',
    '.tbcdn.cn': 'Taobao',
    '.cdntel.net': 'Telenor',
    '.twimg.com': 'Twitter',
    '.unicorncdn.net': 'UnicornCDN',
    '.voxcdn.net': 'VoxCDN',
    '.gravatar.com': 'WordPress',
    '.wordpress.com': 'WordPress',
    '.wp.com': 'WordPress',
    '.ay1.b.yahoo.com': 'Yahoo',
    '.yahooapis.com': 'Yahoo',
    # NOTE: partial suffix, matches yimg.* image domains.
    '.yimg.': 'Yahoo',
    '.yottaa.net': 'Yottaa',
    '.zenedge.net': 'Zenedge',
    '.afxcdn.net': 'afxcdn.net',
    '.cubecdn.net': 'cubeCDN',
    '.cdn.jsdelivr.net': 'jsDelivr',
    '.squixa.net': 'section.io'}
def get_domain_from_url(url):
    """Extract domain from URL.

    Args:
        url: URL to parse.

    Returns:
        String with domain from URL (the netloc without any port suffix).
    """
    # TODO: See if we can optimize this because it is rather slow.
    netloc = urlparse.urlparse(url).netloc
    # Drop an optional ":port" suffix; partition returns the full string
    # unchanged when no colon is present.
    return netloc.partition(':')[0]
def get_tld_from_domain(domain):
    """Get the top level domain from a domain string.

    Args:
        domain: string with a full domain, eg. www.google.com

    Returns:
        string: TLD or a top level domain extracted from the domain,
            eg: google.com
    """
    # Keep only the last two dot-separated labels.
    labels = domain.rsplit('.', 2)
    return '.'.join(labels[-2:])
def strip_www_from_domain(domain):
    """Strip www. from beginning of domain names.

    Args:
        domain: string with a full domain, eg. www.google.com

    Returns:
        string: Domain without any leading www, eg: google.com
    """
    prefix = 'www.'
    if not domain.startswith(prefix):
        return domain
    return domain[len(prefix):]
def get_cdn_provider(domain):
    """Return name of CDN provider if domain is recognized as a CDN.

    The match is case-insensitive: both the known suffixes and the supplied
    domain are lowercased before comparison (DNS names are case-insensitive,
    and the previous implementation missed upper/mixed-case domains).

    Args:
        domain: Domain name to check against CDN list.

    Returns:
        Space separated string of CDN provider names, or an empty string if
        the domain does not match any known CDN suffix. Names are sorted so
        the result is deterministic.
    """
    domain_lower = domain.lower()
    cdn_providers = {
        provider for suffix, provider in KNOWN_CDN_DOMAINS.items()
        if domain_lower.endswith(suffix.lower())}
    return ' '.join(sorted(cdn_providers))
def get_events_from_data_frame(frame, datastore):
    """Generates events from a data frame.

    Args:
        frame: a pandas DataFrame object.
        datastore: OpenSearch datastore client.

    Yields:
        An event (interface.Event) object for each row
        in the DataFrame.
    """
    if isinstance(datastore, interface.AnalyzerContext):
        context = datastore
        sketch = context.sketch
    else:
        context = None
        sketch = None
    for _, entry in frame.iterrows():
        # pylint: disable-msg=unexpected-keyword-arg
        event = interface.Event(
            entry, sketch=sketch, context=context, datastore=None)
        if context:
            context.add_event(event)
        yield event
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class DatasetsOperations(object):
    """DatasetsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version. Constant value: "2017-09-01-preview".
    """

    # Exposed so callers can reach the generated model classes through the
    # operations object (AutoRest convention).
    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned by the generated code; not user-configurable.
        self.api_version = "2017-09-01-preview"
        self.config = config

    def list_by_factory(
            self, resource_group_name, factory_name, custom_headers=None, raw=False, **operation_config):
        """Lists datasets.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param factory_name: The factory name.
        :type factory_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DatasetResource
        :rtype:
         ~azure.mgmt.datafactory.models.DatasetResourcePaged[~azure.mgmt.datafactory.models.DatasetResource]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Called repeatedly by the paged collection: the first call builds
            # the list URL, subsequent calls follow the service's next_link.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already carries all query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Per-request correlation id.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response
        deserialized = models.DatasetResourcePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DatasetResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized

    def create_or_update(
            self, resource_group_name, factory_name, dataset_name, properties, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a dataset.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param factory_name: The factory name.
        :type factory_name: str
        :param dataset_name: The dataset name.
        :type dataset_name: str
        :param properties: Dataset properties.
        :type properties: ~azure.mgmt.datafactory.models.Dataset
        :param if_match: ETag of the dataset entity. Should only be specified
         for update, for which it should match existing entity or can be * for
         unconditional update.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DatasetResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.datafactory.models.DatasetResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
        """
        # The service expects the properties wrapped in a DatasetResource
        # envelope.
        dataset = models.DatasetResource(properties=properties)
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
            'datasetName': self._serialize.url("dataset_name", dataset_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            # Optimistic concurrency control via ETag.
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(dataset, 'DatasetResource')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DatasetResource', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def get(
            self, resource_group_name, factory_name, dataset_name, custom_headers=None, raw=False, **operation_config):
        """Gets a dataset.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param factory_name: The factory name.
        :type factory_name: str
        :param dataset_name: The dataset name.
        :type dataset_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DatasetResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.datafactory.models.DatasetResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
            'datasetName': self._serialize.url("dataset_name", dataset_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DatasetResource', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def delete(
            self, resource_group_name, factory_name, dataset_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a dataset.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param factory_name: The factory name.
        :type factory_name: str
        :param dataset_name: The dataset name.
        :type dataset_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
            'datasetName': self._serialize.url("dataset_name", dataset_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        # 204 means the dataset did not exist; both outcomes are success.
        if response.status_code not in [200, 204]:
            raise models.ErrorResponseException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
| |
import builtins
import contextlib
import errno
import functools
import importlib
from importlib import machinery, util, invalidate_caches
import os
import os.path
from test import support
import unittest
import sys
import tempfile
import types
# Names of a built-in module that exists (good_name) and of a stdlib module
# that is NOT built in (bad_name), for tests exercising the builtin importer.
BUILTINS = types.SimpleNamespace()
BUILTINS.good_name = None
BUILTINS.bad_name = None
if 'errno' in sys.builtin_module_names:
    BUILTINS.good_name = 'errno'
if 'importlib' not in sys.builtin_module_names:
    BUILTINS.bad_name = 'importlib'

# Location details of the _testcapi extension module, filled in at import
# time by _extension_details() below; attributes stay None when the
# extension cannot be found on sys.path.
EXTENSIONS = types.SimpleNamespace()
EXTENSIONS.path = None
EXTENSIONS.ext = None
EXTENSIONS.filename = None
EXTENSIONS.file_path = None
EXTENSIONS.name = '_testcapi'
def _extension_details():
    """Locate the _testcapi extension on sys.path and record its details.

    Fills in the module-level EXTENSIONS namespace; leaves it untouched
    when the extension cannot be found.
    """
    global EXTENSIONS
    for directory in sys.path:
        for suffix in machinery.EXTENSION_SUFFIXES:
            candidate = EXTENSIONS.name + suffix
            candidate_path = os.path.join(directory, candidate)
            if not os.path.exists(candidate_path):
                continue
            EXTENSIONS.path = directory
            EXTENSIONS.ext = suffix
            EXTENSIONS.filename = candidate
            EXTENSIONS.file_path = candidate_path
            return
_extension_details()
def import_importlib(module_name):
    """Import a module from importlib both w/ and w/o _frozen_importlib."""
    fresh = ('importlib',) if '.' in module_name else ()
    blocked = ('_frozen_importlib', '_frozen_importlib_external')
    return {
        'Frozen': support.import_fresh_module(module_name),
        'Source': support.import_fresh_module(
            module_name, fresh=fresh, blocked=blocked),
    }
def specialize_class(cls, kind, base=None, **kwargs):
    """Create a kind-specific ('Frozen'/'Source') variant of a test class.

    XXX Support passing in submodule names--load (and cache) them?
    That would clean up the test modules a bit more.
    """
    if base is None:
        parent = unittest.TestCase
    elif isinstance(base, type):
        parent = base
    else:
        # A mapping of kind -> base class (e.g. from import_importlib()).
        parent = base[kind]
    specialized = types.new_class(
        '{}_{}'.format(kind, cls.__name__), (cls, parent))
    specialized.__module__ = cls.__module__
    specialized._NAME = cls.__name__
    specialized._KIND = kind
    for attr, per_kind in kwargs.items():
        # Each kwarg maps kind -> value for that kind's class attribute.
        setattr(specialized, attr, per_kind[kind])
    return specialized
def split_frozen(cls, base=None, **kwargs):
    """Return the (Frozen, Source) specializations of *cls*."""
    return (specialize_class(cls, 'Frozen', base, **kwargs),
            specialize_class(cls, 'Source', base, **kwargs))
def test_both(test_class, base=None, **kwargs):
    """Convenience alias for split_frozen()."""
    return split_frozen(test_class, base, **kwargs)
CASE_INSENSITIVE_FS = True
# Windows is the only OS that is *always* case-insensitive
# (OS X *can* be case-sensitive).
if sys.platform not in ('win32', 'cygwin'):
    # Probe the filesystem: if this file is reachable under a
    # differently-cased name, the filesystem is case-insensitive.
    changed_name = __file__.upper()
    if changed_name == __file__:
        changed_name = __file__.lower()
    if not os.path.exists(changed_name):
        CASE_INSENSITIVE_FS = False

# The pure-Python importlib implementation (no _frozen_importlib).
source_importlib = import_importlib('importlib')['Source']

# Both flavours of __import__, keyed like the specialize_class() kinds.
__import__ = {'Frozen': staticmethod(builtins.__import__),
              'Source': staticmethod(source_importlib.__import__)}
def case_insensitive_tests(test):
    """Class decorator that nullifies tests requiring a case-insensitive
    file system."""
    skip = unittest.skipIf(not CASE_INSENSITIVE_FS,
                           "requires a case-insensitive filesystem")
    return skip(test)
def submodule(parent, name, pkg_dir, content=''):
    """Write ``<name>.py`` inside *pkg_dir* and return (dotted name, path)."""
    path = os.path.join(pkg_dir, '{}.py'.format(name))
    with open(path, 'w') as subfile:
        subfile.write(content)
    return '.'.join((parent, name)), path
@contextlib.contextmanager
def uncache(*names):
    """Uncache a module from sys.modules.

    A basic sanity check is performed to prevent uncaching modules that either
    cannot/shouldn't be uncached.
    """
    protected = ('sys', 'marshal', 'imp')
    for name in names:
        if name in protected:
            raise ValueError(
                "cannot uncache {0}".format(name))
        sys.modules.pop(name, None)
    try:
        yield
    finally:
        # Drop anything the body may have (re-)imported.
        for name in names:
            sys.modules.pop(name, None)
@contextlib.contextmanager
def temp_module(name, content='', *, pkg=False):
    """Create a temporary module/package on disk and put it on sys.path.

    Yields the module's filesystem location (without the ``.py`` suffix for
    a plain module). The module is not imported; callers do that themselves.
    """
    # Any already-imported module sharing the same top-level name would
    # shadow the temporary one, so those get uncached as well.
    conflicts = [n for n in sys.modules if n.partition('.')[0] == name]
    with support.temp_cwd(None) as cwd:
        with uncache(name, *conflicts):
            with support.DirsOnSysPath(cwd):
                invalidate_caches()
                location = os.path.join(cwd, name)
                if pkg:
                    modpath = os.path.join(location, '__init__.py')
                    os.mkdir(name)
                else:
                    modpath = location + '.py'
                if content is None:
                    # Make sure the module file gets created.
                    content = ''
                # NOTE(review): content can no longer be None here (it was
                # just replaced with ''), so this guard is always true and
                # the namespace-package case never skips the write — confirm
                # whether that is intentional.
                if content is not None:
                    # not a namespace package
                    with open(modpath, 'w') as modfile:
                        modfile.write(content)
                yield location
@contextlib.contextmanager
def import_state(**kwargs):
    """Context manager to manage the various importers and stored state in the
    sys module.

    The 'modules' attribute is not supported as the interpreter state stores a
    pointer to the dict that the interpreter uses internally;
    reassigning to sys.modules does not have the desired effect.
    """
    defaults = (('meta_path', []), ('path', []),
                ('path_hooks', []), ('path_importer_cache', {}))
    originals = {}
    try:
        for attr, default in defaults:
            # Save the current value, then install either the caller's
            # override or an empty default.
            originals[attr] = getattr(sys, attr)
            setattr(sys, attr, kwargs.pop(attr, default))
        if kwargs:
            raise ValueError(
                'unrecognized arguments: {0}'.format(kwargs.keys()))
        yield
    finally:
        for attr, value in originals.items():
            setattr(sys, attr, value)
class _ImporterMock:
    """Base class to help with creating importer mocks.

    Instantiated with dotted module names; a name ending in '.__init__'
    marks a package. Acts as a context manager that removes its modules
    from sys.modules on both entry and exit.
    """

    def __init__(self, *names, module_code=None):
        # A mutable default argument ({}) was replaced with a None sentinel;
        # an empty dict still means "no module code". Backward-compatible.
        if module_code is None:
            module_code = {}
        self.modules = {}
        self.module_code = {}
        for name in names:
            if not name.endswith('.__init__'):
                import_name = name
            else:
                import_name = name[:-len('.__init__')]
            if '.' not in name:
                # Top-level module: no containing package.
                package = None
            elif import_name == name:
                # Submodule: the package is everything before the last dot.
                package = name.rsplit('.', 1)[0]
            else:
                # Package __init__: the package is the import name itself.
                package = import_name
            module = types.ModuleType(import_name)
            module.__loader__ = self
            module.__file__ = '<mock __file__>'
            module.__package__ = package
            module.attr = name
            if import_name != name:
                # Packages need a __path__ so submodules can be imported.
                module.__path__ = ['<mock __path__>']
            self.modules[import_name] = module
            if import_name in module_code:
                self.module_code[import_name] = module_code[import_name]

    def __getitem__(self, name):
        return self.modules[name]

    def __enter__(self):
        self._uncache = uncache(*self.modules.keys())
        self._uncache.__enter__()
        return self

    def __exit__(self, *exc_info):
        self._uncache.__exit__(None, None, None)
class mock_modules(_ImporterMock):
    """Importer mock using PEP 302 APIs."""

    def find_module(self, fullname, path=None):
        """Return self as the loader for a mocked name, else None."""
        return self if fullname in self.modules else None

    def load_module(self, fullname):
        """Install the mocked module in sys.modules and return it."""
        if fullname not in self.modules:
            raise ImportError
        sys.modules[fullname] = self.modules[fullname]
        if fullname in self.module_code:
            try:
                self.module_code[fullname]()
            except Exception:
                # A failing module body must not leave a half-initialized
                # module behind in sys.modules.
                del sys.modules[fullname]
                raise
        return self.modules[fullname]
class mock_spec(_ImporterMock):
    """Importer mock using PEP 451 APIs."""

    def find_spec(self, fullname, path=None, parent=None):
        """Return a spec for a mocked module, or None when unknown."""
        module = self.modules.get(fullname)
        if module is None:
            return None
        return util.spec_from_file_location(
            fullname, module.__file__, loader=self,
            submodule_search_locations=getattr(module, '__path__', None))

    def create_module(self, spec):
        """Hand back the pre-built mock module for *spec*."""
        if spec.name not in self.modules:
            raise ImportError
        return self.modules[spec.name]

    def exec_module(self, module):
        """Run registered module code, if any; otherwise do nothing."""
        # NOTE: lookup and call share one try block, so a KeyError raised
        # *by* the module code is swallowed too (mirrors the original).
        try:
            self.module_code[module.__spec__.name]()
        except KeyError:
            pass
def writes_bytecode_files(fxn):
    """Decorator to protect sys.dont_write_bytecode from mutation and to skip
    tests that require it to be set to False."""
    if sys.dont_write_bytecode:
        # Bytecode writing is disabled interpreter-wide; make the test a
        # no-op rather than a failure.
        return lambda *args, **kwargs: None

    @functools.wraps(fxn)
    def wrapper(*args, **kwargs):
        saved = sys.dont_write_bytecode
        sys.dont_write_bytecode = False
        try:
            return fxn(*args, **kwargs)
        finally:
            sys.dont_write_bytecode = saved
    return wrapper
def ensure_bytecode_path(bytecode_path):
    """Ensure that the __pycache__ directory for PEP 3147 pyc file exists.

    :param bytecode_path: File system path to PEP 3147 pyc file.
    """
    directory = os.path.dirname(bytecode_path)
    try:
        os.mkdir(directory)
    except OSError as error:
        # An already-existing directory is fine; anything else propagates.
        if error.errno != errno.EEXIST:
            raise
@contextlib.contextmanager
def create_modules(*names):
    """Temporarily create each named module with an attribute (named 'attr')
    that contains the name passed into the context manager that caused the
    creation of the module.

    All files are created in a temporary directory returned by
    tempfile.mkdtemp(). This directory is inserted at the beginning of
    sys.path. When the context manager exits all created files (source and
    bytecode) are explicitly deleted.

    No magic is performed when creating packages! This means that if you create
    a module within a package you must also create the package's __init__ as
    well.
    """
    source = 'attr = {0!r}'
    created_paths = []
    mapping = {}
    state_manager = None
    uncache_manager = None
    try:
        temp_dir = tempfile.mkdtemp()
        # The temp root is exposed to the caller under the '.root' key.
        mapping['.root'] = temp_dir
        import_names = set()
        for name in names:
            # NOTE(review): this tests for a bare '__init__' suffix but
            # strips '.__init__' (with the dot) below — confirm names
            # ending in __init__ are always dotted.
            if not name.endswith('__init__'):
                import_name = name
            else:
                import_name = name[:-len('.__init__')]
            import_names.add(import_name)
            if import_name in sys.modules:
                del sys.modules[import_name]
            name_parts = name.split('.')
            file_path = temp_dir
            for directory in name_parts[:-1]:
                # Create intermediate package directories as needed.
                file_path = os.path.join(file_path, directory)
                if not os.path.exists(file_path):
                    os.mkdir(file_path)
                    created_paths.append(file_path)
            file_path = os.path.join(file_path, name_parts[-1] + '.py')
            with open(file_path, 'w') as file:
                file.write(source.format(name))
            created_paths.append(file_path)
            mapping[name] = file_path
        # Enter uncache/import_state manually (not via `with`) so both are
        # torn down in the finally block even on partial setup failure.
        uncache_manager = uncache(*import_names)
        uncache_manager.__enter__()
        state_manager = import_state(path=[temp_dir])
        state_manager.__enter__()
        yield mapping
    finally:
        if state_manager is not None:
            state_manager.__exit__(None, None, None)
        if uncache_manager is not None:
            uncache_manager.__exit__(None, None, None)
        # NOTE(review): temp_dir is unbound here if mkdtemp() itself raised.
        support.rmtree(temp_dir)
def mock_path_hook(*entries, importer):
    """A mock sys.path_hooks entry."""
    def hook(entry):
        # Only recognized path entries yield the importer.
        if entry in entries:
            return importer
        raise ImportError
    return hook
class CASEOKTestBase:
    """Mixin for tests sensitive to PYTHONCASEOK environment changes."""

    def caseok_env_changed(self, *, should_exist):
        """Skip the current test unless PYTHONCASEOK's presence in the
        bootstrap's cached os.environ matches *should_exist*."""
        env = self.importlib._bootstrap_external._os.environ
        present = any(key in env for key in (b'PYTHONCASEOK', 'PYTHONCASEOK'))
        if present != should_exist:
            self.skipTest('os.environ changes not reflected in _os.environ')
| |
#=======================================================================
# isa.py
#=======================================================================
# common bitwise utils
from pydgin.utils import (
trim_32,
trim_16,
trim_8,
sext_16,
sext_8,
signed,
intmask,
)
# arm-specific utils
from utils import (
shifter_operand,
condition_passed,
carry_from,
borrow_from,
not_borrow_from,
overflow_from_add,
overflow_from_sub,
sext_30,
addressing_mode_2,
addressing_mode_3,
addressing_mode_4,
)
from instruction import *
from pydgin.misc import create_risc_decoder, FatalError
from pydgin.jit import unroll_safe
#=======================================================================
# Register Definitions
#=======================================================================
# Map from ARM register names (and their APCS aliases) to physical
# register-file indices 0-15.  Several names deliberately alias the
# same index (e.g. 'a1' == 'r0', 'sp' == 'r13').
reg_map = {
  'r0' : 0, 'r1' : 1, 'r2' : 2, 'r3' : 3,
  'r4' : 4, 'r5' : 5, 'r6' : 6, 'r7' : 7,
  'r8' : 8, 'r9' : 9, 'r10' : 10, 'r11' : 11,
  'r12' : 12, 'r13' : 13, 'r14' : 14, 'r15' : 15,
  # http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473c/CJAJBFHC.html
  # http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0041c/ch09s02s02.html
  # http://msdn.microsoft.com/en-us/library/aa448762.aspx
  'a1' : 0, 'a2' : 1, 'a3' : 2, 'a4' : 3, # scratch registers
  'v1' : 4, 'v2' : 5, 'v3' : 6, 'v4' : 7, # variable registers
  'v5' : 8, 'v6' : 9, 'v7' : 10, 'v8' : 11, # variable registers
  'sb' : 9, # static base (APCS); not "stack base"
  'sl' : 10, # stack limit
  'fp' : 11, # frame pointer
  'ip' : 12, # intra-procedure call scratch
  'sp' : 13, # stack pointer
  'lr' : 14, # link register
  'pc' : 15, # pc
  # NOTE: in ARM the PC is address of the current instruction being
  # executed + 8!! That means for a given cycle in our simulator,
  # PC read by fetch and PC read by execute need different values.
  # Best way to do this?
  # cpsr/spsr bits
  #
  # N 31
  # Z 30
  # C 29
  # V 28
  # Q 27
  # RESERVED 26
  # J 24
  # RESERVED 23:20
  # GE[3:0] 19:16
  # RESERVED 15:10
  # E 9
  # A 8
  # I 7
  # F 6
  # T 5
  # M[4:0] 4:0
}
#=======================================================================
# Instruction Encodings
#=======================================================================
#
# ARM ISA Manual: (ARM DDI 0100I)
#
# - pg. A4-286
# - pg. A3-2
# - pg. A4-2
#
# NOTE: PUSH and POP are synonyms for STMDB and LDM (or LDMIA), with the
# base register sp (r13), and the adjusted address written back to the
# base register. PUSH and POP are the preferred mnemonic in these cases.
# Registers are stored on the stack in numerical order, with the lowest
# numbered register at the lowest address.
#
# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204j/Babefbce.html
#
# NOTE: LDM/STM have alternative names depending on addressing mode
# (including LDMIA and STMDB). See: ARM DDI 0100I, pg. A5-48
#
# Decode table consumed by create_risc_decoder.  Each entry is
# [ mnemonic, 32-character encoding pattern ], pattern given MSB (bit 31)
# first with literal '0'/'1' bits and 'x' for don't-care.  Commented-out
# entries are v5TE/v6 instructions not yet supported by this decoder.
encodings = [
  ['nop', '00000000000000000000000000000000'],
  # TODO: These instructions have manually been moved to avoid incorrect
  # decodings caused by encoding ambiguity. Ideally our decoder generator
  # would be able to handle this automatically!
  ['mul', 'xxxx0000000xxxxx0000xxxx1001xxxx'], # ambiguous with and
  ['strh', 'xxxx000xxxx0xxxxxxxxxxxx1011xxxx'], # ambiguous with orr
  ['ldrh', 'xxxx000xxxx1xxxxxxxxxxxx1011xxxx'], # ambiguous with bic
  ['ldrsb', 'xxxx000xxxx1xxxxxxxxxxxx1101xxxx'], # ambiguous with bic
  ['ldrsh', 'xxxx000xxxx1xxxxxxxxxxxx1111xxxx'], # ambiguous with bic
  ['mla', 'xxxx0000001xxxxxxxxxxxxx1001xxxx'], # ambiguous with eor
  ['umull', 'xxxx0000100xxxxxxxxxxxxx1001xxxx'], # ambiguous with add
  ['umlal', 'xxxx0000101xxxxxxxxxxxxx1001xxxx'], # ambiguous with adc
  ['smlal', 'xxxx0000111xxxxxxxxxxxxx1001xxxx'], # ambiguous with rsc
  ['smull', 'xxxx0000110xxxxxxxxxxxxx1001xxxx'], # ambiguous with sbc
  ['adc', 'xxxx00x0101xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['add', 'xxxx00x0100xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['and', 'xxxx00x0000xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['b', 'xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx'], # v4
  ['bl', 'xxxx1011xxxxxxxxxxxxxxxxxxxxxxxx'], # v4
  ['bic', 'xxxx00x1110xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['bkpt', '111000010010xxxxxxxxxxxx0111xxxx'], # v5T
  ['blx1', '1111101xxxxxxxxxxxxxxxxxxxxxxxxx'], # v5T
  ['blx2', 'xxxx000100101111111111110011xxxx'], # v5T
  ['bx', 'xxxx000100101111111111110001xxxx'], # v4T
  #?['bxj', 'xxxx000100101111111111110010xxxx'], # v5TEJ
  ['cdp', 'xxxx1110xxxxxxxxxxxxxxxxxxx0xxxx'], # v4
  ['clz', 'xxxx000101101111xxxx11110001xxxx'], # v5T
  ['cmn', 'xxxx00x10111xxxx0000xxxxxxxxxxxx'], # v4
  ['cmp', 'xxxx00x10101xxxx0000xxxxxxxxxxxx'], # v4
  # ['cps', '111100010000xxx00000000xxxx0xxxx'], # v6
  # ['cpy', 'xxxx000110100000xxxx00000000xxxx'], # v6
  ['eor', 'xxxx00x0001xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['ldc', 'xxxx110xxxx1xxxxxxxxxxxxxxxxxxxx'], # v4
  ['ldc2', '1111110xxxx1xxxxxxxxxxxxxxxxxxxx'], # v5T
  ['ldm1', 'xxxx100xx0x1xxxxxxxxxxxxxxxxxxxx'], # v4
  ['ldm2', 'xxxx100xx101xxxx0xxxxxxxxxxxxxxx'], # v4
  ['ldm3', 'xxxx100xx1x1xxxx1xxxxxxxxxxxxxxx'], # v4
  ['ldr', 'xxxx01xxx0x1xxxxxxxxxxxxxxxxxxxx'], # v4
  ['ldrb', 'xxxx01xxx1x1xxxxxxxxxxxxxxxxxxxx'], # v4
  ['ldrbt', 'xxxx01x0x111xxxxxxxxxxxxxxxxxxxx'], # v4
  #?['ldrd', 'xxxx000puiw0xxxxxxxxxxxx1101xxxx'], # v5TE
  # ['ldrex', 'xxxx000110001xxxxxxx111110011111'], # v6
  # ['ldrh', 'xxxx000xxxx1xxxxxxxxxxxx1011xxxx'], # v4, SEE ABOVE
  # ['ldrsb', 'xxxx000xxxx1xxxxxxxxxxxx1101xxxx'], # v4, SEE ABOVE
  # ['ldrsh', 'xxxx000xxxx1xxxxxxxxxxxx1111xxxx'], # v4, SEE ABOVE
  ['ldrt', 'xxxx01x0x011xxxxxxxxxxxxxxxxxxxx'], # v4
  ['mcr', 'xxxx1110xxx0xxxxxxxxxxxxxxx1xxxx'], # v4
  ['mcr2', '11111110xxx0xxxxxxxxxxxxxxx1xxxx'], # v5T
  ['mcrr', 'xxxx11000100xxxxxxxxxxxxxxxxxxxx'], # v5TE
  ['mcrr2', '111111000100xxxxxxxxxxxxxxxxxxxx'], # v6
  # ['mla', 'xxxx0000001xxxxxxxxxxxxx1001xxxx'], # v4, SEE ABOVE
  ['mov', 'xxxx00x1101x0000xxxxxxxxxxxxxxxx'], # v4
  ['mrc', 'xxxx1110xxx1xxxxxxxxxxxxxxx1xxxx'], # v4
  ['mrc2', '11111110xxx1xxxxxxxxxxxxxxx1xxxx'], # v5T
  #?['mrrc', 'xxxx11000101xxxxxxxxxxxxxxxxxxxx'], # v5TE
  # ['mrrc2', '111111000101xxxxxxxxxxxxxxxxxxxx'], # v6
  ['mrs', 'xxxx00010x001111xxxx000000000000'], # v4
  ['msr', 'xxxx00x10x10xxxx1111xxxxxxxxxxxx'], # v4, TODO
  # ['mul', 'xxxx0000000xxxxx0000xxxx1001xxxx'], # v4, SEE ABOVE
  ['mvn', 'xxxx00x1111x0000xxxxxxxxxxxxxxxx'], # v4
  ['orr', 'xxxx00x1100xxxxxxxxxxxxxxxxxxxxx'], # v4
  # ['pkhbt', 'xxxx01101000xxxxxxxxxxxxx001xxxx'], # v6
  # ['pkhtb', 'xxxx01101000xxxxxxxxxxxxx101xxxx'], # v6
  #?['pld', '111101x1x101xxxx1111xxxxxxxxxxxx'], # v5TE
  #?['qadd', 'xxxx00010000xxxxxxxx00000101xxxx'], # v5TE
  # ['qadd16', 'xxxx01100010xxxxxxxx11110001xxxx'], # v6
  # ['qadd8', 'xxxx01100010xxxxxxxx11111001xxxx'], # v6
  # ['qaddsubx', 'xxxx01100010xxxxxxxx11110011xxxx'], # v6
  #?['qdadd', 'xxxx00010100xxxxxxxx00000101xxxx'], # v5TE
  #?['qdsub', 'xxxx00010110xxxxxxxx00000101xxxx'], # v5TE
  #?['qsub', 'xxxx00010010xxxxxxxx00000101xxxx'], # v5TE
  # ['qsub16', 'xxxx01100010xxxxxxxx11110111xxxx'], # v6
  # ['qsub8', 'xxxx01100010xxxxxxxx11111111xxxx'], # v6
  # ['qsubaddx', 'xxxx01100010xxxxxxxx11110101xxxx'], # v6
  # ['rev', 'xxxx011010111111xxxx11110011xxxx'], # v6
  # ['rev16', 'xxxx011010111111xxxx11111011xxxx'], # v6
  # ['revsh', 'xxxx011011111111xxxx11111011xxxx'], # v6
  # ['rfe', '1111100xx0x1xxxx0000101000000000'], # v6
  ['rsb', 'xxxx00x0011xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['rsc', 'xxxx00x0111xxxxxxxxxxxxxxxxxxxxx'], # v4
  # ['sadd16', 'xxxx01100001xxxxxxxx11110001xxxx'], # v6
  # ['sadd8', 'xxxx01100001xxxxxxxx11111001xxxx'], # v6
  # ['saddsubx', 'xxxx01100001xxxxxxxx11110011xxxx'], # v6
  ['sbc', 'xxxx00x0110xxxxxxxxxxxxxxxxxxxxx'], # v4
  # ['sel', 'xxxx01101000xxxxxxxx11111011xxxx'], # v6
  # ['setend', '1111000100000001000000x000000000'], # v6
  # ['shadd16', 'xxxx01100011xxxxxxxx11110001xxxx'], # v6
  # ['shadd8', 'xxxx01100011xxxxxxxx11111001xxxx'], # v6
  # ['shaddsubx','xxxx01100011xxxxxxxx11110011xxxx'], # v6
  # ['shsub16', 'xxxx01100011xxxxxxxx11110111xxxx'], # v6
  # ['shsub8', 'xxxx01100011xxxxxxxx11111111xxxx'], # v6
  # ['shsubaddx','xxxx01100011xxxxxxxx11110101xxxx'], # v6
  # ['smlad', 'xxxx01110000xxxxxxxxxxxx00x1xxxx'], # v6
  # ['smlal', 'xxxx0000111xxxxxxxxxxxxx1001xxxx'], # v4, SEE ABOVE
  # ['smlald', 'xxxx01110100xxxxxxxxxxxx00x1xxxx'], # v6
  #?['smla_xy', 'xxxx00010000xxxxxxxxxxxx1xx0xxxx'], # v5TE
  #?['smlal_xy', 'xxxx00010100xxxxxxxxxxxx1xx0xxxx'], # v5TE
  #?['smlaw_y', 'xxxx00010010xxxxxxxxxxxx1x00xxxx'], # v5TE
  # ['smlsd', 'xxxx01110000xxxxxxxxxxxx01x1xxxx'], # v6
  # ['smlsld', 'xxxx01110100xxxxxxxxxxxx01x1xxxx'], # v6
  # ['smmla', 'xxxx01110101xxxxxxxxxxxx00x1xxxx'], # v6
  # ['smmls', 'xxxx01110101xxxxxxxxxxxx11x1xxxx'], # v6
  # ['smmul', 'xxxx01110101xxxx1111xxxx00x1xxxx'], # v6
  # ['smuad', 'xxxx01110000xxxx1111xxxx00x1xxxx'], # v6
  # ['smull', 'xxxx0000110xxxxxxxxxxxxx1001xxxx'], # v4, SEE ABOVE
  #?['smul_xy', 'xxxx00010110xxxx0000xxxx1xx0xxxx'], # v5TE
  #?['smulw', 'xxxx00010010xxxx0000xxxx1x10xxxx'], # v5TE
  # ['smusd', 'xxxx01110000xxxx1111xxxx01x1xxxx'], # v6
  # ['srs', '1111100xx1x0110100000101000xxxxx'], # v6
  # ['ssat', 'xxxx0110101xxxxxxxxxxxxxxx01xxxx'], # v6
  # ['ssat16', 'xxxx01101010xxxxxxxx11110011xxxx'], # v6
  # ['ssub16', 'xxxx01100001xxxxxxxx11110111xxxx'], # v6
  # ['ssub8', 'xxxx01100001xxxxxxxx11111111xxxx'], # v6
  # ['ssubaddx', 'xxxx01100001xxxxxxxx11110101xxxx'], # v6
  ['stc', 'xxxx110xxxx0xxxxxxxxxxxxxxxxxxxx'], # v4
  # ['stc2', '1111110xxxx0xxxxxxxxxxxxxxxxxxxx'], # v5T
  ['stm1', 'xxxx100xx0x0xxxxxxxxxxxxxxxxxxxx'], # v4
  ['stm2', 'xxxx100xx100xxxxxxxxxxxxxxxxxxxx'], # v4
  ['str', 'xxxx01xxx0x0xxxxxxxxxxxxxxxxxxxx'], # v4
  ['strb', 'xxxx01xxx1x0xxxxxxxxxxxxxxxxxxxx'], # v4
  ['strbt', 'xxxx01x0x110xxxxxxxxxxxxxxxxxxxx'], # v4
  #?['strd', 'xxxx000xxxx0xxxxxxxxxxxx1111xxxx'], # v5TE
  # ['strex', 'xxxx00011000xxxxxxxx11111001xxxx'], # v6
  # ['strh', 'xxxx000xxxx0xxxxxxxxxxxx1011xxxx'], # v4, SEE ABOVE
  ['strt', 'xxxx01x0x010xxxxxxxxxxxxxxxxxxxx'], # v4
  ['sub', 'xxxx00x0010xxxxxxxxxxxxxxxxxxxxx'], # v4
  ['swi', 'xxxx1111xxxxxxxxxxxxxxxxxxxxxxxx'], # v4
  ['swp', 'xxxx00010000xxxxxxxx00001001xxxx'], # v4, Deprecated in v6
  ['swpb', 'xxxx00010100xxxxxxxx00001001xxxx'], # v4, Deprecated in v6
  # ['sxtab', 'xxxx01101010xxxxxxxxxx000111xxxx'], # v6
  # ['sxtab16', 'xxxx01101000xxxxxxxxxx000111xxxx'], # v6
  # ['sxtah', 'xxxx01101011xxxxxxxxxx000111xxxx'], # v6
  # ['sxtb', 'xxxx011010101111xxxxxx000111xxxx'], # v6
  # ['sxtb16', 'xxxx011010001111xxxxxx000111xxxx'], # v6
  # ['sxth', 'xxxx011010111111xxxxxx000111xxxx'], # v6
  ['teq', 'xxxx00x10011xxxx0000xxxxxxxxxxxx'], # v4
  ['tst', 'xxxx00x10001xxxx0000xxxxxxxxxxxx'], # v4
  # ['uadd16', 'xxxx01100101xxxxxxxx11110001xxxx'], # v6
  # ['uadd8', 'xxxx01100101xxxxxxxx11111001xxxx'], # v6
  # ['uadd8subx','xxxx01100101xxxxxxxx11110011xxxx'], # v6
  # ['uhadd16', 'xxxx01100111xxxxxxxx11110001xxxx'], # v6
  # ['uhadd8', 'xxxx01100111xxxxxxxx11111001xxxx'], # v6
  # ['uhaddsubx','xxxx01100111xxxxxxxx11110011xxxx'], # v6
  # ['uhsub16', 'xxxx01100111xxxxxxxx11110111xxxx'], # v6
  # ['uhsub8', 'xxxx01100111xxxxxxxx11111111xxxx'], # v6
  # ['uhsubaddx','xxxx01100111xxxxxxxx11110101xxxx'], # v6
  # ['umaal', 'xxxx00000100xxxxxxxxxxxx1001xxxx'], # v6
  # ['umlal', 'xxxx0000101xxxxxxxxxxxxx1001xxxx'], # v4, SEE ABOVE
  # ['umull', 'xxxx0000100xxxxxxxxxxxxx1001xxxx'], # v4, SEE ABOVE
  # ['uqadd16', 'xxxx01100110xxxxxxxx11110001xxxx'], # v6
  # ['uqadd8', 'xxxx01100110xxxxxxxx11111001xxxx'], # v6
  # ['uqaddsubx','xxxx01100110xxxxxxxx11110011xxxx'], # v6
  # ['uqsub16', 'xxxx01100110xxxxxxxx11110111xxxx'], # v6
  # ['uqsub8', 'xxxx01100110xxxxxxxx11111111xxxx'], # v6
  # ['uqsubaddx','xxxx01100110xxxxxxxx11110101xxxx'], # v6
  # ['usad8', 'xxxx01111000xxxx1111xxxx0001xxxx'], # v6
  # ['usada8', 'xxxx01111000xxxxxxxxxxxx0001xxxx'], # v6
  # ['usat', 'xxxx0110111xxxxxxxxxxxxxxx01xxxx'], # v6
  # ['usat16', 'xxxx01101110xxxxxxxx11110011xxxx'], # v6
  # ['usub16', 'xxxx01100101xxxxxxxx11110111xxxx'], # v6
  # ['usub8', 'xxxx01100101xxxxxxxx11111111xxxx'], # v6
  # ['usubaddx', 'xxxx01100101xxxxxxxx11110101xxxx'], # v6
  # ['uxtab', 'xxxx01101110xxxxxxxxxx000111xxxx'], # v6
  # ['uxtab16', 'xxxx01101100xxxxxxxxxx000111xxxx'], # v6
  # ['uxtah', 'xxxx01101111xxxxxxxxxx000111xxxx'], # v6
  # ['uxtb', 'xxxx011011101111xxxxxx000111xxxx'], # v6
  # ['uxtb16', 'xxxx011011001111xxxxxx000111xxxx'], # v6
  # ['uxth', 'xxxx011011111111xxxxxx000111xxxx'], # v6
]
# Convenience indices for the program counter and link register.
PC = reg_map['pc']
LR = reg_map['lr']
#=======================================================================
# Instruction Definitions
#=======================================================================
def execute_nop( s, inst ):
  """NOP: advance the PC to the next instruction, nothing else."""
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# adc
#-----------------------------------------------------------------------
def execute_adc( s, inst ):
  """ADC: rd := rn + shifter_operand + C; S form updates NZCV."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, _ = shifter_operand( s, inst )
  res = op_a + op_b + s.C
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = carry_from( res )
    s.V = overflow_from_add( op_a, op_b, res )
  if inst.rd != 15:
    # a write to r15 is a branch: keep the new PC
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# add
#-----------------------------------------------------------------------
def execute_add( s, inst ):
  """ADD: rd := rn + shifter_operand; S form updates NZCV."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, _ = shifter_operand( s, inst )
  res = op_a + op_b
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = carry_from( res )
    s.V = overflow_from_add( op_a, op_b, res )
  if inst.rd != 15:
    # a write to r15 is a branch: keep the new PC
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# and
#-----------------------------------------------------------------------
def execute_and( s, inst ):
  """AND: rd := rn & shifter_operand; S form sets N, Z, C (V unchanged)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, shift_carry = shifter_operand( s, inst )
  res = op_a & op_b
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = shift_carry
    # V is unaffected by logical operations
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# b
#-----------------------------------------------------------------------
def execute_b( s, inst ):
  """B: PC-relative branch by a sign-extended 24-bit word offset."""
  if condition_passed( s, inst.cond ):
    delta = signed( sext_30( inst.imm_24 ) << 2 )
    s.rf[PC] = trim_32( signed( s.rf[PC] ) + delta )
  else:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# bl
#-----------------------------------------------------------------------
def execute_bl( s, inst ):
  """BL: branch and link -- save the return address in LR, then branch."""
  if condition_passed( s, inst.cond ):
    s.rf[LR] = trim_32( s.fetch_pc() + 4 )
    delta = signed( sext_30( inst.imm_24 ) << 2 )
    s.rf[PC] = trim_32( signed( s.rf[PC] ) + delta )
  else:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# bic
#-----------------------------------------------------------------------
def execute_bic( s, inst ):
  """BIC: rd := rn & ~shifter_operand; S form sets N, Z, C."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, shift_carry = shifter_operand( s, inst )
  res = op_a & trim_32(~op_b)
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = shift_carry
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# bkpt
#-----------------------------------------------------------------------
def execute_bkpt( s, inst ):
  """BKPT: software breakpoint -- not supported by this simulator.

  The raise is unconditional; unreachable code after it was dropped.
  """
  raise FatalError('"bkpt" instruction unimplemented!')
#-----------------------------------------------------------------------
# blx1
#-----------------------------------------------------------------------
def execute_blx1( s, inst ):
  """BLX (immediate): always enters Thumb state, which is unsupported."""
  raise FatalError('Called blx1: Entering THUMB mode! Unsupported')
#-----------------------------------------------------------------------
# blx2
#-----------------------------------------------------------------------
def execute_blx2( s, inst ):
  """BLX (register): save return address in LR, then branch-and-exchange
  to rm; bit 0 of rm selects Thumb state (unsupported)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  s.rf[LR] = trim_32( s.fetch_pc() + 4 )
  target = s.rf[ inst.rm ]
  s.T = target & 0x00000001
  s.rf[PC] = target & 0xFFFFFFFE
  if s.T:
    raise FatalError( "Entering THUMB mode! Unsupported!")
  # branch taken: PC already holds the target, no default increment
#-----------------------------------------------------------------------
# bx
#-----------------------------------------------------------------------
def execute_bx( s, inst ):
  """BX: branch to rm, exchanging to Thumb state if bit 0 is set."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  target = s.rf[ inst.rm ]
  s.T = target & 0x00000001
  s.rf[PC] = target & 0xFFFFFFFE
  if s.T:
    raise FatalError( "Entering THUMB mode! Unsupported!")
  # branch taken: PC already holds the target, no default increment
#-----------------------------------------------------------------------
# cdp
#-----------------------------------------------------------------------
def execute_cdp( s, inst ):
  """CDP: coprocessor data processing -- not supported by this simulator."""
  raise FatalError('"cdp" instruction unimplemented!')
#-----------------------------------------------------------------------
# clz
#-----------------------------------------------------------------------
@unroll_safe
def execute_clz( s, inst ):
  """CLZ: write the number of leading zero bits of rm into rd."""
  if condition_passed( s, inst.cond ):
    Rm = s.rf[ inst.rm ]
    if Rm == 0:
      # no bits set at all: result is the full word width
      s.rf[ inst.rd ] = 32
    else:
      # scan from the MSB down until the first set bit
      mask = 0x80000000
      leading_zeros = 32
      for x in range(32):
        if mask & Rm:
          leading_zeros = x
          break
        mask >>= 1
      # Rm != 0, so the loop must have found a set bit
      assert leading_zeros != 32
      s.rf[ inst.rd ] = leading_zeros
    if inst.rd == 15:
      # rd is the PC: the write above was a branch, skip the increment
      return
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# cmn
#-----------------------------------------------------------------------
def execute_cmn( s, inst ):
  """CMN: set NZCV from rn + shifter_operand; no register is written."""
  if condition_passed( s, inst.cond ):
    op_a = s.rf[ inst.rn ]
    op_b, _ = shifter_operand( s, inst )
    total = op_a + op_b
    s.N = (total >> 31)&1
    s.Z = trim_32( total ) == 0
    s.C = carry_from( total )
    s.V = overflow_from_add( op_a, op_b, total )
    if inst.rd == 15:
      # rd is encoded as 0000 for cmn, so this should never fire; kept to
      # mirror the data-processing template
      return
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# cmp
#-----------------------------------------------------------------------
def execute_cmp( s, inst ):
  """CMP: set NZCV from rn - shifter_operand; no register is written."""
  if condition_passed( s, inst.cond ):
    op_a = s.rf[ inst.rn ]
    op_b, _ = shifter_operand( s, inst )
    diff = intmask( op_a - op_b )
    s.N = (diff >> 31)&1
    s.Z = trim_32( diff ) == 0
    s.C = not_borrow_from( diff )
    s.V = overflow_from_sub( op_a, op_b, diff )
    if inst.rd == 15:
      # rd is encoded as 0000 for cmp, so this should never fire; kept to
      # mirror the data-processing template
      return
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# eor
#-----------------------------------------------------------------------
def execute_eor( s, inst ):
  """EOR: rd := rn ^ shifter_operand; S form sets N, Z, C (V unchanged)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, shift_carry = shifter_operand( s, inst )
  res = op_a ^ op_b
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = shift_carry
    # V is unaffected by logical operations
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldc
#-----------------------------------------------------------------------
def execute_ldc( s, inst ):
  """LDC: coprocessor load -- not supported by this simulator."""
  raise FatalError('"ldc" instruction unimplemented!')
#-----------------------------------------------------------------------
# ldc2
#-----------------------------------------------------------------------
def execute_ldc2( s, inst ):
  """LDC2: coprocessor load -- not supported by this simulator."""
  raise FatalError('"ldc2" instruction unimplemented!')
#-----------------------------------------------------------------------
# ldm1
#-----------------------------------------------------------------------
@unroll_safe
def execute_ldm1( s, inst ):
  """LDM(1): load multiple registers from consecutive words.

  Registers r0-r14 named in the register list are loaded in ascending
  order; if r15 is in the list the loaded value is a branch target whose
  bit 0 selects Thumb state.
  """
  if condition_passed( s, inst.cond ):
    addr, end_addr = addressing_mode_4( s, inst )
    register_mask = inst.register_list
    # TODO: support multiple memory accessing modes?
    # MemoryAccess( s.B, s.E )
    for i in range(15):
      if register_mask & 0b1:
        s.rf[ i ] = s.mem.read( addr, 4 )
        addr += 4
      register_mask >>= 1
    if register_mask & 0b1: # reg 15
      # BUGFIX: the Thumb bit must come from the *loaded* value.  The old
      # code read it back from the already-masked PC (s.rf[PC] & 0b1 after
      # masking with 0xFFFFFFFE), so s.T was always 0 and the unsupported-
      # Thumb trap below could never fire.  Compare execute_ldr, which
      # takes the bit from the raw data.
      data = s.mem.read( addr, 4 )
      s.rf[PC] = data & 0xFFFFFFFE
      s.T = data & 0b1
      if s.T: raise FatalError( "Entering THUMB mode! Unsupported!")
      assert end_addr == addr
      return
    assert end_addr == addr - 4
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldm2
#-----------------------------------------------------------------------
def execute_ldm2( s, inst ):
  """LDM(2): user-mode register load -- not supported by this simulator."""
  raise FatalError('"ldm2" instruction unimplemented!')
#-----------------------------------------------------------------------
# ldm3
#-----------------------------------------------------------------------
def execute_ldm3( s, inst ):
  """LDM(3): load with CPSR restore -- not supported by this simulator."""
  raise FatalError('"ldm3" instruction unimplemented!')
#-----------------------------------------------------------------------
# ldr
#-----------------------------------------------------------------------
def execute_ldr( s, inst ):
  """LDR: load a word into rd; a load into r15 is a branch."""
  if condition_passed( s, inst.cond ):
    addr = addressing_mode_2( s, inst )
    # TODO: support multiple memory accessing modes?
    # MemoryAccess( s.B, s.E )
    # TODO: handle memory alignment?
    # CP15_reg1_Ubit checks if the MMU is enabled
    # if (CP15_reg1_Ubit == 0):
    #   data = Memory[address,4] Rotate_Right (8 * address[1:0])
    # else
    #   data = Memory[address,4]
    data = s.mem.read( addr, 4 )
    if inst.rd == 15:
      # Branch: bit 0 of the loaded value selects Thumb state.
      # NOTE(review): unlike bx/ldm1, no FatalError is raised here when the
      # Thumb bit is set -- confirm this asymmetry is intentional.
      s.rf[PC] = data & 0xFFFFFFFE
      s.T = data & 0b1
      return
    else:
      s.rf[ inst.rd ] = data
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldrb
#-----------------------------------------------------------------------
def execute_ldrb( s, inst ):
  """LDRB: load one byte, zero-extended, into rd."""
  if condition_passed( s, inst.cond ):
    if inst.rd == 15:
      raise FatalError('UNPREDICTABLE')
    mem_addr = addressing_mode_2( s, inst )
    s.rf[ inst.rd ] = s.mem.read( mem_addr, 1 )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldrbt
#-----------------------------------------------------------------------
def execute_ldrbt( s, inst ):
  """LDRBT: user-mode byte load -- not supported by this simulator."""
  raise FatalError('"ldrbt" instruction unimplemented!')
#-----------------------------------------------------------------------
# ldrh
#-----------------------------------------------------------------------
def execute_ldrh( s, inst ):
  """LDRH: load a halfword, zero-extended, into rd."""
  if condition_passed( s, inst.cond ):
    if inst.rd == 15:
      raise FatalError('UNPREDICTABLE')
    mem_addr = addressing_mode_3( s, inst )
    # TODO: alignment fault checking (CP15_reg1_Ubit) not modeled
    s.rf[ inst.rd ] = s.mem.read( mem_addr, 2 )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldrsb
#-----------------------------------------------------------------------
def execute_ldrsb( s, inst ):
  """LDRSB: load one byte, sign-extended, into rd."""
  if condition_passed( s, inst.cond ):
    if inst.rd == 15:
      raise FatalError('UNPREDICTABLE')
    mem_addr = addressing_mode_3( s, inst )
    s.rf[ inst.rd ] = sext_8( s.mem.read( mem_addr, 1 ) )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldrsh
#-----------------------------------------------------------------------
def execute_ldrsh( s, inst ):
  """LDRSH: load a halfword, sign-extended, into rd."""
  if condition_passed( s, inst.cond ):
    if inst.rd == 15:
      raise FatalError('UNPREDICTABLE')
    mem_addr = addressing_mode_3( s, inst )
    # TODO: alignment fault checking (CP15_reg1_Ubit) not modeled
    s.rf[ inst.rd ] = sext_16( s.mem.read( mem_addr, 2 ) )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# ldrt
#-----------------------------------------------------------------------
def execute_ldrt( s, inst ):
  """LDRT: user-mode word load -- not supported by this simulator."""
  raise FatalError('"ldrt" instruction unimplemented!')
#-----------------------------------------------------------------------
# mcr
#-----------------------------------------------------------------------
def execute_mcr( s, inst ):
  """MCR: move to coprocessor -- not supported by this simulator."""
  raise FatalError('"mcr" instruction unimplemented!')
#-----------------------------------------------------------------------
# mcr2
#-----------------------------------------------------------------------
def execute_mcr2( s, inst ):
  """MCR2: move to coprocessor -- not supported by this simulator."""
  raise FatalError('"mcr2" instruction unimplemented!')
#-----------------------------------------------------------------------
# mcrr
#-----------------------------------------------------------------------
def execute_mcrr( s, inst ):
  """MCRR: move register pair to coprocessor -- not supported."""
  raise FatalError('"mcrr" instruction unimplemented!')
#-----------------------------------------------------------------------
# mcrr2
#-----------------------------------------------------------------------
def execute_mcrr2( s, inst ):
  """MCRR2: move register pair to coprocessor -- not supported."""
  raise FatalError('"mcrr2" instruction unimplemented!')
#-----------------------------------------------------------------------
# mla
#-----------------------------------------------------------------------
def execute_mla( s, inst ):
  """MLA: multiply-accumulate, rn := (rm * rs + rd) mod 2**32.

  NOTE(review): for this multiply encoding the decoder's `rn` field is
  the destination and `rd` is the accumulator (the write goes to
  s.rf[inst.rn]) -- confirm against the decoder's field layout.
  """
  if condition_passed( s, inst.cond ):
    if inst.rd == 15: raise FatalError('UNPREDICTABLE')
    if inst.rm == 15: raise FatalError('UNPREDICTABLE')
    if inst.rs == 15: raise FatalError('UNPREDICTABLE')
    if inst.rn == 15: raise FatalError('UNPREDICTABLE')
    Rm, Rs, Rd = s.rf[ inst.rm ], s.rf[ inst.rs ], s.rf[ inst.rd ]
    result = trim_32(Rm * Rs + Rd)
    s.rf[ inst.rn ] = result
    if inst.S:
      # S form sets N and Z only (C, V untouched)
      s.N = (result >> 31)&1
      s.Z = result == 0
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# mov
#-----------------------------------------------------------------------
def execute_mov( s, inst ):
  """MOV: rd := shifter_operand; S form sets N, Z, C (V unchanged)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  if inst.rd == 15 and inst.S:
    # if not CurrentModeHasSPSR(): CPSR = SPSR
    # else: UNPREDICTABLE
    raise FatalError('UNPREDICTABLE in user and system mode!')
  res, shift_carry = shifter_operand( s, inst )
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = shift_carry
    # V is unaffected
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# mrc
#-----------------------------------------------------------------------
def execute_mrc( s, inst ):
  """MRC: move from coprocessor -- not supported by this simulator."""
  raise FatalError('"mrc" instruction unimplemented!')
#-----------------------------------------------------------------------
# mrc2
#-----------------------------------------------------------------------
def execute_mrc2( s, inst ):
  """MRC2: move from coprocessor -- not supported by this simulator."""
  raise FatalError('"mrc2" instruction unimplemented!')
#-----------------------------------------------------------------------
# mrs
#-----------------------------------------------------------------------
def execute_mrs( s, inst ):
  """MRS: copy the CPSR into rd; SPSR reads (R bit set) are unsupported."""
  if condition_passed( s, inst.cond ):
    if inst.R:
      raise FatalError('Cannot read SPSR in "mrs"')
    s.rf[ inst.rd ] = s.cpsr()
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# msr
#-----------------------------------------------------------------------
def execute_msr( s, inst ):
  """MSR: write to CPSR/SPSR -- not supported by this simulator."""
  raise FatalError('"msr" instruction unimplemented!')
#-----------------------------------------------------------------------
# mul
#-----------------------------------------------------------------------
def execute_mul( s, inst ):
  """MUL: rn := (rm * rs) mod 2**32.

  NOTE(review): for this encoding the decoder's `rn` field is the
  destination (the write goes to s.rf[inst.rn]); the `rd` field is fixed
  to 0000 by the encoding table, so the `inst.rd == 15` guard below can
  never fire -- it likely should test inst.rn.  Confirm against the
  decoder's field layout.
  """
  if condition_passed( s, inst.cond ):
    Rm, Rs = s.rf[ inst.rm ], s.rf[ inst.rs ]
    result = trim_32(Rm * Rs)
    s.rf[ inst.rn ] = result
    if inst.S:
      if inst.rn == 15: raise FatalError('UNPREDICTABLE')
      if inst.rm == 15: raise FatalError('UNPREDICTABLE')
      if inst.rs == 15: raise FatalError('UNPREDICTABLE')
      # S form sets N and Z only (C, V untouched)
      s.N = (result >> 31)&1
      s.Z = result == 0
    if inst.rd == 15:
      return
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# mvn
#-----------------------------------------------------------------------
def execute_mvn( s, inst ):
  """MVN: rd := ~shifter_operand; S form sets N, Z, C (V unchanged)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op, shift_carry = shifter_operand( s, inst )
  res = trim_32( ~op )
  s.rf[ inst.rd ] = res
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = shift_carry
    # V is unaffected
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# orr
#-----------------------------------------------------------------------
def execute_orr( s, inst ):
  """ORR: rd := rn | shifter_operand; S form sets N, Z, C (V unchanged)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, shift_carry = shifter_operand( s, inst )
  res = op_a | op_b
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = shift_carry
    # V is unaffected by logical operations
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# rsb
#-----------------------------------------------------------------------
def execute_rsb( s, inst ):
  """RSB: rd := shifter_operand - rn (reverse subtract)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, _ = shifter_operand( s, inst )
  res = intmask( op_b - op_a )
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = not_borrow_from( res )
    s.V = overflow_from_sub( op_b, op_a, res )
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# rsc
#-----------------------------------------------------------------------
def execute_rsc( s, inst ):
  """RSC: rd := shifter_operand - rn - NOT(C) (reverse subtract w/ carry)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, _ = shifter_operand( s, inst )
  res = intmask( op_b - op_a - (not s.C) )
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = not_borrow_from( res )
    s.V = overflow_from_sub( op_b, op_a, res )
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# sbc
#-----------------------------------------------------------------------
def execute_sbc( s, inst ):
  """SBC: rd := rn - shifter_operand - NOT(C) (subtract with carry)."""
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  op_a = s.rf[ inst.rn ]
  op_b, _ = shifter_operand( s, inst )
  res = intmask( op_a - op_b - (not s.C) )
  s.rf[ inst.rd ] = trim_32( res )
  if inst.S:
    if inst.rd == 15:
      raise FatalError('Writing SPSR not implemented!')
    s.N = (res >> 31)&1
    s.Z = trim_32( res ) == 0
    s.C = not_borrow_from( res )
    s.V = overflow_from_sub( op_a, op_b, res )
  if inst.rd != 15:
    s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# smlal
#-----------------------------------------------------------------------
def execute_smlal( s, inst ):
  """SMLAL: signed 64-bit multiply-accumulate, {rn:rd} += rm * rs.

  The decoder's rn field is RdHi and rd is RdLo.
  NOTE(review): `signed( accumulate )` is applied to a 64-bit value here,
  while the other uses of signed() in this file take 32-bit register
  values -- verify signed() handles 64-bit inputs correctly.
  """
  if condition_passed( s, inst.cond ):
    if inst.rd == 15: raise FatalError('UNPREDICTABLE')
    if inst.rm == 15: raise FatalError('UNPREDICTABLE')
    if inst.rs == 15: raise FatalError('UNPREDICTABLE')
    if inst.rn == 15: raise FatalError('UNPREDICTABLE')
    RdHi, RdLo = inst.rn, inst.rd
    Rm, Rs = signed(s.rf[ inst.rm ]), signed(s.rf[ inst.rs ])
    # current 64-bit accumulator value, RdHi holds the upper word
    accumulate = (s.rf[ RdHi ] << 32) | s.rf[ RdLo ]
    result = (Rm * Rs) + signed( accumulate )
    if RdHi == RdLo: raise FatalError('UNPREDICTABLE')
    s.rf[ RdHi ] = trim_32( result >> 32 )
    s.rf[ RdLo ] = trim_32( result )
    if inst.S:
      # N from bit 63; Z when the full 64-bit result is zero
      s.N = (result >> 63)&1
      s.Z = (s.rf[RdHi] == s.rf[RdLo] == 0)
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# smull
#-----------------------------------------------------------------------
def execute_smull( s, inst ):
  """SMULL: signed multiply long.

  RdHi:RdLo := signed(Rm) * signed(Rs).  Field mapping in this encoding:
  inst.rn holds RdHi and inst.rd holds RdLo.  With inst.S, the N and Z
  flags are derived from the 64-bit product.
  """
  if not condition_passed( s, inst.cond ):
    s.rf[PC] = s.fetch_pc() + 4
    return
  # r15 may not appear as any operand of a long multiply.
  for reg in ( inst.rd, inst.rm, inst.rs, inst.rn ):
    if reg == 15: raise FatalError('UNPREDICTABLE')
  hi_idx, lo_idx = inst.rn, inst.rd
  if hi_idx == lo_idx: raise FatalError('UNPREDICTABLE')
  prod = signed( s.rf[ inst.rm ] ) * signed( s.rf[ inst.rs ] )
  s.rf[ hi_idx ] = trim_32( prod >> 32 )  # high word
  s.rf[ lo_idx ] = trim_32( prod )        # low word
  if inst.S:
    s.N = (prod >> 63)&1
    s.Z = prod == 0
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# stc
#-----------------------------------------------------------------------
def execute_stc( s, inst ):
  """STC: store coprocessor register(s) -- not supported; always aborts."""
  raise FatalError('"stc" instruction unimplemented!')
  # Unreachable stub tail, kept as a template for a future implementation.
  if condition_passed( s, inst.cond ):
    pass
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# stm1
#-----------------------------------------------------------------------
@unroll_safe
def execute_stm1( s, inst ):
  """STM(1): store multiple registers to memory.

  Walks the 16-bit register_list from r0 upward, storing each selected
  register to consecutive word addresses produced by addressing mode 4
  (which also performs any base write-back).
  """
  if condition_passed( s, inst.cond ):
    orig_Rn = s.rf[ inst.rn ]  # snapshot: addressing_mode_4 may write back Rn
    addr, end_addr = addressing_mode_4( s, inst )
    register_mask = inst.register_list
    # TODO: support multiple memory accessing modes?
    # MemoryAccess( s.B, s.E )
    for i in range(16):
      if register_mask & 0b1:
        # Note from ISA document page A4-190:
        # If <Rn> is specified in <registers> and base register write-back
        # is specified:
        # - If <Rn> is the lowest-numbered register specified in
        #   <registers>, the original value of <Rn> is stored.
        # - Otherwise, the stored value of <Rn> is UNPREDICTABLE.
        #
        # We check if i is Rn, and if so, we use the original value
        if i == inst.rn:
          s.mem.write( addr, 4, orig_Rn )
        else:
          s.mem.write( addr, 4, s.rf[i] )
        addr += 4
      register_mask >>= 1
    # addr now points one word past the last store.
    assert end_addr == addr - 4
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# stm2
#-----------------------------------------------------------------------
def execute_stm2( s, inst ):
  """STM(2): store multiple user-mode registers -- not supported; aborts."""
  raise FatalError('"stm2" instruction unimplemented!')
  # Unreachable stub tail.
  if condition_passed( s, inst.cond ):
    pass
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# str
#-----------------------------------------------------------------------
def execute_str( s, inst ):
  """STR: store the 32-bit word in Rd to the address computed by
  addressing mode 2 (which also performs any base write-back)."""
  if condition_passed( s, inst.cond ):
    # TODO: support multiple memory accessing modes?
    # MemoryAccess( s.B, s.E )
    effective_addr = addressing_mode_2( s, inst )
    word_value    = s.rf[ inst.rd ]
    s.mem.write( effective_addr, 4, word_value )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# strb
#-----------------------------------------------------------------------
def execute_strb( s, inst ):
  """STRB: store the low byte of Rd to the byte address computed by
  addressing mode 2."""
  if condition_passed( s, inst.cond ):
    byte_addr = addressing_mode_2( s, inst )
    low_byte  = trim_8( s.rf[ inst.rd ] )
    s.mem.write( byte_addr, 1, low_byte )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# strbt
#-----------------------------------------------------------------------
def execute_strbt( s, inst ):
  """STRBT: store byte with user-mode privilege -- not supported; aborts."""
  raise FatalError('"strbt" instruction unimplemented!')
  # Unreachable stub tail.
  if condition_passed( s, inst.cond ):
    pass
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# strh
#-----------------------------------------------------------------------
def execute_strh( s, inst ):
  """STRH: store the low halfword of Rd to the address computed by
  addressing mode 3."""
  if condition_passed( s, inst.cond ):
    addr = addressing_mode_3( s, inst )
    # TODO: support multiple memory accessing modes?
    # MemoryAccess( s.B, s.E )
    # TODO: alignment fault checking?
    # if (CP15_reg1_Ubit == 0) and address[0] == 0b1:
    #   UNPREDICTABLE
    s.mem.write( addr, 2, s.rf[ inst.rd ] & 0xFFFF )  # low 16 bits only
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# strt
#-----------------------------------------------------------------------
def execute_strt( s, inst ):
  """STRT: store word with user-mode privilege -- not supported; aborts."""
  raise FatalError('"strt" instruction unimplemented!')
  # Unreachable stub tail (note: unlike the other stubs, no
  # condition_passed template here).
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# sub
#-----------------------------------------------------------------------
def execute_sub( s, inst ):
  """SUB: Rd := Rn - shifter_operand.

  When inst.S is set, the N/Z/C/V flags are updated; S with Rd==r15
  would require an SPSR restore, which is unimplemented (FatalError).
  A write to r15 (PC) suppresses the sequential PC increment.
  """
  if condition_passed( s, inst.cond ):
    a, (b, _) = s.rf[ inst.rn ], shifter_operand( s, inst )  # shifter carry-out unused
    result = intmask( a - b )
    s.rf[ inst.rd ] = trim_32( result )
    if inst.S:
      if inst.rd == 15: raise FatalError('Writing SPSR not implemented!')
      s.N = (result >> 31)&1
      s.Z = trim_32( result ) == 0
      s.C = not_borrow_from( result )  # for subtraction, C = NOT borrow
      s.V = overflow_from_sub( a, b, result )
    if inst.rd == 15:
      return  # branch via PC write
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# swi
#-----------------------------------------------------------------------
from syscalls import do_syscall
def execute_swi( s, inst ):
  """SWI: software interrupt -- dispatched to the host-side syscall
  emulation layer (do_syscall) instead of a real exception vector."""
  if condition_passed( s, inst.cond ):
    do_syscall( s )
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# swp
#-----------------------------------------------------------------------
def execute_swp( s, inst ):
  """SWP: swap a word between memory at [Rn] and register Rm, placing the
  old memory value in Rd."""
  if condition_passed( s, inst.cond ):
    swap_addr = s.rf[ inst.rn ]
    # The read must happen before the write so Rd receives the old value
    # even when Rd == Rm.
    old_value = s.mem.read( swap_addr, 4 )
    s.mem.write( swap_addr, 4, s.rf[ inst.rm ] )
    s.rf[ inst.rd ] = old_value
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# swpb
#-----------------------------------------------------------------------
def execute_swpb( s, inst ):
  """SWPB: swap byte -- not supported; always aborts."""
  raise FatalError('"swpb" instruction unimplemented!')
  # Unreachable stub tail.
  if condition_passed( s, inst.cond ):
    pass
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# teq
#-----------------------------------------------------------------------
def execute_teq( s, inst ):
  """TEQ: set flags from Rn XOR shifter_operand; writes no register.

  N/Z come from the 32-bit XOR result, C from the shifter carry-out;
  V is unaffected.
  """
  if condition_passed( s, inst.cond ):
    a, (b, cout) = s.rf[ inst.rn ], shifter_operand( s, inst )
    result = trim_32( a ^ b )
    if inst.S:
      s.N = (result >> 31)&1
      s.Z = result == 0
      s.C = cout
    # NOTE(review): TEQ has no destination register; this rd==15 early
    # return looks copied from the data-processing template -- confirm
    # it is intended for this encoding.
    if inst.rd == 15:
      return
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# tst
#-----------------------------------------------------------------------
def execute_tst( s, inst ):
  """TST: set flags from Rn AND shifter_operand; writes no register.

  N/Z come from the 32-bit AND result, C from the shifter carry-out;
  V is unaffected.
  """
  if condition_passed( s, inst.cond ):
    a, (b, cout) = s.rf[ inst.rn ], shifter_operand( s, inst )
    result = trim_32( a & b )
    if inst.S:
      s.N = (result >> 31)&1
      s.Z = result == 0
      s.C = cout
    # NOTE(review): TST has no destination register; this rd==15 early
    # return mirrors the data-processing template -- confirm intended.
    if inst.rd == 15:
      return
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# umlal
#-----------------------------------------------------------------------
def execute_umlal( s, inst ):
  """UMLAL: unsigned multiply-accumulate long.

  RdHi:RdLo := (Rm * Rs) + RdHi:RdLo, all unsigned.  Field mapping in
  this encoding: inst.rn holds RdHi and inst.rd holds RdLo.  With
  inst.S, N/Z are derived from the 64-bit result.
  """
  if condition_passed( s, inst.cond ):
    # r15 is disallowed for every operand of the long multiplies.
    if inst.rd == 15: raise FatalError('UNPREDICTABLE')
    if inst.rm == 15: raise FatalError('UNPREDICTABLE')
    if inst.rs == 15: raise FatalError('UNPREDICTABLE')
    if inst.rn == 15: raise FatalError('UNPREDICTABLE')
    RdHi, RdLo = inst.rn, inst.rd
    Rm, Rs = s.rf[ inst.rm ], s.rf[ inst.rs ]  # unsigned: no signed() conversion
    accumulate = (s.rf[ RdHi ] << 32) | s.rf[ RdLo ]  # current 64-bit RdHi:RdLo
    result = (Rm * Rs) + accumulate
    if RdHi == RdLo: raise FatalError('UNPREDICTABLE')
    s.rf[ RdHi ] = trim_32( result >> 32 )  # high word
    s.rf[ RdLo ] = trim_32( result )        # low word
    if inst.S:
      s.N = (result >> 63)&1                # bit 63 of the 64-bit result
      s.Z = (s.rf[RdHi] == s.rf[RdLo] == 0) # zero iff all 64 bits clear
  s.rf[PC] = s.fetch_pc() + 4
#-----------------------------------------------------------------------
# umull
#-----------------------------------------------------------------------
def execute_umull( s, inst ):
  """UMULL: unsigned multiply long.  RdHi:RdLo := Rm * Rs (unsigned).

  Field mapping in this encoding: inst.rn holds RdHi and inst.rd holds
  RdLo.  With inst.S, N/Z are derived from the 64-bit product.
  """
  if condition_passed( s, inst.cond ):
    # r15 is disallowed for every operand of the long multiplies.
    if inst.rd == 15: raise FatalError('UNPREDICTABLE')
    if inst.rm == 15: raise FatalError('UNPREDICTABLE')
    if inst.rs == 15: raise FatalError('UNPREDICTABLE')
    if inst.rn == 15: raise FatalError('UNPREDICTABLE')
    RdHi, RdLo = inst.rn, inst.rd
    Rm, Rs = s.rf[ inst.rm ], s.rf[ inst.rs ]  # unsigned: no signed() conversion
    result = Rm * Rs
    if RdHi == RdLo: raise FatalError('UNPREDICTABLE')
    s.rf[ RdHi ] = trim_32( result >> 32 )  # high word
    s.rf[ RdLo ] = trim_32( result )        # low word
    if inst.S:
      s.N = (result >> 63)&1                # bit 63 of the 64-bit product
      s.Z = (s.rf[RdHi] == s.rf[RdLo] == 0) # zero iff all 64 bits clear
  s.rf[PC] = s.fetch_pc() + 4
#=======================================================================
# Create Decoder
#=======================================================================
# Build the top-level instruction decoder from the `encodings` table,
# resolving each encoding to its execute_* handler via this module's
# globals().  debug=True presumably enables decoder diagnostics --
# confirm against create_risc_decoder.
decode = create_risc_decoder( encodings, globals(), debug=True )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.