gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module exposes one function Login(), that authenticates user into the
Google services, returning an authentication token and a cookie."""
from datetime import datetime, timedelta
import time
import urllib
import urllib2
class AuthenticationError(urllib2.HTTPError):
    """Raised when Google's ClientLogin rejects the supplied credentials.

    Wraps the underlying 403 HTTPError, exposing the parsed response body
    via ``args`` and the ClientLogin error code via ``reason``.
    """

    def __init__(self, url, code, message, headers, args):
        """Build the error from the failed request details and the parsed
        ClientLogin error dictionary."""
        urllib2.HTTPError.__init__(self, url, code, message, headers, None)
        self.args = args
        self.reason = args["Error"]
def _GetHTTPOpener():
    """Create an http opener used to interact with Google's ClientLogin.

    Returns:
        An OpenerDirector wired with proxy, plain/unknown-scheme, http,
        https and error-processing handlers.
    """
    # Register every handler class we need on a bare OpenerDirector.
    handler_classes = (
        urllib2.ProxyHandler,
        urllib2.UnknownHandler,
        urllib2.HTTPHandler,
        urllib2.HTTPDefaultErrorHandler,
        urllib2.HTTPErrorProcessor,
        urllib2.HTTPSHandler,
    )
    opener = urllib2.OpenerDirector()
    for handler_class in handler_classes:
        opener.add_handler(handler_class())
    return opener
def _ParseBodyAsDict(body):
""" Parse the specified body as a dictionary with each element in a line, and
key value pairs separated by '='.
Args:
body: The string with the HTTP body to parse.
Returns:
A dictionary with the body contents.
"""
return dict(line.split('=') for line in body.split('\n') if line)
def _GetGoogleAuthtoken(account_type, user, password, service, source):
    """Authenticate the user against Google's ClientLogin endpoint.

    Args:
        account_type: Type of the account to login, could be GOOGLE or any
            other string if the account is external.
        user: Name of the user to be logged in.
        password: Password of the user to be logged in.
        service: Service where the user wants to log in, for example, 'ah'.
        source: Name of the application requesting the user authentication.

    Returns:
        The authentication token for the user if the supplied data is
        correct.

    Raises:
        AuthenticationError: If the HTTP response is 403 - Forbidden; the
            error body is parsed and handed to the caller in the exception.
        urllib2.HTTPError: For any other HTTP error.
    """
    # Build the POST form expected by ClientLogin with the specified data.
    auth_request_data_map = {
        'accountType': account_type,
        'Email': user,
        'Passwd': password,
        'service': service,
        'source': source
    }
    auth_request_data = urllib.urlencode(auth_request_data_map)
    auth_url = 'https://www.google.com/accounts/ClientLogin'
    auth_request = urllib2.Request(auth_url, auth_request_data)
    try:
        # Create a custom opener, make the request and extract the body.
        http_opener = _GetHTTPOpener()
        auth_response = http_opener.open(auth_request)
        auth_response_body = auth_response.read()
        # The body is 'key=value' lines; the token lives under 'Auth'.
        auth_response_data = _ParseBodyAsDict(auth_response_body)
        return auth_response_data['Auth']
    except urllib2.HTTPError as e:
        # A 403 means the credentials were rejected: re-raise as a richer
        # AuthenticationError carrying the parsed error body. Any other
        # HTTP error is propagated untouched.
        if e.code == 403:
            # Parse the error body as a dictionary and forward the
            # exception as an authentication error.
            response_dict = _ParseBodyAsDict(e.read())
            raise AuthenticationError(auth_request.get_full_url(), e.code, e.msg,
                                      e.headers, response_dict)
        else:
            raise
def _GetGaeCookie(host, service, auth_token, secure):
    """Exchange a ClientLogin token for a login cookie on the target host.

    Args:
        host: Host where the user wants to login.
        service: Service code where the user wants to login.
        auth_token: Authentication token obtained from ClientLogin.
        secure: True if we want a secure (https) cookie, False if not.

    Returns:
        A cookie string for the specified service, with the 'HttpOnly'
        attribute stripped so client libraries can replay it.

    Raises:
        urllib2.HTTPError: If the login endpoint did not answer with the
            expected redirect to the continue location.
    """
    # Request the host's login endpoint with the authentication token; on
    # success it redirects to the 'continue' URL while setting the cookie.
    continue_location = 'http://localhost/'
    cookie_request_data_map = {
        'continue' : continue_location,
        'auth' : auth_token,
    }
    cookie_request_data = urllib.urlencode(cookie_request_data_map)
    cookie_url = '{protocol}://{host}/_{service}/login?{data}'.format(
        protocol=('https' if secure else 'http'), host=host, service=service,
        data=cookie_request_data)
    cookie_request = urllib2.Request(cookie_url)
    try:
        # Create a custom opener, make the request and extract the body.
        http_opener = _GetHTTPOpener()
        cookie_response = http_opener.open(cookie_request)
    except urllib2.HTTPError as e:
        # The expected redirect may surface as an HTTPError; keep the error
        # object as the response and inspect it below.
        cookie_response = e
    # A successful login is a 302 redirect to the continue location;
    # anything else is surfaced to the caller as an HTTP error.
    response_code = cookie_response.code
    if (response_code != 302 or
        cookie_response.info()['location'] != continue_location):
        raise urllib2.HTTPError(cookie_request.get_full_url(), response_code,
                                cookie_response.msg, cookie_response.headers,
                                cookie_response.fp)
    # Extract the cookie from the headers and remove 'HttpOnly' from it.
    cookie = cookie_response.headers.get('Set-Cookie')
    return cookie.replace('; HttpOnly', '')
def Login(host, account_type, user, password, service, source, secure):
    """Authenticate against the given service and return token plus cookie.

    Args:
        host: Host where the user wants to login.
        account_type: Type of the account to login, could be GOOGLE or any
            other string if the account is external.
        user: Name of the user to be logged in.
        password: Password of the user to be logged in.
        service: Service where the user wants to log in, for example, 'ah'.
        source: Name of the application requesting the user authentication.
        secure: True if we want a secure cookie, False if not.

    Returns:
        A tuple (auth_token, cookie) for the specified service.
    """
    token = _GetGoogleAuthtoken(account_type, user, password, service, source)
    return token, _GetGaeCookie(host, service, token, secure)
def _ParseCookieFields(cookie):
# Fields inside the cookie are separated by a semicolon, so split the cookie
# and process each token as a field.
cookie_fields = {}
for token in cookie.split(';'):
# Keys and values are separated by a single equal in the field, or they
# might be keys without values. In this case, use True as the field value.
equal_index = token.find('=')
if equal_index == -1:
field_name = token.strip()
field_value = True
else:
field_name = token[:equal_index].strip()
field_value = token[equal_index + 1:].strip()
cookie_fields[field_name] = field_value
return cookie_fields
def GetCookieExpirationTime(cookie):
    """Extract and return the expiration time stored in the cookie.

    Args:
        cookie: String with the cookie whose expiration time is wanted.

    Returns:
        The value of the cookie's 'expires' field (a UTC time string), or
        None when the cookie carries no expiration field.
    """
    return _ParseCookieFields(cookie).get('expires')
def CookieHasExpired(cookie):
    """Check whether the specified cookie has expired.

    Args:
        cookie: String with the cookie information.

    Returns:
        True if the cookie expires within the next five minutes (or already
        has), False otherwise or when the cookie has no expiration field.
    """
    # Get the cookie expiration time; if it is not found just assume the
    # cookie has not expired yet.
    expiration_time_string = GetCookieExpirationTime(cookie)
    if expiration_time_string is None:
        return False
    # Parse times like 'Tue, 01-Jan-2013 00:00:00 GMT'.
    expiration_time = datetime.strptime(expiration_time_string,
                                        '%a, %d-%b-%Y %H:%M:%S %Z')
    # NOTE(review): the DST offset is chosen from time.daylight (whether a
    # DST rule *exists* for this zone), not from whether DST is currently in
    # effect (time.localtime().tm_isdst) — confirm on a DST-using system.
    offset = time.altzone if time.daylight else time.timezone
    today_gmt_time = datetime.today() + timedelta(seconds=offset)
    time_left = expiration_time - today_gmt_time
    # Require at least 5 minutes of margin so the cookie does not expire
    # between this check and the completion of the caller's request.
    return time_left < timedelta(minutes=5)
| |
import datetime as dt
import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.element import Image, Points, Dataset, Histogram
from holoviews.operation import histogram
from bokeh.models import DatetimeAxis, CategoricalColorMapper, LinearColorMapper
from .testplot import TestBokehPlot, bokeh_renderer
class TestSideHistogramPlot(TestBokehPlot):
    """Bokeh Histogram plot tests.

    Covers colormapper sharing between a main plot and its adjoined
    histograms, datetime64 binning, axis range padding (linear, log and
    datetime axes) and vectorized style-mapping options.
    """

    def test_side_histogram_no_cmapper(self):
        points = Points(np.random.rand(100, 2))
        plot = bokeh_renderer.get_plot(points.hist())
        plot.initialize_plot()
        adjoint_plot = list(plot.subplots.values())[0]
        main_plot = adjoint_plot.subplots['main']
        right_plot = adjoint_plot.subplots['right']
        self.assertTrue('color_mapper' not in main_plot.handles)
        self.assertTrue('color_mapper' not in right_plot.handles)

    def test_side_histogram_cmapper(self):
        """Assert histogram shares colormapper"""
        x,y = np.mgrid[-50:51, -50:51] * 0.1
        img = Image(np.sin(x**2+y**2), bounds=(-1,-1,1,1))
        plot = bokeh_renderer.get_plot(img.hist())
        plot.initialize_plot()
        adjoint_plot = list(plot.subplots.values())[0]
        main_plot = adjoint_plot.subplots['main']
        right_plot = adjoint_plot.subplots['right']
        self.assertIs(main_plot.handles['color_mapper'],
                      right_plot.handles['color_mapper'])
        self.assertEqual(main_plot.handles['color_dim'], img.vdims[0])

    def test_side_histogram_cmapper_weighted(self):
        """Assert weighted histograms share colormapper"""
        x,y = np.mgrid[-50:51, -50:51] * 0.1
        img = Image(np.sin(x**2+y**2), bounds=(-1,-1,1,1))
        adjoint = img.hist(dimension=['x', 'y'], weight_dimension='z',
                           mean_weighted=True)
        plot = bokeh_renderer.get_plot(adjoint)
        plot.initialize_plot()
        adjoint_plot = list(plot.subplots.values())[0]
        main_plot = adjoint_plot.subplots['main']
        right_plot = adjoint_plot.subplots['right']
        top_plot = adjoint_plot.subplots['top']
        self.assertIs(main_plot.handles['color_mapper'],
                      right_plot.handles['color_mapper'])
        self.assertIs(main_plot.handles['color_mapper'],
                      top_plot.handles['color_mapper'])
        self.assertEqual(main_plot.handles['color_dim'], img.vdims[0])

    def test_histogram_datetime64_plot(self):
        dates = np.array([dt.datetime(2017, 1, i) for i in range(1, 5)])
        hist = histogram(Dataset(dates, 'Date'), num_bins=4)
        plot = bokeh_renderer.get_plot(hist)
        source = plot.handles['source']
        # BUG FIX: a leftover debug print(source.data) was removed here.
        data = {
            'top': np.array([
                3.85802469e-18, 3.85802469e-18, 3.85802469e-18, 3.85802469e-18]),
            'left': np.array([
                '2017-01-01T00:00:00.000000', '2017-01-01T18:00:00.000000',
                '2017-01-02T12:00:00.000000', '2017-01-03T06:00:00.000000'],
                dtype='datetime64[us]'),
            'right': np.array([
                '2017-01-01T18:00:00.000000', '2017-01-02T12:00:00.000000',
                '2017-01-03T06:00:00.000000', '2017-01-04T00:00:00.000000'],
                dtype='datetime64[us]')
        }
        for k, v in data.items():
            self.assertEqual(source.data[k], v)
        xaxis = plot.handles['xaxis']
        range_x = plot.handles['x_range']
        self.assertIsInstance(xaxis, DatetimeAxis)
        self.assertEqual(range_x.start, np.datetime64('2017-01-01T00:00:00.000000', 'us'))
        self.assertEqual(range_x.end, np.datetime64('2017-01-04T00:00:00.000000', 'us'))

    def test_histogram_padding_square(self):
        points = Histogram([(1, 2), (2, -1), (3, 3)]).options(padding=0.1)
        plot = bokeh_renderer.get_plot(points)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, 0.19999999999999996)
        self.assertEqual(x_range.end, 3.8)
        self.assertEqual(y_range.start, -1.4)
        self.assertEqual(y_range.end, 3.4)

    def test_histogram_padding_square_positive(self):
        points = Histogram([(1, 2), (2, 1), (3, 3)]).options(padding=0.1)
        plot = bokeh_renderer.get_plot(points)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, 0.19999999999999996)
        self.assertEqual(x_range.end, 3.8)
        self.assertEqual(y_range.start, 0)
        self.assertEqual(y_range.end, 3.2)

    def test_histogram_padding_square_negative(self):
        points = Histogram([(1, -2), (2, -1), (3, -3)]).options(padding=0.1)
        plot = bokeh_renderer.get_plot(points)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, 0.19999999999999996)
        self.assertEqual(x_range.end, 3.8)
        self.assertEqual(y_range.start, -3.2)
        self.assertEqual(y_range.end, 0)

    def test_histogram_padding_nonsquare(self):
        histogram = Histogram([(1, 2), (2, 1), (3, 3)]).options(padding=0.1, width=600)
        plot = bokeh_renderer.get_plot(histogram)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, 0.35)
        self.assertEqual(x_range.end, 3.65)
        self.assertEqual(y_range.start, 0)
        self.assertEqual(y_range.end, 3.2)

    def test_histogram_padding_logx(self):
        histogram = Histogram([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
        plot = bokeh_renderer.get_plot(histogram)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, 0.41158562699652224)
        self.assertEqual(x_range.end, 4.2518491541367327)
        self.assertEqual(y_range.start, 0)
        self.assertEqual(y_range.end, 3.2)

    def test_histogram_padding_logy(self):
        histogram = Histogram([(1, 2), (2, 1), (3, 3)]).options(padding=0.1, logy=True)
        plot = bokeh_renderer.get_plot(histogram)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, 0.19999999999999996)
        self.assertEqual(x_range.end, 3.8)
        self.assertEqual(y_range.start, 0.033483695221017122)
        self.assertEqual(y_range.end, 3.3483695221017129)

    def test_histogram_padding_datetime_square(self):
        histogram = Histogram([(np.datetime64('2016-04-0%d' % i, 'ns'), i) for i in range(1, 4)]).options(
            padding=0.1
        )
        plot = bokeh_renderer.get_plot(histogram)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, np.datetime64('2016-03-31T04:48:00.000000000'))
        self.assertEqual(x_range.end, np.datetime64('2016-04-03T19:12:00.000000000'))
        self.assertEqual(y_range.start, 0)
        self.assertEqual(y_range.end, 3.2)

    def test_histogram_padding_datetime_nonsquare(self):
        histogram = Histogram([(np.datetime64('2016-04-0%d' % i, 'ns'), i) for i in range(1, 4)]).options(
            padding=0.1, width=600
        )
        plot = bokeh_renderer.get_plot(histogram)
        x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
        self.assertEqual(x_range.start, np.datetime64('2016-03-31T08:24:00.000000000'))
        self.assertEqual(x_range.end, np.datetime64('2016-04-03T15:36:00.000000000'))
        self.assertEqual(y_range.start, 0)
        self.assertEqual(y_range.end, 3.2)

    ###########################
    #    Styling mapping      #
    ###########################

    def test_histogram_color_op(self):
        histogram = Histogram([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
                              vdims=['y', 'color']).options(color='color')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['color'], np.array(['#000', '#F00', '#0F0']))
        self.assertEqual(glyph.fill_color, {'field': 'color'})
        self.assertEqual(glyph.line_color, 'black')

    def test_histogram_linear_color_op(self):
        histogram = Histogram([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
                              vdims=['y', 'color']).options(color='color')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        cmapper = plot.handles['color_color_mapper']
        self.assertTrue(cmapper, LinearColorMapper)
        self.assertEqual(cmapper.low, 0)
        self.assertEqual(cmapper.high, 2)
        self.assertEqual(cds.data['color'], np.array([0, 1, 2]))
        self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
        self.assertEqual(glyph.line_color, 'black')

    def test_histogram_categorical_color_op(self):
        histogram = Histogram([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'C')],
                              vdims=['y', 'color']).options(color='color')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        cmapper = plot.handles['color_color_mapper']
        self.assertTrue(cmapper, CategoricalColorMapper)
        self.assertEqual(cmapper.factors, ['A', 'B', 'C'])
        self.assertEqual(cds.data['color'], np.array(['A', 'B', 'C']))
        self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
        self.assertEqual(glyph.line_color, 'black')

    def test_histogram_line_color_op(self):
        histogram = Histogram([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
                              vdims=['y', 'color']).options(line_color='color')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['line_color'], np.array(['#000', '#F00', '#0F0']))
        self.assertNotEqual(glyph.fill_color, {'field': 'line_color'})
        self.assertEqual(glyph.line_color, {'field': 'line_color'})

    def test_histogram_fill_color_op(self):
        histogram = Histogram([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
                              vdims=['y', 'color']).options(fill_color='color')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['fill_color'], np.array(['#000', '#F00', '#0F0']))
        self.assertEqual(glyph.fill_color, {'field': 'fill_color'})
        self.assertNotEqual(glyph.line_color, {'field': 'fill_color'})

    def test_histogram_alpha_op(self):
        histogram = Histogram([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
                              vdims=['y', 'alpha']).options(alpha='alpha')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['alpha'], np.array([0, 0.2, 0.7]))
        self.assertEqual(glyph.fill_alpha, {'field': 'alpha'})

    def test_histogram_line_alpha_op(self):
        histogram = Histogram([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
                              vdims=['y', 'alpha']).options(line_alpha='alpha')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['line_alpha'], np.array([0, 0.2, 0.7]))
        self.assertEqual(glyph.line_alpha, {'field': 'line_alpha'})
        self.assertNotEqual(glyph.fill_alpha, {'field': 'line_alpha'})

    def test_histogram_fill_alpha_op(self):
        histogram = Histogram([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
                              vdims=['y', 'alpha']).options(fill_alpha='alpha')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['fill_alpha'], np.array([0, 0.2, 0.7]))
        self.assertNotEqual(glyph.line_alpha, {'field': 'fill_alpha'})
        self.assertEqual(glyph.fill_alpha, {'field': 'fill_alpha'})

    def test_histogram_line_width_op(self):
        histogram = Histogram([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
                              vdims=['y', 'line_width']).options(line_width='line_width')
        plot = bokeh_renderer.get_plot(histogram)
        cds = plot.handles['cds']
        glyph = plot.handles['glyph']
        self.assertEqual(cds.data['line_width'], np.array([1, 4, 8]))
        self.assertEqual(glyph.line_width, {'field': 'line_width'})

    def test_op_ndoverlay_value(self):
        colors = ['blue', 'red']
        overlay = NdOverlay({color: Histogram(np.arange(i+2)) for i, color in enumerate(colors)}, 'Color').options('Histogram', fill_color='Color')
        plot = bokeh_renderer.get_plot(overlay)
        for subplot, color in zip(plot.subplots.values(), colors):
            self.assertEqual(subplot.handles['glyph'].fill_color, color)
| |
"""
This inline script utilizes harparser.HAR from
https://github.com/JustusW/harparser to generate a HAR log object.
"""
from harparser import HAR
from datetime import datetime
class _HARLog(HAR.log):
    # The attributes need to be registered here for them to actually be
    # available later via self. This is due to HAREncodable linking
    # __getattr__ to __getitem__. Anything that is set only in __init__
    # will just be added as key/value pair to self.__classes__.
    __page_list__ = []    # URLs that always force creation of a new page
    __page_count__ = 0    # counter used to mint unique autopage ids
    __page_ref__ = {}     # maps a request URL to the page id it belongs to

    def __init__(self, page_list):
        # Seed per-instance state, then initialise the underlying HAR 1.2
        # structure with an empty pages/entries skeleton.
        self.__page_list__ = page_list
        self.__page_count__ = 0
        self.__page_ref__ = {}
        HAR.log.__init__(self, {"version": "1.2",
                                "creator": {"name": "MITMPROXY HARExtractor",
                                            "version": "0.1",
                                            "comment": ""},
                                "pages": [],
                                "entries": []})

    def reset(self):
        # Re-run __init__ to drop all collected pages and entries.
        self.__init__(self.__page_list__)

    def add(self, obj):
        # Route the object into the matching HAR collection by its type.
        if isinstance(obj, HAR.pages):
            self['pages'].append(obj)
        if isinstance(obj, HAR.entries):
            self['entries'].append(obj)

    def create_page_id(self):
        # Mint a fresh synthetic page id ("autopage_1", "autopage_2", ...).
        self.__page_count__ += 1
        return "autopage_%s" % str(self.__page_count__)

    def set_page_ref(self, page, ref):
        # Remember which page id a given URL belongs to.
        self.__page_ref__[page] = ref

    def get_page_ref(self, page):
        # Page id for a URL, or None if the URL was never registered.
        return self.__page_ref__.get(page, None)

    def get_page_list(self):
        # URLs that should always start a new HAR page.
        return self.__page_list__
def start(context, argv):
    """
    On start we create a HARLog instance. You will have to adapt this to
    suit your actual needs of HAR generation. As it will probably be
    necessary to cluster logs by IPs or reset them from time to time.
    """
    # A dump filename is mandatory; '-' means stdout and a '.zhar' suffix
    # requests compressed output.
    context.dump_file = None
    if len(argv) <= 1:
        raise ValueError(
            'Usage: -s "har_extractor.py filename" '
            '(- will output to stdout, filenames ending with .zhar '
            'will result in compressed har)'
        )
    context.dump_file = argv[1]
    context.HARLog = _HARLog(['https://github.com'])
    context.seen_server = set()
def response(context, flow):
    """
    Called when a server response has been received. At the time of this
    message both a request and a response are present and completely done.

    Builds a HAR entry (timings, request and response records) for the flow
    and files it under the appropriate page of context.HARLog.
    """
    # Values are converted from float seconds to int milliseconds later.
    ssl_time = -.001
    connect_time = -.001
    if flow.server_conn not in context.seen_server:
        # Calculate the connect_time for this server_conn. Afterwards add it to
        # seen list, in order to avoid the connect_time being present in entries
        # that use an existing connection.
        connect_time = (flow.server_conn.timestamp_tcp_setup -
                        flow.server_conn.timestamp_start)
        context.seen_server.add(flow.server_conn)

        if flow.server_conn.timestamp_ssl_setup is not None:
            # Get the ssl_time for this server_conn as the difference between
            # the start of the successful tcp setup and the successful ssl
            # setup. If no ssl setup has been made it is left as -1 since it
            # doesn't apply to this connection.
            ssl_time = (flow.server_conn.timestamp_ssl_setup -
                        flow.server_conn.timestamp_tcp_setup)

    # Calculate the raw timings from the different timestamps present in the
    # request and response object. For lack of a way to measure it dns timings
    # can not be calculated. The same goes for HAR blocked: MITMProxy will open
    # a server connection as soon as it receives the host and port from the
    # client connection. So the time spent waiting is actually spent waiting
    # between request.timestamp_end and response.timestamp_start thus it
    # correlates to HAR wait instead.
    timings_raw = {
        'send': flow.request.timestamp_end - flow.request.timestamp_start,
        'wait': flow.response.timestamp_start - flow.request.timestamp_end,
        'receive': flow.response.timestamp_end - flow.response.timestamp_start,
        'connect': connect_time,
        'ssl': ssl_time
    }

    # HAR timings are integers in ms, so we have to re-encode the raw timings
    # to that format. BUG FIX: dict.iteritems() is Python-2-only; items()
    # behaves identically here and also works on Python 3.
    timings = dict((key, int(1000 * value))
                   for key, value in timings_raw.items())

    # The full_time is the sum of all timings. Timings set to -1 will be
    # ignored as per spec.
    full_time = sum(v for v in timings.values() if v > -1)

    # BUG FIX: the original called datetime.fromtimestamp(..., tz=utc) but
    # `utc` was never defined or imported, raising NameError at runtime.
    # Convert to naive UTC and mark the zone explicitly with 'Z' instead.
    started_date_time = datetime.utcfromtimestamp(
        flow.request.timestamp_start).isoformat() + 'Z'

    request_query_string = [{"name": k, "value": v}
                            for k, v in flow.request.query]
    request_http_version = flow.request.http_version
    # Cookies are shaped as tuples by MITMProxy.
    request_cookies = [{"name": k.strip(), "value": v[0]}
                       for k, v in flow.request.cookies.items()]
    request_headers = [{"name": k, "value": v} for k, v in flow.request.headers]
    request_headers_size = len(str(flow.request.headers))
    request_body_size = len(flow.request.content)

    response_http_version = flow.response.http_version
    # Cookies are shaped as tuples by MITMProxy.
    response_cookies = [{"name": k.strip(), "value": v[0]}
                        for k, v in flow.response.cookies.items()]
    response_headers = [{"name": k, "value": v}
                        for k, v in flow.response.headers]
    response_headers_size = len(str(flow.response.headers))
    response_body_size = len(flow.response.content)
    response_body_decoded_size = len(flow.response.get_decoded_content())
    response_body_compression = response_body_decoded_size - response_body_size
    response_mime_type = flow.response.headers.get('Content-Type', '')
    response_redirect_url = flow.response.headers.get('Location', '')

    entry = HAR.entries(
        {
            "startedDateTime": started_date_time,
            "time": full_time,
            "request": {
                "method": flow.request.method,
                "url": flow.request.url,
                "httpVersion": request_http_version,
                "cookies": request_cookies,
                "headers": request_headers,
                "queryString": request_query_string,
                "headersSize": request_headers_size,
                "bodySize": request_body_size,
            },
            "response": {
                "status": flow.response.status_code,
                "statusText": flow.response.msg,
                "httpVersion": response_http_version,
                "cookies": response_cookies,
                "headers": response_headers,
                "content": {
                    "size": response_body_size,
                    "compression": response_body_compression,
                    "mimeType": response_mime_type},
                "redirectURL": response_redirect_url,
                "headersSize": response_headers_size,
                "bodySize": response_body_size,
            },
            "cache": {},
            "timings": timings,
        })

    # If the current url is in the page list of context.HARLog or does not have
    # a referrer we add it as a new pages object.
    if flow.request.url in context.HARLog.get_page_list() or flow.request.headers.get(
            'Referer', None) is None:
        page_id = context.HARLog.create_page_id()
        context.HARLog.add(
            HAR.pages({
                "startedDateTime": entry['startedDateTime'],
                "id": page_id,
                "title": flow.request.url,
            })
        )
        context.HARLog.set_page_ref(flow.request.url, page_id)
        entry['pageref'] = page_id
    # Lookup the referer in the page_ref of context.HARLog to point this
    # entry's pageref attribute to the right pages object, then set it as a
    # new reference to build a reference tree.
    elif context.HARLog.get_page_ref(flow.request.headers.get('Referer')) is not None:
        entry['pageref'] = context.HARLog.get_page_ref(
            flow.request.headers['Referer']
        )
        context.HARLog.set_page_ref(
            flow.request.headers['Referer'], entry['pageref']
        )

    context.HARLog.add(entry)
def done(context):
    """
    Called once on script shutdown, after any other events.

    Serializes the collected HAR log to stdout (dump_file '-'), to a
    compressed file ('.zhar' suffix) or to a plain file, then logs size
    and compression statistics.
    """
    # BUG FIX: the original did `from pprint import pprint` (the function)
    # and then called pprint.pformat(...), which is an AttributeError.
    from pprint import pformat
    import json

    json_dump = context.HARLog.json()
    compressed_json_dump = context.HARLog.compress()

    if context.dump_file == '-':
        context.log(pformat(json.loads(json_dump)))
    elif context.dump_file.endswith('.zhar'):
        # The Python-2-only file() builtin is replaced with open() inside a
        # context manager so the handle is closed deterministically.
        # NOTE(review): compress() presumably yields a byte string on py2;
        # if it returns bytes on py3 this mode should become "wb" — confirm.
        with open(context.dump_file, "w") as dump_fp:
            dump_fp.write(compressed_json_dump)
    else:
        with open(context.dump_file, "w") as dump_fp:
            dump_fp.write(json_dump)

    context.log(
        "HAR log finished with %s bytes (%s bytes compressed)" % (
            len(json_dump), len(compressed_json_dump)
        )
    )
    context.log(
        "Compression rate is %s%%" % str(
            100. * len(compressed_json_dump) / len(json_dump)
        )
    )
def print_attributes(obj, filter_string=None, hide_privates=False):
    """
    Useful helper method to quickly get all attributes of an object and its
    values.
    """
    def _wanted(name):
        # Honour the privacy filter and the optional substring filter.
        if hide_privates and "__" in name:
            return False
        return filter_string is None or filter_string in name

    for name in filter(_wanted, dir(obj)):
        value = getattr(obj, name)
        print("%s.%s" % ('obj', name), value, type(value))
| |
#
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti utility module.
This module holds functions that can be used in both daemons (all) and
the command line scripts.
"""
# Allow wildcard import in pylint: disable=W0401
import os
import re
import errno
import pwd
import time
import itertools
import select
import logging
import signal
from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti import pathutils
from ganeti.utils.algo import *
from ganeti.utils.filelock import *
from ganeti.utils.hash import *
from ganeti.utils.io import *
from ganeti.utils.livelock import *
from ganeti.utils.log import *
from ganeti.utils.lvm import *
from ganeti.utils.mlock import *
from ganeti.utils.nodesetup import *
from ganeti.utils.process import *
from ganeti.utils.retry import *
from ganeti.utils.security import *
from ganeti.utils.storage import *
from ganeti.utils.tags import *
from ganeti.utils.text import *
from ganeti.utils.wrapper import *
from ganeti.utils.version import *
from ganeti.utils.x509 import *
from ganeti.utils.bitarrays import *
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
UUID_RE = re.compile(constants.UUID_REGEX)
def ForceDictType(target, key_types, allowed_values=None):
    """Force the values of a dict to have certain types.

    Each value of ``target`` is coerced in place to the type registered for
    its key; unknown keys or unconvertible values raise.

    @type target: dict
    @param target: the dict to update
    @type key_types: dict
    @param key_types: dict mapping target dict keys to types
        in constants.ENFORCEABLE_TYPES
    @type allowed_values: list
    @keyword allowed_values: list of specially allowed values

    """
    if allowed_values is None:
        allowed_values = []

    if not isinstance(target, dict):
        msg = "Expected dictionary, got '%s'" % target
        raise errors.TypeEnforcementError(msg)

    for key in target:
        # Reject keys the caller did not declare a type for.
        if key not in key_types:
            msg = "Unknown parameter '%s'" % key
            raise errors.TypeEnforcementError(msg)

        # Values explicitly allowed by the caller bypass enforcement.
        if target[key] in allowed_values:
            continue

        ktype = key_types[key]
        if ktype not in constants.ENFORCEABLE_TYPES:
            msg = "'%s' has non-enforceable type %s" % (key, ktype)
            raise errors.ProgrammerError(msg)

        if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
            # None is valid only for "maybe string" keys; a False bool is
            # normalised to the empty string, anything else must already be
            # a string.
            if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
                pass
            elif not isinstance(target[key], basestring):
                if isinstance(target[key], bool) and not target[key]:
                    target[key] = ""
                else:
                    msg = "'%s' (value %s) is not a valid string" % (key, target[key])
                    raise errors.TypeEnforcementError(msg)
        elif ktype == constants.VTYPE_BOOL:
            # Non-empty strings are mapped through the canonical true/false
            # constants; every other value is collapsed via its truthiness.
            if isinstance(target[key], basestring) and target[key]:
                if target[key].lower() == constants.VALUE_FALSE:
                    target[key] = False
                elif target[key].lower() == constants.VALUE_TRUE:
                    target[key] = True
                else:
                    msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
                    raise errors.TypeEnforcementError(msg)
            elif target[key]:
                target[key] = True
            else:
                target[key] = False
        elif ktype == constants.VTYPE_SIZE:
            try:
                target[key] = ParseUnit(target[key])
            # Python 2 'except ..., err' syntax retained (file is py2).
            except errors.UnitParseError, err:
                msg = "'%s' (value %s) is not a valid size. error: %s" % \
                      (key, target[key], err)
                raise errors.TypeEnforcementError(msg)
        elif ktype == constants.VTYPE_INT:
            try:
                target[key] = int(target[key])
            except (ValueError, TypeError):
                msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
                raise errors.TypeEnforcementError(msg)
        elif ktype == constants.VTYPE_FLOAT:
            try:
                target[key] = float(target[key])
            except (ValueError, TypeError):
                msg = "'%s' (value %s) is not a valid float" % (key, target[key])
                raise errors.TypeEnforcementError(msg)
def ValidateServiceName(name):
  """Check that a service name or port specification is valid.

  @type name: number or string
  @param name: Service name or port specification
  @return: the unmodified C{name} if it is acceptable
  @raise errors.OpPrereqError: if C{name} is neither a valid service name
      nor a port number in the range [0, 65536)

  """
  try:
    port = int(name)
  except (ValueError, TypeError):
    # Not numeric, so it has to look like a proper service name
    ok = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    ok = 0 <= port < (1 << 16)
  if ok:
    return name
  raise errors.OpPrereqError("Invalid service name '%s'" % name,
                             errors.ECODE_INVAL)
def _ComputeMissingKeys(key_path, options, defaults):
"""Helper functions to compute which keys a invalid.
@param key_path: The current key path (if any)
@param options: The user provided options
@param defaults: The default dictionary
@return: A list of invalid keys
"""
defaults_keys = frozenset(defaults.keys())
invalid = []
for key, value in options.items():
if key_path:
new_path = "%s/%s" % (key_path, key)
else:
new_path = key
if key not in defaults_keys:
invalid.append(new_path)
elif isinstance(value, dict):
invalid.extend(_ComputeMissingKeys(new_path, value, defaults[key]))
return invalid
def VerifyDictOptions(options, defaults):
  """Verify a dict has only keys set which also are in the defaults dict.

  @param options: The user provided options
  @param defaults: The default dictionary
  @raise errors.OpPrereqError: If one of the keys is not supported

  """
  invalid = _ComputeMissingKeys("", options, defaults)
  if not invalid:
    return
  raise errors.OpPrereqError("Provided option keys not supported: %s" %
                             CommaJoin(invalid), errors.ECODE_INVAL)
def ListVolumeGroups():
"""List volume groups and their size
@rtype: dict
@return:
Dictionary with keys volume name and values
the size of the volume
"""
command = "vgs --noheadings --units m --nosuffix -o name,size"
result = RunCmd(command)
retval = {}
if result.failed:
return retval
for line in result.stdout.splitlines():
try:
name, size = line.split()
size = int(float(size))
except (IndexError, ValueError), err:
logging.error("Invalid output from vgs (%s): %s", err, line)
continue
retval[name] = size
return retval
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check

  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge device exposes a "bridge" directory under sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # Conversion failed; fall back to the unmodified input
    return val
def ParseCpuMask(cpu_mask):
"""Parse a CPU mask definition and return the list of CPU IDs.
CPU mask format: comma-separated list of CPU IDs
or dash-separated ID ranges
Example: "0-2,5" -> "0,1,2,5"
@type cpu_mask: str
@param cpu_mask: CPU mask definition
@rtype: list of int
@return: list of CPU IDs
"""
if not cpu_mask:
return []
cpu_list = []
for range_def in cpu_mask.split(","):
boundaries = range_def.split("-")
n_elements = len(boundaries)
if n_elements > 2:
raise errors.ParseError("Invalid CPU ID range definition"
" (only one hyphen allowed): %s" % range_def)
try:
lower = int(boundaries[0])
except (ValueError, TypeError), err:
raise errors.ParseError("Invalid CPU ID value for lower boundary of"
" CPU ID range: %s" % str(err))
try:
higher = int(boundaries[-1])
except (ValueError, TypeError), err:
raise errors.ParseError("Invalid CPU ID value for higher boundary of"
" CPU ID range: %s" % str(err))
if lower > higher:
raise errors.ParseError("Invalid CPU ID range definition"
" (%d > %d): %s" % (lower, higher, range_def))
cpu_list.extend(range(lower, higher + 1))
return cpu_list
def ParseMultiCpuMask(cpu_mask):
  """Parse a multiple CPU mask definition and return the list of CPU IDs.

  CPU mask format: colon-separated list of comma-separated list of CPU IDs
  or dash-separated ID ranges, with optional "all" as CPU value
  Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]

  @type cpu_mask: str
  @param cpu_mask: multiple CPU mask definition
  @rtype: list of lists of int
  @return: list of lists of CPU IDs

  """
  if not cpu_mask:
    return []
  result = []
  for mask_part in cpu_mask.split(constants.CPU_PINNING_SEP):
    if mask_part == constants.CPU_PINNING_ALL:
      # "all" is represented by the special marker value
      result.append([constants.CPU_PINNING_ALL_VAL, ])
    else:
      # Remove duplicates and sort the IDs before adding
      result.append(sorted(set(ParseCpuMask(mask_part))))
  return result
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  C{default} argument is returned, which defaults to C{None}.

  """
  # Pick the appropriate pwd lookup based on the argument type
  if isinstance(user, basestring):
    lookup = pwd.getpwnam
  elif isinstance(user, (int, long)):
    lookup = pwd.getpwuid
  else:
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                 type(user))
  try:
    return lookup(user).pw_dir
  except KeyError:
    # Unknown user
    return default
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  candidate = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > candidate:
      # Found a hole: the expected value is not present
      return candidate
    candidate += 1
  # Sequence is dense, no free slot found
  return None
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # Besides the caller-requested events, error/hangup conditions are
  # always considered interesting when inspecting the result
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000
  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    # EINTR (interrupted by a signal) is treated like a timeout here;
    # any other poll error is propagated to the caller
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    # Some of the monitored conditions occurred; return the raw event mask
    return io_events[0][1]
  else:
    return None
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    # Remaining time budget; updated by the retry machinery via
    # UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is not None:
      return result
    # Nothing happened yet; ask the retry loop to call us again
    raise RetryAgain()

  def UpdateTimeout(self, timeout):
    self.timeout = timeout
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    # Use the retry mechanism so that a poll interrupted by a signal
    # resumes with the remaining (not the full) timeout
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    # No timeout: keep polling until some condition finally occurs
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: string
  @param name: daemon name

  @rtype: bool
  @return: 'True' if daemon successfully started,
           'False' otherwise

  """
  result = RunCmd([pathutils.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True
  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
def StopDaemon(name):
  """Stop daemon

  @type name: string
  @param name: daemon name

  @rtype: bool
  @return: 'True' if daemon successfully stopped,
           'False' otherwise

  """
  result = RunCmd([pathutils.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True
  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_usec = int(value * 1000000)
  (secs, usecs) = divmod(total_usec, 1000000)

  # pylint: disable=C0122
  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  return (int(secs), int(usecs))
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  # pylint: disable=C0122
  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  return float(secs) + (float(usecs) * 0.000001)
def EpochNano():
  """Return the current timestamp expressed as number of nanoseconds since the
  unix epoch

  @return: nanoseconds since the Unix epoch

  """
  now = time.time()
  return int(now * 1000000000)
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  try:
    # Direct hit on the name itself
    return (data[name], [])
  except KeyError:
    pass
  for key, value in data.items():
    # Regex objects (anything with a "match" method)
    matcher = getattr(key, "match", None)
    if matcher is None:
      continue
    m = matcher(name)
    if m:
      return (value, list(m.groups()))
  return None
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  entries = []
  for line in ReadFile(filename).splitlines():
    # Keep the first four whitespace-separated fields; a malformed line
    # (fewer than five fields) raises ValueError, as before
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    entries.append((device, mountpoint, fstype, options))
  return entries
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert "signal_handlers" not in kwargs or \
             kwargs["signal_handlers"] is None or \
             isinstance(kwargs["signal_handlers"], dict), \
             "Wrong signal_handlers parameter in original function call"
      if "signal_handlers" in kwargs and kwargs["signal_handlers"] is not None:
        # A stacked SignalHandled decorator already created the dict;
        # reuse it so all intercepted signals end up in the same mapping
        signal_handlers = kwargs["signal_handlers"]
      else:
        signal_handlers = {}
        kwargs["signal_handlers"] = signal_handlers
      # Install our handler for all requested signals for the duration
      # of the wrapped call
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers
        sighandler.Reset()
    return sig_function
  return wrap
def TimeoutExpired(epoch, timeout, _time_fn=time.time):
  """Checks whether a timeout has expired.

  @param epoch: start time, in seconds since the epoch
  @param timeout: number of seconds after C{epoch} when the timeout expires
  @param _time_fn: time function, should only be overridden for tests
  @rtype: bool
  @return: whether the current time is strictly past C{epoch + timeout}

  """
  deadline = epoch + timeout
  return _time_fn() > deadline
class SignalWakeupFd(object):
  """Wrapper around a pipe registered via L{signal.set_wakeup_fd}.

  Writing to the pipe (see L{Notify}) marks the read end readable, so
  select/poll-based loops can be woken up when a signal arrives.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported

    def _SetWakeupFd(self, _): # pylint: disable=R0201
      # Mimic set_wakeup_fd's "no previous fd" return value
      return -1

  else:

    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()
    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)
    # Remember the previously registered wakeup fd so Reset can restore it
    self._previous = self._SetWakeupFd(self._write_fh.fileno())
    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # hasattr guards against __del__ running after a failed __init__
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write(chr(0))

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: object whose Notify() is called from the handler;
        presumably a L{SignalWakeupFd} -- confirm with callers

    """
    assert handler_fn is None or callable(handler_fn)
    # NOTE(review): set() requires an iterable; a bare int signum would
    # raise TypeError here despite the docstring -- verify against callers
    self.signum = set(signum)
    self.called = False
    self._handler_fn = handler_fn
    self._wakeup = wakeup
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True
    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()
    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Anchor every field expression so it must match the whole string
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for pattern in self.items:
      m = pattern.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [field for field in items if not self.Matches(field)]
def ValidateDeviceNames(kind, container):
  """Validate instance device names.

  Check that a device container contains only unique and valid names.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container containing the devices

  """
  valid = []
  for device in container:
    if isinstance(device, dict):
      if kind == "NIC":
        name = device.get(constants.INIC_NAME, None)
      elif kind == "disk":
        name = device.get(constants.IDISK_NAME, None)
      else:
        raise errors.OpPrereqError("Invalid container kind '%s'" % kind,
                                   errors.ECODE_INVAL)
    else:
      name = device.name
      # Check that a device name is not the UUID of another device.
      # Only device objects carry a uuid attribute; this must not be done
      # for dict-based devices (which would raise AttributeError).
      valid.append(device.uuid)

    try:
      int(name)
    except (ValueError, TypeError):
      pass
    else:
      raise errors.OpPrereqError("Invalid name '%s'. Purely numeric %s names"
                                 " are not allowed" % (name, kind),
                                 errors.ECODE_INVAL)

    if name is not None and name.lower() != constants.VALUE_NONE:
      if name in valid:
        raise errors.OpPrereqError("%s name '%s' already used" % (kind, name),
                                   errors.ECODE_NOTUNIQUE)
      else:
        valid.append(name)
def AllDiskOfType(disks_info, dev_types):
  """Checks if the instance has only disks of any of the dev_types.

  @type disks_info: list of L{Disk}
  @param disks_info: all the disks of the instance.
  @type dev_types: list of disk templates
  @param dev_types: the disk type required.
  @rtype: bool
  @return: True iff the instance only has disks of type dev_type.

  """
  assert not isinstance(dev_types, str)
  if not disks_info:
    # A diskless instance qualifies only if diskless is among the
    # accepted types
    return constants.DT_DISKLESS in dev_types
  return all(disk.dev_type in dev_types for disk in disks_info)
def AnyDiskOfType(disks_info, dev_types):
  """Checks if the instance has some disks of any types in dev_types.

  @type disks_info: list of L{Disk}
  @param disks_info: all the disks of the instance.
  @type dev_types: list of disk template
  @param dev_types: the disk type required.
  @rtype: bool
  @return: True if the instance has disks of type dev_types or the instance
      has no disks and the dev_types allow DT_DISKLESS.

  """
  assert not isinstance(dev_types, str)
  if not disks_info:
    # No disks at all: only matches when diskless is accepted
    return constants.DT_DISKLESS in dev_types
  return any(disk.dev_type in dev_types for disk in disks_info)
def GetDiskTemplateString(disk_types):
  """Gives a summary disk template from disk devtypes.

  @type disk_types: iterable of string
  @param disk_types: all the dev_types of the instance.
  @rtype: string
  @return: the summarized disk template of the disk types: DT_DISKLESS for
      no disks, DT_MIXED for more than one type, otherwise the single type.

  """
  # set() already consumes any iterable; the previous identity generator
  # expression was redundant
  unique_types = set(disk_types)

  if not unique_types:
    return constants.DT_DISKLESS
  elif len(unique_types) > 1:
    return constants.DT_MIXED
  else:
    return unique_types.pop()
def GetDiskTemplate(disks_info):
  """Gives a summary disk template from disks.

  @type disks_info: list of L{Disk}
  @param disks_info: all the disks of the instance.
  @rtype: string
  @return: the summarized disk template of the disk types.

  """
  dev_types = [disk.dev_type for disk in disks_info]
  return GetDiskTemplateString(dev_types)
| |
from panda3d.core import *
from panda3d.direct import *
from otp.otpbase import OTPGlobals
from direct.gui.DirectGui import *
from otp.otpgui import OTPDialog
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from direct.task.Task import Task
class GuiScreen:
    """Mixin/base class that manages keyboard focus over a subclass-supplied
    self.focusList of DirectGui widgets (tab/shift-tab cycling, enter-key
    behavior) and shows 'waiting for database' timeout dialogs.

    Subclasses are expected to provide self.focusList, self.doneEvent and
    the accept/ignore messenger methods (e.g. via DirectObject).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('GuiScreen')
    # Enter-key behaviors while an item has focus; injected into DGG at
    # class-definition time so other modules can use DGG.ENTERPRESS_*
    DGG.ENTERPRESS_ADVANCE = 0
    DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY = 1
    DGG.ENTERPRESS_DONT_ADVANCE = 2
    DGG.ENTERPRESS_REMOVE_FOCUS = 3
    ENTRY_WIDTH = 20

    def __init__(self):
        self.waitingForDatabase = None
        # None <=> focus management not active
        self.focusIndex = None
        self.suppressClickSound = 0
        return

    def startFocusMgmt(self, startFocus = 0, enterPressBehavior = DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY, overrides = {}, globalFocusHandler = None):
        """Begin managing focus over self.focusList.

        startFocus: index or item to focus initially.
        enterPressBehavior: a DGG.ENTERPRESS_* constant or a callable;
            can be overridden per-item through 'overrides'.
        globalFocusHandler: optional callable invoked with the newly
            focused item (or None when focus is removed).
        """
        GuiScreen.notify.debug('startFocusMgmt:\nstartFocus=%s,\nenterPressBehavior=%s\noverrides=%s' % (startFocus, enterPressBehavior, overrides))
        self.accept('tab', self.__handleTab)
        self.accept('shift-tab', self.__handleShiftTab)
        self.accept('enter', self.__handleEnter)
        self.__startFrameStartTask()
        self.userGlobalFocusHandler = globalFocusHandler
        # Counters used to swallow focus-in callbacks that we caused
        # ourselves (as opposed to e.g. mouse clicks)
        self.focusHandlerAbsorbCounts = {}
        for i in xrange(len(self.focusList)):
            item = self.focusList[i]
            if isinstance(item, DirectEntry):
                self.focusHandlerAbsorbCounts[item] = 0

        # Take over the widgets' focus/command handlers, remembering the
        # user-provided ones so they can be chained to and later restored
        self.userFocusHandlers = {}
        self.userCommandHandlers = {}
        for i in xrange(len(self.focusList)):
            item = self.focusList[i]
            if isinstance(item, DirectEntry):
                self.userFocusHandlers[item] = (item['focusInCommand'], item['focusInExtraArgs'])
                item['focusInCommand'] = self.__handleFocusChangeAbsorb
                item['focusInExtraArgs'] = [i]
                self.userCommandHandlers[item] = (item['command'], item['extraArgs'])
                item['command'] = None
                item['extraArgs'] = []
            elif isinstance(item, DirectScrolledList):
                self.userCommandHandlers[item] = (item['command'], item['extraArgs'])
                item['command'] = self.__handleDirectScrolledListCommand
                item['extraArgs'] = [i]

        # Decide, per item, what pressing enter does
        self.enterPressHandlers = {}
        for i in xrange(len(self.focusList)):
            item = self.focusList[i]
            behavior = enterPressBehavior
            if overrides.has_key(item):
                behavior = overrides[item]
            if callable(behavior):
                self.enterPressHandlers[item] = behavior
            else:
                # BUGFIX: these constants live on DGG (installed at class
                # scope above); the previous bare GuiScreen_ENTERPRESS_*
                # names were undefined and raised NameError at runtime
                if not isinstance(item, DirectEntry) and behavior == DGG.ENTERPRESS_ADVANCE_IFNOTEMPTY:
                    # "if not empty" only makes sense for text entries
                    behavior = DGG.ENTERPRESS_ADVANCE
                commandHandlers = (self.__alwaysAdvanceFocus,
                 self.__advanceFocusIfNotEmpty,
                 self.__neverAdvanceFocus,
                 self.__ignoreEnterPress)
                self.enterPressHandlers[item] = commandHandlers[behavior]

        self.setFocus(startFocus)
        return

    def focusMgmtActive(self):
        return self.focusIndex != None

    def stopFocusMgmt(self):
        """Undo everything startFocusMgmt did, restoring user handlers."""
        GuiScreen.notify.debug('stopFocusMgmt')
        if not self.focusMgmtActive():
            return
        self.ignore('tab')
        self.ignore('shift-tab')
        self.ignore('enter')
        self.__stopFrameStartTask()
        self.userGlobalFocusHandler = None
        self.focusIndex = None
        self.focusHandlerAbsorbCounts = {}
        for item in self.focusList:
            if isinstance(item, DirectEntry):
                userHandler, userHandlerArgs = self.userFocusHandlers[item]
                item['focusInCommand'] = userHandler
                item['focusInExtraArgs'] = userHandlerArgs
                userHandler, userHandlerArgs = self.userCommandHandlers[item]
                item['command'] = userHandler
                item['extraArgs'] = userHandlerArgs
            elif isinstance(item, DirectScrolledList):
                userHandler, userHandlerArgs = self.userCommandHandlers[item]
                item['command'] = userHandler
                item['extraArgs'] = userHandlerArgs

        self.userFocusHandlers = {}
        self.userCommandHandlers = {}
        self.enterPressHandlers = {}
        return

    def setFocus(self, arg, suppressSound = 1):
        # Accept either an index into focusList or the item itself
        if type(arg) == type(0):
            index = arg
        else:
            index = self.focusList.index(arg)
        if suppressSound:
            self.suppressClickSound += 1
        self.__setFocusIndex(index)

    def advanceFocus(self, condition = 1):
        index = self.getFocusIndex()
        if condition:
            index += 1
        self.setFocus(index, suppressSound=0)

    def getFocusIndex(self):
        if not self.focusMgmtActive():
            return None
        return self.focusIndex

    def getFocusItem(self):
        if not self.focusMgmtActive():
            return None
        return self.focusList[self.focusIndex]

    def removeFocus(self):
        focusItem = self.getFocusItem()
        if isinstance(focusItem, DirectEntry):
            focusItem['focus'] = 0
        if self.userGlobalFocusHandler:
            # None signals "no item has focus" to the user handler
            self.userGlobalFocusHandler(None)
        return

    def restoreFocus(self):
        self.setFocus(self.getFocusItem())

    def __setFocusIndex(self, index):
        # Wrap around so tabbing past the end cycles back to the start
        focusIndex = index % len(self.focusList)
        focusItem = self.focusList[focusIndex]
        if isinstance(focusItem, DirectEntry):
            focusItem['focus'] = 1
            # We triggered this focus-in ourselves; absorb the callback
            self.focusHandlerAbsorbCounts[focusItem] += 1
        self.__handleFocusChange(focusIndex)

    def __chainToUserCommandHandler(self, item):
        userHandler, userHandlerArgs = self.userCommandHandlers[item]
        if userHandler:
            if isinstance(item, DirectEntry):
                enteredText = item.get()
                apply(userHandler, [enteredText] + userHandlerArgs)
            elif isinstance(item, DirectScrolledList):
                apply(userHandler, userHandlerArgs)

    def __chainToUserFocusHandler(self, item):
        if isinstance(item, DirectEntry):
            userHandler, userHandlerArgs = self.userFocusHandlers[item]
            if userHandler:
                apply(userHandler, userHandlerArgs)

    def __handleTab(self):
        self.tabPressed = 1
        self.focusDirection = 1
        self.__setFocusIndex(self.getFocusIndex() + self.focusDirection)

    def __handleShiftTab(self):
        self.tabPressed = 1
        self.focusDirection = -1
        self.__setFocusIndex(self.getFocusIndex() + self.focusDirection)

    def __handleFocusChangeAbsorb(self, index):
        # focusInCommand callback; swallow events we generated ourselves
        item = self.focusList[index]
        if self.focusHandlerAbsorbCounts[item] > 0:
            self.focusHandlerAbsorbCounts[item] -= 1
        else:
            self.__handleFocusChange(index)

    def playFocusChangeSound(self):
        base.playSfx(DGG.getDefaultClickSound())

    def __handleFocusChange(self, index):
        if index != self.focusIndex:
            self.removeFocus()
            self.__focusChangedThisFrame = 1
            if hasattr(self, 'tabPressed'):
                del self.tabPressed
            else:
                # Focus changed by other means (e.g. mouse); reset direction
                self.focusDirection = 1
            self.focusIndex = index
            if self.suppressClickSound > 0:
                self.suppressClickSound -= 1
            else:
                self.playFocusChangeSound()
            focusItem = self.getFocusItem()
            if self.userGlobalFocusHandler:
                self.userGlobalFocusHandler(focusItem)
                if self.getFocusItem() != focusItem:
                    GuiScreen.notify.debug('focus changed by global focus handler')
        if self.focusMgmtActive():
            self.__chainToUserFocusHandler(focusItem)

    def __startFrameStartTask(self):
        # Runs early every frame (sort -100) to reset the per-frame
        # "focus changed" flag used to debounce the enter key
        self.__focusChangedThisFrame = 0
        self.frameStartTaskName = 'GuiScreenFrameStart'
        taskMgr.add(self.__handleFrameStart, self.frameStartTaskName, -100)

    def __stopFrameStartTask(self):
        taskMgr.remove(self.frameStartTaskName)
        del self.frameStartTaskName
        del self.__focusChangedThisFrame

    def __handleFrameStart(self, task):
        self.__focusChangedThisFrame = 0
        return Task.cont

    def __handleDirectScrolledListCommand(self, index):
        self.__chainToUserCommandHandler(self.focusList[index])
        self.setFocus(index, suppressSound=self.getFocusIndex() == index)

    def __handleEnter(self):
        if self.__focusChangedThisFrame:
            # Don't also process enter if focus just moved this frame
            return
        focusItem = self.getFocusItem()
        if isinstance(focusItem, DirectEntry):
            self.__chainToUserCommandHandler(focusItem)
        if self.focusMgmtActive() and focusItem == self.getFocusItem():
            self.enterPressHandlers[focusItem]()

    def __alwaysAdvanceFocus(self):
        self.advanceFocus()

    def __advanceFocusIfNotEmpty(self):
        focusItem = self.getFocusItem()
        enteredText = focusItem.get()
        if enteredText != '':
            self.advanceFocus()
        else:
            # Stay put (re-focus the current item)
            self.setFocus(self.getFocusIndex())

    def __neverAdvanceFocus(self):
        self.setFocus(self.getFocusIndex())

    def __ignoreEnterPress(self):
        pass

    def waitForDatabaseTimeout(self, requestName = 'unknown'):
        """Arm a timer that shows a 'please wait' dialog if the database
        does not answer within OTPGlobals.DatabaseDialogTimeout seconds."""
        GuiScreen.notify.debug('waiting for database timeout %s at %s' % (requestName, globalClock.getFrameTime()))
        globalClock.tick()
        taskMgr.doMethodLater(OTPGlobals.DatabaseDialogTimeout, self.__showWaitingForDatabase, 'waitingForDatabase', extraArgs=[requestName])

    def __showWaitingForDatabase(self, requestName):
        GuiScreen.notify.info('timed out waiting for %s at %s' % (requestName, globalClock.getFrameTime()))
        dialogClass = OTPGlobals.getDialogClass()
        self.waitingForDatabase = dialogClass(text=OTPLocalizer.GuiScreenToontownUnavailable, dialogName='WaitingForDatabase', buttonTextList=[OTPLocalizer.GuiScreenCancel], style=OTPDialog.Acknowledge, command=self.__handleCancelWaiting)
        self.waitingForDatabase.show()
        # After an additional give-up period, bail out entirely
        taskMgr.doMethodLater(OTPGlobals.DatabaseGiveupTimeout, self.__giveUpWaitingForDatabase, 'waitingForDatabase', extraArgs=[requestName])
        return Task.done

    def __giveUpWaitingForDatabase(self, requestName):
        GuiScreen.notify.info('giving up waiting for %s at %s' % (requestName, globalClock.getFrameTime()))
        self.cleanupWaitingForDatabase()
        messenger.send(self.doneEvent, [{'mode': 'failure'}])
        return Task.done

    def cleanupWaitingForDatabase(self):
        if self.waitingForDatabase != None:
            self.waitingForDatabase.cleanup()
            self.waitingForDatabase = None
        taskMgr.remove('waitingForDatabase')
        return

    def __handleCancelWaiting(self, value):
        self.cleanupWaitingForDatabase()
        messenger.send(self.doneEvent, [{'mode': 'quit'}])
| |
"""
Find out if Numeric of numarray is going to be used as its array package
WARNING: PyGSL has used Numeric as its core. This is to test to see
if we can support both, but sooner or later the favour has to be given
to one of these packages.
When imported this module:
1.) Looks on the command line if it can find a flag of type
--array-object=[Numeric|nummarray|numpy]
2.) Tries to import these modules.
3.) Tries to use the preferred one.
4.) Or goes with the other one found
"""
import re
import string
import sys
import os
from distutils.errors import DistutilsModuleError
# Directories used by the build: the package root, the C include directory
# that receives the generated arrayobject.h, the python package itself and
# the build helper package (see writenumobj below for the generated files)
packagedir = os.path.dirname(os.path.abspath(__file__))
includedir = os.path.join(packagedir, "Include", "pygsl")
pygsldir = os.path.join(packagedir, "pygsl")
gsl_dist = os.path.join(packagedir, "gsl_dist")
def extractpattern():
    """
    Try to find if the array object was specified at the command line

    Scans sys.argv for a flag of the form "--array-object=<name>"; if
    found, the flag is removed from sys.argv and the whitespace-stripped
    name is returned. Returns an empty string otherwise.
    """
    array_pattern = re.compile("--array-object=(.+)")
    pos = 0
    result = ""
    while pos < len(sys.argv):
        match = array_pattern.match(sys.argv[pos])
        if match:
            # str.strip() returns a new string; the previous code called it
            # and discarded the result, so whitespace was never removed
            result = match.group(1).strip()
            # Consume the flag so distutils never sees it
            sys.argv[pos:pos + 1] = []
            break
        pos += 1
    return result
def switchpreference(array_preference):
    """
    Find out if the set preference can be used ...

    Probes which of numpy / Numeric / numarray are importable, honours the
    requested preference when possible, and otherwise falls back in the
    order numpy, numarray, Numeric.  Returns the chosen module name.
    """
    # have_*: module is importable; use_*: module was selected
    have_numeric = 0
    have_numarray = 0
    have_numpy = 0
    use_numeric = 0
    use_numarray = 0
    use_numpy = 0
    try:
        import numpy
        have_numpy = 1
    except ImportError:
        pass
    try:
        import Numeric
        have_numeric = 1
    except ImportError:
        pass
    try:
        import numarray
        have_numarray = 1
    except ImportError:
        pass
    #array_preference = 'numarray'
    if array_preference != None:
        if array_preference == 'numpy':
            if have_numpy == 1:
                use_numpy = 1
            else:
                print "Did not find the numpy module you asked for"
        # NOTE(review): the 'numpy' check above is not part of this
        # if/else chain, so an unrecognised preference only produces the
        # numarray warning below -- confirm whether that is intended
        if array_preference == 'Numeric':
            if have_numeric == 1:
                use_numeric = 1
            else:
                print "Did not find the Numeric module you asked for"
        else:
            if array_preference == 'numarray':
                if have_numarray == 1:
                    use_numarray = 1
                else:
                    print "Did not find the numarray module you asked for"
    # Preference unusable (or not given): fall back to whatever is present,
    # preferring numpy, then numarray, then Numeric
    if use_numeric == 0 and use_numarray == 0 and use_numpy == 0:
        if have_numpy == 1:
            use_numpy = 1
        elif have_numarray == 1:
            use_numarray = 1
        elif have_numeric == 1:
            use_numeric = 1
        else:
            raise DistutilsModuleError, "I need either numpy, nummarray, or Numeric!"
    # Normalise the flags so exactly one package is selected
    if use_numpy == 1:
        use_numeric = 0
        use_numarray = 0
        nummodule = "numpy"
    elif use_numeric == 1:
        #print "Using Numeric as array Package"
        use_numpy = 0
        use_numarray = 0
        nummodule = "Numeric"
    elif use_numarray == 1:
        #print "Using nummarray as array Package"
        use_numeric = 0
        use_numpy = 0
        nummodule = "numarray"
    else:
        raise DistutilsModuleError, "I need either numpy, nummarray or Numeric!"
    return nummodule
def writenumobj(nummodule):
    """Record the chosen array module in several generated files.

    Generates:
      * pygsl/_numobj.py          -- re-exports the array module at runtime,
      * gsl_dist/array_includes.py -- include dirs for the build,
      * Include/pygsl/arrayobject.h -- a PyGSL_<MODULE> C define,
      * pygsl/_mlab.py            -- the matching mlab compatibility layer.
    """
    # Write the chosen module to a file so it is automatically included when
    # pygsl starts up.
    # NOTE: `file` shadows the builtin of the same name throughout this
    # function (Python 2 era code).
    file = open(os.path.join(pygsldir, "_numobj.py"), "w")
    warnmsg = """
    WARNING: File Generated during build. DO NOT MODIFY!!!
    """
    file.write('"""\n')
    file.write(warnmsg)
    file.write('"""\n')
    file.write('\n')
    file.write('from %s import *\n\n' % nummodule)
    file.write('nummodule = "%s"\n' % nummodule)
    file.close()
    del file
    # Record the include directories the extension build needs.
    file = open(os.path.join(gsl_dist, "array_includes.py"), "w")
    file.write('"""\n')
    file.write(warnmsg)
    file.write('"""\n')
    file.write('\n')
    file.write('array_include_dirs = []\n')
    if nummodule == "numpy":
        file.write('from numpy.distutils.misc_util import get_numpy_include_dirs\n')
        file.write('array_include_dirs = get_numpy_include_dirs()\n')
    file.close()
    del file
    # Write the chosen module to an include header.
    file = open(os.path.join(includedir, "arrayobject.h"), "w")
    file.write('/*')
    file.write(warnmsg)
    file.write('*/\n')
    #file.write('#include <%s/arrayobject.h>\n' % nummodule)
    file.write('#define PyGSL_%s 1\n' % string.upper(nummodule))
    file.close()
    del file
    # Generate the mlab compatibility shim matching the chosen module.
    file = open(os.path.join(pygsldir, "_mlab.py"), "w")
    file.write('"""\n')
    file.write(warnmsg)
    file.write('"""\n')
    if nummodule == "numpy":
        file.write('from numpy.oldnumeric.mlab import *\n')
    elif nummodule == "Numeric":
        file.write('from MLab import *\n')
    elif nummodule == "numarray":
        file.write('from numarray.linear_algebra.mlab import *\n')
    else:
        raise ValueError, ("Unknown array object %s" % nummodule)
    file.close()
    del file
def read_numobj():
    """Read the recorded array module name from pygsl/_numobj.py.

    Returns the module name recorded by a previous build, or None when the
    file does not exist (IOError) or the recorded module can no longer be
    imported (ImportError).
    """
    path = os.path.join(pygsldir, "_numobj.py")
    g = {}
    l = {}
    module = None
    try:
        # Executing the generated file also performs `from <module> import *`,
        # so an ImportError here means the recorded module is gone.
        execfile(path, g, l)
        module = l["nummodule"]
        return module
    except IOError:
        print "No array object was selected."
        return None
    except ImportError:
        pass
    # Try to find the name of the set module: the last line of the generated
    # file is `nummodule = "<name>"`.
    line = open(path).readlines()[-1]
    lastobject = string.strip(string.split(line, "=")[1])
    print "Array object %s found in pygsl._numobj can not be imported!" % (lastobject,)
    return None
def build_guess(selectedmodule):
    """Select the array module to build against and persist the choice.

    selectedmodule is the (possibly empty) name requested on the command
    line. An empty request re-uses the previously recorded choice when one
    exists; otherwise the best available module is detected, recorded via
    writenumobj() and returned.
    """
    # The previously recorded selection, if any.
    lastmodule = read_numobj()
    # If no explicit request was made, keep the last selection if possible.
    if selectedmodule == "" and lastmodule != None:
        return lastmodule
    print "Looking for a suitable array module"
    # If given, find out if the request can be honoured ...
    nummodule = switchpreference(selectedmodule)
    # Warn loudly when the selection changed, as mixed builds are broken.
    if lastmodule == None or lastmodule != nummodule:
        if lastmodule != nummodule:
            print "SELECTED a NEW array module ->", nummodule
            print "Please make sure that all modules are built with the same setting."
            print "e.g. remove the build directory and start the build process again!"
        else:
            # NOTE(review): this branch is only reachable when lastmodule is
            # None *and* equals nummodule, which cannot happen for a string
            # result — looks like dead code. Confirm intent.
            print "SELECTED as array module ->", nummodule
    writenumobj(nummodule)
    return nummodule
def read_from_numobj(default):
    """Return the array module name recorded in pygsl/_numobj.py.

    When no usable record exists (missing file or un-importable module),
    fall back to auto-detection via build_guess(), passing the command-line
    preference `default` through.
    """
    recorded = read_numobj()
    if recorded is None:
        return build_guess(default)
    return recorded
class _nummodule:
    """Lazy proxy for the array module name.

    Converting an instance to a string triggers the detection logic the
    first time; the result is cached for all later conversions.
    """
    def __init__(self):
        # Cached module name; None means "not resolved yet".
        self.__arrayobject = None
        # A possible --array-object=... request from the command line.
        self.__preference = extractpattern()
    def __findarrayobject(self):
        # A build run may select a fresh module; any other invocation reuses
        # the recorded choice when possible.
        if "build" in sys.argv:
            self.__arrayobject = build_guess(self.__preference)
        else:
            self.__arrayobject = read_from_numobj(self.__preference)
    def __str__(self):
        if self.__arrayobject is None:
            self.__findarrayobject()
        return self.__arrayobject
# Resolve (lazily, via __str__) and announce the array module that will be
# used for this build.
nummodule = _nummodule()
print nummodule
| |
#!/usr/bin/env python
"""These are filesystem related flows."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import fnmatch
import os
import re
import stat
from future.builtins import map
from future.utils import iteritems
from future.utils import iterkeys
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_proto import flows_pb2
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server import notification
from grr_response_server import server_stubs
from grr_response_server.rdfvalues import objects as rdf_objects
# This is all bits that define the type of the file in the stat mode. Equal to
# 0b1111000000000000 (0o170000) — i.e. the same value as stat.S_IFMT masks
# with; spelled out here as the union of the individual file-type constants.
stat_type_mask = (
    stat.S_IFREG | stat.S_IFDIR | stat.S_IFLNK | stat.S_IFBLK
    | stat.S_IFCHR | stat.S_IFIFO | stat.S_IFSOCK)
def _FilterOutPathInfoDuplicates(path_infos):
"""Filters out duplicates from passed PathInfo objects.
Args:
path_infos: An iterable with PathInfo objects.
Returns:
A list of PathInfo objects with duplicates removed. Duplicates are
removed following this logic: they're sorted by (ctime, mtime, atime,
inode number) in the descending order and then the first one is taken
and the others are dropped.
"""
pi_dict = {}
for pi in path_infos:
path_key = (pi.path_type, pi.GetPathID())
pi_dict.setdefault(path_key, []).append(pi)
def _SortKey(pi):
return (
pi.stat_entry.st_ctime,
pi.stat_entry.st_mtime,
pi.stat_entry.st_atime,
pi.stat_entry.st_ino,
)
for pi_values in pi_dict.values():
if len(pi_values) > 1:
pi_values.sort(key=_SortKey, reverse=True)
return [v[0] for v in pi_dict.values()]
def WriteStatEntries(stat_entries, client_id):
  """Persists information about stat entries.

  Args:
    stat_entries: A list of `StatEntry` instances.
    client_id: An id of a client the stat entries come from.
  """
  for entry in stat_entries:
    if not entry.pathspec.last.stream_name:
      continue
    # This is an alternate data stream (ADS). Such entries must always be
    # recorded as regular files or we won't be able to access the data.
    # New clients send the correct mode already, but to make sure we clear
    # all file type bits and force the regular-file bit here anyway.
    entry.st_mode = (entry.st_mode & ~stat_type_mask) | stat.S_IFREG
  path_infos = [
      rdf_objects.PathInfo.FromStatEntry(entry) for entry in stat_entries
  ]
  # NOTE: TSK may return duplicate entries, either due to a bug in the TSK
  # implementation or because TSK is capable of returning information about
  # deleted files. Our VFS data model only supports storing multiple file
  # versions when we collect the versions ourselves, so excessive versions
  # are dropped before the WritePathInfos call. Files returned by TSK still
  # make it into the flow's results, just not into the VFS data.
  data_store.REL_DB.WritePathInfos(client_id,
                                   _FilterOutPathInfoDuplicates(path_infos))
class ListDirectoryArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the `ListDirectory` flow."""
  protobuf = flows_pb2.ListDirectoryArgs
  rdf_deps = [
      rdf_paths.PathSpec,
  ]
class ListDirectory(flow_base.FlowBase):
  """List files in a directory.

  Stats the target directory and lists its children, writing the results to
  the relational datastore and replying with a StatEntry per child.
  """
  category = "/Filesystem/"
  args_type = ListDirectoryArgs
  behaviours = flow_base.BEHAVIOUR_ADVANCED
  def Start(self):
    """Issue a request to list the directory."""
    self.state.urn = None
    # TODO(hanuszczak): Support for old clients ends on 2021-01-01.
    # This conditional should be removed after that date.
    if self.client_version >= 3221:
      stub = server_stubs.GetFileStat
      request = rdf_client_action.GetFileStatRequest(
          pathspec=self.args.pathspec)
    else:
      stub = server_stubs.StatFile
      request = rdf_client_action.ListDirRequest(pathspec=self.args.pathspec)
    self.CallClient(stub, request, next_state=compatibility.GetName(self.Stat))
    # We use data to pass the path to the callback:
    self.CallClient(
        server_stubs.ListDirectory,
        pathspec=self.args.pathspec,
        next_state=compatibility.GetName(self.List))
  def Stat(self, responses):
    """Save stat information on the directory.

    Args:
      responses: Client responses for the stat request issued in Start().
    """
    # Did it work?
    if not responses.success:
      raise flow_base.FlowError("Could not stat directory: %s" %
                                responses.status)
    # Keep the stat response for later.
    stat_entry = rdf_client_fs.StatEntry(responses.First())
    self.state.stat = stat_entry
    # The full path of the object is the combination of the client_id and the
    # path.
    self.state.urn = stat_entry.pathspec.AFF4Path(self.client_urn)
  def List(self, responses):
    """Collect the directory listing and store in the datastore.

    Args:
      responses: Client responses for the ListDirectory request.
    """
    if not responses.success:
      raise flow_base.FlowError(str(responses.status))
    self.Log("Listed %s", self.state.urn)
    # Record the directory itself, then all of its children.
    path_info = rdf_objects.PathInfo.FromStatEntry(self.state.stat)
    data_store.REL_DB.WritePathInfos(self.client_id, [path_info])
    stat_entries = list(map(rdf_client_fs.StatEntry, responses))
    WriteStatEntries(stat_entries, client_id=self.client_id)
    for stat_entry in stat_entries:
      self.SendReply(stat_entry)  # Send Stats to parent flows.
  def NotifyAboutEnd(self):
    """Sends a notification that this flow is done."""
    # Without a resolved URN there is nothing useful to point the user at,
    # so fall back to the generic notification.
    if not self.state.urn:
      super(ListDirectory, self).NotifyAboutEnd()
      return
    st = self.state.stat
    ps_path_type = st.pathspec.last.pathtype
    path_type = rdf_objects.PathInfo.PathTypeFromPathspecPathType(ps_path_type)
    full_path = st.pathspec.CollapsePath()
    path_components = full_path.strip("/").split("/")
    file_ref = rdf_objects.VfsFileReference(
        client_id=self.client_id,
        path_type=path_type,
        path_components=path_components)
    notification.Notify(
        self.token.username,
        rdf_objects.UserNotification.Type.TYPE_VFS_LIST_DIRECTORY_COMPLETED,
        "Listed {0}".format(full_path),
        rdf_objects.ObjectReference(
            reference_type=rdf_objects.ObjectReference.Type.VFS_FILE,
            vfs_file=file_ref))
class RecursiveListDirectoryArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the `RecursiveListDirectory` flow."""
  protobuf = flows_pb2.RecursiveListDirectoryArgs
  rdf_deps = [
      rdf_paths.PathSpec,
  ]
class RecursiveListDirectory(flow_base.FlowBase):
  """Recursively list directory on the client.

  Starting at args.pathspec, each listed directory queues further
  ListDirectory client calls for its subdirectories (symlinks excluded)
  until args.max_depth is reached. Every stat entry is persisted and
  replied to parent flows.
  """
  category = "/Filesystem/"
  args_type = RecursiveListDirectoryArgs

  def Start(self):
    """List the initial directory."""
    # The first directory we listed; all depth computations are relative
    # to it.
    self.state.first_directory = None
    self.state.dir_count = 0
    self.state.file_count = 0
    self.CallClient(
        server_stubs.ListDirectory,
        pathspec=self.args.pathspec,
        next_state=compatibility.GetName(self.ProcessDirectory))

  def ProcessDirectory(self, responses):
    """Recursively list the directory, and add to the timeline.

    Args:
      responses: Stat entries for one directory's children.
    """
    if responses.success:
      response = responses.First()
      if response is None:
        return
      directory_pathspec = response.pathspec.Dirname()
      urn = directory_pathspec.AFF4Path(self.client_urn)
      self.StoreDirectory(responses)
      if self.state.first_directory is None:
        self.state.first_directory = urn
      # If the urn is too deep we quit to prevent recursion errors.
      relative_name = urn.RelativeName(self.state.first_directory) or ""
      if _Depth(relative_name) >= self.args.max_depth - 1:
        self.Log("Exceeded maximum path depth at %s.",
                 urn.RelativeName(self.state.first_directory))
        return
      for stat_response in responses:
        # Queue a list directory for each directory here, but do not follow
        # symlinks.
        is_dir = stat.S_ISDIR(int(stat_response.st_mode))
        if not stat_response.symlink and is_dir:
          self.CallClient(
              server_stubs.ListDirectory,
              pathspec=stat_response.pathspec,
              next_state=compatibility.GetName(self.ProcessDirectory))
          self.state.dir_count += 1
          if self.state.dir_count % 100 == 0:  # Log every 100 directories
            self.Log("Reading %s. (%d nodes, %d directories done)",
                     urn.RelativeName(self.state.first_directory),
                     self.state.file_count, self.state.dir_count)
      self.state.file_count += len(responses)

  def StoreDirectory(self, responses):
    """Stores all stat responses and forwards them to parent flows."""
    stat_entries = list(map(rdf_client_fs.StatEntry, responses))
    WriteStatEntries(stat_entries, client_id=self.client_id)
    for stat_entry in stat_entries:
      self.SendReply(stat_entry)  # Send Stats to parent flows.

  def NotifyAboutEnd(self):
    """Sends a completion notification for this flow."""
    status_text = "Recursive Directory Listing complete %d nodes, %d dirs"
    urn = self.state.first_directory
    if not urn:
      try:
        urn = self.args.pathspec.AFF4Path(self.client_urn)
      except ValueError:
        pass
    # BUG FIX: file_ref was previously only assigned inside the `if urn:`
    # branch, so when no URN could be determined the notification.Notify
    # call below raised a NameError. Default to None (a notification
    # without a VFS file reference).
    file_ref = None
    if urn:
      components = urn.Split()
      if len(components) > 3:
        file_ref = rdf_objects.VfsFileReference(
            client_id=components[0],
            path_type=components[2].upper(),
            path_components=components[3:])
    notification.Notify(
        self.token.username, rdf_objects.UserNotification.Type
        .TYPE_VFS_RECURSIVE_LIST_DIRECTORY_COMPLETED,
        status_text % (self.state.file_count, self.state.dir_count),
        rdf_objects.ObjectReference(
            reference_type=rdf_objects.ObjectReference.Type.VFS_FILE,
            vfs_file=file_ref))

  def End(self, responses):
    """Logs the final node/directory counts."""
    del responses
    status_text = "Recursive Directory Listing complete %d nodes, %d dirs"
    self.Log(status_text, self.state.file_count, self.state.dir_count)
class GlobArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the `Glob` flow."""
  protobuf = flows_pb2.GlobArgs
  rdf_deps = [
      rdf_paths.GlobExpression,
      rdf_paths.PathSpec,
  ]
  def Validate(self):
    """Ensure that the glob paths are valid."""
    self.paths.Validate()
class GlobLogic(object):
  """A MixIn to implement the glob functionality.

  Expands glob expressions into a prefix tree of path components and walks
  that tree by issuing stat / list / find requests to the client. Hosting
  flows override GlobReportMatch() to receive matches.
  """
  def GlobForPaths(self,
                   paths,
                   pathtype="OS",
                   root_path=None,
                   process_non_regular_files=False,
                   collect_ext_attrs=False):
    """Starts the Glob.

    This is the main entry point for this flow mixin.

    First we convert the pattern into regex components, and then we
    interpolate each component. Finally, we generate a cartesian product of all
    combinations.

    Args:
      paths: A list of GlobExpression instances.
      pathtype: The pathtype to use for creating pathspecs.
      root_path: A pathspec where to start searching from.
      process_non_regular_files: Work with all kinds of files - not only with
        regular ones.
      collect_ext_attrs: Whether to gather information about file extended
        attributes.
    """
    patterns = []
    if not paths:
      # Nothing to do.
      return
    self.state.pathtype = pathtype
    self.state.root_path = root_path
    self.state.process_non_regular_files = process_non_regular_files
    self.state.collect_ext_attrs = collect_ext_attrs
    # Transform the patterns by substitution of client attributes. When the
    # client has multiple values for an attribute, this generates multiple
    # copies of the pattern, one for each variation. e.g.:
    # /home/%%Usernames%%/* -> [ /home/user1/*, /home/user2/* ]
    for path in paths:
      patterns.extend(
          path.Interpolate(knowledge_base=self.client_knowledge_base))
    # Sort the patterns so that if there are files whose paths conflict with
    # directory paths, the files get handled after the conflicting directories
    # have been added to the component tree.
    patterns.sort(key=len, reverse=True)
    # Expand each glob pattern into a list of components. A component is either
    # a wildcard or a literal component.
    # e.g. /usr/lib/*.exe -> ['/usr/lib', '.*.exe']
    # We build a tree for each component such that duplicated components are
    # merged. We do not need to reissue the same client requests for the same
    # components. For example, the patterns:
    # '/home/%%Usernames%%*' -> {'/home/': {
    #     'syslog.*\\Z(?ms)': {}, 'test.*\\Z(?ms)': {}}}
    # Note: The component tree contains serialized pathspecs in dicts.
    for pattern in patterns:
      # The root node.
      curr_node = self.state.component_tree
      components = self.ConvertGlobIntoPathComponents(pattern)
      for i, curr_component in enumerate(components):
        is_last_component = i == len(components) - 1
        next_node = curr_node.get(curr_component.SerializeToBytes(), {})
        if is_last_component and next_node:
          # There is a conflicting directory already existing in the tree.
          # Replace the directory node with a node representing this file.
          curr_node[curr_component.SerializeToBytes()] = {}
        else:
          curr_node = curr_node.setdefault(curr_component.SerializeToBytes(),
                                           {})
    root_path = next(iter(iterkeys(self.state.component_tree)))
    self.CallStateInline(
        messages=[None],
        next_state=compatibility.GetName(self.ProcessEntry),
        request_data=dict(component_path=[root_path]))
  def GlobReportMatch(self, stat_response):
    """Called when we've found a matching StatEntry."""
    # By default write the stat_response to the AFF4 VFS.
    WriteStatEntries([stat_response], client_id=self.client_id)
  # A regex indicating if there are shell globs in this path.
  GLOB_MAGIC_CHECK = re.compile("[*?[]")
  # Maximum number of files to inspect in a single directory
  FILE_MAX_PER_DIR = 1000000
  def ConvertGlobIntoPathComponents(self, pattern):
    r"""Converts a glob pattern into a list of pathspec components.

    Wildcards are also converted to regular expressions. The pathspec components
    do not span directories, and are marked as a regex or a literal component.

    We also support recursion into directories using the ** notation. For
    example, /home/**2/foo.txt will find all files named foo.txt recursed 2
    directories deep. If the directory depth is omitted, it defaults to 3.

    Example:
      /home/test/* -> ['home', 'test', '.*\\Z(?ms)']

    Args:
      pattern: A glob expression with wildcards.

    Returns:
      A list of PathSpec instances for each component.

    Raises:
      ValueError: If the glob is invalid.
    """
    components = []
    for path_component in pattern.split("/"):
      # A ** in the path component means recurse into directories that match the
      # pattern.
      m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component)
      if m:
        path_component = path_component.replace(m.group(0), "*")
        component = rdf_paths.PathSpec(
            path=fnmatch.translate(path_component),
            pathtype=self.state.pathtype,
            path_options=rdf_paths.PathSpec.Options.RECURSIVE)
        # Allow the user to override the recursion depth.
        if m.group(1):
          component.recursion_depth = int(m.group(1))
      elif self.GLOB_MAGIC_CHECK.search(path_component):
        component = rdf_paths.PathSpec(
            path=fnmatch.translate(path_component),
            pathtype=self.state.pathtype,
            path_options=rdf_paths.PathSpec.Options.REGEX)
      else:
        # No glob magic: match the literal component, case-insensitively.
        component = rdf_paths.PathSpec(
            path=path_component,
            pathtype=self.state.pathtype,
            path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE)
      components.append(component)
    return components
  def Start(self, **_):
    """Initializes the (initially empty) component tree."""
    super(GlobLogic, self).Start()
    self.state.component_tree = {}
  def FindNode(self, component_path):
    """Find the node in the component_tree from component_path.

    Args:
      component_path: A list of components which reference a node in the
        component tree. This allows us to resume processing in the tree.

    Returns:
      A node in the component_tree.
    """
    # Find the node that the component path is referring to.
    node = self.state.component_tree
    for component in component_path:
      node = node[component]
    return node
  def _MatchPath(self, pathspec, response):
    """Check if the responses matches the pathspec (considering options)."""
    to_match = response.pathspec.Basename()
    if pathspec.path_options == rdf_paths.PathSpec.Options.CASE_INSENSITIVE:
      return to_match.lower() == pathspec.path.lower()
    elif pathspec.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
      return to_match == pathspec.path
    elif pathspec.path_options == rdf_paths.PathSpec.Options.REGEX:
      return bool(re.match(pathspec.path, to_match, flags=re.IGNORECASE))
    elif pathspec.path_options == rdf_paths.PathSpec.Options.RECURSIVE:
      return True
    raise ValueError("Unknown Pathspec type.")
  def ProcessEntry(self, responses):
    """Process the responses from the client."""
    if not responses.success:
      return
    # The Find client action does not return a StatEntry but a
    # FindSpec. Normalize to a StatEntry.
    stat_responses = [
        r.hit if isinstance(r, rdf_client_fs.FindSpec) else r for r in responses
    ]
    # If this was a pure path matching call without any regex / recursion, we
    # know exactly which node in the component tree we have to process next and
    # get it from the component_path. If this was a regex match though, we
    # sent the client a combined regex that matches all nodes in order to save
    # round trips and client processing time. In that case we only get the
    # base node and have to check for all subnodes if the response actually
    # matches that subnode before we continue processing.
    component_path = responses.request_data.get("component_path")
    if component_path is not None:
      for response in stat_responses:
        self._ProcessResponse(response, [component_path])
    else:
      # This is a combined match.
      base_path = responses.request_data["base_path"]
      base_node = self.FindNode(base_path)
      for response in stat_responses:
        matching_components = []
        for next_node in base_node:
          pathspec = rdf_paths.PathSpec.FromSerializedBytes(next_node)
          if self._MatchPath(pathspec, response):
            matching_path = base_path + [next_node]
            matching_components.append(matching_path)
        if matching_components:
          self._ProcessResponse(
              response, matching_components, base_wildcard=True)
  def _GetBasePathspec(self, response):
    """Returns a copy of the pathspec to append further components to."""
    if response:
      return response.pathspec.Copy()
    else:
      root_path = self.state.root_path
      if root_path:
        return root_path.Copy()
    return None
  def _ProcessResponse(self, response, component_paths, base_wildcard=False):
    """Advances the tree walk for `response` along each component path."""
    for component_path in component_paths:
      regexes_to_get = []
      recursions_to_get = {}
      node = self.FindNode(component_path)
      if not node:
        # Node is empty representing a leaf node - we found a hit - report it.
        self.GlobReportMatch(response)
        return
      # There are further components in the tree - iterate over them.
      for component_str, next_node in iteritems(node):
        component = rdf_paths.PathSpec.FromSerializedBytes(component_str)
        next_component = component_path + [component_str]
        # If we reach this point, we are instructed to go deeper into the
        # directory structure. We only want to actually do this if
        # - the last response was a proper directory,
        # - or it was a file (an image) that was explicitly given meaning
        #   no wildcards or groupings,
        # - or process_non_regular_files was set.
        #
        # This reduces the number of TSK opens on the client that may
        # sometimes lead to instabilities due to bugs in the library.
        if response and (not (stat.S_ISDIR(int(response.st_mode)) or
                              not base_wildcard or
                              self.state.process_non_regular_files)):
          continue
        if component.path_options == component.Options.RECURSIVE:
          recursions_to_get.setdefault(component.recursion_depth,
                                       []).append(component)
        elif component.path_options == component.Options.REGEX:
          regexes_to_get.append(component)
        elif component.path_options == component.Options.CASE_INSENSITIVE:
          # Here we need to create the next pathspec by appending the current
          # component to what we already have. If we don't have anything yet, we
          # fall back to the root path. If there is no root path either, the
          # current component becomes the new base.
          base_pathspec = self._GetBasePathspec(response)
          if base_pathspec:
            pathspec = base_pathspec.Append(component)
          else:
            pathspec = component
          if not next_node:
            # Check for the existence of the last node.
            if (response is None or
                (response and (response.st_mode == 0 or
                               not stat.S_ISREG(int(response.st_mode))))):
              # If next node is empty, this node is a leaf node, we therefore
              # must stat it to check that it is there. There is a special case
              # here where this pathspec points to a file/directory in the root
              # directory. In this case, response will be None but we still need
              # to stat it.
              # TODO(hanuszczak): Support for old clients ends on 2021-01-01.
              # This conditional should be removed after that date.
              if self.client_version >= 3221:
                stub = server_stubs.GetFileStat
                request = rdf_client_action.GetFileStatRequest(
                    pathspec=pathspec,
                    collect_ext_attrs=self.state.collect_ext_attrs)
              else:
                stub = server_stubs.StatFile
                request = rdf_client_action.ListDirRequest(pathspec=pathspec)
              self.CallClient(
                  stub,
                  request,
                  next_state=compatibility.GetName(self.ProcessEntry),
                  request_data=dict(component_path=next_component))
          else:
            # There is no need to go back to the client for intermediate
            # paths in the prefix tree, just emulate this by recursively
            # calling this state inline.
            self.CallStateInline(
                [rdf_client_fs.StatEntry(pathspec=pathspec)],
                next_state=compatibility.GetName(self.ProcessEntry),
                request_data=dict(component_path=next_component))
      if recursions_to_get or regexes_to_get:
        # Recursions or regexes need a base pathspec to operate on. If we
        # have neither a response or a root path, we send a default pathspec
        # that opens the root with pathtype set to the pathtype expected by the
        # user.
        base_pathspec = self._GetBasePathspec(response)
        if not base_pathspec:
          base_pathspec = rdf_paths.PathSpec(
              path="/", pathtype=self.state.pathtype)
        for depth, recursions in iteritems(recursions_to_get):
          # Combine all patterns of this depth into one alternation regex so
          # a single Find call covers them all.
          path_regex = "(?i)^" + "$|^".join(set([c.path for c in recursions
                                                ])) + "$"
          findspec = rdf_client_fs.FindSpec(
              pathspec=base_pathspec,
              cross_devs=True,
              max_depth=depth,
              path_regex=path_regex)
          findspec.iterator.number = self.FILE_MAX_PER_DIR
          self.CallClient(
              server_stubs.Find,
              findspec,
              next_state=compatibility.GetName(self.ProcessEntry),
              request_data=dict(base_path=component_path))
        if regexes_to_get:
          path_regex = "(?i)^" + "$|^".join(
              set([c.path for c in regexes_to_get])) + "$"
          findspec = rdf_client_fs.FindSpec(
              pathspec=base_pathspec, max_depth=1, path_regex=path_regex)
          findspec.iterator.number = self.FILE_MAX_PER_DIR
          self.CallClient(
              server_stubs.Find,
              findspec,
              next_state=compatibility.GetName(self.ProcessEntry),
              request_data=dict(base_path=component_path))
class Glob(GlobLogic, flow_base.FlowBase):
  """Glob the filesystem for patterns.

  Returns:
    StatEntry messages, one for each matching file.
  """
  category = "/Filesystem/"
  behaviours = flow_base.BEHAVIOUR_ADVANCED
  args_type = GlobArgs
  def Start(self):
    """Starts the Glob.

    First we convert the pattern into regex components, and then we
    interpolate each component. Finally, we generate a cartesian product of all
    combinations.
    """
    super(Glob, self).Start()
    # NOTE(review): GlobForPaths also accepts collect_ext_attrs, which is not
    # forwarded here — confirm whether GlobArgs is supposed to carry it.
    self.GlobForPaths(
        self.args.paths,
        pathtype=self.args.pathtype,
        root_path=self.args.root_path,
        process_non_regular_files=self.args.process_non_regular_files)
  def GlobReportMatch(self, stat_response):
    """Called when we've found a matching StatEntry."""
    # Persist via the mixin, then also reply to the caller.
    super(Glob, self).GlobReportMatch(stat_response)
    self.SendReply(stat_response)
class DiskVolumeInfoArgs(rdf_structs.RDFProtoStruct):
  """Arguments for the `DiskVolumeInfo` flow."""
  protobuf = flows_pb2.DiskVolumeInfoArgs
def PathHasDriveLetter(path):
  """Checks whether a path starts with a Windows drive letter.

  The length guard keeps single-character (and empty) paths from raising.

  Args:
    path: path string

  Returns:
    True if this path has a drive letter.
  """
  return len(path) > 1 and path[1] == ":"
class DiskVolumeInfo(flow_base.FlowBase):
  """Get disk volume info for a given path.

  On linux and OS X we call StatFS on each path and return the results. For
  windows we collect all the volume information and filter it using the drive
  letters in the supplied path list.
  """
  args_type = DiskVolumeInfoArgs
  category = "/Filesystem/"
  behaviours = flow_base.BEHAVIOUR_ADVANCED
  def Start(self):
    """Collects drive letters (Windows) or goes straight to StatFS."""
    self.state.drive_letters = set()
    self.state.system_root_required = False
    if self.client_os == "Windows":
      # Handle the case where a path is specified without the drive letter by
      # collecting systemroot and making sure we report the disk usage for it.
      for path in self.args.path_list:
        if PathHasDriveLetter(path):
          self.state.drive_letters.add(path[0:2])
        else:
          self.state.system_root_required = True
      if self.state.system_root_required:
        self.CallFlow(
            # TODO(user): dependency loop between collectors.py and
            # filesystem.py.
            # collectors.ArtifactCollectorFlow.__name__,
            "ArtifactCollectorFlow",
            artifact_list=["WindowsEnvironmentVariableSystemRoot"],
            next_state=compatibility.GetName(self.StoreSystemRoot))
        return
    self.CallStateInline(
        next_state=compatibility.GetName(self.CollectVolumeInfo))
  def StoreSystemRoot(self, responses):
    """Records the system drive letter collected from the SystemRoot artifact."""
    if not responses.success or not responses.First():
      if self.state.drive_letters:
        # We have at least one path that already has a drive letter so we'll log
        # rather than raise.
        self.Log("Error collecting SystemRoot artifact: %s", responses.status)
      else:
        raise flow_base.FlowError("Error collecting SystemRoot artifact: %s" %
                                  responses.status)
    # NOTE(review): when the artifact failed but drive letters were already
    # known, execution falls through to here with responses.First() possibly
    # empty — the str()[0:2] slice then records a junk "drive". Confirm.
    drive = str(responses.First())[0:2]
    if drive:
      self.state.drive_letters.add(drive)
    else:
      self.Log("Bad result for systemdrive: %s", responses.First())
    self.CallStateInline(
        next_state=compatibility.GetName(self.CollectVolumeInfo))
  def CollectVolumeInfo(self, responses):
    """Issues the platform-appropriate volume info request."""
    del responses
    if self.client_os == "Windows":
      # No dependencies for WMI
      deps = rdf_artifacts.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS
      self.CallFlow(
          # TODO(user): dependency loop between collectors.py and
          # filesystem.py.
          # collectors.ArtifactCollectorFlow.__name__,
          "ArtifactCollectorFlow",
          artifact_list=["WMILogicalDisks"],
          next_state=compatibility.GetName(self.ProcessWindowsVolumes),
          dependencies=deps)
    else:
      self.CallClient(
          server_stubs.StatFS,
          rdf_client_action.StatFSRequest(
              path_list=self.args.path_list, pathtype=self.args.pathtype),
          next_state=compatibility.GetName(self.ProcessVolumes))
  def ProcessWindowsVolumes(self, responses):
    """Replies with WMI volumes whose drive letter was requested."""
    if not responses.success:
      self.Log("Error running WMILogicalDisks artifact: %s", responses.status)
    for response in responses:
      if response.windowsvolume.drive_letter in self.state.drive_letters:
        self.SendReply(response)
  def ProcessVolumes(self, responses):
    """Replies with every StatFS result."""
    if not responses.success:
      self.Log("Error running StatFS: %s", responses.status)
    for response in responses:
      self.SendReply(response)
def _Depth(relative_path):
"""Calculates the depth of a given path."""
if not relative_path:
return 0
return len(os.path.normpath(relative_path).split("/"))
| |
# Table of pitch-class sets keyed by what appear to be Forte catalogue names
# ("<cardinality>-<index>", with a "Z" marking Z-related pairs). Each value is
# the set's prime form as pitch classes relative to 0 — e.g. '3-11'
# ([0, 3, 7]) is the minor triad. NOTE(review): assumed to follow Forte's
# "The Structure of Atonal Music" numbering — verify against that catalogue.
PC_SETS = {
    '3-1': [0, 1, 2],
    '3-10': [0, 3, 6],
    '3-11': [0, 3, 7],
    '3-12': [0, 4, 8],
    '3-2': [0, 1, 3],
    '3-3': [0, 1, 4],
    '3-4': [0, 1, 5],
    '3-5': [0, 1, 6],
    '3-6': [0, 2, 4],
    '3-7': [0, 2, 5],
    '3-8': [0, 2, 6],
    '3-9': [0, 2, 7],
    '4-1': [0, 1, 2, 3],
    '4-10': [0, 2, 3, 5],
    '4-11': [0, 1, 3, 5],
    '4-12': [0, 2, 3, 6],
    '4-13': [0, 1, 3, 6],
    '4-14': [0, 2, 3, 7],
    '4-16': [0, 1, 5, 7],
    '4-17': [0, 3, 4, 7],
    '4-18': [0, 1, 4, 7],
    '4-19': [0, 1, 4, 8],
    '4-2': [0, 1, 2, 4],
    '4-20': [0, 1, 5, 8],
    '4-21': [0, 2, 4, 6],
    '4-22': [0, 2, 4, 7],
    '4-23': [0, 2, 5, 7],
    '4-24': [0, 2, 4, 8],
    '4-25': [0, 2, 6, 8],
    '4-26': [0, 3, 5, 8],
    '4-27': [0, 2, 5, 8],
    '4-28': [0, 3, 6, 9],
    '4-3': [0, 1, 3, 4],
    '4-4': [0, 1, 2, 5],
    '4-5': [0, 1, 2, 6],
    '4-6': [0, 1, 2, 7],
    '4-7': [0, 1, 4, 5],
    '4-8': [0, 1, 5, 6],
    '4-9': [0, 1, 6, 7],
    '4-Z15': [0, 1, 4, 6],
    '4-Z29': [0, 1, 3, 7],
    '5-1': [0, 1, 2, 3, 4],
    '5-10': [0, 1, 3, 4, 6],
    '5-11': [0, 2, 3, 4, 7],
    '5-13': [0, 1, 2, 4, 8],
    '5-14': [0, 1, 2, 5, 7],
    '5-15': [0, 1, 2, 6, 8],
    '5-16': [0, 1, 3, 4, 7],
    '5-19': [0, 1, 3, 6, 7],
    '5-2': [0, 1, 2, 3, 5],
    '5-20': [0, 1, 5, 6, 8],
    '5-21': [0, 1, 4, 5, 8],
    '5-22': [0, 1, 4, 7, 8],
    '5-23': [0, 2, 3, 5, 7],
    '5-24': [0, 1, 3, 5, 7],
    '5-25': [0, 2, 3, 5, 8],
    '5-26': [0, 2, 4, 5, 8],
    '5-27': [0, 1, 3, 5, 8],
    '5-28': [0, 2, 3, 6, 8],
    '5-29': [0, 1, 3, 6, 8],
    '5-3': [0, 1, 2, 4, 5],
    '5-30': [0, 1, 4, 6, 8],
    '5-31': [0, 1, 3, 6, 9],
    '5-32': [0, 1, 4, 6, 9],
    '5-33': [0, 2, 4, 6, 8],
    '5-34': [0, 2, 4, 6, 9],
    '5-35': [0, 2, 4, 7, 9],
    '5-4': [0, 1, 2, 3, 6],
    '5-5': [0, 1, 2, 3, 7],
    '5-6': [0, 1, 2, 5, 6],
    '5-7': [0, 1, 2, 6, 7],
    '5-8': [0, 2, 3, 4, 6],
    '5-9': [0, 1, 2, 4, 6],
    '5-Z12': [0, 1, 3, 5, 6],
    '5-Z17': [0, 1, 3, 4, 8],
    '5-Z18': [0, 1, 4, 5, 7],
    '5-Z36': [0, 1, 2, 4, 7],
    '5-Z37': [0, 3, 4, 5, 8],
    '5-Z38': [0, 1, 2, 5, 8],
    '6-1': [0, 1, 2, 3, 4, 5],
    '6-14': [0, 1, 3, 4, 5, 8],
    '6-15': [0, 1, 2, 4, 5, 8],
    '6-16': [0, 1, 4, 5, 6, 8],
    '6-18': [0, 1, 2, 5, 7, 8],
    '6-2': [0, 1, 2, 3, 4, 6],
    '6-20': [0, 1, 4, 5, 8, 9],
    '6-21': [0, 2, 3, 4, 6, 8],
    '6-22': [0, 1, 2, 4, 6, 8],
    '6-27': [0, 1, 3, 4, 6, 9],
    '6-30': [0, 1, 3, 6, 7, 9],
    '6-31': [0, 1, 4, 5, 7, 9],
    '6-32': [0, 2, 4, 5, 7, 9],
    '6-33': [0, 2, 3, 5, 7, 9],
    '6-34': [0, 1, 3, 5, 7, 9],
    '6-35': [0, 2, 4, 6, 8, 10],
    '6-5': [0, 1, 2, 3, 6, 7],
    '6-7': [0, 1, 2, 6, 7, 8],
    '6-8': [0, 2, 3, 4, 5, 7],
    '6-9': [0, 1, 2, 3, 5, 7],
    '6-Z10': [0, 1, 3, 4, 5, 7],
    '6-Z11': [0, 1, 2, 4, 5, 7],
    '6-Z12': [0, 1, 2, 4, 6, 7],
    '6-Z13': [0, 1, 3, 4, 6, 7],
    '6-Z17': [0, 1, 2, 4, 7, 8],
    '6-Z19': [0, 1, 3, 4, 7, 8],
    '6-Z23': [0, 2, 3, 5, 6, 8],
    '6-Z24': [0, 1, 3, 4, 6, 8],
    '6-Z25': [0, 1, 3, 5, 6, 8],
    '6-Z26': [0, 1, 3, 5, 7, 8],
    '6-Z28': [0, 1, 3, 5, 6, 9],
    '6-Z29': [0, 2, 3, 6, 7, 9],
    '6-Z3': [0, 1, 2, 3, 5, 6],
    '6-Z36': [0, 1, 2, 3, 4, 7],
    '6-Z37': [0, 1, 2, 3, 4, 8],
    '6-Z38': [0, 1, 2, 3, 7, 8],
    '6-Z39': [0, 2, 3, 4, 5, 8],
    '6-Z4': [0, 1, 2, 4, 5, 6],
    '6-Z40': [0, 1, 2, 3, 5, 8],
    '6-Z41': [0, 1, 2, 3, 6, 8],
    '6-Z42': [0, 1, 2, 3, 6, 9],
    '6-Z43': [0, 1, 2, 5, 6, 8],
    '6-Z44': [0, 1, 2, 5, 6, 9],
    '6-Z45': [0, 2, 3, 4, 6, 9],
    '6-Z46': [0, 1, 2, 4, 6, 9],
    '6-Z47': [0, 1, 2, 4, 7, 9],
    '6-Z48': [0, 1, 2, 5, 7, 9],
    '6-Z49': [0, 1, 3, 4, 7, 9],
    '6-Z50': [0, 1, 4, 6, 7, 9],
    '6-Z6': [0, 1, 2, 5, 6, 7],
    '7-1': [0, 1, 2, 3, 4, 5, 6],
    '7-10': [0, 1, 2, 3, 4, 6, 9],
    '7-11': [0, 1, 3, 4, 5, 6, 8],
    '7-13': [0, 1, 2, 4, 5, 6, 8],
    '7-14': [0, 1, 2, 3, 5, 7, 8],
    '7-15': [0, 1, 2, 4, 6, 7, 8],
    '7-16': [0, 1, 2, 3, 5, 6, 9],
    '7-19': [0, 1, 2, 3, 6, 7, 9],
    '7-2': [0, 1, 2, 3, 4, 5, 7],
    '7-20': [0, 1, 2, 5, 6, 7, 9],
    '7-21': [0, 1, 2, 4, 5, 8, 9],
    '7-22': [0, 1, 2, 5, 6, 8, 9],
    '7-23': [0, 2, 3, 4, 5, 7, 9],
    '7-24': [0, 1, 2, 3, 5, 7, 9],
    '7-25': [0, 2, 3, 4, 6, 7, 9],
    '7-26': [0, 1, 3, 4, 5, 7, 9],
    '7-27': [0, 1, 2, 4, 5, 7, 9],
    '7-28': [0, 1, 3, 5, 6, 7, 9],
    '7-29': [0, 1, 2, 4, 6, 7, 9],
    '7-3': [0, 1, 2, 3, 4, 5, 8],
    '7-30': [0, 1, 2, 4, 6, 8, 9],
    '7-31': [0, 1, 3, 4, 6, 7, 9],
    '7-32': [0, 1, 3, 4, 6, 8, 9],
    '7-33': [0, 1, 2, 4, 6, 8, 10],
    '7-34': [0, 1, 3, 4, 6, 8, 10],
    '7-35': [0, 1, 3, 5, 6, 8, 10],
    '7-4': [0, 1, 2, 3, 4, 6, 7],
    '7-5': [0, 1, 2, 3, 5, 6, 7],
    '7-6': [0, 1, 2, 3, 4, 7, 8],
    '7-7': [0, 1, 2, 3, 6, 7, 8],
    '7-8': [0, 2, 3, 4, 5, 6, 8],
    '7-9': [0, 1, 2, 3, 4, 6, 8],
    '7-Z12': [0, 1, 2, 3, 4, 7, 9],
    '7-Z17': [0, 1, 2, 4, 5, 6, 9],
    '7-Z18': [0, 2, 3, 4, 5, 8, 9],
    '7-Z36': [0, 1, 2, 3, 5, 6, 8],
    '7-Z37': [0, 1, 3, 4, 5, 7, 8],
    '7-Z38': [0, 1, 2, 4, 5, 7, 8],
    '8-1': [0, 1, 2, 3, 4, 5, 6, 7],
    '8-10': [0, 2, 3, 4, 5, 6, 7, 9],
    '8-11': [0, 1, 2, 3, 4, 5, 7, 9],
    '8-12': [0, 1, 3, 4, 5, 6, 7, 9],
    '8-13': [0, 1, 2, 3, 4, 6, 7, 9],
    '8-14': [0, 1, 2, 4, 5, 6, 7, 9],
    '8-16': [0, 1, 2, 3, 5, 7, 8, 9],
    '8-17': [0, 1, 3, 4, 5, 6, 8, 9],
    '8-18': [0, 1, 2, 3, 5, 6, 8, 9],
    '8-19': [0, 1, 2, 4, 5, 6, 8, 9],
    '8-2': [0, 1, 2, 3, 4, 5, 6, 8],
    '8-20': [0, 1, 2, 4, 5, 7, 8, 9],
    '8-21': [0, 1, 2, 3, 4, 6, 8, 10],
    '8-22': [0, 1, 2, 3, 5, 6, 8, 10],
    '8-23': [0, 1, 2, 3, 5, 7, 8, 10],
    '8-24': [0, 1, 2, 4, 5, 6, 8, 10],
    '8-25': [0, 1, 2, 4, 6, 7, 8, 10],
    '8-26': [0, 1, 3, 4, 5, 7, 8, 10],
    '8-27': [0, 1, 2, 4, 5, 7, 8, 10],
    '8-28': [0, 1, 3, 4, 6, 7, 9, 10],
    '8-3': [0, 1, 2, 3, 4, 5, 6, 9],
    '8-4': [0, 1, 2, 3, 4, 5, 7, 8],
    '8-5': [0, 1, 2, 3, 4, 6, 7, 8],
    '8-6': [0, 1, 2, 3, 5, 6, 7, 8],
    '8-7': [0, 1, 2, 3, 4, 5, 8, 9],
    '8-8': [0, 1, 2, 3, 4, 7, 8, 9],
    '8-9': [0, 1, 2, 3, 6, 7, 8, 9],
    '8-Z15': [0, 1, 2, 3, 4, 6, 8, 9],
    '8-Z29': [0, 1, 2, 3, 5, 6, 7, 9],
    '9-1': [0, 1, 2, 3, 4, 5, 6, 7, 8],
    '9-10': [0, 1, 2, 3, 4, 6, 7, 9, 10],
    '9-11': [0, 1, 2, 3, 5, 6, 7, 9, 10],
    '9-12': [0, 1, 2, 4, 5, 6, 8, 9, 10],
    '9-2': [0, 1, 2, 3, 4, 5, 6, 7, 9],
    '9-3': [0, 1, 2, 3, 4, 5, 6, 8, 9],
    '9-4': [0, 1, 2, 3, 4, 5, 7, 8, 9],
    '9-5': [0, 1, 2, 3, 4, 6, 7, 8, 9],
    '9-6': [0, 1, 2, 3, 4, 5, 6, 8, 10],
    '9-7': [0, 1, 2, 3, 4, 5, 7, 8, 10],
    '9-8': [0, 1, 2, 3, 4, 6, 7, 8, 10],
    '9-9': [0, 1, 2, 3, 5, 6, 7, 8, 10]
}
| |
#!/usr/bin/env python3
#
# Check that gerrit/projects.yaml contains valid entries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import contextlib
import git
import os
import re
import shutil
import sys
import tempfile
import yaml
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory.

    The directory is removed (best effort) when the context exits,
    whether normally or via an exception.
    """
    try:
        path = tempfile.mkdtemp()
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)
def check_repo(repo_path):
    """Clone *repo_path* into a temp dir and sanity-check it for import.

    Checks performed:
      * a master branch exists;
      * no 'stable' or 'feature' branch exists (they would collide with
        the stable/RELEASE and feature/NAME namespaces created later);
      * no Zuul configuration files exist on any branch.

    Returns the number of errors found (0 when the repo is clean).
    """
    found_errors = 0
    print("Checking git repo '%s':" % repo_path)
    with tempdir() as repopath:
        repo = git.Repo.clone_from(repo_path, repopath)
        remotes = repo.git.branch('--remote')
        branches = [r.strip() for r in remotes.splitlines() if r.strip()]
        print(" Remote branches:")
        for r in branches:
            print(" %s" % r)
        if 'origin/master' in branches:
            print(" Master branch exists.")
        else:
            found_errors += 1
            print(" ERROR: No master branch exists")
        if 'origin/stable' in branches:
            found_errors += 1
            # BUG FIX: message previously read "stable/RELEASEbranches"
            # (missing space).
            print(" ERROR: A branch named 'stable' exists, this will"
                  " break future\n"
                  " creation of stable/RELEASE branches.\n"
                  " Delete the branch on your upstream project.")
        if 'origin/feature' in branches:
            found_errors += 1
            print(" ERROR: A branch named 'feature' exists, this will break "
                  "future\n"
                  " creation of feature/NAME branches.\n"
                  " Delete the branch on your upstream project.")
        if repo.tags:
            print(" Found the following tags:")
            for tag in repo.tags:
                print(" %s" % tag)
        else:
            print(" Found no tags.")
        # Check that no zuul files are in here
        for branch in branches:
            print("Testing branch %s" % branch)
            if 'origin/HEAD' in branch:
                # symbolic ref, not a real branch to check out
                continue
            repo.git.checkout(branch)
            head = repo.head.commit.tree
            for z in ['zuul.yaml', '.zuul.yaml', 'zuul.d', '.zuul.d']:
                if z in head:
                    found_errors += 1
                    print(" ERROR: Found %s on branch %s" % (z, branch))
                    print(" Remove any zuul config files before import.")
    # Just an empty line for nicer formatting
    print("")
    return found_errors
# Check that name exists in set project_names
def check_project_exists(name, project_names):
    """Return 1 (and print an error) if *name* is unknown, else 0."""
    if name in project_names:
        return 0
    print(" Error: project %s does not exist in gerrit" % name)
    return 1
def check_zuul_main(zuul_main, projects):
    """Cross-check zuul's main.yaml against the gerrit project list.

    Every project referenced from a gerrit source in any tenant must be
    declared in gerrit/projects.yaml (the *projects* list).

    Returns the number of errors found.
    """
    found_errors = 0
    # FIX: close the file deterministically instead of leaving the open
    # handle to the garbage collector.
    with open(zuul_main, 'r') as f:
        main_content = yaml.safe_load(f)
    print("Checking %s" % zuul_main)
    project_names = set()
    for p in projects:
        name = p.get('project')
        project_names.add(name)
    # Check that for each gerrit source, we have a project defined in gerrit.
    for tenant in main_content:
        t = tenant.get('tenant')
        if not t:
            continue
        sources = t.get('source')
        if sources and sources.get('gerrit'):
            for project_types in sources['gerrit']:
                for entry in sources['gerrit'][project_types]:
                    if isinstance(entry, dict):
                        if 'projects' in entry:
                            # shorthand group: {'projects': [name, ...]}
                            for x in entry['projects']:
                                found_errors += check_project_exists(
                                    x, project_names)
                        else:
                            # mapping form: {name: {...options...}}
                            for x in entry.keys():
                                found_errors += check_project_exists(
                                    x, project_names)
                    else:
                        # plain string project name
                        found_errors += check_project_exists(
                            entry, project_names)
    # Just an empty line for nicer formatting
    print("")
    return found_errors
def main():
    """Validate gerrit/projects.yaml entries, their ACL files and zuul/main.yaml.

    Prints one message per problem found and exits with status 1 if any
    error was detected.
    """
    found_errors = 0
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        default=False,
                        action='store_true')
    parser.add_argument(
        'infile',
        help='Path to gerrit/projects.yaml',
    )
    parser.add_argument(
        'acldir',
        help='Path to gerrit/acl',
    )
    parser.add_argument(
        'zuul_main_file',
        help='Path to zuul/main.yaml',
    )
    args = parser.parse_args()
    # FIX: close the projects file deterministically instead of relying
    # on garbage collection.
    with open(args.infile, 'r') as f:
        projects = yaml.safe_load(f)
    VALID_LABELS = ["acl-config", "description", "docimpact-group",
                    "groups", "homepage", "options", "project",
                    "upstream", "use-storyboard", "cgit-alias"]
    VALID_SCHEMES = ['https://', 'http://', 'git://']
    DESCRIPTION_REQUIRED = ['openstack', 'openstack-infra', 'openstack-dev',
                            'stackforge']
    VALID_OPTIONS = ['delay-release', 'translate']
    CGIT_ALIAS_SITES = ['zuul-ci.org']
    for p in projects:
        name = p.get('project')
        if not name:
            # not a project
            found_errors += 1
            print("ERROR: Entry is not a project %s" % p)
            continue
        # BUG FIX: this split used to run *before* the 'not name' guard
        # above, so an entry without a 'project' key crashed with
        # AttributeError instead of being reported as an error.
        repo_group, repo_name = name.split('/')
        if args.verbose:
            print('Checking %s' % name)
        description = p.get('description')
        # *very* simple check for common description mistakes
        badwords = (
            # (words), what_words_should_be
            (('openstack', 'Openstack', 'Open Stack'), 'OpenStack'),
            (('Devstack', 'devstack'), 'DevStack'),
            (('astor', 'Astor', 'astra', 'Astra', 'astara'), 'Astara')
        )
        if description:
            # newlines here mess up cgit "repo.desc
            if '\n' in description:
                found_errors += 1
                print("ERROR: Descriptions should not contain newlines:")
                print(' "%s"' % description)
            for words, should_be in badwords:
                for word in words:
                    # look for the bad word hanging out on it's own. Only
                    # trick is "\b" doesn't consider "-" or '.' as a
                    # word-boundary, so ignore it if it looks like some
                    # sort of job-description (e.g. "foo-devstack-bar") or
                    # a url ("foo.openstack.org")
                    if re.search(r'(?<![-.])\b%s\b' % word, description):
                        print("ERROR: project %s, description '%s': "
                              "contains wrong word '%s', it should be '%s'" %
                              (name, description, word, should_be))
                        found_errors += 1
        if not description and repo_group in DESCRIPTION_REQUIRED:
            found_errors += 1
            print("ERROR: Project %s has no description" % name)
            continue
        # Check upstream URL
        # Allow git:// and https:// URLs for importing upstream repositories,
        # but not git@
        upstream = p.get('upstream')
        if upstream:
            openstack_repo = 'https://opendev.org/%s' % name
            try:
                # Check to see if we have already imported the project into
                # OpenStack, if so skip checking upstream.
                check_repo(openstack_repo)
            except git.exc.GitCommandError:
                # We haven't imported the repo yet, make sure upstream is
                # valid.
                found_errors += check_repo(upstream)
            for prefix in VALID_SCHEMES:
                if upstream.startswith(prefix):
                    break
            else:
                found_errors += 1
                print('ERROR: Upstream URLs should use a scheme in %s, '
                      'found %s in %s' %
                      (VALID_SCHEMES, p['upstream'], name))
        # Check for any wrong entries
        for entry in p:
            for label in VALID_LABELS:
                if entry == label:
                    break
            else:
                found_errors += 1
                print("ERROR: Unknown keyword '%s' in project %s" %
                      (entry, name))
        # Check for valid cgit aliases
        # BUG FIX: the key used to be looked up as 'cgit_alias', but the
        # schema spells it 'cgit-alias' (see VALID_LABELS), so this whole
        # validation block was dead code.
        cgit_alias = p.get('cgit-alias')
        if cgit_alias:
            if not isinstance(cgit_alias, dict):
                found_errors += 1
                print("ERROR: cgit alias in project %s must be a dict" %
                      (name,))
            else:
                if 'site' not in cgit_alias or 'path' not in cgit_alias:
                    found_errors += 1
                    print("ERROR: cgit alias in project %s must have "
                          "a site and path" % (name,))
                else:
                    site = cgit_alias['site']
                    path = cgit_alias['path']
                    if path.startswith('/'):
                        found_errors += 1
                        print("ERROR: cgit alias path in project %s must "
                              "not begin with /" % (name,))
                    if site not in CGIT_ALIAS_SITES:
                        found_errors += 1
                        print("ERROR: cgit alias site in project %s is "
                              "not valid" % (name,))
        # Check for valid options
        for option in p.get('options', []):
            if option not in VALID_OPTIONS:
                found_errors += 1
                print("ERROR: Unknown option '%s' in project %s" %
                      (option, name))
        # Check redundant acl-config
        acl_config = p.get('acl-config')
        if acl_config:
            if acl_config.endswith(name + '.config'):
                found_errors += 1
                print("ERROR: Project %s has redundant acl_config line, "
                      "remove it." % name)
            if not acl_config.startswith('/home/gerrit2/acls/'):
                found_errors += 1
                print("ERROR: Project %s has wrong acl_config line, "
                      "fix the path." % name)
            acl_file = os.path.join(args.acldir,
                                    acl_config[len('/home/gerrit2/acls/'):])
            if not os.path.isfile(acl_file):
                found_errors += 1
                print("ERROR: Project %s has non existing acl_config line" %
                      name)
        else:
            # Check that default file exists
            acl_file = os.path.join(args.acldir, name + ".config")
            if not os.path.isfile(acl_file):
                found_errors += 1
                print("ERROR: Project %s has no default acl-config file" %
                      name)
        groups = p.get('groups')
        # Check that groups is a list
        # FIX: validate the type *before* the default-entry check below,
        # which indexes into the value.
        if (groups and not isinstance(groups, list)):
            found_errors += 1
            print("Error: groups entry for project %s is not a list." % name)
        # Check redundant groups entry:
        # By default the groups entry is repo_name, no need to add this.
        storyboard = p.get('use-storyboard', False)
        if (groups and isinstance(groups, list) and len(groups) == 1
                and groups[0] == repo_name and not storyboard):
            found_errors += 1
            print("ERROR: Non-StoryBoard project %s has default groups entry, "
                  "remove it" % name)
    found_errors += check_zuul_main(args.zuul_main_file, projects)
    if found_errors:
        print("Found %d error(s) in %s" % (found_errors, args.infile))
        sys.exit(1)
# Script entry point: run the full validation when executed directly.
if __name__ == '__main__':
    main()
| |
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.core.test_rational import rational
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
dec
)
def assert_dtype_equal(a, b):
    """Check that two dtypes compare equal and hash to the same value."""
    assert_equal(a, b)
    message = "two equivalent types do not hash to the same value !"
    assert_equal(hash(a), hash(b), message)
def assert_dtype_not_equal(a, b):
    """Check that two dtypes differ and do not collide in hash."""
    assert_(a != b)
    message = "two different types hash to the same value !"
    assert_(hash(a) != hash(b), message)
class TestBuiltin(TestCase):
    """Hashing and construction checks for builtin scalar dtypes.

    NOTE(review): ``np.int``, ``np.float`` etc. are the Python-builtin
    aliases re-exported by older numpy releases (removed in numpy 1.24);
    this file targets that legacy API.
    """

    def test_run(self):
        """Only test hash runs at all."""
        for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
                  np.unicode]:
            dt = np.dtype(t)
            hash(dt)

    def test_dtype(self):
        # Make sure equivalent byte order char hash the same (e.g. < and = on
        # little endian)
        for t in [np.int, np.float]:
            dt = np.dtype(t)
            dt2 = dt.newbyteorder("<")
            dt3 = dt.newbyteorder(">")
            if dt == dt2:
                # native order is little-endian, so "<" is equivalent to "="
                self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test")
                assert_dtype_equal(dt, dt2)
            else:
                # native order is big-endian, so ">" is equivalent to "="
                self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
                assert_dtype_equal(dt, dt3)

    def test_equivalent_dtype_hashing(self):
        # Make sure equivalent dtypes with different type num hash equal
        uintp = np.dtype(np.uintp)
        if uintp.itemsize == 4:
            left = uintp
            right = np.dtype(np.uint32)
        else:
            left = uintp
            right = np.dtype(np.ulonglong)
        self.assertTrue(left == right)
        self.assertTrue(hash(left) == hash(right))

    def test_invalid_types(self):
        # Make sure invalid type strings raise an error
        # (a size suffix that does not match the type's actual width).
        assert_raises(TypeError, np.dtype, 'O3')
        assert_raises(TypeError, np.dtype, 'O5')
        assert_raises(TypeError, np.dtype, 'O7')
        assert_raises(TypeError, np.dtype, 'b3')
        assert_raises(TypeError, np.dtype, 'h4')
        assert_raises(TypeError, np.dtype, 'I5')
        assert_raises(TypeError, np.dtype, 'e3')
        assert_raises(TypeError, np.dtype, 'f5')
        # long double ('g'), long ('l') and long long ('q') widths are
        # platform dependent, so probe the size before picking a bad one.
        if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
            assert_raises(TypeError, np.dtype, 'g12')
        elif np.dtype('g').itemsize == 12:
            assert_raises(TypeError, np.dtype, 'g16')
        if np.dtype('l').itemsize == 8:
            assert_raises(TypeError, np.dtype, 'l4')
            assert_raises(TypeError, np.dtype, 'L4')
        else:
            assert_raises(TypeError, np.dtype, 'l8')
            assert_raises(TypeError, np.dtype, 'L8')
        if np.dtype('q').itemsize == 8:
            assert_raises(TypeError, np.dtype, 'q4')
            assert_raises(TypeError, np.dtype, 'Q4')
        else:
            assert_raises(TypeError, np.dtype, 'q8')
            assert_raises(TypeError, np.dtype, 'Q8')

    def test_bad_param(self):
        # Can't give a size that's too small
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0, 4],
                       'itemsize':4})
        # If alignment is enabled, the alignment (4) must divide the itemsize
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0, 4],
                       'itemsize':9}, align=True)
        # If alignment is enabled, the individual fields must be aligned
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i1', 'f4'],
                       'offsets':[0, 2]}, align=True)
class TestRecord(TestCase):
    """Hashing, equality and construction checks for structured dtypes."""

    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        a = np.dtype([('yo', np.int)])
        b = np.dtype([('yo', np.int)])
        assert_dtype_equal(a, b)

    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', np.int)])
        b = np.dtype([('ye', np.int)])
        assert_dtype_not_equal(a, b)

    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['RRed pixel', 'Blue pixel']})
        assert_dtype_not_equal(a, b)

    def test_mutate(self):
        # Mutating a dtype should reset the cached hash value
        a = np.dtype([('yo', np.int)])
        b = np.dtype([('yo', np.int)])
        c = np.dtype([('ye', np.int)])
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)
        a.names = ['ye']
        assert_dtype_equal(a, c)
        assert_dtype_not_equal(a, b)
        # __setstate__ must also invalidate the cached hash
        state = b.__reduce__()[2]
        a.__setstate__(state)
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)

    def test_not_lists(self):
        """Test if an appropriate exception is raised when passing bad values to
        the dtype constructor.
        """
        self.assertRaises(TypeError, np.dtype,
                          dict(names=set(['A', 'B']), formats=['f8', 'i4']))
        self.assertRaises(TypeError, np.dtype,
                          dict(names=['A', 'B'], formats=set(['f8', 'i4'])))

    def test_aligned_size(self):
        # Check that structured dtypes get padded to an aligned size
        dt = np.dtype('i4, i1', align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i4', 'u1'],
                       'offsets':[0, 4]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
        assert_equal(dt.itemsize, 8)
        # Nesting should preserve that alignment
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=True)
        assert_equal(dt1.itemsize, 20)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 16]}, align=True)
        assert_equal(dt2.itemsize, 20)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 16)}, align=True)
        assert_equal(dt3.itemsize, 20)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Nesting should preserve packing
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=False)
        assert_equal(dt1.itemsize, 11)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 10]}, align=False)
        assert_equal(dt2.itemsize, 11)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 10)}, align=False)
        assert_equal(dt3.itemsize, 11)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)

    def test_union_struct(self):
        # Should be able to create union dtypes
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[0, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 4)
        a = np.array([3], dtype='<u4').view(dt)
        a['f1'] = 10
        a['f2'] = 36
        # f1 overlays the low 2 bytes, f2 the high 2 bytes of f0
        assert_equal(a['f0'], 10 + 36*256*256)
        # Should be able to specify fields out of order
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[4, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
                        'formats':['<u2', '<u4', '<u2'],
                        'offsets':[2, 4, 0]}, align=True)
        vals = [(0, 1, 2), (3, -1, 4)]
        vals2 = [(2, 0, 1), (4, 3, -1)]
        a = np.array(vals, dt)
        b = np.array(vals2, dt2)
        assert_equal(a.astype(dt2), b)
        assert_equal(b.astype(dt), a)
        assert_equal(a.view(dt2), b)
        assert_equal(b.view(dt), a)
        # Should not be able to overlap objects with other types
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['O', 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'O'],
                       'offsets':[0, 3]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':[[('a', 'O')], 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', [('a', 'O')]],
                       'offsets':[0, 3]})
        # Out of order should still be ok, however
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i1', 'O'],
                       'offsets':[np.dtype('intp').itemsize, 0]})

    def test_comma_datetime(self):
        dt = np.dtype('M8[D],datetime64[Y],i8')
        assert_equal(dt, np.dtype([('f0', 'M8[D]'),
                                   ('f1', 'datetime64[Y]'),
                                   ('f2', 'i8')]))

    def test_from_dictproxy(self):
        # Tests for PR #5920
        dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
        assert_dtype_equal(dt, np.dtype(dt.fields))
        dt2 = np.dtype((np.void, dt.fields))
        assert_equal(dt2.fields, dt.fields)

    def test_from_dict_with_zero_width_field(self):
        # Regression test for #6430 / #2196
        dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
        dt2 = np.dtype({'names': ['val1', 'val2'],
                        'formats': [(np.float32, (0,)), int]})
        assert_dtype_equal(dt, dt2)
        assert_equal(dt.fields['val1'][0].itemsize, 0)
        assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)

    def test_bool_commastring(self):
        d = np.dtype('?,?,?')  # raises?
        assert_equal(len(d.names), 3)
        for n in d.names:
            assert_equal(d.fields[n][0], np.dtype('?'))
class TestSubarray(TestCase):
    """Hashing, shape handling and validation for subarray dtypes."""

    def test_single_subarray(self):
        # A bare int shape and a 1-tuple shape are equivalent
        a = np.dtype((np.int, (2)))
        b = np.dtype((np.int, (2,)))
        assert_dtype_equal(a, b)
        assert_equal(type(a.subdtype[1]), tuple)
        assert_equal(type(b.subdtype[1]), tuple)

    def test_equivalent_record(self):
        """Test whether equivalent subarray dtypes hash the same."""
        a = np.dtype((np.int, (2, 3)))
        b = np.dtype((np.int, (2, 3)))
        assert_dtype_equal(a, b)

    def test_nonequivalent_record(self):
        """Test whether different subarray dtypes hash differently."""
        a = np.dtype((np.int, (2, 3)))
        b = np.dtype((np.int, (3, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((np.int, (2, 3)))
        b = np.dtype((np.int, (2, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((np.int, (1, 2, 3)))
        b = np.dtype((np.int, (1, 2)))
        assert_dtype_not_equal(a, b)

    def test_shape_equal(self):
        """Test some data types that are equal"""
        assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
        assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
        assert_dtype_equal(np.dtype((np.int, 2)), np.dtype((np.int, (2,))))
        assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
        d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
        assert_dtype_equal(np.dtype(d), np.dtype(d))

    def test_shape_simple(self):
        """Test some simple cases that shouldn't be equal"""
        assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
        assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
        assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))

    def test_shape_monster(self):
        """Test some more complicated cases that shouldn't be equal"""
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))

    def test_shape_sequence(self):
        # Any sequence of integers should work as shape, but the result
        # should be a tuple (immutable) of base type integers.
        a = np.array([1, 2, 3], dtype=np.int16)
        l = [1, 2, 3]
        # Array gets converted
        dt = np.dtype([('a', 'f4', a)])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
        # List gets converted
        dt = np.dtype([('a', 'f4', l)])
        assert_(isinstance(dt['a'].shape, tuple))
        #
        class IntLike(object):
            def __index__(self):
                return 3

            def __int__(self):
                # (a PyNumber_Check fails without __int__)
                return 3
        dt = np.dtype([('a', 'f4', IntLike())])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
        dt = np.dtype([('a', 'f4', (IntLike(),))])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))

    def test_shape_invalid(self):
        # Check that the shape is valid.
        max_int = np.iinfo(np.intc).max
        max_intp = np.iinfo(np.intp).max
        # Too large values (the datatype is part of this)
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
        # Takes a different code path (fails earlier:
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
        # Negative values
        assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])

    def test_alignment(self):
        #Check that subarrays are aligned
        t1 = np.dtype('1i4', align=True)
        t2 = np.dtype('2i4', align=True)
        assert_equal(t1.alignment, t2.alignment)
class TestMonsterType(TestCase):
    """Test deeply nested subtypes."""

    def test1(self):
        pixel = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                          'titles': ['Red pixel', 'Blue pixel']})
        # Two independently built dtypes with a nested record and a
        # subarray field must compare and hash equal.
        first = np.dtype([('yo', np.int), ('ye', pixel),
                          ('yi', np.dtype((np.int, (3, 2))))])
        second = np.dtype([('yo', np.int), ('ye', pixel),
                           ('yi', np.dtype((np.int, (3, 2))))])
        assert_dtype_equal(first, second)
        # One level deeper: a subarray whose base type is itself a record.
        nested_a = np.dtype([('yo', np.int), ('ye', pixel),
                             ('yi', np.dtype((first, (3, 2))))])
        nested_b = np.dtype([('yo', np.int), ('ye', pixel),
                             ('yi', np.dtype((first, (3, 2))))])
        assert_dtype_equal(nested_a, nested_b)
class TestMetadata(TestCase):
def test_no_metadata(self):
d = np.dtype(int)
self.assertEqual(d.metadata, None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
self.assertEqual(d.metadata, {'datum': 1})
def test_metadata_rejects_nondict(self):
self.assertRaises(TypeError, np.dtype, int, metadata='datum')
self.assertRaises(TypeError, np.dtype, int, metadata=1)
self.assertRaises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
self.assertEqual(d['a'].metadata, {'datum': 1})
def base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_equal(d.metadata, {'datum': 1})
class TestString(TestCase):
    """Exact str()/repr() formatting of complex structured dtypes.

    NOTE(review): the expected strings are byte-exact and tied to the
    formatting rules of the numpy release this file targets.
    """

    def test_complex_dtype_str(self):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(str(dt),
                     "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])]")
        # If the sticky aligned flag is set to True, it makes the
        # str() function use a dict representation with an 'aligned' flag
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))],
                        (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])],
                      align=True)
        assert_equal(str(dt),
                     "{'names':['top','bottom'], "
                     "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)),"
                     "[('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))]], "
                     "'offsets':[0,76800], "
                     "'itemsize':80000, "
                     "'aligned':True}")
        # The str() output must round-trip through eval back to the dtype.
        assert_equal(np.dtype(eval(str(dt))), dt)
        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "[(('Red pixel', 'r'), 'u1'), "
                     "(('Green pixel', 'g'), 'u1'), "
                     "(('Blue pixel', 'b'), 'u1')]")
        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "{'names':['rgba','r','g','b'],"
                     " 'formats':['<u4','u1','u1','u1'],"
                     " 'offsets':[0,0,1,2],"
                     " 'titles':['Color','Red pixel',"
                     "'Green pixel','Blue pixel'],"
                     " 'itemsize':4}")
        dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "{'names':['r','b'],"
                     " 'formats':['u1','u1'],"
                     " 'offsets':[0,2],"
                     " 'titles':['Red pixel','Blue pixel'],"
                     " 'itemsize':3}")
        dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
        assert_equal(str(dt),
                     "[('a', '<m8[D]'), ('b', '<M8[us]')]")

    def test_complex_dtype_repr(self):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(repr(dt),
                     "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])])")
        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
                      align=True)
        assert_equal(repr(dt),
                     "dtype([(('Red pixel', 'r'), 'u1'), "
                     "(('Green pixel', 'g'), 'u1'), "
                     "(('Blue pixel', 'b'), 'u1')], align=True)")
        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']}, align=True)
        assert_equal(repr(dt),
                     "dtype({'names':['rgba','r','g','b'],"
                     " 'formats':['<u4','u1','u1','u1'],"
                     " 'offsets':[0,0,1,2],"
                     " 'titles':['Color','Red pixel',"
                     "'Green pixel','Blue pixel'],"
                     " 'itemsize':4}, align=True)")
        dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel'],
                       'itemsize': 4})
        assert_equal(repr(dt),
                     "dtype({'names':['r','b'], "
                     "'formats':['u1','u1'], "
                     "'offsets':[0,2], "
                     "'titles':['Red pixel','Blue pixel'], "
                     "'itemsize':4})")
        dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
        assert_equal(repr(dt),
                     "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")

    @dec.skipif(sys.version_info[0] >= 3)
    def test_dtype_str_with_long_in_shape(self):
        # Pull request #376, should not error
        # (Python 2 only: the 'L' long-literal suffix in a shape string)
        np.dtype('(1L,)i4')

    def test_base_dtype_with_object_type(self):
        # Issue gh-2798, should not error.
        np.array(['a'], dtype="O").astype(("O", [("name", "O")]))

    def test_empty_string_to_object(self):
        # Pull request #4722
        np.array(["", ""]).astype(object)
class TestDtypeAttributeDeletion(TestCase):
    """Attributes of dtype instances must not be deletable."""

    def test_dtype_non_writable_attributes_deletion(self):
        dt = np.dtype(np.double)
        read_only = ("subdtype", "descr", "str", "name", "base", "shape",
                     "isbuiltin", "isnative", "isalignedstruct", "fields",
                     "metadata", "hasobject")
        for attr_name in read_only:
            assert_raises(AttributeError, delattr, dt, attr_name)

    def test_dtype_writable_attributes_deletion(self):
        # 'names' is assignable, but deleting it must still fail.
        dt = np.dtype(np.double)
        for attr_name in ("names",):
            assert_raises(AttributeError, delattr, dt, attr_name)
class TestDtypeAttributes(TestCase):
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
class TestDtypeAttributes(TestCase):
def test_name_builtin(self):
for t in np.typeDict.values():
name = t.__name__
if name.endswith('_'):
name = name[:-1]
assert_equal(np.dtype(t).name, name)
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
pass
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
def test_rational_dtype():
    """Smoke tests for the user-defined rational dtype (gh-5719)."""
    # Casting a rational array to a too-small integer type must overflow.
    convert = np.array([1111], dtype=rational).astype
    assert_raises(OverflowError, convert, 'int8')
    # test that dtype detection finds user-defined types
    value = rational(1)
    assert_equal(np.array([value, value]).dtype, np.dtype(rational))
if __name__ == "__main__":
run_module_suite()
| |
import copy
from unittest import TestCase
from argue import BoundsError
from pylinac.calibration import trs398
from tests_basic.utils import save_file
class TestFunctions(TestCase):
    """Unit tests for the standalone helper functions in trs398."""

    def test_k_s(self):
        # Pair up reduced/reference readings with the Pion each should yield.
        reduced_readings = (20, 20.05)
        reference_readings = (20, 20.1)
        expected_pion = (1.0, 1.0025)
        for m_red, m_ref, pion in zip(reduced_readings, reference_readings,
                                      expected_pion):
            result = trs398.k_s(voltage_reference=300, voltage_reduced=150,
                                m_reference=m_ref, m_reduced=m_red)
            self.assertAlmostEqual(result, pion, delta=0.001)

    def test_override_k_s(self):
        # Temporarily tighten MAX_PION so an otherwise-valid ratio is
        # rejected, then restore the module constant.
        saved_max_pion = copy.copy(trs398.MAX_PION)
        trs398.MAX_PION = 1
        with self.assertRaises(BoundsError):
            trs398.k_s(voltage_reference=300, voltage_reduced=150,
                       m_reference=22, m_reduced=20)
        trs398.MAX_PION = saved_max_pion

    def test_m_corrected(self):
        result = trs398.m_corrected(k_s=1.01, k_tp=0.995, k_elec=1,
                                    k_pol=1.005, m_reference=(20, 20.05))
        self.assertAlmostEqual(20.225, result, delta=0.002)

    def test_kq_photon(self):
        cases = (('30010', 0.65, 0.994), ('A12', 0.75, 0.983))
        for chamber, tpr, expected_kq in cases:
            self.assertAlmostEqual(trs398.kq_photon(chamber=chamber, tpr=tpr),
                                   expected_kq, delta=0.001)

    def test_kq_electron(self):
        cases = (('30013', 4.5, 0.909), ('2571', 8.2, 0.905))
        for chamber, r_50, expected_kq in cases:
            self.assertAlmostEqual(trs398.kq_electron(chamber=chamber,
                                                      r_50=r_50),
                                   expected_kq, delta=0.001)
class TRS398Base:
    """Shared fixture scaffolding for the TRS-398 calibration test cases.

    Concrete subclasses provide measured data as class attributes.
    Attributes bound to a *type* object (e.g. ``temp = float``) are
    placeholders that every subclass must override with real values --
    presumably intentional sentinels; confirm against pylinac usage.
    """
    temp = float                 # chamber temperature [C] -- must be overridden
    press = float                # pressure [kPa] -- must be overridden
    chamber = '30013'            # default chamber model
    nd_w = float                 # N_D,w calibration factor -- must be overridden
    k_elec = 1.000
    voltage_reference = -300
    voltage_reduced = -150
    m_reference = tuple          # placeholder -- must be overridden
    m_opposite = tuple           # placeholder -- must be overridden
    m_reduced = tuple            # placeholder -- must be overridden
    dose_mu_zmax = 1.000
    tissue_correction = 1.000
    mu = 200
    open_pdf = False             # set True locally to view the generated PDF
    print_data = False           # set True locally to dump computed factors

    def test_dose_zmax(self):
        self.assertAlmostEqual(self.dose_mu_zmax, self.trs398.dose_mu_zmax, delta=0.0005)

    def test_dose_zref(self):
        # dose_mu_zref has no default here; each subclass defines it.
        self.assertAlmostEqual(self.dose_mu_zref, self.trs398.dose_mu_zref, delta=0.0005)

    def test_pdf(self):
        # Render the report to a temp file; optionally open it for review.
        save_file(self.trs398.publish_pdf)
        if self.open_pdf:
            self.trs398.publish_pdf('testtrs.pdf', open_file=True)

    def print_results(self):
        # Debug helper, called from setUp when print_data is True.
        print('kQ determined', self.trs398.kq)
        print('Pion', self.trs398.k_s)
        print('Ppol', self.trs398.k_pol)
        print('Ptp', self.trs398.k_tp)
class TRS398Photon(TRS398Base):
    """Photon-beam mixin: builds the TRS398Photon object from fixture data.

    NOTE(review): ``tpr2010`` and ``clinical_pdd_zref`` have no defaults
    here, so every concrete subclass must define them.
    """
    energy = 6
    setup_condition = 'SSD'
    # clinical_pdd_zref = None
    clinical_tmr_zref = None
    fff = False                  # flattening-filter-free beam flag

    def setUp(self):
        # Assemble the calibration object from the class-level fixture data.
        self.trs398 = trs398.TRS398Photon(
            setup=self.setup_condition,
            tpr2010=self.tpr2010,
            temp=self.temp, press=self.press,
            chamber=self.chamber, n_dw=self.nd_w, k_elec=self.k_elec,
            clinical_pdd_zref=self.clinical_pdd_zref,
            voltage_reference=self.voltage_reference, voltage_reduced=self.voltage_reduced,
            m_reference=self.m_reference, m_opposite=self.m_opposite, m_reduced=self.m_reduced,
            clinical_tmr_zref=self.clinical_tmr_zref,
            mu=self.mu, tissue_correction=self.tissue_correction, fff=self.fff, energy=self.energy
        )
        if self.print_data:
            self.print_results()
class TRS398Electron(TRS398Base):
    """Electron-beam mixin: builds the TRS398Electron object from fixture data.

    NOTE(review): ``clinical_pdd_zref`` has no default here, so concrete
    subclasses must define it.
    """
    energy = None
    k_ecal = None
    i_50 = 7.5                   # half-value ionization depth [cm]
    cone = '15x15'
    dose_mu_10 = 1.000

    def setUp(self):
        # Assemble the calibration object from the class-level fixture data.
        self.trs398 = trs398.TRS398Electron(temp=self.temp, press=self.press, energy=self.energy,
                                            chamber=self.chamber, n_dw=self.nd_w, k_elec=self.k_elec,
                                            clinical_pdd_zref=self.clinical_pdd_zref, i_50=self.i_50,
                                            voltage_reference=self.voltage_reference, voltage_reduced=self.voltage_reduced,
                                            m_reference=self.m_reference, m_opposite=self.m_opposite, m_reduced=self.m_reduced,
                                            mu=self.mu, tissue_correction=self.tissue_correction, cone=self.cone)
class MDA_TB2_2015_15x(TRS398Photon, TestCase):
    """Measured 15 MV photon calibration data (MDA TrueBeam 2, 2015)."""
    energy = 15
    temp = 20.5
    press = trs398.mmHg2kPa(760)
    nd_w = 5.444
    k_elec = 1.002
    m_reference = 29.28
    m_opposite = -29.33
    m_reduced = 29.10
    dose_mu_zref = 0.779
    dose_mu_zmax = 1.007
    clinical_pdd_zref = 77.4
    tpr2010 = 0.762
class MDA_TB1_2015_10x(TRS398Photon, TestCase):
    """Measured 10 MV photon calibration data (MDA TrueBeam 1, 2015)."""
    energy = 10
    temp = 21
    press = trs398.mmHg2kPa(763)
    nd_w = 5.393
    k_elec = 1.003
    m_reference = 27.727
    m_opposite = 27.784
    m_reduced = 27.635
    clinical_pdd_zref = 73.5
    dose_mu_zref = 0.734
    dose_mu_zmax = 0.998
    # TPR derived from PDD measurements, rescaled by the clinical/measured
    # PDD ratio.
    tpr2010 = (73.42/73.7) * trs398.tpr2010_from_pdd2010(pdd2010=46.3/73.7)
    # open_pdf = True
    # print_data = True
class ACB5_2011_6x(TRS398Photon, TestCase):
    """Measured 6 MV photon calibration data (ACB5, 2011), plus checks of
    the adjusted-reading dose properties."""
    temp = 22
    press = trs398.mmHg2kPa(751.2)
    nd_w = 5.450
    tpr2010 = trs398.tpr2010_from_pdd2010(pdd2010=38.4/66.8)
    m_reference = 24.82
    m_opposite = -24.83
    m_reduced = 24.79
    clinical_pdd_zref = 66.8
    tissue_correction = 0.99
    dose_mu_zref = 0.673
    dose_mu_zmax = 1.007

    def test_zmax_adjusted(self):
        # Overriding the reference reading should rescale dose at zmax.
        self.trs398.m_reference_adjusted = 24.65
        self.assertAlmostEqual(self.trs398.dose_mu_zmax_adjusted, 1.000, delta=0.0005)

    def test_zref_adjusted(self):
        # Overriding the reference reading should rescale dose at zref.
        self.trs398.m_reference_adjusted = 24.65
        self.assertAlmostEqual(self.trs398.dose_mu_zref_adjusted, 0.668, delta=0.0005)
class ACB5_2012_6X(TRS398Photon, TestCase):
    """Measured 6 MV photon calibration data (ACB5, 2012)."""
    temp = 21.7
    press = trs398.mmHg2kPa(757.2)
    nd_w = 5.446
    m_reference = 25.27
    m_opposite = -25.19
    m_reduced = 25.17
    clinical_pdd_zref = 66.8
    tpr2010 = trs398.tpr2010_from_pdd2010(pdd2010=38.4/66.8)
    tissue_correction = 0.99
    dose_mu_zref = 0.679
    dose_mu_zmax = 1.0159
class ACB5_2012_18X(TRS398Photon, TestCase):
    """TRS-398 photon calibration dataset: ACB5 2012, 18 MV."""
    energy = 18
    temp = 21.7
    press = trs398.mmHg2kPa(757.2)
    tpr2010 = trs398.tpr2010_from_pdd2010(pdd2010=52.5/79.4)
    nd_w = 5.446
    m_reference = 30.67
    m_opposite = -30.65
    m_reduced = 30.50
    # NOTE(review): 79.7 here vs 79.4 in the tpr2010 expression above —
    # presumably separate measurements; confirm both values are intended.
    clinical_pdd_zref = 79.7
    tissue_correction = 0.99
    dose_mu_zref = 0.807
    dose_mu_zmax = 1.0125
class IMMCTB_6FFF(TRS398Photon, TestCase):
    """TRS-398 photon calibration dataset: IMMC TrueBeam, 6 MV FFF."""
    energy = 6
    fff = True
    temp = 22.5
    press = trs398.mmHg2kPa(749)
    tpr2010 = (64.16 / 63.6) * trs398.tpr2010_from_pdd2010(pdd2010=34.5 / 63.6)
    nd_w = 5.394
    m_reference = 11.610
    m_opposite = -11.613
    m_reduced = 11.533
    clinical_pdd_zref = 63.5
    mu = 100
    dose_mu_zref = 0.638
    dose_mu_zmax = 1.005
    # NOTE(review): left enabled while every sibling class comments it out —
    # looks like a debug leftover; confirm before keeping.
    print_data = True
class IMMCTB_10FFF(TRS398Photon, TestCase):
    """TRS-398 photon calibration dataset: IMMC TrueBeam, 10 MV FFF."""
    energy = 10
    fff = True
    temp = 22.4
    press = trs398.mmHg2kPa(748.1)
    nd_w = 5.394
    m_reference = 13.00067
    m_opposite = -13.013
    m_reduced = 12.867
    tpr2010 = trs398.tpr2010_from_pdd2010(pdd2010=(43/71.2))
    clinical_pdd_zref = 71.1
    mu = 100
    dose_mu_zref = 0.712
    dose_mu_zmax = 1.0005
    # open_pdf = True
class IMMCTB_15X(TRS398Photon, TestCase):
    """TRS-398 photon calibration dataset: IMMC TrueBeam, 15 MV."""
    energy = 15
    temp = 22.4
    press = trs398.mmHg2kPa(748.1)
    nd_w = 5.394
    m_reference = 14.307
    m_opposite = -14.323
    m_reduced = 14.220
    clinical_pdd_zref = 76.7
    tpr2010 = trs398.tpr2010_from_pdd2010(pdd2010=(49.9/76.9)) * (76.79/76.9)
    mu = 100
    dose_mu_zref = 0.770
    dose_mu_zmax = 1.004
    # print_data = True
    # open_pdf = True
class IMMC_TB_20E(TRS398Electron, TestCase):
    """TRS-398 electron calibration dataset: IMMC TrueBeam, 20 MeV, 15x15 cone."""
    energy = 20
    cone = '15x15'
    mu = 100
    temp = 22.1
    press = trs398.mmHg2kPa(748.2)
    k_elec = 0.999
    nd_w = 5.394
    m_reference = 19.670 * 0.99354  # * Pgradient because TRS398 is done at dref+0.5cm
    m_opposite = 19.707 * 0.99354
    m_reduced = 19.437 * 0.99354
    i_50 = 8.22
    clinical_pdd_zref = 96.8
    dose_mu_zref = 0.972
    dose_mu_zmax = 1.004
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of SQLAlchemy backend."""
import sys
from oslo_config import cfg
from blazar.db import exceptions as db_exc
from blazar.db.sqlalchemy import facade_wrapper
from blazar.db.sqlalchemy import models
from oslo_db import exception as common_db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
# Maps a resource-type identifier to its extra-capability ORM model.
EXTRA_CAPABILITY_MODELS = {
    'physical:host': models.ComputeHostExtraCapability,
    'network': models.NetworkSegmentExtraCapability,
    'device': models.DeviceExtraCapability,
}

# Capability names that would shadow real model columns if accepted.
FORBIDDEN_EXTRA_CAPABILITY_NAMES = ["id", "reservable"]

LOG = logging.getLogger(__name__)

# Re-exported so callers obtain the engine/session through this module.
get_engine = facade_wrapper.get_engine
get_session = facade_wrapper.get_session
def get_backend():
    """Expose this very module as the DB backend implementation."""
    this_module = sys.modules[__name__]
    return this_module
def _read_deleted_filter(query, db_model, deleted):
if 'deleted' not in db_model.__table__.columns:
return query
default_deleted_value = None
if not deleted:
query = query.filter(db_model.deleted == default_deleted_value)
return query
def model_query(model, session=None, deleted=False):
    """Query helper.

    :param model: base model to query
    :param session: session to use; a fresh one is obtained when omitted
    :param deleted: when True, include soft-deleted rows in the result
    """
    session = session or get_session()
    return _read_deleted_filter(session.query(model), model, deleted)
def setup_db():
    """Create all tables; return True on success, False on failure."""
    try:
        engine = db_session.EngineFacade(cfg.CONF.database.connection,
                                         sqlite_fk=True).get_engine()
        models.Lease.metadata.create_all(engine)
    except sa.exc.OperationalError as e:
        # NOTE(review): only OperationalError is caught here while drop_db()
        # catches Exception — confirm the asymmetry is intentional.
        LOG.error("Database registration exception: %s", e)
        return False
    return True
def drop_db():
    """Drop all tables; return True on success, False on failure."""
    try:
        engine = db_session.EngineFacade(cfg.CONF.database.connection,
                                         sqlite_fk=True).get_engine()
        models.Lease.metadata.drop_all(engine)
    except Exception as e:
        LOG.error("Database shutdown exception: %s", e)
        return False
    return True
# Helpers for building constraints / equality checks

def constraint(**conditions):
    """Build a Constraint from keyword attribute/condition pairs."""
    return Constraint(conditions)


def equal_any(*values):
    """Condition satisfied when the column equals any of *values*."""
    return EqualityCondition(values)


def not_equal(*values):
    """Condition satisfied when the column differs from every *values*."""
    return InequalityCondition(values)
class Constraint(object):
    """A set of per-attribute conditions applied to a query as filters."""

    def __init__(self, conditions):
        # Mapping of model attribute name -> condition object.
        self.conditions = conditions

    def apply(self, model, query):
        """Return *query* filtered by every clause of every condition."""
        for key, condition in self.conditions.items():
            for clause in condition.clauses(getattr(model, key)):
                query = query.filter(clause)
        return query
class EqualityCondition(object):
    """Condition matching rows whose column equals any of the given values."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        # NOTE(review): sa.or_() yields a single clause which
        # Constraint.apply() then iterates — presumably relying on the
        # clause object being iterable; verify against the SQLAlchemy
        # version in use.
        return sa.or_([field == value for value in self.values])
class InequalityCondition(object):
    """Condition matching rows whose column differs from every given value."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        """Return one ``field != value`` clause per stored value."""
        clause_list = []
        for excluded in self.values:
            clause_list.append(field != excluded)
        return clause_list
# Reservation

def _reservation_get(session, reservation_id):
    """Return the reservation with *reservation_id* from *session*, or None."""
    query = model_query(models.Reservation, session)
    return query.filter_by(id=reservation_id).first()


def reservation_get(reservation_id):
    """Return the reservation with *reservation_id*, or None."""
    return _reservation_get(get_session(), reservation_id)


def reservation_get_all():
    """Return every non-deleted reservation."""
    query = model_query(models.Reservation, get_session())
    return query.all()


def reservation_get_all_by_lease_id(lease_id):
    """Return all reservations attached to lease *lease_id*."""
    reservations = (model_query(models.Reservation,
                                get_session()).filter_by(lease_id=lease_id))
    return reservations.all()
def reservation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    reservation_query = model_query(models.Reservation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.Reservation, name, None)
        # Explicit None check: ORM attributes must not be evaluated for
        # truthiness (SQLAlchemy clause elements can raise on bool()), and
        # this matches the `column is not None` test used by
        # host_get_all_by_queries().
        if column is not None:
            reservation_query = reservation_query.filter(column == value)
    return reservation_query.all()
def reservation_create(values):
    """Create a reservation from *values*; returns the stored row.

    :raises: BlazarDBDuplicateEntry on a unique-constraint violation.
    """
    values = values.copy()
    reservation = models.Reservation()
    reservation.update(values)

    session = get_session()
    with session.begin():
        try:
            reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=reservation.__class__.__name__, columns=e.columns)

    return reservation_get(reservation.id)


def reservation_update(reservation_id, values):
    """Apply *values* to the reservation; returns the refreshed row."""
    session = get_session()

    with session.begin():
        reservation = _reservation_get(session, reservation_id)
        reservation.update(values)
        reservation.save(session=session)

    return reservation_get(reservation_id)


def _reservation_destroy(session, reservation):
    """Soft-delete a reservation plus all its child rows and allocations."""
    if reservation.instance_reservation:
        reservation.instance_reservation.soft_delete(session=session)

    if reservation.computehost_reservation:
        reservation.computehost_reservation.soft_delete(session=session)

    if reservation.network_reservation:
        reservation.network_reservation.soft_delete(session=session)

    if reservation.floatingip_reservation:
        reservation.floatingip_reservation.soft_delete(session=session)

    if reservation.computehost_allocations:
        for computehost_allocation in reservation.computehost_allocations:
            computehost_allocation.soft_delete(session=session)

    if reservation.network_allocations:
        for network_allocation in reservation.network_allocations:
            network_allocation.soft_delete(session=session)

    if reservation.floatingip_allocations:
        for floatingip_allocation in reservation.floatingip_allocations:
            floatingip_allocation.soft_delete(session=session)

    reservation.soft_delete(session=session)


def reservation_destroy(reservation_id):
    """Soft-delete reservation *reservation_id* and its children.

    :raises: BlazarDBNotFound when no such reservation exists.
    """
    session = get_session()
    with session.begin():
        reservation = _reservation_get(session, reservation_id)

        if not reservation:
            # raise not found error
            raise db_exc.BlazarDBNotFound(id=reservation_id,
                                          model='Reservation')

        _reservation_destroy(session, reservation)
# Lease

def _lease_get(session, lease_id):
    """Return the lease with *lease_id* from *session*, or None."""
    query = model_query(models.Lease, session)
    return query.filter_by(id=lease_id).first()


def lease_get(lease_id):
    """Return the lease with *lease_id*, or None."""
    return _lease_get(get_session(), lease_id)


def lease_get_all():
    """Return every non-deleted lease."""
    query = model_query(models.Lease, get_session())
    return query.all()


def lease_get_all_by_project(project_id):
    """Not implemented."""
    raise NotImplementedError


def lease_get_all_by_user(user_id):
    """Not implemented."""
    raise NotImplementedError


def lease_list(project_id=None):
    """List leases, optionally restricted to a single project."""
    query = model_query(models.Lease, get_session())
    if project_id is not None:
        query = query.filter_by(project_id=project_id)
    return query.all()
def lease_create(values):
    """Create a lease plus its nested reservations and events.

    *values* may carry "reservations" and "events" lists; each entry is
    stored as its own row linked back to the new lease via lease_id.

    :raises: BlazarDBDuplicateEntry when any insert hits a unique constraint.
    """
    values = values.copy()
    lease = models.Lease()
    reservations = values.pop("reservations", [])
    events = values.pop("events", [])
    lease.update(values)

    session = get_session()
    with session.begin():
        try:
            lease.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=lease.__class__.__name__, columns=e.columns)

        try:
            for r in reservations:
                reservation = models.Reservation()
                reservation.update({"lease_id": lease.id})
                reservation.update(r)
                reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=reservation.__class__.__name__, columns=e.columns)

        try:
            # NOTE(review): the loop variable "e" is reused as the exception
            # alias below; harmless today, but easy to trip over.
            for e in events:
                event = models.Event()
                event.update({"lease_id": lease.id})
                event.update(e)
                event.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=event.__class__.__name__, columns=e.columns)

    return lease_get(lease.id)
def lease_update(lease_id, values):
    """Apply *values* to the lease; returns the refreshed row."""
    session = get_session()

    with session.begin():
        lease = _lease_get(session, lease_id)
        lease.update(values)
        lease.save(session=session)

    return lease_get(lease_id)


def lease_destroy(lease_id):
    """Soft-delete a lease and all its reservations and events.

    :raises: BlazarDBNotFound when no such lease exists.
    """
    session = get_session()
    with session.begin():
        lease = _lease_get(session, lease_id)

        if not lease:
            # raise not found error
            raise db_exc.BlazarDBNotFound(id=lease_id, model='Lease')

        for reservation in lease.reservations:
            _reservation_destroy(session, reservation)

        for event in lease.events:
            event.soft_delete(session=session)

        lease.soft_delete(session=session)
# Event

def _event_get(session, event_id, deleted=False):
    """Return the event with *event_id*, optionally including deleted rows."""
    query = model_query(models.Event, session, deleted=deleted)
    return query.filter_by(id=event_id).first()


def _event_get_all(session):
    """Return a query over all non-deleted events."""
    query = model_query(models.Event, session)
    return query


def event_get(event_id):
    """Return the event with *event_id*, or None."""
    return _event_get(get_session(), event_id)


def event_get_all():
    """Return every non-deleted event."""
    return _event_get_all(get_session()).all()
def _event_get_sorted_by_filters(sort_key, sort_dir, filters):
    """Return an event query filtered and sorted by name of the field."""
    order_functions = {'desc': desc, 'asc': asc}
    events_query = _event_get_all(get_session())

    # Straight equality filters, applied in a fixed order.
    for attr in ('status', 'lease_id', 'event_type'):
        if attr in filters:
            events_query = events_query.filter(
                getattr(models.Event, attr) == filters[attr])

    # Time filter: a {'border': value, 'op': comparison} mapping.
    if 'time' in filters:
        border = filters['time']['border']
        comparisons = {
            'lt': models.Event.time < border,
            'le': models.Event.time <= border,
            'gt': models.Event.time > border,
            'ge': models.Event.time >= border,
            'eq': models.Event.time == border,
        }
        clause = comparisons.get(filters['time']['op'])
        if clause is not None:
            events_query = events_query.filter(clause)

    events_query = events_query.order_by(
        order_functions[sort_dir](getattr(models.Event, sort_key))
    )
    return events_query
def event_get_first_sorted_by_filters(sort_key, sort_dir, filters):
    """Return first result for events

    Return the first result for all events matching the filters
    and sorted by name of the field.
    """
    return _event_get_sorted_by_filters(sort_key, sort_dir, filters).first()


def event_get_all_sorted_by_filters(sort_key, sort_dir, filters):
    """Return events filtered and sorted by name of the field."""
    return _event_get_sorted_by_filters(sort_key, sort_dir, filters).all()
def event_create(values):
    """Create an event from *values*; returns the stored row."""
    values = values.copy()
    event = models.Event()
    event.update(values)

    session = get_session()
    with session.begin():
        try:
            event.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=event.__class__.__name__, columns=e.columns)

    return event_get(event.id)


def event_update(event_id, values):
    """Apply *values* to the event; returns the refreshed row."""
    session = get_session()

    with session.begin():
        # NOTE(jason): Allow updating soft-deleted events
        event = _event_get(session, event_id, deleted=True)
        event.update(values)
        event.save(session=session)

    return event_get(event_id)


def event_destroy(event_id):
    """Soft-delete event *event_id*; raises BlazarDBNotFound if missing."""
    session = get_session()
    with session.begin():
        event = _event_get(session, event_id)

        if not event:
            # raise not found error
            raise db_exc.BlazarDBNotFound(id=event_id, model='Event')

        event.soft_delete(session=session)
# ComputeHostReservation

def _host_reservation_get(session, host_reservation_id):
    """Return the host reservation with the given id, or None."""
    query = model_query(models.ComputeHostReservation, session)
    return query.filter_by(id=host_reservation_id).first()


def host_reservation_get(host_reservation_id):
    """Return the host reservation with the given id, or None."""
    return _host_reservation_get(get_session(),
                                 host_reservation_id)


def host_reservation_get_all():
    """Return every non-deleted host reservation."""
    query = model_query(models.ComputeHostReservation, get_session())
    return query.all()


def _host_reservation_get_by_reservation_id(session, reservation_id):
    """Return the host reservation tied to *reservation_id*, or None."""
    query = model_query(models.ComputeHostReservation, session)
    return query.filter_by(reservation_id=reservation_id).first()


def host_reservation_get_by_reservation_id(reservation_id):
    """Return the host reservation tied to *reservation_id*, or None."""
    return _host_reservation_get_by_reservation_id(get_session(),
                                                   reservation_id)
def host_reservation_create(values):
    """Create a host reservation from *values*; returns the stored row."""
    values = values.copy()
    host_reservation = models.ComputeHostReservation()
    host_reservation.update(values)

    session = get_session()
    with session.begin():
        try:
            host_reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=host_reservation.__class__.__name__, columns=e.columns)

    return host_reservation_get(host_reservation.id)


def host_reservation_update(host_reservation_id, values):
    """Apply *values* to the host reservation; returns the refreshed row."""
    session = get_session()

    with session.begin():
        host_reservation = _host_reservation_get(session,
                                                 host_reservation_id)
        host_reservation.update(values)
        host_reservation.save(session=session)

    return host_reservation_get(host_reservation_id)


def host_reservation_destroy(host_reservation_id):
    """Soft-delete a host reservation; raises BlazarDBNotFound if missing."""
    session = get_session()
    with session.begin():
        host_reservation = _host_reservation_get(session,
                                                 host_reservation_id)

        if not host_reservation:
            # raise not found error
            raise db_exc.BlazarDBNotFound(
                id=host_reservation_id, model='ComputeHostReservation')

        host_reservation.soft_delete(session=session)
# InstanceReservation
def instance_reservation_create(values):
    """Create an InstanceReservations row; returns the stored copy."""
    instance_reservation = models.InstanceReservations()
    instance_reservation.update(values.copy())

    session = get_session()
    with session.begin():
        try:
            instance_reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=instance_reservation.__class__.__name__,
                columns=e.columns)

    return instance_reservation_get(instance_reservation.id)
def instance_reservation_get(instance_reservation_id, session=None):
    """Return the instance reservation with the given id, or None."""
    if not session:
        session = get_session()
    query = model_query(models.InstanceReservations, session)
    return query.filter_by(id=instance_reservation_id).first()
def instance_reservation_update(instance_reservation_id, values):
    """Apply *values* to the instance reservation; returns the refreshed row.

    :raises: BlazarDBNotFound when no such row exists.
    """
    session = get_session()
    with session.begin():
        instance_reservation = instance_reservation_get(
            instance_reservation_id, session)

        if not instance_reservation:
            raise db_exc.BlazarDBNotFound(
                id=instance_reservation_id, model='InstanceReservations')

        instance_reservation.update(values)
        instance_reservation.save(session=session)

    return instance_reservation_get(instance_reservation_id)
def instance_reservation_destroy(instance_reservation_id):
    """Soft-delete an instance reservation.

    :raises: BlazarDBNotFound when no such row exists.
    """
    session = get_session()
    with session.begin():
        # Look the row up through the session that owns this transaction,
        # the same way instance_reservation_update() does, rather than
        # letting the getter open a second session for the read.
        instance = instance_reservation_get(instance_reservation_id, session)

        if not instance:
            raise db_exc.BlazarDBNotFound(
                id=instance_reservation_id, model='InstanceReservations')

        instance.soft_delete(session=session)
# ComputeHostAllocation

def _host_allocation_get(session, host_allocation_id):
    """Return the host allocation with the given id, or None."""
    query = model_query(models.ComputeHostAllocation, session)
    return query.filter_by(id=host_allocation_id).first()


def host_allocation_get(host_allocation_id):
    """Return the host allocation with the given id, or None."""
    return _host_allocation_get(get_session(),
                                host_allocation_id)


def host_allocation_get_all():
    """Return every non-deleted host allocation."""
    query = model_query(models.ComputeHostAllocation, get_session())
    return query.all()
def host_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    allocation_query = model_query(models.ComputeHostAllocation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.ComputeHostAllocation, name, None)
        # Explicit None check: ORM attributes must not be evaluated for
        # truthiness; matches the `column is not None` test used by
        # host_get_all_by_queries().
        if column is not None:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def host_allocation_create(values):
    """Create a host allocation from *values*; returns the stored row."""
    values = values.copy()
    host_allocation = models.ComputeHostAllocation()
    host_allocation.update(values)

    session = get_session()
    with session.begin():
        try:
            host_allocation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=host_allocation.__class__.__name__, columns=e.columns)

    return host_allocation_get(host_allocation.id)


def host_allocation_update(host_allocation_id, values):
    """Apply *values* to the host allocation; returns the refreshed row."""
    session = get_session()

    with session.begin():
        host_allocation = _host_allocation_get(session,
                                               host_allocation_id)
        host_allocation.update(values)
        host_allocation.save(session=session)

    return host_allocation_get(host_allocation_id)


def host_allocation_destroy(host_allocation_id):
    """Soft-delete a host allocation; raises BlazarDBNotFound if missing."""
    session = get_session()
    with session.begin():
        host_allocation = _host_allocation_get(session,
                                               host_allocation_id)

        if not host_allocation:
            # raise not found error
            raise db_exc.BlazarDBNotFound(
                id=host_allocation_id, model='ComputeHostAllocation')

        host_allocation.soft_delete(session=session)
# ComputeHost

def _host_get(session, host_id):
    """Return the compute host with *host_id*, or None."""
    query = model_query(models.ComputeHost, session)
    return query.filter_by(id=host_id).first()


def _host_get_all(session):
    """Return a query over all non-deleted compute hosts."""
    query = model_query(models.ComputeHost, session)
    return query


def host_get(host_id):
    """Return the compute host with *host_id*, or None."""
    return _host_get(get_session(), host_id)


def host_list():
    """Return every non-deleted compute host."""
    return model_query(models.ComputeHost, get_session()).all()
def host_get_all_by_filters(filters):
    """Returns hosts filtered by name of the field.

    Only the 'status' key of *filters* is honoured here.
    """
    hosts_query = _host_get_all(get_session())

    if 'status' in filters:
        hosts_query = hosts_query.filter(
            models.ComputeHost.status == filters['status'])

    return hosts_query.all()
def host_get_all_by_queries(queries):
    """Returns hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators

    Keys naming a ComputeHost column become SQL filters; any other key is
    treated as an extra-capability name and evaluated in Python.
    """
    hosts_query = model_query(models.ComputeHost, get_session())

    # Maps the textual operator to [sqlalchemy method suffix, complement
    # lambda].  The lambda is the *negation* of the operator: it selects
    # hosts whose capability value fails the filter, which are collected in
    # ``hosts`` and excluded from the final result below.
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }

    hosts = []
    for query in queries:
        try:
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)

        column = getattr(models.ComputeHost, key, None)
        if column is not None:
            if op == 'in':
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # Resolve e.g. 'lt' to whichever of lt/lt_/__lt__ the
                    # column actually implements.
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)

                if value == 'null':
                    value = None

                filt = getattr(column, attr)(value)

            hosts_query = hosts_query.filter(filt)
        else:
            # looking for extra capabilities matches
            extra_filter = (
                _host_extra_capability_query(get_session())
                .filter(models.ExtraCapability.capability_name == key)
            ).all()

            if not extra_filter:
                raise db_exc.BlazarDBNotFound(
                    id=key, model='ComputeHostExtraCapability')

            for host, capability_name in extra_filter:
                if op in oper and oper[op][1](host.capability_value, value):
                    hosts.append(host.computehost_id)
                elif op not in oper:
                    msg = 'Operator %s for extra capabilities not implemented'
                    raise NotImplementedError(msg % op)

            # We must also avoid selecting any host which doesn't have the
            # extra capability present.
            all_hosts = [h.id for h in hosts_query.all()]
            extra_filter_hosts = [h.computehost_id for h, _ in extra_filter]
            hosts += [h for h in all_hosts if h not in extra_filter_hosts]

    return hosts_query.filter(~models.ComputeHost.id.in_(hosts)).all()
def reservable_host_get_all_by_queries(queries):
    """Returns reservable hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # Build a new list instead of appending to the caller's: mutating
    # *queries* in place made repeated calls with the same list stack up
    # duplicate 'reservable == 1' filters.
    return host_get_all_by_queries(list(queries) + ['reservable == 1'])
def unreservable_host_get_all_by_queries(queries):
    """Returns unreservable hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # TODO(hiro-kobayashi): support the expression 'reservable == False'
    # Copy before extending so the caller's list is not mutated.
    return host_get_all_by_queries(list(queries) + ['reservable == 0'])
def host_create(values):
    """Create a compute host from *values*; returns the stored row."""
    values = values.copy()
    host = models.ComputeHost()
    host.update(values)

    session = get_session()
    with session.begin():
        try:
            host.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=host.__class__.__name__, columns=e.columns)

    return host_get(host.id)


def host_update(host_id, values):
    """Apply *values* to the compute host; returns the refreshed row."""
    session = get_session()

    with session.begin():
        host = _host_get(session, host_id)
        host.update(values)
        host.save(session=session)

    return host_get(host_id)


def host_destroy(host_id):
    """Hard-delete a compute host; raises BlazarDBNotFound if missing.

    NOTE(review): this is a hard delete, unlike the soft_delete used by the
    reservation/allocation destroyers — confirm that is intentional.
    """
    session = get_session()
    with session.begin():
        host = _host_get(session, host_id)

        if not host:
            # raise not found error
            raise db_exc.BlazarDBNotFound(id=host_id, model='Host')

        session.delete(host)
# ComputeHostExtraCapability

def _host_extra_capability_query(session):
    """Query yielding (ComputeHostExtraCapability, capability_name) pairs."""
    return (
        model_query(models.ComputeHostExtraCapability, session)
        .join(models.ExtraCapability)
        .add_column(models.ExtraCapability.capability_name))


def _host_extra_capability_get(session, host_extra_capability_id):
    """Return the (capability row, name) pair for the given id, or None."""
    query = _host_extra_capability_query(session).filter(
        models.ComputeHostExtraCapability.id == host_extra_capability_id)
    return query.first()


def host_extra_capability_get(host_extra_capability_id):
    """Return the (capability row, name) pair for the given id, or None."""
    return _host_extra_capability_get(get_session(),
                                      host_extra_capability_id)


def _host_extra_capability_get_all_per_host(session, host_id):
    """Query all extra capabilities attached to host *host_id*."""
    query = _host_extra_capability_query(session).filter(
        models.ComputeHostExtraCapability.computehost_id == host_id)
    return query


def host_extra_capability_get_all_per_host(host_id):
    """Return all (capability row, name) pairs for host *host_id*."""
    return _host_extra_capability_get_all_per_host(get_session(),
                                                   host_id).all()
def host_extra_capability_create(values):
    """Create a host extra capability; returns the stored (row, name) pair.

    The 'capability_name' entry of *values* is swapped for a reference to
    the (possibly newly created) ExtraCapability row.
    """
    values = values.copy()

    resource_property = resource_property_get_or_create(
        'physical:host', values.get('capability_name'))

    del values['capability_name']
    values['capability_id'] = resource_property.id

    host_extra_capability = models.ComputeHostExtraCapability()
    host_extra_capability.update(values)

    session = get_session()
    with session.begin():
        try:
            host_extra_capability.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=host_extra_capability.__class__.__name__,
                columns=e.columns)

    return host_extra_capability_get(host_extra_capability.id)
def host_extra_capability_update(host_extra_capability_id, values):
    """Apply *values* to one extra capability; returns the refreshed pair."""
    session = get_session()

    with session.begin():
        # The query yields a (row, capability_name) pair; only the ORM row
        # is updated.
        host_extra_capability, _ = (
            _host_extra_capability_get(session,
                                       host_extra_capability_id))
        host_extra_capability.update(values)
        host_extra_capability.save(session=session)

    return host_extra_capability_get(host_extra_capability_id)


def host_extra_capability_destroy(host_extra_capability_id):
    """Hard-delete one extra capability row.

    :raises: BlazarDBNotFound when no such row exists.
    """
    session = get_session()
    with session.begin():
        host_extra_capability = _host_extra_capability_get(
            session, host_extra_capability_id)

        if not host_extra_capability:
            # raise not found error
            raise db_exc.BlazarDBNotFound(
                id=host_extra_capability_id,
                model='ComputeHostExtraCapability')

        # Index [0] picks the ORM object out of the (row, name) pair.
        session.delete(host_extra_capability[0])
def host_extra_capability_get_all_per_name(host_id, capability_name):
    """Return host *host_id*'s extra capabilities named *capability_name*."""
    session = get_session()

    with session.begin():
        query = _host_extra_capability_get_all_per_host(session, host_id)
        return query.filter(
            models.ExtraCapability.capability_name == capability_name).all()
# FloatingIP reservation

def fip_reservation_create(fip_reservation_values):
    """Create a floating IP reservation; returns the stored row."""
    values = fip_reservation_values.copy()
    fip_reservation = models.FloatingIPReservation()
    fip_reservation.update(values)

    session = get_session()
    with session.begin():
        try:
            fip_reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=fip_reservation.__class__.__name__, columns=e.columns)

    return fip_reservation_get(fip_reservation.id)


def _fip_reservation_get(session, fip_reservation_id):
    """Return the FIP reservation with the given id, or None."""
    query = model_query(models.FloatingIPReservation, session)
    return query.filter_by(id=fip_reservation_id).first()


def fip_reservation_get(fip_reservation_id):
    """Return the FIP reservation with the given id, or None."""
    return _fip_reservation_get(get_session(), fip_reservation_id)
def fip_reservation_update(fip_reservation_id, fip_reservation_values):
    """Apply *fip_reservation_values*; returns the refreshed row."""
    session = get_session()

    with session.begin():
        fip_reservation = _fip_reservation_get(session, fip_reservation_id)
        fip_reservation.update(fip_reservation_values)
        fip_reservation.save(session=session)

    return fip_reservation_get(fip_reservation_id)


def fip_reservation_destroy(fip_reservation_id):
    """Remove a FIP reservation; raises BlazarDBNotFound if missing."""
    session = get_session()
    with session.begin():
        fip_reservation = _fip_reservation_get(session, fip_reservation_id)

        if not fip_reservation:
            # raise not found error
            raise db_exc.BlazarDBNotFound(
                id=fip_reservation_id, model='FloatingIPReservation')

        # NOTE(review): both soft- and hard-deletes the row, unlike
        # host_reservation_destroy() which only soft-deletes — confirm the
        # asymmetry is intentional.
        fip_reservation.soft_delete(session=session)
        session.delete(fip_reservation)
# Required FIP

def required_fip_create(required_fip_values):
    """Create a required-floating-IP row; returns the stored row."""
    values = required_fip_values.copy()
    required_fip = models.RequiredFloatingIP()
    required_fip.update(values)

    session = get_session()
    with session.begin():
        try:
            required_fip.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=required_fip.__class__.__name__, columns=e.columns)

    return required_fip_get(required_fip.id)


def _required_fip_get(session, required_fip_id):
    """Return the required FIP with the given id, or None."""
    query = model_query(models.RequiredFloatingIP, session)
    return query.filter_by(id=required_fip_id).first()


def required_fip_get(required_fip_id):
    """Return the required FIP with the given id, or None."""
    return _required_fip_get(get_session(), required_fip_id)


def required_fip_update(required_fip_id, required_fip_values):
    """Apply *required_fip_values*; returns the refreshed row."""
    session = get_session()

    with session.begin():
        required_fip = _required_fip_get(session, required_fip_id)
        required_fip.update(required_fip_values)
        required_fip.save(session=session)

    return required_fip_get(required_fip_id)
def required_fip_destroy(required_fip_id):
    """Remove a required FIP row; raises BlazarDBNotFound if missing."""
    session = get_session()
    with session.begin():
        required_fip = _required_fip_get(session, required_fip_id)

        if not required_fip:
            # raise not found error
            raise db_exc.BlazarDBNotFound(
                id=required_fip_id, model='RequiredFloatingIP')

        required_fip.soft_delete(session=session)
        session.delete(required_fip)


def required_fip_destroy_by_fip_reservation_id(fip_reservation_id):
    """Remove every required FIP row tied to *fip_reservation_id*."""
    session = get_session()
    with session.begin():
        required_fips = model_query(
            models.RequiredFloatingIP, session).filter_by(
            floatingip_reservation_id=fip_reservation_id)
        for required_fip in required_fips:
            # NOTE(review): required_fip_destroy() opens its own
            # session.begin(); presumably the facade hands back the same
            # session so this nests cleanly — verify.
            required_fip_destroy(required_fip['id'])
# FloatingIP Allocation

def _fip_allocation_get(session, fip_allocation_id):
    """Return the FIP allocation with the given id, or None."""
    query = model_query(models.FloatingIPAllocation, session)
    return query.filter_by(id=fip_allocation_id).first()


def fip_allocation_get(fip_allocation_id):
    """Return the FIP allocation with the given id, or None."""
    return _fip_allocation_get(get_session(), fip_allocation_id)


def fip_allocation_create(allocation_values):
    """Create a FIP allocation; returns the stored row."""
    values = allocation_values.copy()
    fip_allocation = models.FloatingIPAllocation()
    fip_allocation.update(values)

    session = get_session()
    with session.begin():
        try:
            fip_allocation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=fip_allocation.__class__.__name__, columns=e.columns)

    return fip_allocation_get(fip_allocation.id)
def fip_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    allocation_query = model_query(models.FloatingIPAllocation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.FloatingIPAllocation, name, None)
        # Explicit None check: ORM attributes must not be evaluated for
        # truthiness; matches the `column is not None` test used by
        # host_get_all_by_queries().
        if column is not None:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def fip_allocation_destroy(allocation_id):
    """Remove a FIP allocation; raises BlazarDBNotFound if missing."""
    session = get_session()
    with session.begin():
        fip_allocation = _fip_allocation_get(session, allocation_id)

        if not fip_allocation:
            # raise not found error
            raise db_exc.BlazarDBNotFound(
                id=allocation_id, model='FloatingIPAllocation')

        fip_allocation.soft_delete(session=session)
        session.delete(fip_allocation)


def fip_allocation_update(allocation_id, allocation_values):
    """Apply *allocation_values*; returns the refreshed row."""
    session = get_session()

    with session.begin():
        fip_allocation = _fip_allocation_get(session, allocation_id)
        fip_allocation.update(allocation_values)
        fip_allocation.save(session=session)

    return fip_allocation_get(allocation_id)
# Floating IP

def _floatingip_get(session, floatingip_id):
    """Return the floating IP with *floatingip_id*, or None."""
    query = model_query(models.FloatingIP, session)
    return query.filter_by(id=floatingip_id).first()


def _floatingip_get_all(session):
    """Return a query over all non-deleted floating IPs."""
    query = model_query(models.FloatingIP, session)
    return query
def fip_get_all_by_queries(queries):
    """Returns Floating IPs filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators

    Unlike host_get_all_by_queries(), keys must name FloatingIP columns;
    there is no extra-capability fallback.
    """
    fips_query = model_query(models.FloatingIP, get_session())

    # Textual operator -> [sqlalchemy method suffix, complement lambda].
    # Only the suffix is used here; the lambdas mirror the host variant.
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }

    for query in queries:
        try:
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)

        column = getattr(models.FloatingIP, key, None)
        if column is not None:
            if op == 'in':
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # Resolve e.g. 'lt' to whichever of lt/lt_/__lt__ the
                    # column actually implements.
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)

                if value == 'null':
                    value = None

                filt = getattr(column, attr)(value)

            fips_query = fips_query.filter(filt)
        else:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)

    return fips_query.all()
def reservable_fip_get_all_by_queries(queries):
    """Returns reservable fips filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # Build a new list instead of appending to the caller's: mutating
    # *queries* in place made repeated calls with the same list stack up
    # duplicate 'reservable == 1' filters.
    return fip_get_all_by_queries(list(queries) + ['reservable == 1'])
def floatingip_get(floatingip_id):
return _floatingip_get(get_session(), floatingip_id)
def floatingip_list():
return model_query(models.FloatingIP, get_session()).all()
def floatingip_create(values):
    """Create a floating IP from *values* and return the saved row."""
    floatingip = models.FloatingIP()
    floatingip.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            floatingip.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=floatingip.__class__.__name__, columns=e.columns)
    return floatingip_get(floatingip.id)
def floatingip_destroy(floatingip_id):
    """Hard-delete the floating IP with the given id.

    :raises BlazarDBNotFound: if the floating IP does not exist.
    """
    session = get_session()
    with session.begin():
        floatingip = _floatingip_get(session, floatingip_id)
        if floatingip is None:
            raise db_exc.BlazarDBNotFound(id=floatingip_id,
                                          model='FloatingIP')
        session.delete(floatingip)
# Networks
def _network_get(session, network_id):
    """Return the NetworkSegment row with the given id, or None."""
    return (model_query(models.NetworkSegment, session)
            .filter_by(id=network_id)
            .first())
def _network_get_all(session):
    """Return an unfiltered query over all NetworkSegment rows."""
    return model_query(models.NetworkSegment, session)
def network_get(network_id):
    """Fetch a single network segment by id (None if absent)."""
    session = get_session()
    return _network_get(session, network_id)
def network_list():
    """Return every network segment record."""
    query = model_query(models.NetworkSegment, get_session())
    return query.all()
def network_create(values):
    """Create a network segment from *values* and return the saved row."""
    network = models.NetworkSegment()
    network.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            network.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=network.__class__.__name__, columns=e.columns)
    return network_get(network.id)
def network_update(network_id, values):
    """Update a network segment and return the refreshed row.

    :raises BlazarDBNotFound: if no segment with *network_id* exists.
    """
    session = get_session()
    with session.begin():
        network = _network_get(session, network_id)
        if not network:
            # Previously a missing row crashed with AttributeError on
            # network.update(); raise the same error the destroy path uses.
            raise db_exc.BlazarDBNotFound(
                id=network_id, model='Network segment')
        network.update(values)
        network.save(session=session)
    return network_get(network_id)
def network_destroy(network_id):
    """Hard-delete the network segment with the given id.

    :raises BlazarDBNotFound: if the segment does not exist.
    """
    session = get_session()
    with session.begin():
        network = _network_get(session, network_id)
        if network is None:
            raise db_exc.BlazarDBNotFound(
                id=network_id, model='Network segment')
        session.delete(network)
# NetworkAllocation
def _network_allocation_get(session, network_allocation_id):
    """Return the NetworkAllocation row with the given id, or None."""
    return (model_query(models.NetworkAllocation, session)
            .filter_by(id=network_allocation_id)
            .first())
def network_allocation_get(network_allocation_id):
    """Fetch a single network allocation by id (None if absent)."""
    session = get_session()
    return _network_allocation_get(session, network_allocation_id)
def network_allocation_get_all():
    """Return every network allocation record."""
    return model_query(models.NetworkAllocation, get_session()).all()
def network_allocation_create(values):
    """Create a network allocation from *values* and return the saved row."""
    allocation = models.NetworkAllocation()
    allocation.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            allocation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=allocation.__class__.__name__, columns=e.columns)
    return network_allocation_get(allocation.id)
def network_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value.

    Unknown column names are silently ignored, matching the behavior of
    the other *_get_all_by_values helpers.
    """
    allocation_query = model_query(models.NetworkAllocation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.NetworkAllocation, name, None)
        # Compare against None explicitly: truth-testing a SQLAlchemy
        # attribute is unreliable (the query helpers above use the same
        # 'is not None' convention).
        if column is not None:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def network_allocation_destroy(network_allocation_id):
    """Soft-delete a network allocation.

    :raises BlazarDBNotFound: if the allocation does not exist.
    """
    session = get_session()
    with session.begin():
        allocation = _network_allocation_get(session, network_allocation_id)
        if allocation is None:
            raise db_exc.BlazarDBNotFound(
                id=network_allocation_id, model='NetworkAllocation')
        allocation.soft_delete(session=session)
# NetworkReservation
def network_reservation_create(values):
    """Persist a NetworkReservation built from *values* and return it."""
    reservation = models.NetworkReservation()
    reservation.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=reservation.__class__.__name__,
                columns=e.columns)
    return network_reservation_get(reservation.id)
def network_reservation_get(network_reservation_id, session=None):
    """Return the NetworkReservation with the given id, or None.

    :param session: optional session to reuse; a fresh one is obtained
        when omitted.
    """
    query = model_query(models.NetworkReservation,
                        session or get_session())
    return query.filter_by(id=network_reservation_id).first()
def network_reservation_update(network_reservation_id, values):
    """Apply *values* to an existing network reservation.

    :raises BlazarDBNotFound: if the reservation does not exist.
    """
    session = get_session()
    with session.begin():
        reservation = network_reservation_get(network_reservation_id,
                                              session)
        if reservation is None:
            raise db_exc.BlazarDBNotFound(
                id=network_reservation_id, model='NetworkReservation')
        reservation.update(values)
        reservation.save(session=session)
    return network_reservation_get(network_reservation_id)
def network_reservation_destroy(network_reservation_id):
    """Soft-delete a network reservation.

    :raises BlazarDBNotFound: if the reservation does not exist.
    """
    session = get_session()
    with session.begin():
        reservation = network_reservation_get(network_reservation_id)
        if reservation is None:
            raise db_exc.BlazarDBNotFound(
                id=network_reservation_id, model='NetworkReservation')
        reservation.soft_delete(session=session)
def network_get_all_by_filters(filters):
    """Returns networks filtered by name of the field.

    Only the 'status' filter is currently recognized.
    """
    query = _network_get_all(get_session())
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    return query.all()
def network_get_all_by_queries(queries):
    """Return networks filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    :raises BlazarDBInvalidFilter: if a query string is malformed.
    :raises BlazarDBNotFound: if an unknown key matches no extra capability.
    :raises BlazarDBInvalidFilterOperator: if the operator is unsupported.
    """
    networks_query = model_query(models.NetworkSegment, get_session())
    # Maps the textual operator to [SQLAlchemy method name, negated python
    # comparison].  The negated form is applied to extra-capability values
    # below: networks FAILING the test are collected, and the final query
    # excludes the collected ids.
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }
    # Ids of networks to exclude from the final result.
    networks = []
    for query in queries:
        try:
            # Split on the first two spaces only, so the value may itself
            # contain spaces.
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
        column = getattr(models.NetworkSegment, key, None)
        if column is not None:
            if op == 'in':
                # 'in' takes a comma-separated list of values.
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # Resolve the comparison method name across SQLAlchemy
                    # naming schemes ('lt', 'lt_' or '__lt__').
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)
                if value == 'null':
                    # The literal string 'null' means SQL NULL.
                    value = None
                filt = getattr(column, attr)(value)
            networks_query = networks_query.filter(filt)
        else:
            # looking for extra capabilities matches
            extra_filter = (
                _network_extra_capability_query(get_session())
                .filter(models.ExtraCapability.capability_name == key)
            ).all()
            if not extra_filter:
                raise db_exc.BlazarDBNotFound(
                    id=key, model='NetworkSegmentExtraCapability')
            for network, capability_name in extra_filter:
                # Collect (for exclusion) networks whose capability value
                # fails the test; the lambda is the operator's negation.
                if op in oper and oper[op][1](network.capability_value, value):
                    networks.append(network.network_id)
                elif op not in oper:
                    msg = 'Operator %s for extra capabilities not implemented'
                    raise NotImplementedError(msg % op)
            # We must also avoid selecting any network which doesn't have the
            # extra capability present.
            all_networks = [h.id for h in networks_query.all()]
            extra_filter_networks = [h.network_id for h, _ in extra_filter]
            networks += [h for h in all_networks if h not in
                         extra_filter_networks]
    return networks_query.filter(~models.NetworkSegment.id.in_(networks)).all()
def reservable_network_get_all_by_queries(queries):
    """Return reservable networks filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # Build a new list instead of appending in place, so repeated calls
    # with the same list do not accumulate extra 'reservable' filters.
    return network_get_all_by_queries(queries + ['reservable == 1'])
def unreservable_network_get_all_by_queries(queries):
    """Return unreservable networks filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # TODO(hiro-kobayashi): support the expression 'reservable == False'
    # Build a new list instead of appending in place, so the caller's
    # list is not mutated.
    return network_get_all_by_queries(queries + ['reservable == 0'])
# NetworkSegmentExtraCapability
def _network_extra_capability_query(session):
    """Query extra-capability rows for networks, joined with their name.

    Each result is a (NetworkSegmentExtraCapability, capability_name) pair.
    """
    query = model_query(models.NetworkSegmentExtraCapability, session)
    query = query.join(models.ExtraCapability)
    return query.add_column(models.ExtraCapability.capability_name)
def _network_extra_capability_get(session, network_extra_capability_id):
    """Return the (capability row, name) pair for the given id, or None."""
    return (_network_extra_capability_query(session)
            .filter(models.NetworkSegmentExtraCapability.id ==
                    network_extra_capability_id)
            .first())
def network_extra_capability_get(network_extra_capability_id):
    """Fetch one network extra capability by id (None if absent)."""
    session = get_session()
    return _network_extra_capability_get(session,
                                         network_extra_capability_id)
def _network_extra_capability_get_all_per_network(session, network_id):
    """Query every extra capability attached to one network segment."""
    return _network_extra_capability_query(session).filter(
        models.NetworkSegmentExtraCapability.network_id == network_id)
def network_extra_capability_get_all_per_network(network_id):
    """List every extra capability of the given network segment."""
    session = get_session()
    query = _network_extra_capability_get_all_per_network(session,
                                                          network_id)
    return query.all()
def network_extra_capability_create(values):
    """Create an extra capability value for a network segment.

    Resolves (or creates) the ExtraCapability name row first, then stores
    the per-network value linked to it.
    """
    values = dict(values)
    capability_name = values.get('capability_name')
    resource_property = _resource_property_get_or_create(
        get_session(), 'network', capability_name)
    del values['capability_name']
    values['capability_id'] = resource_property.id
    capability = models.NetworkSegmentExtraCapability()
    capability.update(values)
    session = get_session()
    with session.begin():
        try:
            capability.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=capability.__class__.__name__,
                columns=e.columns)
    return network_extra_capability_get(capability.id)
def network_extra_capability_update(network_extra_capability_id, values):
    """Update an extra capability's value and return the refreshed row."""
    session = get_session()
    with session.begin():
        # The lookup yields a (row, capability_name) pair; only the row
        # itself is updated.
        capability, _ = _network_extra_capability_get(
            session, network_extra_capability_id)
        capability.update(values)
        capability.save(session=session)
    return network_extra_capability_get(network_extra_capability_id)
def network_extra_capability_destroy(network_extra_capability_id):
    """Hard-delete a network extra capability.

    :raises BlazarDBNotFound: if the capability does not exist.
    """
    session = get_session()
    with session.begin():
        result = _network_extra_capability_get(session,
                                               network_extra_capability_id)
        if result is None:
            raise db_exc.BlazarDBNotFound(
                id=network_extra_capability_id,
                model='NetworkSegmentExtraCapability')
        # The query joins ExtraCapability, so 'result' is a (row, name)
        # pair; delete the capability entity itself.
        session.delete(result[0])
def network_extra_capability_get_all_per_name(network_id, capability_name):
    """List a network's extra capabilities having the given name."""
    session = get_session()
    with session.begin():
        return (_network_extra_capability_get_all_per_network(session,
                                                              network_id)
                .filter_by(capability_name=capability_name)
                .all())
def network_extra_capability_get_latest_per_name(network_id, capability_name):
    """Return the most recently created capability row with this name."""
    session = get_session()
    with session.begin():
        query = (
            _network_extra_capability_get_all_per_network(session,
                                                          network_id)
            .filter(
                models.ExtraCapability.capability_name == capability_name)
            .order_by(
                models.NetworkSegmentExtraCapability.created_at.desc()))
        return query.first()
# Devices
def _device_get(session, device_id):
    """Return the Device row with the given id, or None."""
    return (model_query(models.Device, session)
            .filter_by(id=device_id)
            .first())
def _device_get_all(session):
    """Return an unfiltered query over all Device rows."""
    return model_query(models.Device, session)
def device_get(device_id):
    """Fetch a single device by id (None if absent)."""
    session = get_session()
    return _device_get(session, device_id)
def device_list():
    """Return every device record."""
    query = model_query(models.Device, get_session())
    return query.all()
def device_create(values):
    """Create a device from *values* and return the saved row."""
    device = models.Device()
    device.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            device.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=device.__class__.__name__, columns=e.columns)
    return device_get(device.id)
def device_update(device_id, values):
    """Update a device and return the refreshed row.

    :raises BlazarDBNotFound: if no device with *device_id* exists.
    """
    session = get_session()
    with session.begin():
        device = _device_get(session, device_id)
        if not device:
            # Previously a missing row crashed with AttributeError on
            # device.update(); raise the same error the destroy path uses.
            raise db_exc.BlazarDBNotFound(id=device_id, model='Device')
        device.update(values)
        device.save(session=session)
    return device_get(device_id)
def device_destroy(device_id):
    """Hard-delete the device with the given id.

    :raises BlazarDBNotFound: if the device does not exist.
    """
    session = get_session()
    with session.begin():
        device = _device_get(session, device_id)
        if device is None:
            raise db_exc.BlazarDBNotFound(id=device_id, model='Device')
        session.delete(device)
# DeviceAllocation
def _device_allocation_get(session, device_allocation_id):
    """Return the DeviceAllocation row with the given id, or None."""
    return (model_query(models.DeviceAllocation, session)
            .filter_by(id=device_allocation_id)
            .first())
def device_allocation_get(device_allocation_id):
    """Fetch a single device allocation by id (None if absent)."""
    session = get_session()
    return _device_allocation_get(session, device_allocation_id)
def device_allocation_get_all():
    """Return every device allocation record."""
    return model_query(models.DeviceAllocation, get_session()).all()
def device_allocation_create(values):
    """Create a device allocation from *values* and return the saved row."""
    allocation = models.DeviceAllocation()
    allocation.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            allocation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=allocation.__class__.__name__, columns=e.columns)
    return device_allocation_get(allocation.id)
def device_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value.

    Unknown column names are silently ignored, matching the behavior of
    the other *_get_all_by_values helpers.
    """
    allocation_query = model_query(models.DeviceAllocation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.DeviceAllocation, name, None)
        # Compare against None explicitly: truth-testing a SQLAlchemy
        # attribute is unreliable (the query helpers above use the same
        # 'is not None' convention).
        if column is not None:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def device_allocation_update(device_allocation_id, values):
    """Update a device allocation and return the refreshed row.

    :raises BlazarDBNotFound: if the allocation does not exist.
    """
    session = get_session()
    with session.begin():
        device_allocation = _device_allocation_get(session,
                                                   device_allocation_id)
        if not device_allocation:
            # Previously a missing row crashed with AttributeError; raise
            # the same error the destroy path uses.
            raise db_exc.BlazarDBNotFound(
                id=device_allocation_id, model='DeviceAllocation')
        device_allocation.update(values)
        device_allocation.save(session=session)
    return device_allocation_get(device_allocation_id)
def device_allocation_destroy(device_allocation_id):
    """Soft-delete a device allocation.

    :raises BlazarDBNotFound: if the allocation does not exist.
    """
    session = get_session()
    with session.begin():
        allocation = _device_allocation_get(session, device_allocation_id)
        if allocation is None:
            raise db_exc.BlazarDBNotFound(
                id=device_allocation_id, model='DeviceAllocation')
        allocation.soft_delete(session=session)
# DeviceReservation
def device_reservation_create(values):
    """Persist a DeviceReservation built from *values* and return it."""
    reservation = models.DeviceReservation()
    reservation.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=reservation.__class__.__name__,
                columns=e.columns)
    return device_reservation_get(reservation.id)
def device_reservation_get(device_reservation_id, session=None):
    """Return the DeviceReservation with the given id, or None.

    :param session: optional session to reuse; a fresh one is obtained
        when omitted.
    """
    query = model_query(models.DeviceReservation,
                        session or get_session())
    return query.filter_by(id=device_reservation_id).first()
def device_reservation_update(device_reservation_id, values):
    """Apply *values* to an existing device reservation.

    :raises BlazarDBNotFound: if the reservation does not exist.
    """
    session = get_session()
    with session.begin():
        reservation = device_reservation_get(device_reservation_id,
                                             session)
        if reservation is None:
            raise db_exc.BlazarDBNotFound(
                id=device_reservation_id, model='DeviceReservation')
        reservation.update(values)
        reservation.save(session=session)
    return device_reservation_get(device_reservation_id)
def device_reservation_destroy(device_reservation_id):
    """Soft-delete a device reservation.

    :raises BlazarDBNotFound: if the reservation does not exist.
    """
    session = get_session()
    with session.begin():
        reservation = device_reservation_get(device_reservation_id)
        if reservation is None:
            raise db_exc.BlazarDBNotFound(
                id=device_reservation_id, model='DeviceReservation')
        reservation.soft_delete(session=session)
def device_get_all_by_filters(filters):
    """Returns devices filtered by name of the field.

    Only the 'status' filter is currently recognized.
    """
    query = _device_get_all(get_session())
    if 'status' in filters:
        query = query.filter_by(status=filters['status'])
    return query.all()
def device_get_all_by_queries(queries):
    """Return devices filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    :raises BlazarDBInvalidFilter: if a query string is malformed.
    :raises BlazarDBNotFound: if an unknown key matches no extra capability.
    :raises BlazarDBInvalidFilterOperator: if the operator is unsupported.
    """
    devices_query = model_query(models.Device, get_session())
    # Maps the textual operator to [SQLAlchemy method name, negated python
    # comparison].  The negated form is applied to extra-capability values
    # below: devices FAILING the test are collected, and the final query
    # excludes the collected ids.
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }
    # Ids of devices to exclude from the final result.
    devices = []
    for query in queries:
        try:
            # Split on the first two spaces only, so the value may itself
            # contain spaces.
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
        column = getattr(models.Device, key, None)
        if column is not None:
            if op == 'in':
                # 'in' takes a comma-separated list of values.
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # Resolve the comparison method name across SQLAlchemy
                    # naming schemes ('lt', 'lt_' or '__lt__').
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)
                if value == 'null':
                    # The literal string 'null' means SQL NULL.
                    value = None
                filt = getattr(column, attr)(value)
            devices_query = devices_query.filter(filt)
        else:
            # looking for extra capabilities matches
            extra_filter = (
                _device_extra_capability_query(get_session())
                .filter(models.ExtraCapability.capability_name == key)
            ).all()
            if not extra_filter:
                raise db_exc.BlazarDBNotFound(
                    id=key, model='DeviceExtraCapability')
            for device, capability_name in extra_filter:
                # Collect (for exclusion) devices whose capability value
                # fails the test; the lambda is the operator's negation.
                if op in oper and oper[op][1](device.capability_value, value):
                    devices.append(device.device_id)
                elif op not in oper:
                    msg = 'Operator %s for extra capabilities not implemented'
                    raise NotImplementedError(msg % op)
            # We must also avoid selecting any device which doesn't have the
            # extra capability present.
            all_devices = [h.id for h in devices_query.all()]
            extra_filter_devices = [h.device_id for h, _ in extra_filter]
            devices += [h for h in all_devices if h not in
                        extra_filter_devices]
    return devices_query.filter(~models.Device.id.in_(devices)).all()
def reservable_device_get_all_by_queries(queries):
    """Return reservable devices filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # Build a new list instead of appending in place, so repeated calls
    # with the same list do not accumulate extra 'reservable' filters.
    return device_get_all_by_queries(queries + ['reservable == 1'])
def unreservable_device_get_all_by_queries(queries):
    """Return unreservable devices filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # TODO(hiro-kobayashi): support the expression 'reservable == False'
    # Build a new list instead of appending in place, so the caller's
    # list is not mutated.
    return device_get_all_by_queries(queries + ['reservable == 0'])
# DeviceExtraCapability
def _device_extra_capability_query(session):
    """Query extra-capability rows for devices, joined with their name.

    Each result is a (DeviceExtraCapability, capability_name) pair.
    """
    query = model_query(models.DeviceExtraCapability, session)
    query = query.join(models.ExtraCapability)
    return query.add_column(models.ExtraCapability.capability_name)
def _device_extra_capability_get(session, device_extra_capability_id):
    """Return the (capability row, name) pair for the given id, or None."""
    return (_device_extra_capability_query(session)
            .filter(models.DeviceExtraCapability.id ==
                    device_extra_capability_id)
            .first())
def device_extra_capability_get(device_extra_capability_id):
    """Fetch one device extra capability by id (None if absent)."""
    session = get_session()
    return _device_extra_capability_get(session,
                                        device_extra_capability_id)
def _device_extra_capability_get_all_per_device(session, device_id):
    """Query every extra capability attached to one device."""
    return _device_extra_capability_query(session).filter(
        models.DeviceExtraCapability.device_id == device_id)
def device_extra_capability_get_all_per_device(device_id):
    """List every extra capability of the given device."""
    session = get_session()
    query = _device_extra_capability_get_all_per_device(session, device_id)
    return query.all()
def device_extra_capability_create(values):
    """Create an extra capability value for a device.

    Resolves (or creates) the ExtraCapability name row first, then stores
    the per-device value linked to it.
    """
    values = dict(values)
    capability_name = values.get('capability_name')
    resource_property = _resource_property_get_or_create(
        get_session(), 'device', capability_name)
    del values['capability_name']
    values['capability_id'] = resource_property.id
    capability = models.DeviceExtraCapability()
    capability.update(values)
    session = get_session()
    with session.begin():
        try:
            capability.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=capability.__class__.__name__,
                columns=e.columns)
    return device_extra_capability_get(capability.id)
def device_extra_capability_update(device_extra_capability_id, values):
    """Update an extra capability's value and return the refreshed row."""
    session = get_session()
    with session.begin():
        # The lookup yields a (row, capability_name) pair; only the row
        # itself is updated.
        capability, _ = _device_extra_capability_get(
            session, device_extra_capability_id)
        capability.update(values)
        capability.save(session=session)
    return device_extra_capability_get(device_extra_capability_id)
def device_extra_capability_destroy(device_extra_capability_id):
    """Hard-delete a device extra capability.

    :raises BlazarDBNotFound: if the capability does not exist.
    """
    session = get_session()
    with session.begin():
        result = _device_extra_capability_get(session,
                                              device_extra_capability_id)
        if result is None:
            raise db_exc.BlazarDBNotFound(
                id=device_extra_capability_id,
                model='DeviceExtraCapability')
        # The query joins ExtraCapability, so 'result' is a (row, name)
        # pair; delete the capability entity itself.
        session.delete(result[0])
def device_extra_capability_get_all_per_name(device_id, capability_name):
    """List a device's extra capabilities having the given name."""
    session = get_session()
    with session.begin():
        return (_device_extra_capability_get_all_per_device(session,
                                                            device_id)
                .filter_by(capability_name=capability_name)
                .all())
def device_extra_capability_get_latest_per_name(device_id, capability_name):
    """Return the most recently created capability row with this name."""
    session = get_session()
    with session.begin():
        query = (
            _device_extra_capability_get_all_per_device(session, device_id)
            .filter(
                models.ExtraCapability.capability_name == capability_name)
            .order_by(models.DeviceExtraCapability.created_at.desc()))
        return query.first()
# Resource Properties
def _resource_property_get(session, resource_type, capability_name):
    """Return the ExtraCapability row matching type and name, or None."""
    return (model_query(models.ExtraCapability, session)
            .filter_by(resource_type=resource_type,
                       capability_name=capability_name)
            .first())
def resource_property_get(resource_type, capability_name):
    """Fetch one resource property by type and name (None if absent)."""
    session = get_session()
    return _resource_property_get(session, resource_type, capability_name)
def resource_properties_list(resource_type):
    """List (name, private, value) tuples for a resource type's properties.

    :raises BlazarDBExtraCapabilitiesNotEnabled: when the resource type
        has no extra-capability model registered.
    """
    try:
        resource_model = EXTRA_CAPABILITY_MODELS[resource_type]
    except KeyError:
        raise db_exc.BlazarDBExtraCapabilitiesNotEnabled(
            resource_type=resource_type)
    session = get_session()
    with session.begin():
        query = session.query(
            models.ExtraCapability.capability_name,
            models.ExtraCapability.private,
            resource_model.capability_value).join(resource_model).distinct()
        return query.all()
def _resource_property_create(session, values):
    """Insert an ExtraCapability row and return the stored record."""
    vals = values.copy()
    resource_property = models.ExtraCapability()
    resource_property.update(vals)
    with session.begin():
        try:
            resource_property.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided with an existing row.
            raise db_exc.BlazarDBDuplicateEntry(
                model=resource_property.__class__.__name__,
                columns=e.columns)
    return resource_property_get(vals.get('resource_type'),
                                 vals.get('capability_name'))
def resource_property_create(values):
    """Create a resource property using the default session."""
    session = get_session()
    return _resource_property_create(session, values)
def resource_property_update(resource_type, property_name, values):
    """Update a resource property's attributes (e.g. the private flag).

    :raises BlazarDBExtraCapabilitiesNotEnabled: for an unknown
        resource type.
    :raises BlazarDBInvalidExtraCapability: for an unknown property name.
    """
    if resource_type not in EXTRA_CAPABILITY_MODELS:
        raise db_exc.BlazarDBExtraCapabilitiesNotEnabled(
            resource_type=resource_type)
    session = get_session()
    with session.begin():
        resource_property = _resource_property_get(
            session, resource_type, property_name)
        if resource_property is None:
            raise db_exc.BlazarDBInvalidExtraCapability(
                property_name=property_name,
                resource_type=resource_type)
        resource_property.update(values.copy())
        resource_property.save(session=session)
    return resource_property_get(resource_type, property_name)
def _resource_property_get_or_create(session, resource_type, capability_name):
    """Fetch the named property, creating it if it does not exist yet.

    :raises BlazarDBForbiddenExtraCapability: for reserved property names.
    """
    if capability_name in FORBIDDEN_EXTRA_CAPABILITY_NAMES:
        raise db_exc.BlazarDBForbiddenExtraCapability(
            property_name=capability_name)
    existing = _resource_property_get(session, resource_type,
                                      capability_name)
    if existing:
        return existing
    return resource_property_create({
        'resource_type': resource_type,
        'capability_name': capability_name})
def resource_property_get_or_create(resource_type, capability_name):
    """Fetch-or-create a resource property using the default session."""
    session = get_session()
    return _resource_property_get_or_create(session, resource_type,
                                            capability_name)
# ---- NOTE(review): stray file-concatenation artifact removed here; the
# ---- module below (nova/virt/disk/api.py) is a separate source file.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import os
import random
import tempfile
if os.name != 'nt':
import crypt
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import paths
from nova import utils
from nova.virt.disk.mount import api as mount
from nova.virt.disk.vfs import api as vfs
from nova.virt import images
LOG = logging.getLogger(__name__)
# Configuration options governing image injection and resizing behavior.
disk_opts = [
    cfg.StrOpt('injected_network_template',
               default=paths.basedir_def('nova/virt/interfaces.template'),
               help='Template file for injected network'),
    # NOTE(yamahata): ListOpt won't work because the command may include a
    # comma. For example:
    #
    # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
    # --label %(fs_label)s %(target)s
    #
    # list arguments are comma separated and there is no way to
    # escape such commas.
    #
    cfg.MultiStrOpt('virt_mkfs',
                    default=[],
                    help='mkfs commands for ephemeral device. '
                         'The format is <os_type>=<mkfs command>'),
    cfg.BoolOpt('resize_fs_using_block_device',
                default=False,
                help='Attempt to resize the filesystem by accessing the '
                     'image over a block device. This is done by the host '
                     'and may not be necessary if the image contains a recent '
                     'version of cloud-init. Possible mechanisms require '
                     'the nbd driver (for qcow and raw), or loop (for raw).'),
]
CONF = cfg.CONF
CONF.register_opts(disk_opts)
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
# Parsed view of the 'virt_mkfs' option: per-OS mkfs command templates,
# plus an optional fallback registered under the key 'default'.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
# Filesystem used by mkfs() when neither 'virt_mkfs' nor
# 'default_ephemeral_format' yields a command for the guest OS type.
_DEFAULT_FS_BY_OSTYPE = {'linux': 'ext3',
                         'windows': 'ntfs'}
# Populate _MKFS_COMMAND/_DEFAULT_MKFS_COMMAND from configuration at
# import time.
for s in CONF.virt_mkfs:
    # NOTE(yamahata): mkfs command may includes '=' for its options.
    # So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
        if os_type == 'default':
            _DEFAULT_MKFS_COMMAND = mkfs_command
def mkfs(os_type, fs_label, target, run_as_root=True):
    """Format a file or block device using
    a user provided command for each os type.
    If user has not provided any configuration,
    format type will be used according to a
    default_ephemeral_format configuration
    or a system defaults.
    """
    template = _MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or ''
    mkfs_command = template % {'fs_label': fs_label, 'target': target}
    if mkfs_command:
        # A user-configured command wins.
        utils.execute(*mkfs_command.split(), run_as_root=run_as_root)
        return
    default_fs = CONF.default_ephemeral_format
    if not default_fs:
        default_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3')
    utils.mkfs(default_fs, target, fs_label, run_as_root=run_as_root)
def resize2fs(image, check_exit_code=False, run_as_root=False):
    """Run e2fsck then resize2fs so the filesystem fills *image*."""
    for cmd in (('e2fsck', '-fp', image), ('resize2fs', image)):
        utils.execute(*cmd,
                      check_exit_code=check_exit_code,
                      run_as_root=run_as_root)
def get_disk_size(path):
    """Get the (virtual) size of a disk image.

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
        by a virtual machine.
    """
    info = images.qemu_img_info(path)
    return info.virtual_size
def extend(image, size, use_cow=False):
    """Increase image to size.

    :param image: path to the disk image
    :param size: target size in bytes; a no-op unless larger than the
        image's current virtual size
    :param use_cow: True when the image is qcow2 rather than raw
    """
    if not can_resize_image(image, size):
        return
    utils.execute('qemu-img', 'resize', image, size)
    # if we can't access the filesystem, we can't do anything more
    if not is_image_partitionless(image, use_cow):
        return
    # NOTE(vish): attempts to resize filesystem
    if use_cow:
        if CONF.resize_fs_using_block_device:
            # in case of non-raw disks we can't just resize the image, but
            # rather the mounted device instead
            mounter = mount.Mount.instance_for_format(image, None, None,
                                                      'qcow2')
            if mounter.get_dev():
                resize2fs(mounter.device, run_as_root=True)
                mounter.unget_dev()
    else:
        resize2fs(image)
def can_resize_image(image, size):
    """Check whether we can resize the container image file."""
    LOG.debug(_('Checking if we can resize image %(image)s. '
                'size=%(size)s'), {'image': image, 'size': size})
    # Only growing an image is supported.
    if get_disk_size(image) >= size:
        LOG.debug(_('Cannot resize image %s to a smaller size.'), image)
        return False
    return True
def is_image_partitionless(image, use_cow=False):
    """Check whether we can resize the contained file system.

    Returns True only when the image holds a bare filesystem (no
    partition table), since resize2fs cannot operate on a partitioned
    disk image.

    :param image: path to the disk image
    :param use_cow: True when the image is qcow2 rather than raw
    """
    LOG.debug(_('Checking if we can resize filesystem inside %(image)s. '
                'CoW=%(use_cow)s'), {'image': image, 'use_cow': use_cow})
    # Check the image is unpartitioned
    if use_cow:
        # qcow2 can't be probed directly; try mounting it via the VFS
        # layer, which only succeeds for partitionless images.
        try:
            fs = vfs.VFS.instance_for_image(image, 'qcow2', None)
            fs.setup()
            fs.teardown()
        except exception.NovaException as e:
            LOG.debug(_('Unable to mount image %(image)s with '
                        'error %(error)s. Cannot resize.'),
                      {'image': image,
                       'error': e})
            return False
    else:
        # For raw, we can directly inspect the file system
        try:
            utils.execute('e2label', image)
        except processutils.ProcessExecutionError as e:
            # Fixed: the format string previously used the misspelled key
            # %(errror)s, which is absent from the argument dict and made
            # this log call raise KeyError when the message was rendered.
            LOG.debug(_('Unable to determine label for image %(image)s with '
                        'error %(error)s. Cannot resize.'),
                      {'image': image,
                       'error': e})
            return False
    return True
class _DiskImage(object):
    """Provide operations on a disk image file.

    Wraps a mount.Mount instance and tracks the mount directory plus any
    errors accumulated while attempting to mount the image.
    """

    # Prefix for temporary mount directories; also used by _reset() to
    # decide whether an adopted mount dir was created by this class (and
    # therefore should be removed by teardown()).
    tmp_prefix = 'openstack-disk-mount-tmp'

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These passed to each mounter
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir
        self.use_cow = use_cow
        self.device = None
        # Internal
        self._mkdir = False
        self._mounter = None
        self._errors = []
        if mount_dir:
            # If the directory is already a mount point, adopt the
            # existing device/mounter state instead of mounting again.
            device = self._device_for_path(mount_dir)
            if device:
                self._reset(device)

    @staticmethod
    def _device_for_path(path):
        """Return the device mounted at *path* per /proc/mounts, or None."""
        device = None
        path = os.path.realpath(path)
        with open("/proc/mounts", 'r') as ifp:
            for line in ifp:
                fields = line.split()
                # fields[0] is the device, fields[1] the mount point.
                if fields[1] == path:
                    device = fields[0]
                    break
        return device

    def _reset(self, device):
        """Reset internal state for a previously mounted directory."""
        self._mounter = mount.Mount.instance_for_device(self.image,
                                                        self.mount_dir,
                                                        self.partition,
                                                        device)
        # Only clean up the directory later if it looks like one of our
        # own temporary mount dirs.
        mount_name = os.path.basename(self.mount_dir or '')
        self._mkdir = mount_name.startswith(self.tmp_prefix)
        self.device = self._mounter.device

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes is used.

        True, or False is returned and the 'errors' attribute
        contains any diagnostics.
        """
        if self._mounter:
            raise exception.NovaException(_('image already mounted'))
        if not self.mount_dir:
            # Create (and remember to remove) a temporary mount point.
            self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix)
            self._mkdir = True
        imgfmt = "raw"
        if self.use_cow:
            imgfmt = "qcow2"
        mounter = mount.Mount.instance_for_format(self.image,
                                                  self.mount_dir,
                                                  self.partition,
                                                  imgfmt)
        if mounter.do_mount():
            self._mounter = mounter
        else:
            LOG.debug(mounter.error)
            self._errors.append(mounter.error)
        return bool(self._mounter)

    def umount(self):
        """Umount a mount point from the filesystem."""
        if self._mounter:
            self._mounter.do_umount()
            self._mounter = None

    def teardown(self):
        """Remove a disk image from the file system."""
        try:
            if self._mounter:
                self._mounter.do_teardown()
                self._mounter = None
        finally:
            # Remove the mount directory only if we created it ourselves.
            if self._mkdir:
                os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
                files=None, partition=None, use_cow=False, mandatory=()):
    """Inject the specified items into a disk image.

    If an item name is not specified in the MANDATORY iterable, then a warning
    is logged on failure to inject that item, rather than raising an exception.

    If PARTITION is specified the image is mounted as a fully partitioned
    disk and injection targets that partition number; otherwise the image is
    mounted as a single partition.

    Returns True if all requested operations completed without issue.
    Raises an exception if a mandatory item can't be injected.
    """
    LOG.debug(_("Inject data image=%(image)s key=%(key)s net=%(net)s "
                "metadata=%(metadata)s admin_password=<SANITIZED> "
                "files=%(files)s partition=%(partition)s use_cow=%(use_cow)s"),
              {'image': image, 'key': key, 'net': net, 'metadata': metadata,
               'files': files, 'partition': partition, 'use_cow': use_cow})
    fmt = "raw"
    if use_cow:
        fmt = "qcow2"

    # Explicit mapping of injectable item names to values; replaces a
    # fragile locals() lookup. An unknown name in 'mandatory' still raises
    # (KeyError), preserving the previous behavior.
    items = {'key': key, 'net': net, 'metadata': metadata,
             'admin_password': admin_password, 'files': files}

    try:
        # Note(mrda): Test if the image exists first to short circuit errors
        os.stat(image)
        fs = vfs.VFS.instance_for_image(image, fmt, partition)
        fs.setup()
    except Exception as e:
        # If a mandatory item is passed to this function,
        # then reraise the exception to indicate the error.
        for inject in mandatory:
            if items[inject]:
                raise
        LOG.warn(_('Ignoring error injecting data into image '
                   '(%(e)s)'), {'e': e})
        return False

    try:
        return inject_data_into_fs(fs, key, net, metadata,
                                   admin_password, files, mandatory)
    finally:
        fs.teardown()
def setup_container(image, container_dir, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    Returns path of image device which is mounted to the container directory.
    """
    img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
    if not img.mount():
        # Fixed: previously the _DiskImage object itself was logged as
        # "image"; log the image path the caller supplied instead.
        LOG.error(_("Failed to mount container filesystem '%(image)s' "
                    "on '%(target)s': %(errors)s"),
                  {"image": image, "target": container_dir,
                   "errors": img.errors})
        raise exception.NovaException(img.errors)
    else:
        return img.device
def teardown_container(container_dir, container_root_device=None):
    """Unmount a spawned container's root filesystem.

    Also detaches any loop or nbd device that was attached for the
    container root, if one was passed in.
    """
    try:
        _DiskImage(image=None, mount_dir=container_dir).teardown()

        # Make sure container_root_device is released when teardown container.
        if not container_root_device:
            return
        if 'loop' in container_root_device:
            LOG.debug(_("Release loop device %s"), container_root_device)
            utils.execute('losetup', '--detach', container_root_device,
                          run_as_root=True, attempts=3)
        else:
            LOG.debug(_('Release nbd device %s'), container_root_device)
            utils.execute('qemu-nbd', '-d', container_root_device,
                          run_as_root=True)
    except Exception as exn:
        LOG.exception(_('Failed to teardown container filesystem: %s'), exn)
def clean_lxc_namespace(container_dir):
    """Unmount the container namespace rootfs once spawned.

    Only unmounts the mounted directory; any linked devices are left
    untouched.
    """
    try:
        disk_image = _DiskImage(image=None, mount_dir=container_dir)
        disk_image.umount()
    except Exception as exn:
        LOG.exception(_('Failed to umount container filesystem: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
                        mandatory=()):
    """Injects data into a filesystem already mounted by the caller.

    Virt connections can call this directly if they mount their fs
    in a different way to inject_data.

    If an item name is not specified in the MANDATORY iterable, then a warning
    is logged on failure to inject that item, rather than raising an exception.

    Returns True if all requested operations completed without issue.
    Raises an exception if a mandatory item can't be injected.
    """
    # Ordered (name, value, injector) triples; replaces fragile
    # locals()/globals() reflection while preserving injection order.
    # The functions are looked up by name at call time, so monkeypatching
    # the module-level _inject_* functions still works.
    injections = [
        ('key', key, _inject_key_into_fs),
        ('net', net, _inject_net_into_fs),
        ('metadata', metadata, _inject_metadata_into_fs),
        ('admin_password', admin_password, _inject_admin_password_into_fs),
        ('files', files, _inject_files_into_fs),
    ]

    status = True
    for inject, inject_val, inject_func in injections:
        if not inject_val:
            continue
        try:
            inject_func(inject_val, fs)
        except Exception as e:
            if inject in mandatory:
                raise
            LOG.warn(_('Ignoring error injecting %(inject)s into image '
                       '(%(e)s)'), {'e': e, 'inject': inject})
            status = False
    return status
def _inject_files_into_fs(files, fs):
    # Each entry is a (guest_path, contents) pair.
    for guest_path, contents in files:
        _inject_file_into_fs(fs, guest_path, contents)
def _inject_file_into_fs(fs, path, contents, append=False):
    """Write (or append) contents to path inside the mounted filesystem."""
    LOG.debug(_("Inject file fs=%(fs)s path=%(path)s append=%(append)s"),
              {'fs': fs, 'path': path, 'append': append})
    writer = fs.append_file if append else fs.replace_file
    writer(path, contents)
def _inject_metadata_into_fs(metadata, fs):
    """Serialize instance metadata items to meta.js inside the guest fs."""
    LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s"),
              {'fs': fs, 'metadata': metadata})
    # Flatten the list of {'key': ..., 'value': ...} items into one dict.
    flattened = dict((item['key'], item['value']) for item in metadata)
    _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(flattened))
def _setup_selinux_for_keys(fs, sshdir):
    """Get selinux guests to ensure correct context on injected keys."""
    if not fs.has_file(os.path.join("etc", "selinux")):
        # Not an SELinux guest; nothing to do.
        return

    rclocal = os.path.join('etc', 'rc.local')
    rc_d = os.path.join('etc', 'rc.d')

    # Some distributions keep rc.local under /etc/rc.d instead.
    if not fs.has_file(rclocal) and fs.has_file(rc_d):
        rclocal = os.path.join(rc_d, 'rc.local')

    # Note some systems end rc.local with "exit 0"
    # and so to append there you'd need something like:
    # utils.execute('sed', '-i', '${/^exit 0$/d}' rclocal, run_as_root=True)
    snippet = [
        '\n',
        '# Added by Nova to ensure injected ssh keys have the right context\n',
        'restorecon -RF %s 2>/dev/null || :\n' % sshdir,
    ]

    if not fs.has_file(rclocal):
        # A brand new rc.local needs an interpreter line.
        snippet.insert(0, '#!/bin/sh')

    _inject_file_into_fs(fs, rclocal, ''.join(snippet), append=True)
    fs.set_permissions(rclocal, 0o700)
def _inject_key_into_fs(key, fs):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject the key.
    """
    LOG.debug(_("Inject key fs=%(fs)s key=%(key)s"), {'fs': fs, 'key': key})

    ssh_dir = os.path.join('root', '.ssh')
    fs.make_path(ssh_dir)
    fs.set_ownership(ssh_dir, "root", "root")
    fs.set_permissions(ssh_dir, 0o700)

    keyfile = os.path.join(ssh_dir, 'authorized_keys')
    # Surround the key with newlines and a provenance comment.
    key_data = '\n'.join([
        '',
        '# The following ssh key was injected by Nova',
        key.strip(),
        '',
    ])
    _inject_file_into_fs(fs, keyfile, key_data, append=True)
    fs.set_permissions(keyfile, 0o600)

    _setup_selinux_for_keys(fs, ssh_dir)
def _inject_net_into_fs(net, fs):
    """Inject /etc/network/interfaces into the filesystem rooted at fs.

    net is the contents of /etc/network/interfaces.
    """
    # Log message fixed: this injects network configuration, not a key
    # (the old message said "Inject key").
    LOG.debug(_("Inject net fs=%(fs)s net=%(net)s"), {'fs': fs, 'net': net})
    netdir = os.path.join('etc', 'network')
    fs.make_path(netdir)
    fs.set_ownership(netdir, "root", "root")
    fs.set_permissions(netdir, 0o744)

    netfile = os.path.join(netdir, 'interfaces')
    _inject_file_into_fs(fs, netfile, net)
def _inject_admin_password_into_fs(admin_passwd, fs):
    """Set the root password to admin_passwd

    admin_passwd is the plaintext root password.
    fs is the path to the base of the filesystem into which to inject
    the password.

    This method modifies the instance filesystem directly,
    and does not require a guest agent running in the instance.
    """
    # Read the passwd and shadow files from the instance filesystem,
    # update the root entry, and write the shadow file back.
    # (Two leftover tempfile.mkstemp() calls were removed here: the files
    # were created on every call, never used, and never deleted.)
    LOG.debug(_("Inject admin password fs=%(fs)s "
                "admin_passwd=<SANITIZED>"), {'fs': fs})
    admin_user = 'root'

    passwd_path = os.path.join('etc', 'passwd')
    shadow_path = os.path.join('etc', 'shadow')

    passwd_data = fs.read_file(passwd_path)
    shadow_data = fs.read_file(shadow_path)

    new_shadow_data = _set_passwd(admin_user, admin_passwd,
                                  passwd_data, shadow_data)
    fs.replace_file(shadow_path, new_shadow_data)
def _generate_salt():
salt_set = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789./')
salt = 16 * ' '
return ''.join([random.choice(salt_set) for c in salt])
def _set_passwd(username, admin_passwd, passwd_data, shadow_data):
    """set the password for username to admin_passwd

    The passwd_file is not modified.  The shadow_file is updated.
    if the username is not found in both files, an exception is raised.

    :param username: the username
    :param admin_passwd: the plaintext password
    :param passwd_data: path to the passwd file contents
    :param shadow_data: the shadow password file contents
    :returns: the updated shadow file contents
    :raises: exception.NovaException(), IOError()

    """
    if os.name == 'nt':
        raise exception.NovaException(_('Not implemented on Windows'))

    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    salt = _generate_salt()

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    p_file = passwd_data.split("\n")
    s_file = shadow_data.split("\n")

    # username MUST exist in passwd file or it's an error
    found = False
    for entry in p_file:
        split_entry = entry.split(':')
        if split_entry[0] == username:
            found = True
            break
    if not found:
        msg = _('User %(username)s not found in password file.')
        # Fixed: '%(username)s' requires a mapping; 'msg % username'
        # raised TypeError instead of producing the message.
        raise exception.NovaException(msg % {'username': username})

    # update password in the shadow file.It's an error if the
    # the user doesn't exist.
    new_shadow = list()
    found = False
    for entry in s_file:
        split_entry = entry.split(':')
        if split_entry[0] == username:
            # Field 1 of a shadow entry is the password hash.
            split_entry[1] = encrypted_passwd
            found = True
        new_entry = ':'.join(split_entry)
        new_shadow.append(new_entry)
    if not found:
        msg = _('User %(username)s not found in shadow file.')
        # Same mapping fix as above.
        raise exception.NovaException(msg % {'username': username})
    return "\n".join(new_shadow)
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pwd
import six
import sys
import traceback
import tempfile
from fabric.api import (put, run, sudo)
from fabric.context_managers import shell_env
from fabric.context_managers import settings
from fabric.tasks import WrappedCallableTask
from st2common import log as logging
from st2common.util.shell import quote_unix
from st2common.constants.action import LIBS_DIR as ACTION_LIBS_DIR
from st2common.exceptions.fabricrunner import FabricExecutionFailureException
import st2common.util.jsonify as jsonify
# Public API of this module.
__all__ = [
    'ShellCommandAction',
    'ShellScriptAction',
    'RemoteAction',
    'RemoteScriptAction',
    'ParamikoSSHCommandAction',
    'FabricRemoteAction',
    'FabricRemoteScriptAction'
]

LOG = logging.getLogger(__name__)

# Username this process runs as; used to decide whether a command needs
# 'sudo -u' to switch users.
LOGGED_USER_USERNAME = pwd.getpwuid(os.getuid())[0]
class ShellCommandAction(object):
    """A shell command executed locally, optionally via sudo or as a
    different user.
    """

    def __init__(self, name, action_exec_id, command, user, env_vars=None, sudo=False,
                 timeout=None, cwd=None):
        self.name = name
        self.action_exec_id = action_exec_id
        self.command = command
        self.env_vars = env_vars or {}
        self.user = user
        self.sudo = sudo
        self.timeout = timeout
        self.cwd = cwd

    def get_full_command_string(self):
        """Return the command wrapped for sudo / user switching as needed."""
        # Note: We pass -E to sudo because we want to preserve user provided
        # environment variables
        if self.sudo:
            return 'sudo -E -- bash -c %s' % (quote_unix(self.command))

        if self.user and self.user != LOGGED_USER_USERNAME:
            # Need to use sudo to run as a different user
            return 'sudo -E -u %s -- bash -c %s' % (quote_unix(self.user),
                                                    quote_unix(self.command))

        return self.command

    def _get_command_string(self, cmd, args):
        """
        Escape the command arguments and form a command string.

        :type cmd: ``str``
        :type args: ``list``

        :rtype: ``str``
        """
        assert isinstance(args, (list, tuple))
        escaped = ' '.join([quote_unix(arg) for arg in args])
        return '%s %s' % (cmd, escaped)

    def _get_error_result(self):
        """
        Prepares a structured error result based on the currently handled
        exception.

        :rtype: ``dict``
        """
        _, exc_value, exc_traceback = sys.exc_info()
        is_fabric_failure = isinstance(exc_value, FabricExecutionFailureException)

        exc_value = str(exc_value)
        exc_traceback = ''.join(traceback.format_tb(exc_traceback))

        if is_fabric_failure:
            if 'get_transport().open_session()' in exc_traceback:
                # Invalid authentication information
                exc_value = 'Cannot connect to the server - invalid authentication info provided'
            elif 'sudo password' in exc_value:
                # sudo is not setup or it requires password
                exc_value = 'Passwordless sudo needs to be setup for user: %s' % (self.user)

        return {
            'failed': True,
            'succeeded': False,
            'error': exc_value,
            'traceback': exc_traceback
        }
class ShellScriptAction(ShellCommandAction):
    """A local action backed by a script file rather than an inline command."""

    def __init__(self, name, action_exec_id, script_local_path_abs, named_args=None,
                 positional_args=None, env_vars=None, user=None, sudo=False, timeout=None,
                 cwd=None):
        super(ShellScriptAction, self).__init__(name=name, action_exec_id=action_exec_id,
                                                command=None, user=user, env_vars=env_vars,
                                                sudo=sudo, timeout=timeout, cwd=cwd)
        self.script_local_path_abs = script_local_path_abs
        self.named_args = named_args
        self.positional_args = positional_args

    def get_full_command_string(self):
        """Return the script invocation wrapped for sudo / user switching."""
        script_arguments = self._get_script_arguments(named_args=self.named_args,
                                                      positional_args=self.positional_args)

        def quoted_invocation():
            # Quote "<script> <args>" as a single bash -c argument.
            if script_arguments:
                return quote_unix('%s %s' % (self.script_local_path_abs, script_arguments))
            return quote_unix(self.script_local_path_abs)

        if self.sudo:
            return 'sudo -E -- bash -c %s' % (quoted_invocation())

        if self.user and self.user != LOGGED_USER_USERNAME:
            # Need to use sudo to run as a different user
            return 'sudo -E -u %s -- bash -c %s' % (quote_unix(self.user),
                                                    quoted_invocation())

        script_path = quote_unix(self.script_local_path_abs)
        if script_arguments:
            return '%s %s' % (script_path, script_arguments)
        return script_path

    def _get_script_arguments(self, named_args=None, positional_args=None):
        """
        Build a string of named and positional arguments which are passed to the
        script.

        :param named_args: Dictionary with named arguments.
        :type named_args: ``dict``.

        :param positional_args: List with positional arguments.
        :type positional_args: ``dict``.

        :rtype: ``str``
        """
        command_parts = []

        # add all named_args in the format <kwarg_op>name=value (e.g. --name=value)
        if named_args is not None:
            for arg, value in six.iteritems(named_args):
                is_empty_string = isinstance(value, (str, unicode)) and len(value) < 1
                if value is None or is_empty_string:
                    LOG.debug('Ignoring arg %s as its value is %s.', arg, value)
                    continue
                if isinstance(value, bool):
                    # Boolean flags are passed bare, and only when True.
                    if value is True:
                        command_parts.append(arg)
                else:
                    command_parts.append('%s=%s' % (arg, quote_unix(str(value))))

        # add the positional args
        if positional_args:
            command_parts.append(positional_args)
        return ' '.join(command_parts)
class SSHCommandAction(ShellCommandAction):
    """A shell command to be executed over SSH on one or more hosts."""

    def __init__(self, name, action_exec_id, command, env_vars, user, password=None, pkey=None,
                 hosts=None, parallel=True, sudo=False, timeout=None, cwd=None):
        super(SSHCommandAction, self).__init__(name=name, action_exec_id=action_exec_id,
                                               command=command, env_vars=env_vars, user=user,
                                               sudo=sudo, timeout=timeout, cwd=cwd)
        self.hosts = hosts
        self.parallel = parallel
        self.pkey = pkey
        self.password = password

    def is_parallel(self):
        return self.parallel

    def is_sudo(self):
        return self.sudo

    def get_user(self):
        return self.user

    def get_hosts(self):
        return self.hosts

    def is_pkey_authentication(self):
        # Key-based auth is used whenever a private key was supplied.
        return self.pkey is not None

    def get_pkey(self):
        return self.pkey

    def get_password(self):
        return self.password

    def get_command(self):
        return self.command

    def __str__(self):
        parts = [
            '%s@%s(name: %s' % (self.__class__.__name__, id(self), self.name),
            'id: %s' % self.action_exec_id,
            'command: %s' % self.command,
            'user: %s' % self.user,
            'sudo: %s' % str(self.sudo),
            'parallel: %s' % str(self.parallel),
            'hosts: %s)' % str(self.hosts),
        ]
        return ', '.join(parts)
class RemoteAction(SSHCommandAction):
    """An SSH command action that also carries audit and key information."""

    def __init__(self, name, action_exec_id, command, env_vars=None, on_behalf_user=None,
                 user=None, password=None, private_key=None, hosts=None, parallel=True, sudo=False,
                 timeout=None, cwd=None):
        super(RemoteAction, self).__init__(name=name, action_exec_id=action_exec_id,
                                           command=command, env_vars=env_vars, user=user,
                                           hosts=hosts, parallel=parallel, sudo=sudo,
                                           timeout=timeout, cwd=cwd)
        self.password = password
        self.private_key = private_key
        # Used for audit purposes.
        self.on_behalf_user = on_behalf_user
        self.timeout = timeout

    def get_on_behalf_user(self):
        return self.on_behalf_user

    def __str__(self):
        parts = [
            '%s@%s(name: %s' % (self.__class__.__name__, id(self), self.name),
            'id: %s' % self.action_exec_id,
            'command: %s' % self.command,
            'user: %s' % self.user,
            'on_behalf_user: %s' % self.on_behalf_user,
            'sudo: %s' % str(self.sudo),
            'parallel: %s' % str(self.parallel),
            'hosts: %s)' % str(self.hosts),
        ]
        return ', '.join(parts)
class RemoteScriptAction(ShellScriptAction):
    """A script action that is copied to and executed on remote hosts."""

    def __init__(self, name, action_exec_id, script_local_path_abs, script_local_libs_path_abs,
                 named_args=None, positional_args=None, env_vars=None, on_behalf_user=None,
                 user=None, password=None, private_key=None, remote_dir=None, hosts=None,
                 parallel=True, sudo=False, timeout=None, cwd=None):
        super(RemoteScriptAction, self).__init__(name=name, action_exec_id=action_exec_id,
                                                 script_local_path_abs=script_local_path_abs,
                                                 user=user,
                                                 named_args=named_args,
                                                 positional_args=positional_args, env_vars=env_vars,
                                                 sudo=sudo, timeout=timeout, cwd=cwd)
        self.script_local_libs_path_abs = script_local_libs_path_abs
        self.script_local_dir, self.script_name = os.path.split(self.script_local_path_abs)
        self.remote_dir = remote_dir if remote_dir is not None else '/tmp'
        self.remote_libs_path_abs = os.path.join(self.remote_dir, ACTION_LIBS_DIR)
        self.on_behalf_user = on_behalf_user
        self.password = password
        self.private_key = private_key
        # Quote the script name so special characters survive the shell.
        self.remote_script = os.path.join(self.remote_dir, quote_unix(self.script_name))
        self.hosts = hosts
        self.parallel = parallel
        self.command = self._format_command()
        LOG.debug('RemoteScriptAction: command to run on remote box: %s', self.command)

    def _format_command(self):
        # Remote invocation: "<remote_script> [<args>]".
        script_arguments = self._get_script_arguments(named_args=self.named_args,
                                                      positional_args=self.positional_args)
        if not script_arguments:
            return self.remote_script
        return '%s %s' % (self.remote_script, script_arguments)

    def __str__(self):
        parts = [
            '%s@%s(name: %s' % (self.__class__.__name__, id(self), self.name),
            'id: %s' % self.action_exec_id,
            'local_script: %s' % self.script_local_path_abs,
            'local_libs: %s' % self.script_local_libs_path_abs,
            'remote_dir: %s' % self.remote_dir,
            'remote_libs: %s' % self.remote_libs_path_abs,
            'named_args: %s' % self.named_args,
            'positional_args: %s' % self.positional_args,
            'user: %s' % self.user,
            'on_behalf_user: %s' % self.on_behalf_user,
            'sudo: %s' % self.sudo,
            'parallel: %s' % self.parallel,
            'hosts: %s)' % self.hosts,
        ]
        return ', '.join(parts)
class ParamikoSSHCommandAction(SSHCommandAction):
    # Marker subclass: behaviorally identical to SSHCommandAction; used to
    # tag actions executed by the paramiko-based runner.
    pass
class FabricRemoteAction(RemoteAction):
    """Remote command action executed through Fabric's run()/sudo()."""

    # Result keys whose values may need JSON decoding before being returned.
    KEYS_TO_TRANSFORM = ['stdout', 'stderr']

    def get_fabric_task(self):
        # Wrap the run/sudo implementation as a Fabric task.
        action_method = self._get_action_method()
        LOG.debug('action_method is %s', action_method)
        task = WrappedCallableTask(action_method, name=self.name, alias=self.action_exec_id,
                                   parallel=self.parallel, sudo=self.sudo)

        # We need to explicitly set that since WrappedCallableTask abuses kwargs
        # and doesn't do anything with "parallel" and "serial" kwarg.
        # We also need to explicitly set serial since we default to
        # parallel=True in the environment so just "parallel" won't do.
        task.parallel = self.parallel
        task.serial = not self.parallel
        return task

    def _get_action_method(self):
        # Choose the sudo or plain-run implementation.
        if (self.sudo):
            return self._sudo
        return self._run

    def _run(self):
        # Execute self.command remotely via fabric's run().
        fabric_env_vars = self.env_vars
        fabric_settings = self._get_settings()

        try:
            with shell_env(**fabric_env_vars), settings(**fabric_settings):
                output = run(self.command, combine_stderr=False, pty=False, quiet=True)
        except Exception:
            LOG.exception('Failed executing remote action.')
            result = self._get_error_result()
        else:
            result = {
                'stdout': output.stdout,
                'stderr': output.stderr,
                'return_code': output.return_code,
                'succeeded': output.succeeded,
                'failed': output.failed
            }
        finally:
            # Always delete any temporary private key file written by
            # _get_settings(), success or failure.
            self._cleanup(settings=fabric_settings)

        return jsonify.json_loads(result, FabricRemoteAction.KEYS_TO_TRANSFORM)

    def _sudo(self):
        # Execute self.command remotely via fabric's sudo() (pty=True).
        fabric_env_vars = self.env_vars
        fabric_settings = self._get_settings()

        try:
            with shell_env(**fabric_env_vars), settings(**fabric_settings):
                output = sudo(self.command, combine_stderr=False, pty=True, quiet=True)
        except Exception:
            LOG.exception('Failed executing remote action.')
            result = self._get_error_result()
        else:
            result = {
                'stdout': output.stdout,
                'stderr': output.stderr,
                'return_code': output.return_code,
                'succeeded': output.succeeded,
                'failed': output.failed
            }
        finally:
            self._cleanup(settings=fabric_settings)

        # XXX: For sudo, fabric requires to set pty=True. This basically combines stdout and
        # stderr into a single stdout stream. So if the command fails, we explictly set stderr
        # to stdout and stdout to ''.
        if result['failed'] and result.get('stdout', None):
            result['stderr'] = result['stdout']
            result['stdout'] = ''

        return jsonify.json_loads(result, FabricRemoteAction.KEYS_TO_TRANSFORM)

    def _get_settings(self):
        """
        Retrieve settings used for the fabric command execution.
        """
        settings = {
            'user': self.user,
            'command_timeout': self.timeout,
            'cwd': self.cwd
        }

        if self.password:
            settings['password'] = self.password

        if self.private_key:
            # Fabric doesn't support passing key as string so we need to write
            # it to a temporary file
            key_file_path = self._write_private_key(private_key_material=self.private_key)
            settings['key_filename'] = key_file_path

        return settings

    def _get_env_vars(self):
        """
        Retrieve environment variables used for the fabric command execution.
        """
        env_vars = self.env_vars or {}
        return env_vars

    def _cleanup(self, settings):
        """
        Clean function which is ran after executing a fabric command.

        :param settings: Fabric settings.
        """
        # Only a temporary key file (if one was written) needs removing.
        temporary_key_file_path = settings.get('key_filename', None)
        if temporary_key_file_path:
            self._remove_private_key_file(file_path=temporary_key_file_path)

    def _write_private_key(self, private_key_material):
        """
        Write private key to a temporary file and return path to the file.
        """
        _, key_file_path = tempfile.mkstemp()
        with open(key_file_path, 'w') as fp:
            fp.write(private_key_material)
        return key_file_path

    def _remove_private_key_file(self, file_path):
        """
        Remove private key file if temporary private key is used to log in.
        """
        # Safety guard: only delete files that live under /tmp.
        if not file_path or '/tmp' not in file_path:
            return False
        try:
            os.remove(file_path)
        except Exception:
            # Best-effort removal; failure to delete is non-fatal.
            pass
        return True
class FabricRemoteScriptAction(RemoteScriptAction, FabricRemoteAction):
    """Remote script action executed via Fabric: upload the script (and
    optional libs directory), run it, then clean up on the remote host.
    """

    def get_fabric_task(self):
        return self._get_script_action_method()

    def _get_script_action_method(self):
        # Wrap the upload-run-cleanup sequence as a Fabric task.
        task = WrappedCallableTask(self._run_script_with_settings, name=self.name,
                                   alias=self.action_exec_id, parallel=self.parallel,
                                   sudo=self.sudo)
        # WrappedCallableTask ignores these kwargs, so set them explicitly
        # (same workaround as FabricRemoteAction.get_fabric_task).
        task.parallel = self.parallel
        task.serial = not self.parallel
        return task

    def _run_script_with_settings(self):
        # Apply env vars and fabric settings around the whole script run.
        fabric_env_vars = self.env_vars
        fabric_settings = self._get_settings()

        with shell_env(**fabric_env_vars), settings(**fabric_settings):
            return self._run_script()

    def _run_script(self):
        try:
            self._execute_remote_command('mkdir %s' % self.remote_dir)

            # Copy script.
            output_put = self._put(self.script_local_path_abs,
                                   mirror_local_mode=False, mode=0744)
            if output_put.get('failed'):
                return output_put

            # Copy libs.
            if self.script_local_libs_path_abs and os.path.exists(self.script_local_libs_path_abs):
                output_put_libs = self._put(self.script_local_libs_path_abs)
                if output_put_libs.get('failed'):
                    return output_put_libs

            # Execute action.
            action_method = self._get_action_method()
            result = action_method()

            # Cleanup.
            cmd1 = self._get_command_string(cmd='rm -f', args=[self.remote_script])
            cmd2 = self._get_command_string(cmd='rm -rf', args=[self.remote_dir])
            self._execute_remote_command(cmd1)
            self._execute_remote_command(cmd2)
        except Exception:
            LOG.exception('Failed executing remote action.')
            result = self._get_error_result()
        return result

    def _get_command_string(self, cmd, args):
        """
        Escape the command arguments and form a command string.

        :type cmd: ``str``
        :type args: ``list``

        :rtype: ``str``
        """
        assert isinstance(args, (list, tuple))
        args = [quote_unix(arg) for arg in args]
        args = ' '.join(args)
        result = '%s %s' % (cmd, args)
        return result

    def _execute_remote_command(self, command):
        # Run (or sudo, per self.sudo) the command remotely; raise on failure.
        action_method = sudo if self.sudo else run
        output = action_method(command, combine_stderr=False, pty=False, quiet=True)

        if output.failed:
            msg = 'Remote command %s failed.' % command
            # XXX: Note Fabric doesn't handle unicode correctly if stdout or stderr contains
            # unicode and action fails. For now, just log stdout and stderr so we can debug
            # from logs.
            # Fabric will show an exception traceback like:
            # 'ascii' codec can't encode character u'\u2018' in position 93:
            # ordinal not in range(128)
            #
            LOG.error('stderr: %s', output.stderr)
            LOG.error('stdout: %s', output.stdout)
            LOG.error(msg)
            raise Exception(msg)

        LOG.debug('Remote command %s succeeded.', command)
        return True

    def _put(self, file_or_dir, mirror_local_mode=True, mode=None):
        # Upload a file or directory to self.remote_dir via fabric's put().
        output = put(file_or_dir, self.remote_dir, use_sudo=self.sudo,
                     mirror_local_mode=mirror_local_mode, mode=mode)

        result = {
            'succeeded': output.succeeded,
            'failed': output.failed
        }

        if output.failed:
            msg = 'Failed copying %s to %s on remote box' % (file_or_dir, self.remote_dir)
            LOG.error(msg)
            result['error'] = msg
        return result
| |
from __future__ import print_function
from ply import lex, yacc
# Keywords recognised by the grammar; each maps to an upper-cased token name.
reserved_list = [
    'begin',
    'end',
    'parameters',
    'molecule_types',
    'species',
    'reaction_rules',
    'observables',
]
reserved = dict((r, r.upper()) for r in reserved_list)

# Full token set: punctuation/literals plus the reserved-word tokens.
tokens = [
    'ID',
    'FLOAT',
    'INTEGER',
    'COMMA',
    'PLUS',
    'TILDE',
    'EXCLAMATION',
    'QUESTION',
    'PERIOD',
    'IRRARROW',
    'REVARROW',
    'LPAREN',
    'RPAREN',
    'NEWLINE',
] + list(reserved.values())

# Simple single-pattern tokens. All patterns are raw strings: the previous
# plain strings '\?' and '\.' were invalid escape sequences (deprecated,
# and a syntax error in future Python versions); the regex values are
# unchanged.
t_COMMA = r','
t_PLUS = r'\+'
t_TILDE = r'~'
t_EXCLAMATION = r'!'
t_QUESTION = r'\?'
t_PERIOD = r'\.'
t_IRRARROW = r'-->'
t_REVARROW = r'<->'
t_LPAREN = r'\('
t_RPAREN = r'\)'
# Define a rule so we can track line numbers
def t_NEWLINE(t):
    r'\n'
    # The docstring above is the token's regular expression (PLY convention)
    # and must not be changed. Newlines are real tokens in this grammar,
    # so the token is returned after bumping the line counter.
    t.lexer.lineno += 1
    return t
def t_FLOAT(t):
    r'[+-]?(\d*\.\d+([eE][+-]?\d+)?|\d+[eE][+-]?\d+)'
    # The docstring above is the PLY token regex: decimal fractions and/or
    # exponent notation, with optional sign.
    try:
        t.value = float(t.value)
    except ValueError:
        # Should be unreachable for text matching the regex; report and
        # substitute NaN so lexing can continue.
        print("Line %d: Number '%s' has some kind of problem (ValueError)!" % (t.lineno,t.value))
        t.value = float("nan")
    return t
def t_INTEGER(t):
    r'\d+'
    # The docstring above is the PLY token regex: one or more digits.
    try:
        t.value = int(t.value)
    except ValueError:
        # Should be unreachable for text matching the regex; report and
        # substitute 0 so lexing can continue.
        print("Line %d: Number '%s' has some kind of problem (ValueError)!" % (t.lineno,t.value))
        t.value = 0
    return t
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Identifier regex (PLY docstring convention). Reserved words are
    # re-typed to their keyword token instead of ID.
    t.type = reserved.get(t.value,'ID')    # check for reserved words
    return t
# Match and ignore comments (# to end of line)
def t_comment(t):
    r'\#[^\n]*'
    # Returning nothing discards the matched text, so comments never reach
    # the parser. The trailing newline is NOT consumed by this rule.

# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
    # PLY lexer error hook: report the offending character and skip it so
    # lexing can continue.
    print("Illegal character '%s' on line %d" % (t.value[0], t.lineno))
    t.lexer.skip(1)
#from toymodels import Model, Species, RuleReversible, RuleIrreversible
def list_helper(p):
    """Generic action for list rules: 'X_list : X_list X | X | (empty)'.

    Stores the accumulated list in p[0], filtering out None placeholders
    (as produced by blank-line productions).
    """
    if len(p) == 1:
        # Empty production.
        p[0] = []
    if len(p) == 2:
        p[0] = [p[1]]
    elif len(p) == 3:
        p[0] = p[1] + [p[2]]
    # Filter out Nones. Use an identity test ('is not None') rather than
    # '!= None': equality can be overridden by operand types.
    p[0] = [v for v in p[0] if v is not None]
def p_model(p):
    'model : block_list'
    # Grammar rule (PLY docstring). A model is just its list of blocks.
    p[0] = p[1]
    print("model:", p[0])
def p_block_list(p):
    '''block_list : block_list block
                  | block'''
    # Accumulate blocks into a list (see list_helper).
    list_helper(p)
def p_block(p):
    '''block : parameter_block
             | molecule_type_block
             | species_block
             | reaction_rules_block
             | observables_block'''
    # A block's value is whichever specific block kind was parsed.
    p[0] = p[1]
def p_block_empty(p):
    '''block : NEWLINE'''
    # A blank line between blocks yields None (filtered out by list_helper).
def p_parameter_block(p):
    'parameter_block : BEGIN PARAMETERS NEWLINE parameter_st_list END PARAMETERS NEWLINE'
    # Value is the list of parameter statements inside the block (p[4]).
    p[0] = p[4]
    print("block:", p[2])
def p_molecule_type_block(p):
    'molecule_type_block : BEGIN MOLECULE_TYPES NEWLINE END MOLECULE_TYPES NEWLINE'
    # Stub: block contents not parsed yet; value is the keyword token text.
    p[0] = p[2]
    print("block:", p[2])

def p_species_block(p):
    'species_block : BEGIN SPECIES NEWLINE END SPECIES NEWLINE'
    # Stub: block contents not parsed yet; value is the keyword token text.
    p[0] = p[2]
    print("block:", p[2])

def p_reaction_rules_block(p):
    'reaction_rules_block : BEGIN REACTION_RULES NEWLINE END REACTION_RULES NEWLINE'
    # Stub: block contents not parsed yet; value is the keyword token text.
    p[0] = p[2]
    print("block:", p[2])

def p_observables_block(p):
    'observables_block : BEGIN OBSERVABLES NEWLINE END OBSERVABLES NEWLINE'
    # Stub: block contents not parsed yet; value is the keyword token text.
    p[0] = p[2]
    print("block:", p[2])
def p_parameter_st_list(p):
    '''parameter_st_list : parameter_st_list parameter_st
                         | parameter_st
                         | '''
    # Possibly-empty list of parameter statements (see list_helper).
    list_helper(p)
def p_parameter_st(p):
    '''parameter_st : INTEGER ID number NEWLINE
                    | NEWLINE'''
    # A parameter line: value is [index, name, number]; a bare newline
    # yields None (filtered out later).
    if len(p) > 2:
        p[0] = p[1:4]
def p_number(p):
    '''number : FLOAT
              | INTEGER'''
    # Pass the already-converted numeric token value through unchanged.
    p[0] = p[1]
# def p_statement_list(p):
# '''statement_list : statement_list statement'''
# p[0] = p[1] + [p[2]]
# #print("statement_list:", p[0])
# def p_statement_list_trivial(p):
# '''statement_list : statement'''
# p[0] = [p[1]]
# #print("statement_list_trivial:", p[0])
# def p_statement_empty(p):
# 'statement : NEWLINE'
# #print("statement_empty:", p[0])
# def p_statement(p):
# 'statement : rule NEWLINE'
# p[0] = p[1]
# #print("statement:", p[0])
# def p_rule(p):
# '''rule : irr_rule
# | rev_rule'''
# p[0] = p[1]
# #print("rule:", p[0])
# def p_irr_rule(p):
# 'irr_rule : expression IRRARROW expression LPAREN FLOAT RPAREN'
# #print("irr_rule")
# p[0] = RuleIrreversible(reactants=p[1], products=p[3], rate=p[5])
# def p_rev_rule(p):
# 'rev_rule : expression REVARROW expression LPAREN FLOAT COMMA FLOAT RPAREN'
# #print("rev_rule")
# p[0] = RuleReversible(reactants=p[1], products=p[3], rates=[p[5], p[7]])
# def p_expression_plus(p):
# 'expression : expression PLUS expression'
# p[0] = p[1] + p[3]
# #print("expression_plus:", p[0])
# def p_expression_species(p):
# 'expression : SPECIES'
# #print("expression_species:", p[1])
# p[0] = [Species(name=p[1])]
# Error rule for syntax errors
def p_error(p):
    # PLY error callback: p is the offending LexToken, or None at end of input.
    print("Syntax error in input:")
    print(p)
# Operator precedence table consumed by yacc; PLUS is left-associative.
precedence = (
    ('left', 'PLUS'),
)
# Build the parser
lex.lex()  # build the lexer from the module-level token rules
yacc.yacc(write_tables=0)  # build the parser; don't emit parser table files
def parse(*args, **kwargs):
    """Parse input with the module-level yacc parser and return the result.

    Thin pass-through wrapper around ``yacc.parse``. Previously the parse
    result was discarded, so every caller received ``None``.
    """
    return yacc.parse(*args, **kwargs)
| |
from datetime import date, timedelta
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.template.loader import get_template
from django.utils.decorators import method_decorator
from django.views.generic import (
MonthArchiveView, YearArchiveView, CreateView, DeleteView, DetailView, ListView, TemplateView
)
from stronghold.decorators import public
from status.models import Incident, IncidentUpdate
from status.forms import IncidentCreateForm, IncidentUpdateCreateForm
import slack
import slack.chat
import logging
logger = logging.getLogger(__name__)
def send_to_slack(message, channel='engineering', username='statusbot', emoji=':statusbot:', override_debug=False):
    """Post *message* to Slack, diverting to the 'dev' channel in DEBUG mode.

    When settings.DEBUG is on (and override_debug is not set) the message is
    prefixed with 'DEBUG: ' and sent to 'dev' instead of the requested channel.
    """
    slack.api_token = settings.SLACK_TOKEN
    divert = settings.DEBUG and not override_debug
    if divert:
        logger.info('Diverting from %s to dev while in debug mode as %s: %s' % (channel, username, message))
        channel, message = 'dev', 'DEBUG: ' + message
    else:
        logger.info('Sending to channel %s as %s: %s' % (channel, username, message))
    slack.chat.post_message(channel, message, username=username, icon_emoji=emoji)
def create_incident(request):
    """Create an Incident plus its first IncidentUpdate, then announce it.

    On GET, renders empty create forms. On a valid POST, saves both records
    stamped with the requesting user, posts a short summary to the configured
    Slack channel (best-effort), and redirects to '/'.
    """
    if request.method == 'POST':
        form = IncidentCreateForm(request.POST)
        form2 = IncidentUpdateCreateForm(request.POST)
        if form.is_valid() and form2.is_valid():
            i = form.save(commit=False)
            i.user = request.user
            # (removed a leftover Python-2-only debug `print i` statement)
            i.save()
            f = form2.save(commit=False)
            f.incident = i
            f.user = request.user
            f.save()
            if settings.SLACK_CHANNEL and settings.SLACK_TOKEN:
                # Trim long descriptions so the Slack message stays compact.
                if len(f.description) > 50:
                    description = f.description[:50] + '...'
                else:
                    description = f.description
                try:
                    message = "<https://%s%s|%s> (%s): %s" % (
                        get_current_site(request),
                        reverse('status:incident_detail', args=[i.pk, ]),
                        i.name,
                        f.status.name,
                        description
                    )
                    send_to_slack(message, username=settings.SLACK_USERNAME, channel=settings.SLACK_CHANNEL)
                except Exception as e:
                    # Slack delivery is best-effort; never block incident creation.
                    # logger.warn is a deprecated alias of logger.warning.
                    logger.warning('Unable to send to slack: %s' % (e))
            return HttpResponseRedirect('/')
    else:
        form = IncidentCreateForm()
        form2 = IncidentUpdateCreateForm()
    request_context = RequestContext(request)
    request_context.push({'form': form, 'form2': form2})
    t = get_template('status/incident_create_form.html')
    rendered_template = t.render(request_context.flatten(), request)
    return HttpResponse(rendered_template)
class DashboardView(ListView):
    # Dashboard listing all incidents that have not been hidden.
    model = Incident
    def get_queryset(self):
        # exclude(hidden=True) rather than filter(hidden=False) — NOTE(review):
        # this also keeps rows with a NULL hidden flag; confirm intentional.
        return Incident.objects.exclude(hidden=True)
class HiddenDashboardView(ListView):
    # Unfiltered incident list (includes hidden incidents), using the default
    # ListView queryset.
    model = Incident
class IncidentHideView(DeleteView):
    # "Delete" view that soft-deletes: flags the incident as hidden instead of
    # removing the row.
    model = Incident
    template_name = 'status/incident_hide.html'
    def delete(self, request, *args, **kwargs):
        # Override DeleteView.delete so confirmation marks hidden=True rather
        # than calling .delete() on the object.
        self.object = self.get_object()
        self.object.hidden = True
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())
    def get_success_url(self):
        # Back to the main dashboard after hiding.
        return reverse('status:dashboard')
class IncidentDeleteView(DeleteView):
    # Hard delete of an incident; redirects to the dashboard when done.
    model = Incident
    def get_success_url(self):
        return reverse('status:dashboard')
class IncidentUpdateUpdateView(CreateView):
    """Append a new IncidentUpdate to an existing Incident.

    Posting an update also un-hides the incident so it reappears on the
    dashboard. (The previous implementation set hidden=False and saved the
    same incident twice; it is now done once.)
    """
    model = IncidentUpdate
    form_class = IncidentUpdateCreateForm
    template_name = 'status/incident_form.html'
    def get_success_url(self):
        # Return to the detail page of the incident being updated.
        return reverse('status:incident_detail', args=[self.kwargs['pk']])
    def form_valid(self, form):
        iu = form.save(commit=False)
        incident = Incident.objects.get(pk=self.kwargs['pk'])
        # A fresh update brings a hidden incident back onto the dashboard.
        incident.hidden = False
        incident.save()
        iu.incident = incident
        iu.user = self.request.user
        iu.save()
        return HttpResponseRedirect(self.get_success_url())
class IncidentDetailView(DetailView):
    # Public incident detail page; also exposes an empty update form so a
    # new IncidentUpdate can be posted from the page.
    model = Incident
    @method_decorator(public)  # stronghold: allow unauthenticated access
    def dispatch(self, *args, **kwargs):
        return super(IncidentDetailView, self).dispatch(*args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(IncidentDetailView, self).get_context_data(**kwargs)
        context.update({
            'form': IncidentUpdateCreateForm(),
        })
        return context
class IncidentArchiveYearView(YearArchiveView):
    # Public per-year incident archive, keyed on the 'updated' timestamp.
    make_object_list = True
    queryset = Incident.objects.all()
    date_field = 'updated'
    @method_decorator(public)  # stronghold: allow unauthenticated access
    def dispatch(self, *args, **kwargs):
        return super(IncidentArchiveYearView, self).dispatch(*args, **kwargs)
class IncidentArchiveMonthView(MonthArchiveView):
    # Public per-month incident archive; months come in as zero-padded
    # numbers ('%m') rather than the default month names.
    make_object_list = True
    queryset = Incident.objects.all()
    date_field = 'updated'
    month_format = '%m'
    @method_decorator(public)  # stronghold: allow unauthenticated access
    def dispatch(self, *args, **kwargs):
        return super(IncidentArchiveMonthView, self).dispatch(*args, **kwargs)
class HomeView(TemplateView):
    """Public status home page: visible incidents plus an aggregate level.

    The aggregate 'status_level' is the worst level found across the latest
    update of each visible incident: danger > warning > info > success.
    """
    http_method_names = ['get', ]
    template_name = 'status/home.html'
    @method_decorator(public)  # stronghold: allow unauthenticated access
    def dispatch(self, *args, **kwargs):
        return super(HomeView, self).dispatch(*args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        incident_list = Incident.objects.filter(hidden=False).order_by('-updated')
        context.update({
            'incident_list': incident_list
        })
        # Optional branding settings are passed through only when configured.
        if hasattr(settings, 'STATUS_TICKET_URL'):
            context.update({'STATUS_TICKET_URL': settings.STATUS_TICKET_URL})
        if hasattr(settings, 'STATUS_LOGO_URL'):
            context.update({'STATUS_LOGO_URL': settings.STATUS_LOGO_URL})
        if hasattr(settings, 'STATUS_TITLE'):
            context.update({'STATUS_TITLE': settings.STATUS_TITLE})
        status_level = 'success'
        for incident in incident_list:
            try:
                # Fetch the latest update once per incident (it was previously
                # re-queried for every comparison below).
                latest_type = incident.get_latest_update().status.type
            except AttributeError:
                # No updates yet: get_latest_update() returned None.
                continue
            if latest_type == 'danger':
                # Worst possible level; no need to look further.
                status_level = 'danger'
                break
            elif latest_type == 'warning':
                status_level = 'warning'
            elif latest_type == 'info' and not status_level == 'warning':
                status_level = 'info'
        context.update({
            'status_level': status_level
        })
        return context
| |
import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano
import theano.tensor as TT
from rllab.core.lasagne_layers import ParamLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import MLP
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc.ext import compile_function
from rllab.optimizers.lbfgs_optimizer import LbfgsOptimizer
from rllab.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from rllab.distributions.diagonal_gaussian import DiagonalGaussian
from rllab.misc.ext import iterate_minibatches_generic
class GaussianMLPRegressor(LasagnePowered):
    """
    A class for performing regression by fitting a Gaussian distribution to the outputs.
    """
    def __init__(
            self,
            input_shape,
            output_dim,
            mean_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            std_nonlinearity=None,
            normalize_inputs=True,
            normalize_outputs=True,
            name=None,
            batchsize=None,
            subsample_factor=1.,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
        adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
        `std_share_network` is False. It defaults to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
        is False. It defaults to the same non-linearity as the mean.
        :param normalize_inputs: Whether to z-normalize inputs with running statistics computed in fit().
        :param normalize_outputs: Whether to z-normalize outputs with running statistics computed in fit().
        :param name: Optional prefix for the tabular log keys emitted by fit().
        :param batchsize: Minibatch size used by fit(); None means full-batch.
        :param subsample_factor: Fraction of the data (sampled with replacement) used by each fit() call.
        """
        Serializable.quick_init(self, locals())
        self._batchsize = batchsize
        self._subsample_factor = subsample_factor
        if optimizer is None:
            # Trust-region fitting needs the penalty variant to enforce the
            # KL constraint; plain L-BFGS otherwise.
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer()
            else:
                optimizer = LbfgsOptimizer()
        self._optimizer = optimizer
        if mean_network is None:
            mean_network = MLP(
                input_shape=input_shape,
                output_dim=output_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=None,
            )
        l_mean = mean_network.output_layer
        if adaptive_std:
            # State-dependent std: a second MLP sharing the mean net's input.
            l_log_std = MLP(
                input_shape=input_shape,
                input_var=mean_network.input_layer.input_var,
                output_dim=output_dim,
                hidden_sizes=std_hidden_sizes,
                hidden_nonlinearity=std_nonlinearity,
                output_nonlinearity=None,
            ).output_layer
        else:
            # State-independent std: one (optionally trainable) parameter
            # vector, initialized at log(init_std).
            l_log_std = ParamLayer(
                mean_network.input_layer,
                num_units=output_dim,
                param=lasagne.init.Constant(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )
        LasagnePowered.__init__(self, [l_mean, l_log_std])
        xs_var = mean_network.input_layer.input_var
        ys_var = TT.matrix("ys")
        old_means_var = TT.matrix("old_means")
        old_log_stds_var = TT.matrix("old_log_stds")
        # Shared variables holding the input/output normalization statistics;
        # they are updated in fit() and broadcast over the batch axis.
        x_mean_var = theano.shared(
            np.zeros((1,) + input_shape, dtype=theano.config.floatX),
            name="x_mean",
            broadcastable=(True,) + (False,) * len(input_shape)
        )
        x_std_var = theano.shared(
            np.ones((1,) + input_shape, dtype=theano.config.floatX),
            name="x_std",
            broadcastable=(True,) + (False,) * len(input_shape)
        )
        y_mean_var = theano.shared(
            np.zeros((1, output_dim), dtype=theano.config.floatX),
            name="y_mean",
            broadcastable=(True, False)
        )
        y_std_var = theano.shared(
            np.ones((1, output_dim), dtype=theano.config.floatX),
            name="y_std",
            broadcastable=(True, False)
        )
        normalized_xs_var = (xs_var - x_mean_var) / x_std_var
        normalized_ys_var = (ys_var - y_mean_var) / y_std_var
        # The networks operate in normalized space; un-normalize to get the
        # distribution over raw outputs.
        normalized_means_var = L.get_output(
            l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(
            l_log_std, {mean_network.input_layer: normalized_xs_var})
        means_var = normalized_means_var * y_std_var + y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(y_std_var)
        normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
        normalized_old_log_stds_var = old_log_stds_var - TT.log(y_std_var)
        dist = self._dist = DiagonalGaussian(output_dim)
        normalized_dist_info_vars = dict(
            mean=normalized_means_var, log_std=normalized_log_stds_var)
        # Mean KL between the pre-update and current distributions, used as
        # the trust-region constraint.
        mean_kl = TT.mean(dist.kl_sym(
            dict(mean=normalized_old_means_var,
                 log_std=normalized_old_log_stds_var),
            normalized_dist_info_vars,
        ))
        # Loss: negative log-likelihood of the (normalized) targets.
        loss = - \
            TT.mean(dist.log_likelihood_sym(
                normalized_ys_var, normalized_dist_info_vars))
        self._f_predict = compile_function([xs_var], means_var)
        self._f_pdists = compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std
        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[normalized_means_var, normalized_log_stds_var],
        )
        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [
                xs_var, ys_var, old_means_var, old_log_stds_var]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]
        self._optimizer.update_opt(**optimizer_args)
        self._use_trust_region = use_trust_region
        self._name = name
        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var
    def fit(self, xs, ys):
        """Fit the Gaussian to (xs, ys) and log average losses/KL."""
        if self._subsample_factor < 1:
            # Subsample (with replacement) to speed up fitting.
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
            xs, ys = xs[idx], ys[idx]
        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(
                np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value(
                (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
        if self._normalize_outputs:
            # recompute normalizing constants for outputs
            self._y_mean_var.set_value(
                np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
            self._y_std_var.set_value(
                (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        # FIXME: needs batch computation to avoid OOM.
        loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
        for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
            batch_count += 1
            # Use distinct names for the minibatch so the full xs/ys are not
            # shadowed inside the loop.
            batch_xs, batch_ys = batch
            if self._use_trust_region:
                old_means, old_log_stds = self._f_pdists(batch_xs)
                inputs = [batch_xs, batch_ys, old_means, old_log_stds]
            else:
                inputs = [batch_xs, batch_ys]
            loss_before += self._optimizer.loss(inputs)
            self._optimizer.optimize(inputs)
            loss_after += self._optimizer.loss(inputs)
            if self._use_trust_region:
                mean_kl += self._optimizer.constraint_val(inputs)
        logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
        logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
        # BUG FIX: the delta must be averaged as a whole; previously only
        # loss_after was divided by batch_count (missing parentheses).
        logger.record_tabular(prefix + 'dLoss', (loss_before - loss_after) / batch_count)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)
    def predict(self, xs):
        """
        Return the maximum likelihood estimate of the predicted y.
        :param xs:
        :return:
        """
        return self._f_predict(xs)
    def sample_predict(self, xs):
        """
        Sample one possible output from the prediction distribution.
        :param xs:
        :return:
        """
        means, log_stds = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))
    def predict_log_likelihood(self, xs, ys):
        """Return the per-sample log-likelihood of ys under the fitted Gaussian."""
        means, log_stds = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))
    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic (theano) log-likelihood of y_var given x_var."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {
                self._mean_network.input_layer: normalized_xs_var})
        # Map the normalized-space outputs back to raw output space.
        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)
        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))
    def get_param_values(self, **tags):
        """Delegate to LasagnePowered: flattened parameter values."""
        return LasagnePowered.get_param_values(self, **tags)
    def set_param_values(self, flattened_params, **tags):
        """Delegate to LasagnePowered: restore flattened parameter values."""
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
| |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import pickle
from unittest import mock
from unittest.mock import ANY
import pytest
import torch
import tests.helpers.utils as tutils
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import (
CometLogger,
CSVLogger,
MLFlowLogger,
NeptuneLogger,
TensorBoardLogger,
TestTubeLogger,
WandbLogger,
)
from pytorch_lightning.loggers.base import DummyExperiment
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
from tests.loggers.test_comet import _patch_comet_atexit
from tests.loggers.test_mlflow import mock_mlflow_run_creation
def _get_logger_args(logger_class, save_dir):
logger_args = {}
if "save_dir" in inspect.getfullargspec(logger_class).args:
logger_args.update(save_dir=str(save_dir))
if "offline_mode" in inspect.getfullargspec(logger_class).args:
logger_args.update(offline_mode=True)
if "offline" in inspect.getfullargspec(logger_class).args:
logger_args.update(offline=True)
return logger_args
def _instantiate_logger(logger_class, save_dir, **override_kwargs):
    """Construct logger_class with standard offline args plus overrides."""
    kwargs = dict(_get_logger_args(logger_class, save_dir), **override_kwargs)
    return logger_class(**kwargs)
def test_loggers_fit_test_all(tmpdir, monkeypatch):
    """Verify that basic functionality of all loggers."""
    # TensorBoard runs unmocked; every other backend's third-party client is
    # patched out so the fit/test cycle runs offline.
    _test_loggers_fit_test(tmpdir, TensorBoardLogger)
    with mock.patch("pytorch_lightning.loggers.comet.comet_ml"), mock.patch(
        "pytorch_lightning.loggers.comet.CometOfflineExperiment"
    ):
        _patch_comet_atexit(monkeypatch)
        _test_loggers_fit_test(tmpdir, CometLogger)
    with mock.patch("pytorch_lightning.loggers.mlflow.mlflow"), mock.patch(
        "pytorch_lightning.loggers.mlflow.MlflowClient"
    ):
        _test_loggers_fit_test(tmpdir, MLFlowLogger)
    with mock.patch("pytorch_lightning.loggers.neptune.neptune"):
        _test_loggers_fit_test(tmpdir, NeptuneLogger)
    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"):
        _test_loggers_fit_test(tmpdir, TestTubeLogger)
    with mock.patch("pytorch_lightning.loggers.wandb.wandb") as wandb:
        # No pre-existing run, and a concrete step value for the mocked init.
        wandb.run = None
        wandb.init().step = 0
        _test_loggers_fit_test(tmpdir, WandbLogger)
def _test_loggers_fit_test(tmpdir, logger_class):
    # Run one fit + test cycle with logger_class and assert the exact
    # (step, metric-name) history the logger received.
    class CustomModel(BoringModel):
        # Logs one scalar per phase so each phase leaves a footprint in the
        # logger history.
        def training_step(self, batch, batch_idx):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            self.log("train_some_val", loss)
            return {"loss": loss}
        def validation_epoch_end(self, outputs) -> None:
            avg_val_loss = torch.stack([x["x"] for x in outputs]).mean()
            self.log_dict({"early_stop_on": avg_val_loss, "val_loss": avg_val_loss ** 0.5})
        def test_epoch_end(self, outputs) -> None:
            avg_test_loss = torch.stack([x["y"] for x in outputs]).mean()
            self.log("test_loss", avg_test_loss)
    class StoreHistoryLogger(logger_class):
        # Records every (step, metrics) pair passed to log_metrics.
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.history = []
        def log_metrics(self, metrics, step):
            super().log_metrics(metrics, step)
            self.history.append((step, metrics))
    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = StoreHistoryLogger(**logger_args)
    # Per-backend attributes the Trainer reads; the mocked experiments need
    # concrete values here.
    if logger_class == WandbLogger:
        # required mocks for Trainer
        logger.experiment.id = "foo"
        logger.experiment.project_name.return_value = "bar"
    if logger_class == CometLogger:
        logger.experiment.id = "foo"
        logger.experiment.project_name = "bar"
    if logger_class == TestTubeLogger:
        logger.experiment.version = "foo"
        logger.experiment.name = "bar"
    if logger_class == MLFlowLogger:
        logger = mock_mlflow_run_creation(logger, experiment_id="foo", run_id="bar")
    model = CustomModel()
    trainer = Trainer(
        max_epochs=1,
        logger=logger,
        limit_train_batches=1,
        limit_val_batches=1,
        log_every_n_steps=1,
        default_root_dir=tmpdir,
    )
    trainer.fit(model)
    trainer.test()
    # Compare only metric names per step; values vary run to run.
    log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
    if logger_class == TensorBoardLogger:
        # TensorBoard additionally logs 'hp_metric' around each stage.
        expected = [
            (0, ["hp_metric"]),
            (0, ["epoch", "train_some_val"]),
            (0, ["early_stop_on", "epoch", "val_loss"]),
            (0, ["hp_metric"]),
            (1, ["epoch", "test_loss"]),
        ]
        assert log_metric_names == expected
    else:
        expected = [
            (0, ["epoch", "train_some_val"]),
            (0, ["early_stop_on", "epoch", "val_loss"]),
            (1, ["epoch", "test_loss"]),
        ]
        assert log_metric_names == expected
def test_loggers_save_dir_and_weights_save_path_all(tmpdir, monkeypatch):
    """Test the combinations of save_dir, weights_save_path and default_root_dir."""
    # TensorBoard runs unmocked; other backends are patched out for offline runs.
    _test_loggers_save_dir_and_weights_save_path(tmpdir, TensorBoardLogger)
    with mock.patch("pytorch_lightning.loggers.comet.comet_ml"), mock.patch(
        "pytorch_lightning.loggers.comet.CometOfflineExperiment"
    ):
        _patch_comet_atexit(monkeypatch)
        _test_loggers_save_dir_and_weights_save_path(tmpdir, CometLogger)
    with mock.patch("pytorch_lightning.loggers.mlflow.mlflow"), mock.patch(
        "pytorch_lightning.loggers.mlflow.MlflowClient"
    ):
        _test_loggers_save_dir_and_weights_save_path(tmpdir, MLFlowLogger)
    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"):
        _test_loggers_save_dir_and_weights_save_path(tmpdir, TestTubeLogger)
    with mock.patch("pytorch_lightning.loggers.wandb.wandb"):
        _test_loggers_save_dir_and_weights_save_path(tmpdir, WandbLogger)
def _test_loggers_save_dir_and_weights_save_path(tmpdir, logger_class):
    # Exercise three checkpoint-path scenarios for logger_class:
    # (1) logger without weights_save_path, (2) logger plus weights_save_path,
    # (3) weights_save_path with logging disabled.
    class TestLogger(logger_class):
        # for this test it does not matter what these attributes are
        # so we standardize them to make testing easier
        @property
        def version(self):
            return "version"
        @property
        def name(self):
            return "name"
    model = BoringModel()
    trainer_args = dict(default_root_dir=tmpdir, max_steps=1)
    # no weights_save_path given
    save_dir = tmpdir / "logs"
    weights_save_path = None
    logger = TestLogger(**_get_logger_args(TestLogger, save_dir))
    trainer = Trainer(**trainer_args, logger=logger, weights_save_path=weights_save_path)
    trainer.fit(model)
    # Checkpoints land under the logger's save_dir/name/version.
    assert trainer.weights_save_path == trainer.default_root_dir
    assert trainer.checkpoint_callback.dirpath == os.path.join(logger.save_dir, "name", "version", "checkpoints")
    assert trainer.default_root_dir == tmpdir
    # with weights_save_path given, the logger path and checkpoint path should be different
    save_dir = tmpdir / "logs"
    weights_save_path = tmpdir / "weights"
    logger = TestLogger(**_get_logger_args(TestLogger, save_dir))
    trainer = Trainer(**trainer_args, logger=logger, weights_save_path=weights_save_path)
    trainer.fit(model)
    assert trainer.weights_save_path == weights_save_path
    assert trainer.logger.save_dir == save_dir
    assert trainer.checkpoint_callback.dirpath == weights_save_path / "name" / "version" / "checkpoints"
    assert trainer.default_root_dir == tmpdir
    # no logger given
    weights_save_path = tmpdir / "weights"
    trainer = Trainer(**trainer_args, logger=False, weights_save_path=weights_save_path)
    trainer.fit(model)
    # Without a logger there is no name/version subdirectory.
    assert trainer.weights_save_path == weights_save_path
    assert trainer.checkpoint_callback.dirpath == weights_save_path / "checkpoints"
    assert trainer.default_root_dir == tmpdir
@pytest.mark.parametrize(
    "logger_class",
    [
        CometLogger,
        CSVLogger,
        MLFlowLogger,
        NeptuneLogger,
        TensorBoardLogger,
        TestTubeLogger,
        # The WandbLogger gets tested for pickling in its own test.
    ],
)
def test_loggers_pickle_all(tmpdir, monkeypatch, logger_class):
    """Test that the logger objects can be pickled. This test only makes sense if the packages are installed."""
    _patch_comet_atexit(monkeypatch)
    try:
        _test_loggers_pickle(tmpdir, monkeypatch, logger_class)
    except (ImportError, ModuleNotFoundError):
        # BUG FIX: `logger_class.__class__` is the metaclass (e.g. ABCMeta),
        # not the logger; use __name__ so the xfail reason names the logger.
        pytest.xfail(f"pickle test requires {logger_class.__name__} dependencies to be installed.")
def _test_loggers_pickle(tmpdir, monkeypatch, logger_class):
    """Verify that pickling trainer with logger works."""
    _patch_comet_atexit(monkeypatch)
    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = logger_class(**logger_args)
    # this can cause pickle error if the experiment object is not picklable
    # the logger needs to remove it from the state before pickle
    _ = logger.experiment
    # test pickling loggers
    pickle.dumps(logger)
    trainer = Trainer(max_epochs=1, logger=logger)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    # The unpickled trainer's logger must still accept metrics.
    trainer2.logger.log_metrics({"acc": 1.0})
    # make sure we restored properly
    assert trainer2.logger.name == logger.name
    assert trainer2.logger.save_dir == logger.save_dir
@pytest.mark.parametrize(
    "extra_params",
    [
        pytest.param(dict(max_epochs=1, auto_scale_batch_size=True), id="Batch-size-Finder"),
        pytest.param(dict(max_epochs=3, auto_lr_find=True), id="LR-Finder"),
    ],
)
def test_logger_reset_correctly(tmpdir, extra_params):
    """Test that the tuners do not alter the logger reference"""
    class CustomModel(BoringModel):
        # save_hyperparameters gives the tuners lr/batch_size attributes to vary.
        def __init__(self, lr=0.1, batch_size=1):
            super().__init__()
            self.save_hyperparameters()
    tutils.reset_seed()
    model = CustomModel()
    trainer = Trainer(default_root_dir=tmpdir, **extra_params)
    logger1 = trainer.logger
    trainer.tune(model)
    logger2 = trainer.logger
    logger3 = model.logger
    # Trainer and model must still reference the very same logger after tuning.
    assert logger1 == logger2, "Finder altered the logger of trainer"
    assert logger2 == logger3, "Finder altered the logger of model"
class RankZeroLoggerCheck(Callback):
    # this class has to be defined outside the test function, otherwise we get pickle error
    # due to the way ddp process is launched
    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        # Rank 0 must own a real experiment; every other rank gets a
        # DummyExperiment whose methods are no-ops returning None.
        is_dummy = isinstance(trainer.logger.experiment, DummyExperiment)
        if trainer.is_global_zero:
            assert not is_dummy
        else:
            assert is_dummy
            assert pl_module.logger.experiment.something(foo="bar") is None
@pytest.mark.parametrize(
    "logger_class", [CometLogger, CSVLogger, MLFlowLogger, NeptuneLogger, TensorBoardLogger, TestTubeLogger]
)
@RunIf(skip_windows=True)
def test_logger_created_on_rank_zero_only(tmpdir, monkeypatch, logger_class):
    """Test that loggers get replaced by dummy loggers on global rank > 0"""
    _patch_comet_atexit(monkeypatch)
    try:
        _test_logger_created_on_rank_zero_only(tmpdir, logger_class)
    except (ImportError, ModuleNotFoundError):
        # BUG FIX: `logger_class.__class__` is the metaclass (e.g. ABCMeta),
        # not the logger; use __name__ so the xfail reason names the logger.
        pytest.xfail(f"multi-process test requires {logger_class.__name__} dependencies to be installed.")
def _test_logger_created_on_rank_zero_only(tmpdir, logger_class):
    # Run a 2-process ddp_cpu fit with logger_class; RankZeroLoggerCheck
    # asserts that only global rank 0 holds a real experiment object.
    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = logger_class(**logger_args)
    model = BoringModel()
    trainer = Trainer(
        logger=logger,
        default_root_dir=tmpdir,
        accelerator="ddp_cpu",
        num_processes=2,
        max_steps=1,
        checkpoint_callback=True,
        callbacks=[RankZeroLoggerCheck()],
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_logger_with_prefix_all(tmpdir, monkeypatch):
    """
    Test that prefix is added at the beginning of the metric keys.
    """
    # Each backend is mocked; we assert the backend method receives the key
    # already rewritten as '<prefix>-<name>'.
    prefix = "tmp"
    # Comet
    with mock.patch("pytorch_lightning.loggers.comet.comet_ml"), mock.patch(
        "pytorch_lightning.loggers.comet.CometOfflineExperiment"
    ):
        _patch_comet_atexit(monkeypatch)
        logger = _instantiate_logger(CometLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log_metrics.assert_called_once_with({"tmp-test": 1.0}, epoch=None, step=0)
    # MLflow
    with mock.patch("pytorch_lightning.loggers.mlflow.mlflow"), mock.patch(
        "pytorch_lightning.loggers.mlflow.MlflowClient"
    ):
        logger = _instantiate_logger(MLFlowLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log_metric.assert_called_once_with(ANY, "tmp-test", 1.0, ANY, 0)
    # Neptune
    with mock.patch("pytorch_lightning.loggers.neptune.neptune"):
        logger = _instantiate_logger(NeptuneLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log_metric.assert_called_once_with("tmp-test", 1.0)
    # TensorBoard
    with mock.patch("pytorch_lightning.loggers.tensorboard.SummaryWriter"):
        logger = _instantiate_logger(TensorBoardLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.add_scalar.assert_called_once_with("tmp-test", 1.0, 0)
    # TestTube
    with mock.patch("pytorch_lightning.loggers.test_tube.Experiment"):
        logger = _instantiate_logger(TestTubeLogger, save_dir=tmpdir, prefix=prefix)
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log.assert_called_once_with({"tmp-test": 1.0}, global_step=0)
    # WandB
    with mock.patch("pytorch_lightning.loggers.wandb.wandb") as wandb:
        logger = _instantiate_logger(WandbLogger, save_dir=tmpdir, prefix=prefix)
        # No pre-existing run; wandb also records the global step key itself.
        wandb.run = None
        wandb.init().step = 0
        logger.log_metrics({"test": 1.0}, step=0)
        logger.experiment.log.assert_called_once_with({"tmp-test": 1.0, "trainer/global_step": 0})
| |
import os
from functools import reduce
import rethinkdb as r
from rethinkdb.errors import RqlDriverError
from flask import Flask, g, request, abort, render_template, flash, redirect, url_for, make_response
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, login_required, confirm_login, logout_user, login_user, current_user
from flask_misaka import markdown
from waitress import serve
from copy import copy
from forms import LoginForm, SignupForm
from models import User, Card, Collection, UserCard, CardSet, CardSetOrdering
from services.color_service import get_colors
from const import RARITY_HASH, NORMALIZED_REPLACE_PAIRS
from services.fulltext_service import check_and_add_fulltext_search_field, refresh_fulltext_search_field
# --- Application / database configuration ---------------------------------
RDB_HOST = os.environ.get('RDB_HOST') or 'localhost'  # RethinkDB host
RDB_PORT = os.environ.get('RDB_PORT') or 28015  # RethinkDB driver port (default 28015)
PKMN_DB = 'pkmntcgdb'  # RethinkDB database name
app = Flask(__name__)
Bootstrap(app)  # enable flask-bootstrap templates
app.config.from_object('config')  # load settings from the local config module
login_manager = LoginManager()
login_manager.init_app(app)
CARDS_PER_PAGE = 30  # pagination size for card listings
def _get_color_gradients(count):
    """Return 2*count colours — a start/end gradient pair per series."""
    doubled = count * 2
    return get_colors(doubled)
def _card_text_for_edition_type(edition_type):
return edition_type.replace('-', ' ').replace('_', ' ').capitalize()
def _total_number_of_cards_for_user(user_id):
    """Return the summed card count across all of the user's UserCard rows."""
    return UserCard.filter({'user_id': user_id}).sum('count')
def _total_number_of_cards_for_user_with_collection(user_id, collection_id):
    """Return the summed card count for one of the user's collections."""
    return UserCard.filter({'user_id': user_id, 'collection_id': collection_id}).sum('count')
def _user_collections(user_id):
    """Return the user's collections, oldest first."""
    return Collection.filter({'user_id': user_id}).order_by('created_at').run()
def _selected_collection_for_user(user_id):
    """Return the user's currently selected Collection, or None when user_id is falsy."""
    return Collection.get(User.get(user_id).selected_collection) if user_id else None
def _user_card_primary_id(user_id, card_id, collection_id, edition):
    """Derive the deterministic UserCard primary key from its identifying tuple."""
    return UserCard.uuid('{}_{}_{}_{}'.format(user_id, card_id, collection_id, edition))
def _set_editions_in_order(normalized_set_name):
    """Return the edition keys of the named card set.

    NOTE(review): ordering relies on the iteration order of
    ``card_set.editions`` — presumably an ordered mapping; confirm.
    """
    card_set = CardSet.find_first({'normalized_set_name': normalized_set_name})
    return card_set.editions.keys()
def _user_set_editions_in_order(normalized_set_name):
    """Return the set's edition keys as human-readable display strings."""
    card_set = CardSet.find_first({'normalized_set_name': normalized_set_name})
    return [_card_text_for_edition_type(edition) for edition in card_set.editions]
def _get_user_cards(user_id, page):
    """Return one page of the user's cards.

    Returns ``(cards, page, total_pages)`` where ``total_pages`` is a string
    (consumed as text — see the str() on the return line).
    """
    # All Card rows whose id appears in the user's UserCard entries with a
    # positive count.
    cards = Card.get_all(*list(UserCard.filter({'user_id': user_id}).filter(r.row['count'] > 0).get_field('card_id').run()))
    # copy() the query before count() so the pagination query below is not
    # consumed by the count.
    total_pages = ((copy(cards).count().run() - 1) // CARDS_PER_PAGE) + 1
    cards = cards.skip(CARDS_PER_PAGE * (page - 1))
    cards = cards.limit(CARDS_PER_PAGE).run()
    cards = list(cards)
    return cards, page, str(total_pages)
def _get_user_cards_data(user_id, collection_id):
    """Return aggregate card data for a user (optionally scoped to one collection).

    Returns a pair of grouped queries:
    - per (set_code, edition_superset): count of distinct owned cards
    - per set_code: count of distinct card numbers owned
    """
    if collection_id:
        user_hash = {'user_id': user_id, 'collection_id': collection_id}
    else:
        user_hash = {'user_id': user_id}
    # Distinct owned (set, edition, card) triples joined to the Card table;
    # drop the joined row's id to avoid clobbering the left side's on zip().
    user_card_query = UserCard.filter(user_hash).filter(r.row['count'] > 0).pluck(
        'set_code', 'edition_superset', 'card_id'
    ).distinct().eq_join('card_id', Card.table()).without({'right': 'id'}).zip()
    # copy() so the two aggregations below don't consume the same cursor.
    card_edition_data = copy(user_card_query).group('set_code', 'edition_superset').count()
    total_cards_data = user_card_query.group('set_code').pluck('number').distinct().count()
    return card_edition_data, total_cards_data
def _get_collection_id(user_id, collection_name):
    """Return the id of the user's collection named *collection_name*, or None.

    Returns None when no name is given or when no matching collection exists.
    Previously a lookup miss raised AttributeError (``None.get_id()``).
    """
    if not collection_name:
        return None
    collection = Collection.find_first({'user_id': user_id, 'name': collection_name})
    # Assumes find_first returns None on a miss — TODO confirm against the
    # models module.
    if collection is None:
        return None
    return collection.get_id() or None
def _get_collection_data(user_id, collection_name):
    """Build the template context for the whole-collection dashboard.

    For each set the user owns cards from, produces per-edition counts plus
    a synthetic 'Set cards' line, each with current count, total count and
    gradient colors. Returns a dict **-expanded into set_dashboard.jinja2.
    """
    # list of set data, sorted by set number
    # for each set, I want a list of editions + a special 'collection' list
    # each of these editions need a color, current_number, total_number and name of the edition
    collection_id = _get_collection_id(user_id, collection_name)
    card_edition_data, total_cards_data = _get_user_cards_data(user_id, collection_id)
    # sets[set_code][edition] = number of owned cards of that edition.
    sets = {}
    for set_card_edition in card_edition_data:
        set_code, edition = set_card_edition
        count = card_edition_data[set_card_edition]
        if set_code not in sets:
            sets[set_code] = {}
        sets[set_code][edition] = count
    # Nothing owned: render an empty dashboard.
    if not total_cards_data:
        return {'sets': [], 'ordered_sets': [],
                'collection_name': collection_name,
                'user_id': user_id, 'username': User.get(user_id).username}
    for set_code in total_cards_data:
        sets[set_code]['total_cards'] = total_cards_data[set_code]
    # Annotate each owned set with master data. The regex match() is only a
    # coarse prefilter; the 'not in sets' check below filters exactly.
    for card_set in CardSet.filter(lambda cs: cs['code'].match('|'.join(sets.keys()))).run():
        if card_set['code'] not in sets.keys():
            continue
        sets[card_set['code']]['set_total_cards'] = card_set['total_cards']
        sets[card_set['code']]['raw_data'] = card_set
        for edition in card_set['editions']:
            sets[card_set['code']]['set_{}'.format(edition)] = card_set['editions'][edition]
    ordered_sets = [set_code for set_code in list(CardSetOrdering.ordered_sets(1)) if set_code in sets.keys()]
    # Drop sets the master-data query did not annotate.
    sets = {k[0]: k[1] for k in filter(lambda kv: 'raw_data' in kv[1], sets.items())}
    for set_code in sets:
        card_set_hash = sets[set_code]
        sets[set_code]['normalized_card_set'] = card_set_hash['raw_data']['normalized_set_name']
        sets[set_code]['card_set'] = card_set_hash['raw_data']['set_name']
        # +1 for the synthetic 'Set cards' line; two gradient colors per line.
        number_of_editions = len(card_set_hash['raw_data']['editions']) + 1
        colors = _get_color_gradients(number_of_editions)
        # 'data' is a legacy '#'/'$'-delimited string; 'lines' is the
        # structured equivalent built in parallel.
        card_set_data = 'Set cards#{}#{}#{}#{}'.format(card_set_hash['total_cards'], card_set_hash['set_total_cards'], colors[0], colors[1])
        set_lines = [('Set cards', card_set_hash['total_cards'], card_set_hash['set_total_cards'], colors[0])]
        current_color = 0
        for edition in card_set_hash['raw_data']['editions']:
            current_color += 1
            card_set_data += '${}#{}#{}#{}#{}'.format(_card_text_for_edition_type(edition),
                                                      card_set_hash[edition] if edition in card_set_hash else 0,
                                                      card_set_hash['set_{}'.format(edition)],
                                                      colors[2 * current_color],
                                                      colors[2 * current_color + 1])
            set_lines.append(
                (
                    edition,
                    card_set_hash[edition] if edition in card_set_hash else 0,
                    card_set_hash['set_{}'.format(edition)],
                    colors[2 * current_color]
                )
            )
        sets[set_code]['data'] = card_set_data
        sets[set_code]['lines'] = set_lines
    return {
        'sets': sets,
        'ordered_sets': ordered_sets,
        'collection_name': collection_name,
        'user_id': user_id,
        'username': User.get(user_id).username
    }
def _get_card_set_data(user_id, collection_name, set_name):
    """Build the template context for the single-set dashboard.

    Attaches to every card in the set an 'edition_counts' mapping of
    edition superset -> owned copies (summed across matching UserCard rows).
    """
    cards_in_set = list(Card.filter({'card_set': set_name}).order_by('number').run())
    user_hash = {'user_id': user_id}
    if collection_name:
        user_hash['collection_id'] = _get_collection_id(user_id, collection_name)
    user_cards_data = list(UserCard.filter(r.row['count'] > 0).filter(user_hash).run())
    for card in cards_in_set:
        card['edition_counts'] = {}
        for edition in card['editions']:
            # 'foo-bar' edition types collapse into the 'foo' superset.
            edition_superset = edition['type'].split('-')[0]
            user_card_data = list(filter(lambda user_card: (user_card['card_code'], user_card['edition_superset']) == (card['card_code'], edition_superset), user_cards_data))
            # Sum the counts of all matching rows (0 when none match).
            # NOTE(review): relies on reduce() being in scope (Python 2
            # builtin, or imported earlier in this file) -- confirm.
            card['edition_counts'][edition_superset] = reduce(lambda left, right: {'count': left['count'] + right['count']}, user_card_data)['count'] if user_card_data else 0
    return {
        'normalized_set_name': _normalized_card_set(set_name),
        'set_name': set_name,
        'cards': cards_in_set,
        'editions': _user_set_editions_in_order(_normalized_card_set(set_name)),
        'edition_keys': _set_editions_in_order(_normalized_card_set(set_name)),
        'collection_name': collection_name
    }
def _normalized_card_set(card_set):
    """Lowercase/trim a set name and apply the configured replacement pairs."""
    normalized = str(card_set).strip().lower()
    for original, replacement in NORMALIZED_REPLACE_PAIRS:
        normalized = normalized.replace(original, replacement)
    return normalized
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the user object from the session's id."""
    return User.get(user_id)
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
    """Render the landing page."""
    # Removed a dead Card.find_first() query whose result was never used.
    return render_template("index.html")
@app.route('/fulltext_refresh', methods=['GET'])
def fulltext_refresh():
    """Maintenance endpoint: (re)build the fulltext search field on cards."""
    check_and_add_fulltext_search_field()
    refresh_fulltext_search_field()
    return 'Done'
@app.route('/profile', methods=['GET'])
@login_required
def profile():
    """Render the logged-in user's profile page.

    @app.route must be the outermost (topmost) decorator: previously
    @login_required sat above it, so Flask registered the *unwrapped*
    function and the login check never applied to this route.
    """
    return render_template('profile.html')
@app.route('/u/<path:path>', methods=['GET'])
def user_cards(path):
    """Public per-user card page shell; the page's data is fetched by the
    client posting the URL back (see set_data/set_data_head), so the
    captured path argument is unused here."""
    return render_template('user_cards.html')
@app.route('/set_selected_collection', methods=['POST'])
@login_required
def set_selected_collection():
    """Persist the collection the current user selected in the UI.

    @app.route is now outermost: previously @login_required sat above it,
    so Flask registered the unprotected function and the login check never
    applied to this route.
    """
    collection_id = request.json['collection_id']
    User.get(current_user.get_id()).update({'selected_collection': collection_id})
    return ''
@app.route('/set_data_head', methods=['POST'])
def set_data_head():
    """Render the header widget for a user's public card page.

    The client posts its page URL, expected as /u/<username>[@<collection>].
    """
    split_path = request.json['url'].split('/')
    # Guard malformed URLs: fixes the IndexError noted in the old TODO when
    # the posted path had fewer than three segments.
    if len(split_path) < 3:
        abort(404)
    user_query = split_path[2].split('@')
    username = user_query[0]
    user = User.find_by_username(username)
    if not user:
        abort(404)
    return render_template('widgets/user_cards_head.jinja2', user_id=user.get_id(), username=username)
@app.route('/set_data', methods=['POST'])
def set_data():
    """Render the dashboard widget for a user's public card page.

    The posted URL looks like /u/<username>[@<collection>][/<set>]: with a
    set segment the single-set dashboard is rendered, otherwise the
    whole-collection one.
    """
    split_path = request.json['url'].split('/')
    user_query = split_path[2].split('@')
    collection_name = user_query[1] if len(user_query) > 1 else ''
    collection_name = collection_name.replace('%20', ' ')
    username = user_query[0]
    user = User.find_by_username(username)
    if not user:
        abort(404)
    if len(split_path) > 3:
        normalized_set_name = split_path[3]
        # Look the set up before dereferencing: find_first() returns None
        # for an unknown set and the old code read .set_name on it
        # directly, raising AttributeError instead of a clean 404.
        found_set = CardSet.find_first({'normalized_set_name': normalized_set_name})
        if not found_set:
            abort(404)
        return render_template('widgets/single_set_dashboard.jinja2',
                               **_get_card_set_data(user.get_id(), collection_name, found_set.set_name))
    else:
        return render_template('widgets/set_dashboard.jinja2',
                               **_get_collection_data(user.get_id(), collection_name))
@app.route("/cards", methods=['GET'])
def list_cards():
return render_template('cards.html')
@app.route("/cards", methods=['POST'])
def change_cards():
count = request.json['count']
card_id, card_code, card_number, card_set_n, edition = request.json['card_ct_id'].split('|')
collection_id = request.json['collection_id']
user_id = request.json['user_id']
UserCard.insert({
'id': _user_card_primary_id(user_id, card_id, collection_id, edition),
'count': count,
'user_id': user_id,
'card_id': card_id,
'card_code': card_code,
'edition': edition.lower(),
'edition_superset': edition.lower().split('-')[0],
'collection_id': collection_id
}, conflict='update')
return ''
@app.route("/search_cards", methods=['POST'])
def search_cards():
search_query = request.json['url'][1:]
if not search_query:
searched_terms = []
search_args = {}
else:
search_args = {arg.split('=')[0]: arg.split('=')[1] for arg in search_query.split('&')}
searched_terms = search_args['q'].lower().split('%20')
searched_cards = Card.query()
card_set_orderings = CardSetOrdering.table()
if searched_terms:
for searched_term in searched_terms:
searched_cards = searched_cards.filter(lambda card: card['fulltext'].split().contains(searched_term))
total_pages = ((copy(searched_cards).count() - 1) // CARDS_PER_PAGE) + 1
searched_cards = searched_cards.eq_join('set_code', card_set_orderings, index='set_code').without({
'right': 'id'
}).zip().order_by('number', 'set_order')
if 'page' in search_args:
searched_cards = searched_cards.skip(CARDS_PER_PAGE * (int(search_args['page']) - 1)).limit(CARDS_PER_PAGE)
page = search_args['page']
else:
searched_cards = searched_cards.limit(CARDS_PER_PAGE)
page = 1
searched_cards = searched_cards.run()
cards = list(searched_cards)
per_line = str(search_args['per_line']) if 'per_line' in search_args else None
if not per_line:
per_line = request.cookies.get('per_line')
if not per_line:
per_line = int(request.json['w']) // 350
else:
cards = []
page = 0
total_pages = 0
per_line = 0
response = make_response(render_template('widgets/card_widget.jinja2', cards=cards,
page=page, total_pages=total_pages, per_line=per_line,
searched_terms=searched_terms))
if 'per_line' in search_args:
response.set_cookie('per_line', per_line)
return response
@app.route("/collection", methods=["POST"])
def collection():
args = request.json
user_id = args['user_id']
collection_op_type = args['op_type']
collection_name = args['collection_name']
if collection_op_type == 'new':
Collection.create(name=collection_name, user_id=user_id)
elif collection_op_type == 'edit':
Collection.get(args['collection_id']).update({'name': collection_name})
return ''
@app.route("/delete_collection", methods=["POST"])
def delete_collection():
args = request.json
user_id = args['user_id']
collection_id = args['collection_id']
selected_collection_id = User.get(user_id).selected_collection
Collection.get(collection_id).delete()
UserCard.filter({'collection_id': collection_id, 'user_id': user_id}).delete()
if selected_collection_id == collection_id:
User.get(user_id).update({
'selected_collection': Collection.find_first({'user_id': user_id}).get_id()
})
return ''
@app.route("/profile_collections", methods=['POST'])
def profile_collections():
return render_template('widgets/profile_collections.jinja2', collections=_user_collections(current_user.get_id()))
@app.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if request.method == "POST" and form.validate():
username = form.username.data
password = form.password.data
user = User.find_by_username(username)
if user:
if user.check_password(password):
remember = form.remember_me.data
if login_user(user, remember=remember):
flash('Logged in!', 'success')
return redirect(url_for("index"))
else:
flash('Sorry, but you could not log in.', 'danger')
else:
flash('Sorry, but you could not log in.', 'danger')
else:
flash('Invalid username.', 'danger')
else:
for field_name, error_messages in form.errors.items():
for err in error_messages:
flash('error with {}: {}'.format(field_name.replace('_', ' '), err), 'danger')
return render_template("login.html", form=form)
@app.route("/signup", methods=['GET', 'POST'])
def signup():
form = SignupForm()
if request.method == "POST" and form.validate():
username = form.username.data
password = form.password.data
email = form.email.data
if User.find_by_username(username):
flash('Sorry, but this username is already taken.', 'warning')
else:
user = User.get(User.new(username, password, email))
collection_id = Collection.create(name=form.collection_name.data or 'My collection', user_id=user.get_id())
user.update({'selected_collection': collection_id})
remember = form.remember_me.data
if login_user(user, remember=remember):
flash("Logged in!", 'success')
return redirect(url_for("index"))
else:
flash("Sorry, but you could not log in.", 'danger')
else:
for field_name, error_messages in form.errors.items():
for err in error_messages:
flash('error with {}: {}'.format(field_name.replace('_', ' '), err), 'danger')
return render_template("signup.html", form=form)
@app.route("/reauth", methods=["GET", "POST"])
@login_required
def reauth():
if request.method == "POST":
confirm_login()
flash(u"Reauthenticated.", 'success')
return redirect(request.args.get("next") or url_for("index"))
login()
@app.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out.", 'Success')
return redirect(url_for("index"))
@app.route("/changelog")
def changelog():
with open(os.path.join(app.root_path, 'CHANGELOG.md')) as file:
text = file.read()
return markdown(text)
@app.errorhandler(404)
def page_not_found(_):
    """Plain-text 404 handler."""
    return ("not found :'(", 404)
@app.before_request
def before_request():
    """Open a per-request RethinkDB connection on flask.g."""
    try:
        g.rdb_conn = r.connect(host=RDB_HOST, port=RDB_PORT, db=PKMN_DB)
    except RqlDriverError:
        abort(503, "No database connection could be established.")
@app.teardown_request
def teardown_request(_):
    """Close the per-request DB connection, if one was opened."""
    try:
        g.rdb_conn.close()
    except AttributeError:
        # before_request aborted before the connection was created.
        pass
@app.context_processor
def override_url_for():
    """Expose the cache-busting dated_url_for (below) as url_for in templates."""
    return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
    """url_for wrapper that cache-busts static css/js assets.

    Appends a q=<mtime> query parameter so browsers refetch the asset
    after a deploy; other endpoints pass through unchanged.
    """
    if endpoint == 'static':
        # The old code fetched 'filename' twice (once with [] -- a KeyError
        # on static URLs without a filename); fetch it once, safely.
        filename = values.get('filename')
        if filename and ('css' in filename or 'js' in filename):
            file_path = os.path.join(app.root_path, endpoint, filename)
            values['q'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)
@app.context_processor
def utility_processor():
    """Expose card/collection helper functions to all Jinja templates."""
    def unique_card_edition_id(card, edition_type):
        # Composite id parsed back by change_cards() on POST /cards.
        return '|'.join([card['id'], card['card_code'], str(card['number']), normalized_card_set(card['card_set']), edition_type])
    def unique_card_id(card):
        return card['id'] + '|' + str(card['number']) + '|' + normalized_card_set(card['card_set'])
    def normalized_card_set(card_set):
        return _normalized_card_set(card_set)
    def image_exists(path):
        # Lets templates fall back when a card image is missing from static/.
        return os.path.exists(os.path.join(app.root_path, 'static', path))
    def card_count_for_edition_and_user(user_id, card_id, collection_id, edition_type):
        user_card = UserCard.get(_user_card_primary_id(user_id, card_id, collection_id, edition_type))
        if user_card:
            return user_card.count
        else:
            return 0
    def total_number_of_cards_for_user(user_id):
        # NOTE(review): _total_number_of_cards_for_user is expected to be
        # defined earlier in this module (only the *_with_collection
        # variant is visible near here) -- confirm it exists.
        return _total_number_of_cards_for_user(user_id)
    def user_collections(user_id):
        return _user_collections(user_id)
    def selected_collection_for_user(user_id):
        return _selected_collection_for_user(user_id)
    def card_text_for_edition_type(edition_type):
        return _card_text_for_edition_type(edition_type)
    def find_card_with_code(card_code):
        return Card.find_first({'card_code': card_code}) or None
    return dict(unique_card_edition_id=unique_card_edition_id, unique_card_id=unique_card_id,
                normalized_card_set=normalized_card_set,
                card_count_for_edition_and_user=card_count_for_edition_and_user,
                total_number_of_cards_for_user=total_number_of_cards_for_user,
                user_collections=user_collections, selected_collection_for_user=selected_collection_for_user,
                find_card_with_code=find_card_with_code,
                image_exists=image_exists, card_text_for_edition_type=card_text_for_edition_type,
                RARITY_HASH=RARITY_HASH)
if __name__ == '__main__':
    # Serve via waitress; switch to the commented app.run for local
    # debugging with the Flask reloader.
    # app.run(debug=True)
    serve(app, host='127.0.0.1', port=5000)
| |
# coding: utf-8
# Copyright (c) 2013 Jorge Javier Araya Navarro <jorgean@lavabit.org>
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Particle system engine'''
__docformat__ = 'restructuredtext'
import random
import pyglet
from pyglet import gl
import math
import copy
import numpy
import ctypes
from .summanode import SummaNode
from euclid import Point2
# Dev/diagnostic override for point_sprites_available(): None means detect
# for real; True/False forces that return value unconditionally.
forced_point_sprites = None
def point_sprites_available():
    """Return a bool telling if GL point sprites are available.

    For development and diagnostics, summa.particle.forced_point_sprites
    can be set to force the desired return value.
    """
    if forced_point_sprites is not None:
        return forced_point_sprites
    # Bug fix: the original only assigned have_point_sprites on the failure
    # path, so a *successful* probe raised NameError on the final return.
    have_point_sprites = True
    try:
        gl.glEnable(gl.GL_POINT_SPRITE)
        gl.glDisable(gl.GL_POINT_SPRITE)
    except Exception:
        # Any GL failure here means point sprites are unusable.
        have_point_sprites = False
    return have_point_sprites
class ExceptionNoEmptyParticle(Exception):
    """Raised when the particle system has no room for another particle."""
def rand():
    """Uniform random float in [-1.0, 1.0) (a def, not a lambda -- PEP 8 E731)."""
    return random.random() * 2 - 1
# PointerToNumpy by Gary Herron
# from pyglet's user list
def PointerToNumpy(a, ptype=ctypes.c_float):
    """Return a ctypes pointer of type *ptype* into the data buffer of *a*."""
    contiguous = numpy.ascontiguousarray(a)  # usually a no-op copy guard
    return contiguous.ctypes.data_as(ctypes.POINTER(ptype))  # undocumented but standard numpy/ctypes bridge
class Color(object):
    """Simple mutable RGBA container used by the particle system."""

    def __init__(self, r, g, b, a):
        self.r, self.g, self.b, self.a = r, g, b, a

    def to_array(self):
        """Return the components as an (r, g, b, a) tuple."""
        return self.r, self.g, self.b, self.a
class ParticleSystem( SummaNode ):
    """
    Base class for many flavors of cocos particle systems.

    The easiest way to customize is to subclass and redefine some class
    members; see particle_systems for examples.

    If you want to use a custom texture remember it should hold only one
    image, so don't use texture = pyglet.resource.image(...) (it would
    produce an atlas, i.e. multiple images in a texture); using
    texture = pyglet.image.load(...) is fine.
    """

    # type of particle positioning (see position_type below)
    POSITION_FREE, POSITION_GROUPED = range(2)

    #: is the particle system active ?
    active = True

    #: duration in seconds of the system. -1 is infinity
    duration = 0

    #: time elapsed since the start of the system (in seconds)
    elapsed = 0

    #: Gravity of the particles
    gravity = Point2(0.0, 0.0)

    #: position is from "superclass" SummaNode
    #: Position variance
    pos_var = Point2(0.0, 0.0)

    #: The angle (direction) of the particles measured in degrees
    angle = 0.0
    #: Angle variance measured in degrees
    angle_var = 0.0

    #: The speed the particles will have.
    speed = 0.0
    #: The speed variance
    speed_var = 0.0

    #: Tangential acceleration
    tangential_accel = 0.0
    #: Tangential acceleration variance
    tangential_accel_var = 0.0

    #: Radial acceleration
    radial_accel = 0.0
    #: Radial acceleration variance
    radial_accel_var = 0.0

    #: Size of the particles
    size = 0.0
    #: Size variance
    size_var = 0.0

    #: How many seconds will the particle live
    life = 0
    #: Life variance
    life_var = 0

    #: Start color of the particles
    start_color = Color(0.0,0.0,0.0,0.0)
    #: Start color variance
    start_color_var = Color(0.0,0.0,0.0,0.0)
    #: End color of the particles
    end_color = Color(0.0,0.0,0.0,0.0)
    #: End color variance
    end_color_var = Color(0.0,0.0,0.0,0.0)

    #: Maximum particles
    total_particles = 0

    #: texture for the particles (note: loaded at class-definition time)
    pic = pyglet.image.load('fire.png', file=pyglet.resource.file('fire.png'))
    texture = pic.get_texture()

    #: blend additive
    blend_additive = False

    #: color modulate
    color_modulate = True

    # position type: POSITION_GROUPED particles follow the emitter node,
    # POSITION_FREE particles stay where they were emitted
    position_type = POSITION_GROUPED
    def __init__(self, fallback=None):
        """
        fallback can be None, True, False; default is None
        False: use point sprites, faster, not always available
        True: use quads, slower but always available
        None: autodetect, use the faster available
        """
        super(ParticleSystem,self).__init__()

        # Per-particle state is stored in parallel numpy arrays, one row
        # per particle slot; a slot with particle_life < 0 is free.

        # position x 2
        self.particle_pos = numpy.zeros( (self.total_particles, 2),
                                         numpy.float32 )
        # direction x 2
        self.particle_dir = numpy.zeros( (self.total_particles, 2),
                                         numpy.float32 )
        # rad accel x 1
        self.particle_rad = numpy.zeros( (self.total_particles, 1),
                                         numpy.float32 )
        # tan accel x 1
        self.particle_tan = numpy.zeros( (self.total_particles, 1),
                                         numpy.float32 )
        # gravity x 2
        self.particle_grav = numpy.zeros( (self.total_particles, 2),
                                          numpy.float32 )
        # colors x 4
        self.particle_color = numpy.zeros( (self.total_particles, 4),
                                           numpy.float32 )
        # delta colors x 4
        self.particle_delta_color = numpy.zeros( (self.total_particles, 4),
                                                 numpy.float32 )
        # life x 1; -1 marks a free slot
        self.particle_life = numpy.zeros( (self.total_particles, 1),
                                          numpy.float32 )
        self.particle_life.fill(-1.0)
        # size x 1
        self.particle_size = numpy.zeros( (self.total_particles, 1),
                                          numpy.float32 )
        # start position
        self.start_pos = numpy.zeros( (self.total_particles, 2),
                                      numpy.float32 )

        #: accumulator driving how many particles can be emitted per second
        self.emit_counter = 0

        #: Count of particles
        self.particle_count = 0

        #: auto remove when particle finishes
        self.auto_remove_on_finish = False

        #: rendering mode; True is quads, False is point_sprites, None is auto fallback
        if fallback is None:
            fallback = not point_sprites_available()
        self.fallback = fallback
        if fallback:
            self._fallback_init()
            # swap the draw method for the quad-based implementation
            self.draw = self.draw_fallback
        self.schedule( self.step )
    def on_enter( self ):
        """SummaNode hook: called when the node is added to the scene."""
        super( ParticleSystem, self).on_enter()
        #self.add_particle()
    def draw(self):
        """Render all particles in one glDrawArrays call using GL point
        sprites (replaced by draw_fallback when sprites are unavailable)."""
        gl.glPushMatrix()
        self.transform()

        # color preserve - at least nvidia 6150SE needs that
        gl.glPushAttrib(gl.GL_CURRENT_BIT)

        gl.glPointSize( self.size )

        gl.glEnable(gl.GL_TEXTURE_2D)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture.id )

        gl.glEnable(gl.GL_POINT_SPRITE)
        gl.glTexEnvi(gl.GL_POINT_SPRITE, gl.GL_COORD_REPLACE, gl.GL_TRUE )

        # Vertex and color arrays point straight into the numpy buffers.
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        vertex_ptr = PointerToNumpy( self.particle_pos )
        gl.glVertexPointer(2, gl.GL_FLOAT, 0, vertex_ptr)

        gl.glEnableClientState(gl.GL_COLOR_ARRAY)
        color_ptr = PointerToNumpy( self.particle_color)
        gl.glColorPointer(4, gl.GL_FLOAT, 0, color_ptr)

        gl.glPushAttrib(gl.GL_COLOR_BUFFER_BIT)
        gl.glEnable(gl.GL_BLEND)
        if self.blend_additive:
            gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE)
        else:
            gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)

#        mode = GLint()
#        glTexEnviv( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode )
#
#        if self.color_modulate:
#            glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE )
#        else:
#            glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE )

        # Dead particles have alpha 0 (see update_particles), so drawing
        # the whole buffer is safe.
        gl.glDrawArrays(gl.GL_POINTS, 0, self.total_particles)

        # un -blend
        gl.glPopAttrib()

        # color restore
        gl.glPopAttrib()

        # restore env mode
        # glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode)

        # disable states
        gl.glDisableClientState(gl.GL_COLOR_ARRAY)
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glDisable(gl.GL_POINT_SPRITE)
        gl.glDisable(gl.GL_TEXTURE_2D)

        gl.glPopMatrix()
    def step( self, delta ):
        """Scheduled per frame: emit new particles and advance the system.

        delta -- seconds elapsed since the previous frame.
        """
        # update particle count
        self.particle_count = numpy.sum( self.particle_life >= 0 )

        if self.active:
            # emission_rate (particles/second) is expected to be provided
            # by the concrete subclass.
            rate = 1.0 / self.emission_rate
            self.emit_counter += delta

#            if random.random() < 0.01:
#                delta += 0.5

            # Emit as many particles as the accumulated time allows, up to
            # the pool capacity.
            while (self.particle_count < self.total_particles and
                   self.emit_counter > rate):
                self.add_particle()
                self.emit_counter -= rate

            self.elapsed += delta
            if self.duration != -1 and self.duration < self.elapsed:
                self.stop_system()

        self.update_particles( delta )

        # Once stopped and drained, optionally detach from the scene.
        if (not self.active and
            self.particle_count == 0 and self.auto_remove_on_finish == True):
            self.unschedule( self.step )
            self.parent.remove( self )
    def add_particle(self):
        """Activate one free particle slot and bump the live count.

        Code calling add_particle must either:
          * be sure there is room for the particle, or
          * be prepared to catch ExceptionNoEmptyParticle.
        It is acceptable to try: ... except ...: pass.
        """
        self.init_particle()
        self.particle_count += 1
    def stop_system( self ):
        """Stop emitting; already-live particles keep updating until they die."""
        self.active = False
        self.elapsed= self.duration
        self.emit_counter = 0
    def reset_system( self ):
        """Reset the emitter timers.

        NOTE(review): this sets elapsed to *duration* (identical to
        stop_system) rather than 0, which looks odd for a "reset" --
        confirm against cocos2d upstream before changing.
        """
        self.elapsed= self.duration
        self.emit_counter = 0
    def update_particles( self, delta ):
        """Advance all particles by *delta* seconds, fully vectorized.

        Applies radial/tangential/gravity acceleration to directions,
        integrates positions, ages lives, interpolates colors and zeroes
        the alpha of dead particles.
        """
        # radial: unit vector from the origin to each particle (posx, posy)
        norm = numpy.sqrt( self.particle_pos[:,0] ** 2 + self.particle_pos[:,1] ** 2 )
        # XXX prevent div by 0
        norm = numpy.select( [norm==0], [0.0000001], default=norm )
        posx = self.particle_pos[:,0] / norm
        posy = self.particle_pos[:,1] / norm

        radial = numpy.array( [posx, posy] )
        # tangential is the radial rotated 90 degrees
        tangential = numpy.array( [-posy, posx] )

        # update dir: scale both components by the per-particle accelerations
        radial = numpy.swapaxes(radial,0,1)
        radial *= self.particle_rad
        tangential = numpy.swapaxes(tangential,0,1)
        tangential *= self.particle_tan

        self.particle_dir += (tangential + radial + self.particle_grav) * delta

        # update pos with updated dir
        self.particle_pos += self.particle_dir * delta

        # life
        self.particle_life -= delta

        # position: free or grouped
        if self.position_type == self.POSITION_FREE:
            # free particles compensate for emitter movement so they stay
            # where they were emitted
            atuple = numpy.array( [self.x, self.y] )
            tmp = atuple - self.start_pos
            self.particle_pos -= tmp

        # color
        self.particle_color += self.particle_delta_color * delta

        # if life < 0, set alpha to 0 so dead particles are invisible
        self.particle_color[:,3] = numpy.select([self.particle_life[:,0] < 0],
                                                [0],
                                                default=self.particle_color[:,3])

#        print self.particles[0]
#        print self.pas[0,0:4]
    def init_particle( self ):
        """Initialize the first free particle slot with randomized state.

        Raises ExceptionNoEmptyParticle when every slot is alive.
        Each attribute is drawn as base + variance * rand(), rand() being
        uniform in [-1, 1).
        """
        # find a free slot (life < 0)
        a = self.particle_life < 0
        idxs = a.nonzero()
        idx = -1
        if len(idxs[0]) > 0:
            idx = idxs[0][0]
        else:
            raise ExceptionNoEmptyParticle()

        # position
        self.particle_pos[idx][0] = self.pos_var.x * rand()
        self.particle_pos[idx][1] = self.pos_var.y * rand()

        # start position: the emitter's current location
        self.start_pos[idx][0] = self.x
        self.start_pos[idx][1] = self.y

        # velocity from randomized angle and speed
        a = math.radians( self.angle + self.angle_var * rand() )
        v = Point2( math.cos( a ), math.sin( a ) )
        s = self.speed + self.speed_var * rand()
        adir = v * s

        # direction
        self.particle_dir[idx][0] = adir.x
        self.particle_dir[idx][1] = adir.y

        # radial accel
        self.particle_rad[idx] = self.radial_accel + self.radial_accel_var * rand()

        # tangential accel
        self.particle_tan[idx] = self.tangential_accel + self.tangential_accel_var * rand()

        # life
        life = self.particle_life[idx] = self.life + self.life_var * rand()

        # Color
        # start
        sr = self.start_color.r + self.start_color_var.r * rand()
        sg = self.start_color.g + self.start_color_var.g * rand()
        sb = self.start_color.b + self.start_color_var.b * rand()
        sa = self.start_color.a + self.start_color_var.a * rand()

        self.particle_color[idx][0] = sr
        self.particle_color[idx][1] = sg
        self.particle_color[idx][2] = sb
        self.particle_color[idx][3] = sa

        # end
        er = self.end_color.r + self.end_color_var.r * rand()
        eg = self.end_color.g + self.end_color_var.g * rand()
        eb = self.end_color.b + self.end_color_var.b * rand()
        ea = self.end_color.a + self.end_color_var.a * rand()

        # per-second color deltas so the color reaches the end value when
        # the particle's life runs out
        delta_color_r = (er - sr) / life
        delta_color_g = (eg - sg) / life
        delta_color_b = (eb - sb) / life
        delta_color_a = (ea - sa) / life

        self.particle_delta_color[idx][0] = delta_color_r
        self.particle_delta_color[idx][1] = delta_color_g
        self.particle_delta_color[idx][2] = delta_color_b
        self.particle_delta_color[idx][3] = delta_color_a

        # size
        self.particle_size[idx] = self.size + self.size_var * rand()

        # gravity
        self.particle_grav[idx][0] = self.gravity.x
        self.particle_grav[idx][1] = self.gravity.y
# Below only fallback functionality.
# It uses quads instehad of point sprites, doing a transformation
# point sprites buffers -> quads buffer, so any change in point sprite mode
# is automatically reflects in the fallback mode (except for changes in the
# draw method which should be manually adapted
    def _fallback_init(self):
        """Allocate the quad buffers used by the fallback (non-point-sprite)
        renderer: four vertices, tex coords and colors per particle."""
        self.vertexs = numpy.zeros((self.total_particles * 4, 2), numpy.float32)
        # one full-texture quad, repeated for every particle
        tex_coords_for_quad = numpy.array([[0.0, 1.0], [0.0, 0.0], [1.0, 0.0],
                                           [1.0, 1.0]], numpy.float32)
        self.tex_coords = numpy.zeros((self.total_particles * 4, 2),
                                      numpy.float32)
        all_tex_coords = self.tex_coords
        for i in xrange(0,len(self.vertexs),4):
            all_tex_coords[i : i + 4 ] = tex_coords_for_quad
        self.per_vertex_colors = numpy.zeros((self.total_particles * 4, 4),
                                             numpy.float32)
        # offsets from particle center to its four corners (see
        # make_delta_pos_to_vertex)
        self.delta_pos_to_vertex = numpy.zeros((4, 2), numpy.float32)
    def draw_fallback(self):
        """Quad-based renderer used when point sprites are unavailable;
        installed over draw() in __init__ when fallback is enabled."""
        # refresh the quad buffers from the point-sprite state
        self.make_delta_pos_to_vertex()
        self.update_vertexs_from_pos()
        self.update_per_vertex_colors()
        gl.glPushMatrix()
        self.transform()

        # color preserve - at least intel 945G needs that
        gl.glPushAttrib(gl.GL_CURRENT_BIT)

        gl.glEnable(gl.GL_TEXTURE_2D)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture.id )

        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        vertexs_ptr = PointerToNumpy(self.vertexs)
        gl.glVertexPointer(2, gl.GL_FLOAT, 0, vertexs_ptr)

        gl.glEnableClientState(gl.GL_COLOR_ARRAY)
        color_ptr = PointerToNumpy(self.per_vertex_colors)
        #glColorPointer(4, GL_UNSIGNED_BYTE, 0, color_ptr)
        gl.glColorPointer(4, gl.GL_FLOAT, 0, color_ptr)

        gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
        tex_coord_ptr = PointerToNumpy(self.tex_coords)
        gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, tex_coord_ptr)

        gl.glPushAttrib(gl.GL_COLOR_BUFFER_BIT)
        gl.glEnable(gl.GL_BLEND)
        if self.blend_additive:
            gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE)
        else:
            gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)

        gl.glDrawArrays(gl.GL_QUADS, 0, len(self.vertexs))

        # un -blend
        gl.glPopAttrib()

        # color restore
        gl.glPopAttrib()

        # disable states
        gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
        gl.glDisableClientState(gl.GL_COLOR_ARRAY)
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glDisable(gl.GL_TEXTURE_2D)

        gl.glPopMatrix()
    def update_vertexs_from_pos(self):
        """Expand each particle position into its quad's four corner vertices."""
        vertexs = self.vertexs
        delta = self.delta_pos_to_vertex
        pos = self.particle_pos
        for i, pos_i in enumerate(pos):
            i4 = i*4
            # broadcast: four corner offsets + the particle center
            vertexs[i4:i4 + 4 ] = delta + pos_i
    def update_per_vertex_colors(self):
        """Replicate each particle's RGBA onto its quad's four vertices."""
        colors = self.particle_color
        per_vertex_colors = self.per_vertex_colors
        for i, color in enumerate(colors):
            i4 = i*4
            per_vertex_colors[i4:i4 + 4 ] = color
    def make_delta_pos_to_vertex(self):
        """Recompute the four corner offsets of a particle quad from the
        current particle size."""
        size2 = self.size / 2.0
        # counter-clockwise
        self.delta_pos_to_vertex[0] = (-size2, +size2) # NW
        self.delta_pos_to_vertex[1] = (-size2, -size2) # SW
        self.delta_pos_to_vertex[2] = (+size2, -size2) # SE
        self.delta_pos_to_vertex[3] = (+size2, +size2) # NE
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
# Configuration options for the baremetal IPMI driver; registered under
# the [baremetal] group below.
opts = [
    cfg.StrOpt('terminal',
               default='shellinaboxd',
               help='path to baremetal terminal program'),
    cfg.StrOpt('terminal_cert_dir',
               help='path to baremetal terminal SSL cert(PEM)'),
    cfg.StrOpt('terminal_pid_dir',
               default=paths.state_path_def('baremetal/console'),
               help='path to directory stores pidfiles of baremetal_terminal'),
    cfg.IntOpt('ipmi_power_retry',
               default=10,
               help='maximal number of retries for IPMI operations'),
    ]
# Register the options above under CONF.baremetal and set up module logging.
baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
# NOTE(r-mibu): Since ipmitool hangs with an empty password file,
# we have to write '\0' if password was empty.
# see https://bugs.launchpad.net/nova/+bug/1237802 for more details
f.write(password or b"\0")
return path
def _get_console_pid_path(node_id):
    """Return the pidfile path for the console proxy of the given node."""
    return os.path.join(CONF.baremetal.terminal_pid_dir, "%s.pid" % node_id)
def _get_console_pid(node_id):
    """Read the console pidfile for a node.

    Returns the pid as an int, or None when the pidfile is absent or
    does not contain a parseable integer.
    """
    pid_path = _get_console_pid_path(node_id)
    if not os.path.exists(pid_path):
        return None
    with open(pid_path, 'r') as pidfile:
        contents = pidfile.read()
    try:
        return int(contents)
    except ValueError:
        LOG.warn(_("pid file %s does not contain any pid"), pid_path)
        return None
class IPMI(base.PowerManager):
    """IPMI Power Driver for Baremetal Nova Compute

    This PowerManager class provides mechanism for controlling the power state
    of physical hardware via IPMI calls. It also provides serial console access
    where available.
    """

    def __init__(self, node, **kwargs):
        """Store the node's IPMI credentials and console port.

        :param node: dict describing the baremetal node; must carry 'id',
            'pm_address', 'pm_user', 'pm_password' and 'terminal_port'.
        :raises: exception.InvalidParameterValue if a required field is None.
        """
        self.state = None
        self.retries = None
        self.node_id = node['id']
        self.address = node['pm_address']
        self.user = node['pm_user']
        self.password = node['pm_password']
        self.port = node['terminal_port']
        # PEP 8: compare against None with 'is', not '=='.
        if self.node_id is None:
            raise exception.InvalidParameterValue(_("Node id not supplied "
                                                    "to IPMI"))
        if self.address is None:
            raise exception.InvalidParameterValue(_("Address not supplied "
                                                    "to IPMI"))
        if self.user is None:
            raise exception.InvalidParameterValue(_("User not supplied "
                                                    "to IPMI"))
        if self.password is None:
            raise exception.InvalidParameterValue(_("Password not supplied "
                                                    "to IPMI"))

    def _exec_ipmitool(self, command):
        """Run an ipmitool subcommand against this node's BMC.

        :param command: space-separated subcommand, e.g. "power on".
        :returns: (stdout, stderr) from ipmitool.
        """
        args = ['ipmitool',
                '-I',
                'lanplus',
                '-H',
                self.address,
                '-U',
                self.user,
                '-f']
        # The password goes through a throw-away file, never the command
        # line, so it cannot leak via the process listing.
        pwfile = _make_password_file(self.password)
        try:
            args.append(pwfile)
            args.extend(command.split(" "))
            out, err = utils.execute(*args, attempts=3)
            LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
                      {'out': out, 'err': err})
            return out, err
        finally:
            bm_utils.unlink_without_raise(pwfile)

    def _power_on(self):
        """Turn the power to this node ON."""

        def _wait_for_power_on():
            """Called at an interval until the node's power is on."""
            if self.is_power_on():
                self.state = baremetal_states.ACTIVE
                raise loopingcall.LoopingCallDone()
            if self.retries > CONF.baremetal.ipmi_power_retry:
                # Lazy %-style args, consistent with LOG.debug above.
                LOG.error(_("IPMI power on failed after %d tries"),
                          CONF.baremetal.ipmi_power_retry)
                self.state = baremetal_states.ERROR
                raise loopingcall.LoopingCallDone()
            try:
                self.retries += 1
                # Only issue "power on" once; later ticks just poll status.
                if not self.power_on_called:
                    self._exec_ipmitool("power on")
                    self.power_on_called = True
            except Exception:
                LOG.exception(_("IPMI power on failed"))

        self.retries = 0
        self.power_on_called = False
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on)
        timer.start(interval=1.0).wait()

    def _power_off(self):
        """Turn the power to this node OFF."""

        def _wait_for_power_off():
            """Called at an interval until the node's power is off."""
            if self.is_power_on() is False:
                self.state = baremetal_states.DELETED
                raise loopingcall.LoopingCallDone()
            if self.retries > CONF.baremetal.ipmi_power_retry:
                LOG.error(_("IPMI power off failed after %d tries"),
                          CONF.baremetal.ipmi_power_retry)
                self.state = baremetal_states.ERROR
                raise loopingcall.LoopingCallDone()
            try:
                self.retries += 1
                # Only issue "power off" once; later ticks just poll status.
                if not self.power_off_called:
                    self._exec_ipmitool("power off")
                    self.power_off_called = True
            except Exception:
                LOG.exception(_("IPMI power off failed"))

        self.retries = 0
        self.power_off_called = False
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off)
        timer.start(interval=1.0).wait()

    def _set_pxe_for_next_boot(self):
        """Ask the BMC to PXE-boot the node on subsequent boots.

        Failures are logged but swallowed deliberately: power control
        should proceed even if the bootdev change could not be applied.
        """
        try:
            self._exec_ipmitool("chassis bootdev pxe options=persistent")
        except Exception:
            LOG.exception(_("IPMI set next bootdev failed"))

    def activate_node(self):
        """Turns the power to node ON.

        Sets node next-boot to PXE and turns the power on,
        waiting up to ipmi_power_retry/2 seconds for confirmation
        that the power is on.

        :returns: One of baremetal_states.py, representing the new state.
        """
        if self.is_power_on() and self.state == baremetal_states.ACTIVE:
            LOG.warning(_("Activate node called, but node %s "
                          "is already active"), self.address)
        self._set_pxe_for_next_boot()
        self._power_on()
        return self.state

    def reboot_node(self):
        """Cycles the power to a node.

        Turns the power off, sets next-boot to PXE, and turns the power on.
        Each action waits up to ipmi_power_retry/2 seconds for confirmation
        that the power state has changed.

        :returns: One of baremetal_states.py, representing the new state.
        """
        self._power_off()
        self._set_pxe_for_next_boot()
        self._power_on()
        return self.state

    def deactivate_node(self):
        """Turns the power to node OFF.

        Turns the power off, and waits up to ipmi_power_retry/2 seconds
        for confirmation that the power is off.

        :returns: One of baremetal_states.py, representing the new state.
        """
        self._power_off()
        return self.state

    def is_power_on(self):
        """Check if the power is currently on.

        :returns: True if on; False if off; None if unable to determine.
        """
        # NOTE(deva): string matching based on
        #             http://ipmitool.cvs.sourceforge.net/
        #               viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
        res = self._exec_ipmitool("power status")[0]
        if res == ("Chassis Power is on\n"):
            return True
        elif res == ("Chassis Power is off\n"):
            return False
        return None

    def start_console(self):
        """Start a shellinaboxd serial-over-LAN console for this node."""
        if not self.port:
            return
        args = []
        args.append(CONF.baremetal.terminal)
        if CONF.baremetal.terminal_cert_dir:
            args.append("-c")
            args.append(CONF.baremetal.terminal_cert_dir)
        else:
            args.append("-t")
        args.append("-p")
        args.append(str(self.port))
        args.append("--background=%s" % _get_console_pid_path(self.node_id))
        args.append("-s")
        # Create the password file BEFORE entering the try block so the
        # finally clause can never reference an unbound 'pwfile' if the
        # file creation itself fails (same pattern as _exec_ipmitool).
        pwfile = _make_password_file(self.password)
        try:
            ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
                " -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
                % {'uid': os.getuid(),
                   'gid': os.getgid(),
                   'address': self.address,
                   'user': self.user,
                   'pwfile': pwfile,
                   }
            args.append(ipmi_args)
            # Run shellinaboxd without pipes. Otherwise utils.execute() waits
            # infinitely since shellinaboxd does not close passed fds.
            x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
            x.append('</dev/null')
            x.append('>/dev/null')
            x.append('2>&1')
            utils.execute(' '.join(x), shell=True)
        finally:
            bm_utils.unlink_without_raise(pwfile)

    def stop_console(self):
        """Stop the console proxy for this node, if one is running."""
        console_pid = _get_console_pid(self.node_id)
        if console_pid:
            # Allow exitcode 99 (RC_UNAUTHORIZED)
            utils.execute('kill', '-TERM', str(console_pid),
                          run_as_root=True,
                          check_exit_code=[0, 99])
        bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
| |
from __future__ import absolute_import
import sys
import abc
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tf_pose import common
DEFAULT_PADDING = 'SAME'

# Shared initializers / L2 regularizers reused by the layer methods below.
_init_xavier = tf.contrib.layers.xavier_initializer()
_init_norm = tf.truncated_normal_initializer(stddev=0.01)
_init_zero = slim.init_ops.zeros_initializer()
_l2_regularizer_00004 = tf.contrib.layers.l2_regularizer(0.00004)
# Strength taken from the project-wide setting in tf_pose.common.
_l2_regularizer_convb = tf.contrib.layers.l2_regularizer(common.regularizer_conv)
def layer(op):
    '''
    Decorator for composable network layers.

    The wrapped op receives the current terminal node(s) as its input,
    its output is registered in the layer LUT and becomes the input of
    the next layer, and the network object itself is returned so calls
    can be chained fluently.
    '''
    def layer_decorated(self, *args, **kwargs):
        # An explicit name kwarg wins; otherwise auto-generate one.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # Resolve the layer input from the current terminal nodes.
        if not self.terminals:
            raise RuntimeError('No input variables found for layer %s.' % name)
        layer_input = (self.terminals[0]
                       if len(self.terminals) == 1
                       else list(self.terminals))
        # Run the op, register its output, and make it the next input.
        layer_output = op(self, layer_input, *args, **kwargs)
        self.layers[name] = layer_output
        self.feed(layer_output)
        # Fluent interface: hand the network object back to the caller.
        return self

    return layer_decorated
class BaseNetwork(object):
    """Base class for networks assembled from @layer-decorated ops.

    Subclasses implement setup() and build the graph by chaining
    feed(...) with the layer methods below; load() restores weights
    from a numpy-serialized dict (Caffe-converted checkpoints).
    """

    def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout: 1.0 (default) enables the configured
        # keep_prob scaling; feeding 0.0 disables dropout at inference.
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup()

    @abc.abstractmethod
    def setup(self):
        '''Construct the network. '''
        raise NotImplementedError('Must be implemented by the subclass.')

    def load(self, data_path, session, ignore_missing=False):
        '''
        Load network weights.
        data_path: The path to the numpy-serialized network weights
        session: The current TensorFlow session
        ignore_missing: If true, serialized weights for missing layers are ignored.
        '''
        data_dict = np.load(data_path, encoding='bytes').item()
        for op_name in data_dict:
            if isinstance(data_dict[op_name], np.ndarray):
                # Flat ndarray entry: the key is the full variable name.
                if 'RMSProp' in op_name:
                    # Skip optimizer slot variables; only weights matter.
                    continue
                with tf.variable_scope('', reuse=True):
                    var = tf.get_variable(op_name.replace(':0', ''))
                    try:
                        session.run(var.assign(data_dict[op_name]))
                    except Exception as e:
                        # A shape/name mismatch here is fatal on purpose.
                        print(op_name)
                        print(e)
                        sys.exit(-1)
            else:
                # Nested dict entry: {param_name: value} under a scope.
                with tf.variable_scope(op_name, reuse=True):
                    for param_name, data in data_dict[op_name].items():
                        try:
                            # Keys may be bytes (encoding='bytes' above).
                            var = tf.get_variable(param_name.decode("utf-8"))
                            session.run(var.assign(data))
                        except ValueError as e:
                            print(e)
                            if not ignore_missing:
                                raise

    def feed(self, *args):
        '''Set the input(s) for the next operation by replacing the terminal nodes.
        The arguments can be either layer names or the actual layers.
        '''
        assert len(args) != 0
        self.terminals = []
        for fed_layer in args:
            try:
                # Python 2: str or unicode; NameError on Python 3.
                is_str = isinstance(fed_layer, basestring)
            except NameError:
                is_str = isinstance(fed_layer, str)
            if is_str:
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError('Unknown layer name fed: %s' % fed_layer)
            self.terminals.append(fed_layer)
        return self

    def get_output(self, name=None):
        '''Returns the current network output (last terminal), or the
        named layer's output when a name is given.'''
        if not name:
            return self.terminals[-1]
        else:
            return self.layers[name]

    def get_tensor(self, name):
        # Alias for get_output(name).
        return self.get_output(name)

    def get_unique_name(self, prefix):
        '''Returns an index-suffixed unique name for the given prefix.
        This is used for auto-generating layer names based on the type-prefix.
        '''
        ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
        return '%s_%d' % (prefix, ident)

    def make_var(self, name, shape, trainable=True):
        '''Creates a new TensorFlow variable.'''
        # NOTE(review): '&' works here because both operands are bools,
        # but the intent is logical AND.
        return tf.get_variable(name, shape, trainable=self.trainable & trainable, initializer=tf.contrib.layers.xavier_initializer())

    def validate_padding(self, padding):
        '''Verifies that the padding is one of the supported ones.'''
        assert padding in ('SAME', 'VALID')

    @layer
    def normalize_vgg(self, input, name):
        # normalize input -0.5 ~ 0.5
        input = tf.divide(input, 256.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        return input

    @layer
    def normalize_mobilenet(self, input, name):
        # normalize input to -1.0 ~ 1.0
        input = tf.divide(input, 255.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        input = tf.multiply(input, 2.0, name=name + '_multiply')
        return input

    @layer
    def normalize_nasnet(self, input, name):
        # Same -1.0 ~ 1.0 scaling as normalize_mobilenet.
        input = tf.divide(input, 255.0, name=name + '_divide')
        input = tf.subtract(input, 0.5, name=name + '_subtract')
        input = tf.multiply(input, 2.0, name=name + '_multiply')
        return input

    @layer
    def upsample(self, input, factor, name):
        # Bilinear resize by an integer factor; requires a static HxW
        # (NHWC layout implied by the indices 1 and 2).
        return tf.image.resize_bilinear(input, [int(input.get_shape()[1]) * factor, int(input.get_shape()[2]) * factor], name=name)

    @layer
    def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True):
        # Depthwise k_h x k_w conv followed by a 1x1 pointwise conv
        # (MobileNet-style separable convolution).
        with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused, is_training=self.trainable):
            output = slim.separable_convolution2d(input,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  trainable=self.trainable,
                                                  depth_multiplier=1.0,
                                                  kernel_size=[k_h, k_w],
                                                  # activation_fn=common.activation_fn if relu else None,
                                                  activation_fn=None,
                                                  # normalizer_fn=slim.batch_norm,
                                                  weights_initializer=_init_xavier,
                                                  # weights_initializer=_init_norm,
                                                  weights_regularizer=_l2_regularizer_00004,
                                                  biases_initializer=None,
                                                  padding=DEFAULT_PADDING,
                                                  scope=name + '_depthwise')

            output = slim.convolution2d(output,
                                        c_o,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        activation_fn=common.activation_fn if relu else None,
                                        weights_initializer=_init_xavier,
                                        # weights_initializer=_init_norm,
                                        biases_initializer=_init_zero if set_bias else None,
                                        normalizer_fn=slim.batch_norm,
                                        trainable=self.trainable,
                                        weights_regularizer=None,
                                        scope=name + '_pointwise')

        return output

    @layer
    def convb(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True, set_tanh=False):
        # Conv + batch-norm block.
        # NOTE(review): despite the 'set_tanh' name, the extra activation
        # applied is sigmoid, not tanh.
        with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused, is_training=self.trainable):
            output = slim.convolution2d(input, c_o, kernel_size=[k_h, k_w],
                                        stride=stride,
                                        normalizer_fn=slim.batch_norm,
                                        weights_regularizer=_l2_regularizer_convb,
                                        weights_initializer=_init_xavier,
                                        # weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                        biases_initializer=_init_zero if set_bias else None,
                                        trainable=self.trainable,
                                        activation_fn=common.activation_fn if relu else None,
                                        scope=name)
            if set_tanh:
                output = tf.nn.sigmoid(output, name=name + '_extra_acv')
        return output

    @layer
    def conv(self,
             input,
             k_h,
             k_w,
             c_o,
             s_h,
             s_w,
             name,
             relu=True,
             padding=DEFAULT_PADDING,
             group=1,
             trainable=True,
             biased=True):
        # Verify that the padding is acceptable
        self.validate_padding(padding)
        # Get the number of channels in the input
        c_i = int(input.get_shape()[-1])
        # Verify that the grouping parameter is valid
        assert c_i % group == 0
        assert c_o % group == 0
        # Convolution for a given input and kernel
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o], trainable=self.trainable & trainable)
            if group == 1:
                # This is the common-case. Convolve the input without any further complications.
                output = convolve(input, kernel)
            else:
                # Split the input into groups and then convolve each of them independently
                input_groups = tf.split(3, group, input)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(3, output_groups)
            # Add the biases
            if biased:
                biases = self.make_var('biases', [c_o], trainable=self.trainable & trainable)
                output = tf.nn.bias_add(output, biases)
            if relu:
                # ReLU non-linearity
                output = tf.nn.relu(output, name=scope.name)
            return output

    @layer
    def relu(self, input, name):
        return tf.nn.relu(input, name=name)

    @layer
    def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        self.validate_padding(padding)
        return tf.nn.max_pool(input,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)

    @layer
    def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        self.validate_padding(padding)
        return tf.nn.avg_pool(input,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)

    @layer
    def lrn(self, input, radius, alpha, beta, name, bias=1.0):
        return tf.nn.local_response_normalization(input,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias,
                                                  name=name)

    @layer
    def concat(self, inputs, axis, name):
        return tf.concat(axis=axis, values=inputs, name=name)

    @layer
    def add(self, inputs, name):
        return tf.add_n(inputs, name=name)

    @layer
    def fc(self, input, num_out, name, relu=True):
        # Fully-connected layer; flattens a 4-D (spatial) input first.
        with tf.variable_scope(name) as scope:
            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input, [-1, dim])
            else:
                feed_in, dim = (input, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out])
            biases = self.make_var('biases', [num_out])
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc

    @layer
    def softmax(self, input, name):
        # NOTE(review): map() here assumes Python 2 semantics (returns a
        # list); on Python 3 len() of the map object would fail.
        input_shape = map(lambda v: v.value, input.get_shape())
        if len(input_shape) > 2:
            # For certain models (like NiN), the singleton spatial dimensions
            # need to be explicitly squeezed, since they're not broadcast-able
            # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
            if input_shape[1] == 1 and input_shape[2] == 1:
                input = tf.squeeze(input, squeeze_dims=[1, 2])
            else:
                raise ValueError('Rank 2 tensor input expected for softmax!')
        return tf.nn.softmax(input, name=name)

    @layer
    def batch_normalization(self, input, name, scale_offset=True, relu=False):
        # NOTE: Currently, only inference is supported
        with tf.variable_scope(name) as scope:
            shape = [input.get_shape()[-1]]
            if scale_offset:
                scale = self.make_var('scale', shape=shape)
                offset = self.make_var('offset', shape=shape)
            else:
                scale, offset = (None, None)
            output = tf.nn.batch_normalization(
                input,
                mean=self.make_var('mean', shape=shape),
                variance=self.make_var('variance', shape=shape),
                offset=offset,
                scale=scale,
                # TODO: This is the default Caffe batch norm eps
                # Get the actual eps from parameters
                variance_epsilon=1e-5,
                name=name)
            if relu:
                output = tf.nn.relu(output)
            return output

    @layer
    def dropout(self, input, keep_prob, name):
        # When use_dropout is fed 0.0, keep becomes 1.0 (dropout disabled).
        keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
        return tf.nn.dropout(input, keep, name=name)
| |
import os
import stat
import subprocess
import sys
import textwrap
import pytest
from exec_wrappers import create_wrappers
from exec_wrappers.create_wrappers import get_wrapper_extension
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
def test_execute_virtualenv_wrappers(tmpdir, monkeypatch):
    """End-to-end: the environment seen through a generated virtualenv
    wrapper must equal the one produced by sourcing activate directly."""
    import virtualenv
    # monkey patch the current dir to make sure we convert the relative paths
    # passed as arguments to absolute
    monkeypatch.chdir(tmpdir)
    virtualenv.create_environment(
        "virtual envs/test", no_setuptools=True, no_pip=True, no_wheel=True
    )
    if sys.platform != "win32":
        bin_dir = "virtual envs/test/bin"
    else:
        bin_dir = "virtual envs/test/Scripts"
    create_wrappers._main(
        [
            "-t",
            "virtualenv",
            "--virtual-env-dir",
            "virtual envs/test",
            "--bin-dir",
            bin_dir,
            "--dest-dir",
            "wrappers",
        ]
    )
    environ_from_activate = _environ_from_activate(
        _activate_virtualenv_script(), tmpdir
    )
    # Remove some variables we don't care
    if sys.platform != "win32":
        environ_from_activate.pop("PS1", None)
        environ_from_activate.pop("SHLVL")
    else:
        environ_from_activate.pop("_OLD_VIRTUAL_PATH")
        environ_from_activate.pop("_OLD_VIRTUAL_PROMPT")
        environ_from_activate.pop("PROMPT")
    # Normalize path case so the comparison works on case-insensitive
    # filesystems (Windows).
    environ_from_activate["PATH"] = os.path.normcase(environ_from_activate["PATH"])
    environ_from_activate["VIRTUAL_ENV"] = os.path.normcase(
        environ_from_activate["VIRTUAL_ENV"]
    )
    environ_from_wrapper = _environ_from_wrapper()
    if sys.platform != "win32":
        environ_from_wrapper.pop("SHLVL")
    else:
        environ_from_wrapper.pop("PROMPT")
    environ_from_wrapper["PATH"] = os.path.normcase(environ_from_wrapper["PATH"])
    environ_from_wrapper["VIRTUAL_ENV"] = os.path.normcase(
        environ_from_wrapper["VIRTUAL_ENV"]
    )
    assert environ_from_activate == environ_from_wrapper
def test_execute_conda_wrappers(tmpdir, monkeypatch):
    """End-to-end: the environment seen through a generated conda wrapper
    must equal the one produced by activating the conda env directly."""
    # monkey patch the current dir to make sure we convert the relative paths
    # passed as arguments to absolute
    monkeypatch.chdir(tmpdir)
    if not which("conda"):
        pytest.fail(
            "This test needs conda. Make sure you have miniconda "
            "installed and added to PATH env var"
        )
    subprocess.check_call(
        ["conda", "create", "--clone", "root", "-p", "conda envs/test"]
    )
    activate_scripts_path = tmpdir.join("conda envs/test/etc/conda/activate.d")
    if sys.platform != "win32":
        bin_dir = "conda envs/test/bin"
        # Create a custom activate script to ensure that the wrapper runs it.
        activate_scripts_path.join("custom_activate.sh").write(
            textwrap.dedent(
                """
                export ENV_VAR_FROM_ACTIVATE='SOME_VALUE'
                """
            ),
            ensure=True,
        )
    else:
        bin_dir = "conda envs/test"
        # Create a custom activate script to ensure that the wrapper runs it.
        activate_scripts_path.join("custom_activate.bat").write(
            textwrap.dedent(
                """
                @set "ENV_VAR_FROM_ACTIVATE=SOME_VALUE"
                """
            ),
            ensure=True,
        )
    create_wrappers._main(
        [
            "-t",
            "conda",
            "--conda-env-dir",
            "conda envs/test",
            "--bin-dir",
            bin_dir,
            "--dest-dir",
            "wrappers",
        ]
    )
    environ_from_activate = _environ_from_activate(_activate_conda_script(), tmpdir)
    environ_from_wrapper = _environ_from_wrapper()
    # The custom activate.d script above must have run inside the wrapper.
    assert environ_from_wrapper["CONDA_DEFAULT_ENV"] == "test"
    assert environ_from_wrapper["CONDA_ENV_PATH"] == str(tmpdir.join("conda envs/test"))
    assert environ_from_wrapper["ENV_VAR_FROM_ACTIVATE"] == "SOME_VALUE"
    if sys.platform != "win32":
        # conda 4.6 began to add a path like <conda_root_dir>/condabin, but
        # we have no easy way to find out the root dir from the wrapper script.
        # So let's just filter it for now.
        path = environ_from_activate["PATH"].split(":")
        filtered_path = [p for p in path if "/condabin" not in p]
        environ_from_activate["PATH"] = ":".join(filtered_path)

        path = environ_from_wrapper["PATH"].split(":")
        filtered_path = [p for p in path if "/condabin" not in p]
        environ_from_wrapper["PATH"] = ":".join(filtered_path)
    # Remove some variables we don't care about
    variables_to_ignore = [
        # It's an absolute path when activating but just the env name when
        # using the wrapper
        "CONDA_DEFAULT_ENV",
        # Only present on old conda versions
        "CONDA_ENV_PATH",
        # Only present on recent conda versions (>=4.4)
        "CONDA_PROMPT_MODIFIER",
        "CONDA_SHLVL",
        "CONDA_PYTHON_EXE",
        # Not present on conda >=4.4
        "CONDA_PATH_BACKUP",
        "CONDA_PS1_BACKUP",
        # Only present on conda >=4.5
        "CONDA_EXE",
        # Only present on conda >=4.6.9
        # Those variables seem to be used for dev mode only
        # https://github.com/conda/conda/pull/8435/commits/9a4589a1fabc66c122fb0c20fa1f3aa2c8b1758b
        "_CE_CONDA",
        "_CE_M",
    ]
    if sys.platform != "win32":
        variables_to_ignore.extend(["PS1", "SHLVL"])
    else:
        variables_to_ignore.extend(
            [
                "CONDA_ROOT",
                "PROMPT",
                # Only present on conda >=4.5
                "PYTHONIOENCODING",
            ]
        )
    for variable_name in variables_to_ignore:
        environ_from_activate.pop(variable_name, None)
        environ_from_wrapper.pop(variable_name, None)
    assert environ_from_activate == environ_from_wrapper
def _activate_virtualenv_script():
if sys.platform == "win32":
return """@echo off
call "virtual envs\\test\\Scripts\\activate.bat"
"""
else:
return """#!/usr/bin/env bash
source 'virtual envs/test/bin/activate'
"""
def _activate_conda_script():
if sys.platform == "win32":
return """@echo off
@for /F %%i in ('conda info --root') do @set "CONDA_ROOT=%%i"
call "%CONDA_ROOT%\\Scripts\\activate.bat" "conda envs\\test"
"""
else:
return """#!/usr/bin/env bash
source "$(conda info --root)/bin/activate" "conda envs/test"
"""
def _environ_from_wrapper():
    """Run the generated python wrapper and return the environ dict it prints.

    The wrapped interpreter exits with code 42 on purpose so we also verify
    that the wrapper propagates the child's exit status.
    """
    wrapper = os.path.normpath("wrappers/python") + get_wrapper_extension()
    with pytest.raises(subprocess.CalledProcessError) as exc_info:
        subprocess.check_output(
            [
                wrapper,
                "-c",
                "from os import environ; print(dict(environ))"
                "; import sys; sys.exit(42)",
            ]
        )
    assert exc_info.value.returncode == 42
    # The child printed repr(dict); eval() turns it back into a dict.
    return eval(exc_info.value.output)
def _environ_from_activate(activate_script, tmpdir):
    """Run ``activate_script`` then a python that dumps os.environ; return it.

    ``tmpdir`` is a py.path.local; the generated script is made executable
    before being run.
    """
    script_file = tmpdir.join("environ-from-activate") + get_wrapper_extension()
    script_file.write(
        """%s
python -c "from os import environ; print(dict(environ))"
"""
        % activate_script
    )
    # Owner-executable so check_output can run it directly.
    script_file.chmod(script_file.stat().mode | stat.S_IXUSR)
    output = subprocess.check_output(str(script_file))
    # The child printed repr(dict); eval() turns it back into a dict.
    return eval(output)
| |
import sys
import os
from mock import patch, MagicMock
import unittest
from subprocess import PIPE
# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
import firmware
from firmware.firmware import FirmwareUpdater, MacFirmwareUpdater, LinuxFirmwareUpdater, WindowsFirmwareUpdater
@patch('firmware.sys')
class TestFirmwareInit(unittest.TestCase):
    """get_firmware_updater() must dispatch on sys.platform.

    NOTE: assertEquals is a deprecated alias (removed in Python 3.12);
    use assertEqual.
    """

    def test_correct_class_for_mac_platform_is_provided(self, mock_sys):
        mock_sys.platform = 'darwin'
        result = firmware.get_firmware_updater()
        self.assertEqual(MacFirmwareUpdater, type(result))

    def test_correct_class_for_win32_platform_is_provided(self, mock_sys):
        mock_sys.platform = 'win32'
        result = firmware.get_firmware_updater()
        self.assertEqual(WindowsFirmwareUpdater, type(result))

    def test_correct_class_for_win64_platform_is_provided(self, mock_sys):
        mock_sys.platform = 'winamd64'
        result = firmware.get_firmware_updater()
        self.assertEqual(WindowsFirmwareUpdater, type(result))

    def test_correct_class_for_linux_platform_is_provided(self, mock_sys):
        mock_sys.platform = 'linux'
        result = firmware.get_firmware_updater()
        self.assertEqual(LinuxFirmwareUpdater, type(result))

    def test_exception_raised_if_not_supported(self, mock_sys):
        # Unsupported platforms must fail loudly rather than return None.
        mock_sys.platform = 'sun'
        with self.assertRaises(Exception):
            firmware.get_firmware_updater()
@patch('firmware.firmware.Popen')
@patch('firmware.os.path.isfile')
@patch('firmware.os.stat')
@patch('firmware.os.chmod')
class TestLinuxFirmwareUpdater(unittest.TestCase):
    """Tests for LinuxFirmwareUpdater with Popen and the os calls mocked.

    Decorators apply bottom-up, so the mock arguments arrive in the order
    chmod, stat, isfile, Popen.
    """
    # USB vendor/product ids: STM32 DFU bootloader and the Peachy device.
    BOOTLOADER_IDVENDOR = 0x0483
    BOOTLOADER_IDPRODUCT = 0xdf11
    PEACHY_IDVENDOR = 0x16d0
    PEACHY_IDPRODUCT = 0x0af3

    def setUp(self):
        self.bin_path = os.path.join('some','binary', 'path')
        self.firmware_path = os.path.join('some', 'firmware', 'path.bin')

    def test_update_should_return_true_if_update_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_isfile.return_value = True
        mock_Popen.return_value.communicate.return_value = ('err', 'out')
        # dfu-util exit code 0 means the flash succeeded.
        mock_Popen.return_value.wait.return_value = 0
        usb_addess = '{}:{}'.format('0483', 'df11')
        expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_addess]
        l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = l_fw_up.update(self.firmware_path)
        self.assertTrue(result)
        mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
        mock_Popen.return_value.wait.assert_called_with()

    def test_update_should_return_false_if_update_not_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_isfile.return_value = True
        mock_Popen.return_value.communicate.return_value = ('err', 'out')
        # Non-zero exit code from dfu-util means the flash failed.
        mock_Popen.return_value.wait.return_value = 34
        usb_addess = '{}:{}'.format('0483', 'df11')
        expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_addess]
        l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = l_fw_up.update(self.firmware_path)
        self.assertFalse(result)
        mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
        mock_Popen.return_value.wait.assert_called_with()

    def test_check_ready_should_return_true_if_1_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # lsusb output shows exactly one bootloader -> ready.
        mock_Popen.return_value.communicate.return_value = ('{:04x}:{:04x}'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = fw_up.check_ready()
        self.assertTrue(result)
        mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_return_False_if_no_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_Popen.return_value.communicate.return_value = ('', '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = fw_up.check_ready()
        self.assertFalse(result)
        mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_return_False_if_only_peachy_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # Device still in normal (Peachy) mode, not in the bootloader -> not ready.
        mock_Popen.return_value.communicate.return_value = ('{:04x}:{:04x}'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = fw_up.check_ready()
        self.assertFalse(result)
        mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_raise_exception_if_peachy_and_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # Ambiguous bus state (both device modes present) must raise.
        mock_Popen.return_value.communicate.return_value = ('{:04x}:{:04x}\n{:04x}:{:04x}'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        with self.assertRaises(Exception):
            fw_up.check_ready()
        mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_raise_exception_if_multipule_peachys(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_Popen.return_value.communicate.return_value = ('{0:04x}:{1:04x}\n{0:04x}:{1:04x}'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        with self.assertRaises(Exception):
            fw_up.check_ready()
        mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_raise_exception_if_multipule_bootloaders(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_Popen.return_value.communicate.return_value = ('{0:04x}:{1:04x}\n{0:04x}:{1:04x}'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        with self.assertRaises(Exception):
            fw_up.check_ready()
        mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
@patch('firmware.firmware.Popen')
@patch('firmware.os.path.isfile')
@patch('firmware.os.stat')
@patch('firmware.os.chmod')
class TestWindowsFirmwareUpdater(unittest.TestCase):
    """Tests for WindowsFirmwareUpdater.check_ready().

    check_ready() shells out to wmic.exe and counts attached Peachy
    printers vs. DFU bootloader devices by their USB vendor/product ids.
    NOTE(review): every test method takes a trailing mock_Popen argument
    beyond the three class-level patches above, so a @patch of
    firmware.Popen is presumably applied outside this excerpt -- confirm
    against the full file.
    """

    # USB ids reported by the device in DFU bootloader mode (ST DfuSe).
    BOOTLOADER_IDVENDOR = 0x0483
    BOOTLOADER_IDPRODUCT = 0xdf11
    # USB ids reported by a Peachy printer in normal operation.
    PEACHY_IDVENDOR = 0x16d0
    PEACHY_IDPRODUCT = 0x0af3

    def setUp(self):
        # Paths only used by the (currently disabled) update() tests below.
        self.bin_path = os.path.join('some','binary', 'path')
        self.firmware_path = os.path.join('some', 'firmware', 'path.bin')

    # NOTE(review): the two disabled tests below exercise
    # LinuxFirmwareUpdater, not the Windows updater under test -- they look
    # copied from the Linux suite. Consider deleting or porting them.
    # def test_update_should_return_true_if_update_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
    #     mock_isfile.return_value = True
    #     mock_Popen.return_value.communicate.return_value = ('err', 'out')
    #     mock_Popen.return_value.wait.return_value = 0
    #     usb_addess = '{}:{}'.format('0483', 'df11')
    #     expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_addess]
    #     l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
    #     result = l_fw_up.update(self.firmware_path)
    #     self.assertTrue(result)
    #     mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
    #     mock_Popen.return_value.wait.assert_called_with()

    # def test_update_should_return_false_if_update_not_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
    #     mock_isfile.return_value = True
    #     mock_Popen.return_value.communicate.return_value = ('err', 'out')
    #     mock_Popen.return_value.wait.return_value = 34
    #     usb_addess = '{}:{}'.format('0483', 'df11')
    #     expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_addess]
    #     l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
    #     result = l_fw_up.update(self.firmware_path)
    #     self.assertFalse(result)
    #     mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
    #     mock_Popen.return_value.wait.assert_called_with()

    def test_check_ready_should_return_true_if_1_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # wmic reports exactly one bootloader device -> ready to flash.
        mock_Popen.return_value.communicate.return_value = ('"USB\VID_{:04X}&PID_{:04X}"'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = fw_up.check_ready()
        self.assertTrue(result)
        mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_return_False_if_no_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # No USB devices at all -> not ready.
        mock_Popen.return_value.communicate.return_value = ('', '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = fw_up.check_ready()
        self.assertFalse(result)
        mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_return_False_if_only_peachy_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # A printer in normal (non-DFU) mode is not flashable.
        mock_Popen.return_value.communicate.return_value = ('"USB\VID_{:04X}&PID_{:04X}"'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        result = fw_up.check_ready()
        self.assertFalse(result)
        mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_raise_exception_if_peachy_and_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # Ambiguous state: both a printer and a bootloader attached.
        mock_Popen.return_value.communicate.return_value = ('"USB\VID_{:04X}&PID_{:04X}"\n"USB\VID_{:04X}&PID_{:04X}"'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        with self.assertRaises(Exception):
            fw_up.check_ready()
        mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_raise_exception_if_multipule_peachys(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # More than one printer attached: cannot tell which to update.
        mock_Popen.return_value.communicate.return_value = ('"USB\VID_{0:04X}&PID_{1:04X}"\n"USB\VID_{0:04X}&PID_{1:04X}"'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        with self.assertRaises(Exception):
            fw_up.check_ready()
        mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)

    def test_check_ready_should_raise_exception_if_multipule_bootloaders(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        # More than one bootloader attached: cannot tell which to flash.
        mock_Popen.return_value.communicate.return_value = ('"USB\VID_{0:04X}&PID_{1:04X}"\n"USB\VID_{0:04X}&PID_{1:04X}"'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
        mock_Popen.return_value.wait.return_value = 0
        fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
        with self.assertRaises(Exception):
            fw_up.check_ready()
        mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the BSON corpus specification tests."""
import binascii
import codecs
import functools
import glob
import os
import sys
if sys.version_info[:2] == (2, 6):
try:
import simplejson as json
except ImportError:
import json
else:
import json
sys.path[0:0] = [""]
from bson import BSON, EPOCH_AWARE, json_util
from bson.binary import STANDARD
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.errors import InvalidBSON
from bson.py3compat import text_type, b
from bson.son import SON
from test import unittest
# Directory holding the JSON case files of the BSON corpus specification.
_TEST_PATH = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'bson_corpus')

# BSON types that cannot be round-tripped; for these we only assert that
# decoding yields the listed Python type.
_DEPRECATED_BSON_TYPES = {
    # Symbol
    '0x0E': text_type,
    # Undefined
    '0x06': type(None),
    # DBPointer
    '0x0C': DBRef
}

# Need to set tz_aware=True in order to use "strict" dates in extended JSON.
codec_options = CodecOptions(tz_aware=True, document_class=SON)
# We normally encode UUID as binary subtype 0x03,
# but we'll need to encode to subtype 0x04 for one of the tests.
codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD)
json_options_uuid_04 = json_util.JSONOptions(
    strict_number_long=True,
    strict_uuid=True,
    datetime_representation=json_util.DatetimeRepresentation.NUMBERLONG,
    uuid_representation=STANDARD)
json_options_iso8601 = json_util.JSONOptions(
    datetime_representation=json_util.DatetimeRepresentation.ISO8601)

# Encoder/decoder helpers pre-bound to the option sets above.
to_extjson = functools.partial(
    json_util.dumps, json_options=json_util.CANONICAL_JSON_OPTIONS)
to_extjson_uuid_04 = functools.partial(json_util.dumps,
                                       json_options=json_options_uuid_04)
to_extjson_iso8601 = functools.partial(json_util.dumps,
                                       json_options=json_options_iso8601)
to_bson_uuid_04 = functools.partial(BSON.encode,
                                    codec_options=codec_options_uuid_04)
to_bson = functools.partial(BSON.encode, codec_options=codec_options)
decode_bson = lambda bbytes: BSON(bbytes).decode(codec_options=codec_options)

if json_util._HAS_OBJECT_PAIRS_HOOK:
    # Preserve key order while parsing JSON (requires object_pairs_hook,
    # unavailable on some Python 2.6 json implementations).
    decode_extjson = functools.partial(
        json_util.loads,
        json_options=json_util.JSONOptions(canonical_extended_json=True,
                                           document_class=SON))
    loads = functools.partial(json.loads, object_pairs_hook=SON)
else:
    decode_extjson = functools.partial(
        json_util.loads,
        json_options=json_util.CANONICAL_JSON_OPTIONS)
    loads = json.loads
class TestBSONCorpus(unittest.TestCase):
    """Corpus tests; per-file test methods are attached by create_tests()."""

    def test_all_bson_types(self):
        # Because we can't round-trip all BSON types (see _DEPRECATED_BSON_TYPES
        # above for how these are handled), make this test a special case,
        # instead of mangling our create_test function below.
        with open(os.path.join(_TEST_PATH, 'multi-type.json')) as spec_file:
            case_spec = json.load(spec_file)
        for valid_case in case_spec.get('valid', []):
            B = binascii.unhexlify(b(valid_case['bson']))
            E = valid_case['extjson']
            # Make sure that the BSON and JSON decode to the same document.
            self.assertEqual(
                json_util.loads(
                    E, json_options=json_util.CANONICAL_JSON_OPTIONS),
                BSON(B).decode(
                    codec_options=CodecOptions(
                        document_class=SON, tz_aware=True)))
def create_test(case_spec):
    """Build a test method from one BSON corpus JSON case specification.

    The returned function iterates the spec's 'valid' cases (round-trip
    BSON and extended-JSON checks) and its 'decodeErrors' cases (must
    raise InvalidBSON).
    """
    bson_type = case_spec['bson_type']
    # Test key is absent when testing top-level documents.
    test_key = case_spec.get('test_key')

    def run_test(self):
        for valid_case in case_spec.get('valid', []):
            # Special case for testing encoding UUID as binary subtype 0x04.
            if valid_case['description'] == 'subtype 0x04':
                encode_extjson = to_extjson_uuid_04
                encode_bson = to_bson_uuid_04
            else:
                encode_extjson = to_extjson
                encode_bson = to_bson
            # B: raw BSON bytes; cB: canonical BSON form (defaults to B).
            B = binascii.unhexlify(b(valid_case['bson']))
            if 'canonical_bson' in valid_case:
                cB = binascii.unhexlify(b(valid_case['canonical_bson']))
            else:
                cB = B
            if bson_type in _DEPRECATED_BSON_TYPES:
                # Just make sure we can decode the type.
                self.assertIsInstance(
                    decode_bson(B)[test_key], _DEPRECATED_BSON_TYPES[bson_type])
                if B != cB:
                    self.assertIsInstance(
                        decode_bson(cB)[test_key],
                        _DEPRECATED_BSON_TYPES[bson_type])
            # PyPy3 and Jython can't handle NaN with a payload from
            # struct.(un)pack if endianness is specified in the format string.
            elif not ((('PyPy' in sys.version and
                        sys.version_info[:2] < (3, 3)) or
                       sys.platform.startswith("java")) and
                      valid_case['description'] == 'NaN with payload'):
                # Test round-tripping encoding/decoding the type.
                self.assertEqual(encode_bson(decode_bson(B)), cB)
                if B != cB:
                    self.assertEqual(
                        encode_bson(decode_bson(cB)), cB)
            if 'extjson' in valid_case:
                # E: extended JSON; cE: canonical extended JSON (defaults to E).
                E = valid_case['extjson']
                cE = valid_case.get('canonical_extjson', E)
                if bson_type in _DEPRECATED_BSON_TYPES:
                    # Just make sure that we can parse the extended JSON.
                    self.assertIsInstance(
                        decode_extjson(E)[test_key],
                        _DEPRECATED_BSON_TYPES[bson_type])
                    if E != cE:
                        self.assertIsInstance(
                            decode_extjson(cE)[test_key],
                            _DEPRECATED_BSON_TYPES[bson_type])
                    continue
                # Normalize extended json by parsing it with the built-in
                # json library. This accounts for discrepancies in spacing.
                # Key ordering is preserved when possible.
                normalized_cE = loads(cE)
                self.assertEqual(
                    loads(encode_extjson(decode_bson(B))),
                    normalized_cE)
                self.assertEqual(
                    loads(encode_extjson(decode_extjson(E))),
                    normalized_cE)
                if bson_type == '0x09':
                    # Test datetime can output ISO8601 to match extjson or
                    # $numberLong to match canonical_extjson if the datetime
                    # is pre-epoch.
                    if decode_extjson(E)[test_key] >= EPOCH_AWARE:
                        normalized_date = loads(E)
                    else:
                        normalized_date = normalized_cE
                    self.assertEqual(
                        loads(to_extjson_iso8601(decode_extjson(cE))),
                        normalized_date)
                if B != cB:
                    self.assertEqual(
                        loads(encode_extjson(decode_bson(cB))),
                        normalized_cE)
                if E != cE:
                    self.assertEqual(
                        loads(encode_extjson(decode_extjson(cE))),
                        normalized_cE)
                if 'lossy' not in valid_case:
                    # Skip tests for document type in Python 2.6 that have
                    # multiple keys, since we can't control key ordering when
                    # parsing JSON.
                    if json_util._HAS_OBJECT_PAIRS_HOOK or not (
                            sys.version_info[:2] == (2, 6) and
                            bson_type in ('0x03', '0x00') and
                            len(decode_extjson(E)) > 1):
                        self.assertEqual(encode_bson(decode_extjson(E)), cB)
                        if E != cE:
                            self.assertEqual(
                                encode_bson(decode_extjson(cE)),
                                cB)
        for decode_error_case in case_spec.get('decodeErrors', []):
            # Malformed BSON documents must raise InvalidBSON when decoded.
            with self.assertRaises(InvalidBSON):
                decode_bson(
                    binascii.unhexlify(b(decode_error_case['bson'])))
    return run_test
def create_tests():
    """Attach one generated test method per corpus JSON file to TestBSONCorpus."""
    for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')):
        # Use the file's base name as the test name suffix.
        test_suffix, _ = os.path.splitext(os.path.basename(filename))
        if test_suffix == 'multi-type':
            # Special case in TestBSONCorpus.
            continue
        with codecs.open(filename, encoding='utf-8') as bson_test_file:
            test_method = create_test(json.load(bson_test_file))
        setattr(TestBSONCorpus, 'test_' + test_suffix, test_method)


create_tests()

if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/python
import hashlib
import optparse
import os
import re
import shlex
import subprocess
import sys
import threading
import time
# Task type labels. TASK_INSTRUMENTATION is dispatched specially in the main
# loop; all labels double as the human-readable type column in the output.
TASK_COMPILATION = 'compile'
TASK_DISABLE_OVERLAYS = 'disable overlays'
TASK_ENABLE_MULTIPLE_OVERLAYS = 'enable multiple overlays'
TASK_ENABLE_SINGLE_OVERLAY = 'enable single overlay'
TASK_FILE_EXISTS_TEST = 'test (file exists)'
TASK_GREP_IDMAP_TEST = 'test (grep idmap)'
TASK_MD5_TEST = 'test (md5)'
TASK_IDMAP_PATH = 'idmap --path'
TASK_IDMAP_SCAN = 'idmap --scan'
TASK_INSTRUMENTATION = 'instrumentation'
TASK_INSTRUMENTATION_TEST = 'test (instrumentation)'
TASK_MKDIR = 'mkdir'
TASK_PUSH = 'push'
TASK_ROOT = 'root'
TASK_REMOUNT = 'remount'
TASK_RM = 'rm'
TASK_SETUP_IDMAP_PATH = 'setup idmap --path'
TASK_SETUP_IDMAP_SCAN = 'setup idmap --scan'
TASK_START = 'start'
TASK_STOP = 'stop'

# adb command prefix; may gain -d/-e/-s via the _set_adb_device callback.
adb = 'adb'
def _adb_shell(cmd):
    """Run `cmd` on the device via `adb shell`.

    Historic adb does not propagate the remote command's exit status, so
    `echo $?` is appended and the status line is parsed back out of stdout.

    Returns:
        (returncode, stdout, stderr) where returncode is the remote
        command's exit status and stdout excludes the status line.
    """
    argv = shlex.split(adb + " shell '" + cmd + "; echo $?'")
    proc = subprocess.Popen(argv, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    # adb terminates lines with \r\n; normalize to plain \n.
    (stdout, stderr) = (stdout.replace('\r', ''), stderr.replace('\r', ''))
    # stdout now looks like "<output>\n<status>\n"; peel off the status line.
    tmp = stdout.rsplit('\n', 2)
    if len(tmp) == 2:
        # Command produced no output: tmp == ['<status>', ''].
        # BUG FIX: original used `stdout == ''` (a no-op comparison) instead
        # of an assignment, leaking the status line into stdout.
        stdout = ''
        returncode = int(tmp[0])
    else:
        stdout = tmp[0] + '\n'
        returncode = int(tmp[1])
    return returncode, stdout, stderr
class VerbosePrinter:
    """Printer with optional ANSI color and an animated per-task spinner."""

    class Ticker(threading.Thread):
        """Daemon thread that animates a [....] spinner while a task runs."""

        def _print(self):
            # Redraw the status line in place: dots cycle 0..4 inside a
            # fixed-width bracket so the line length never changes.
            s = '\r' + self.text + '[' + '.' * self.i + ' ' * (4 - self.i) + ']'
            sys.stdout.write(s)
            sys.stdout.flush()
            self.i = (self.i + 1) % 5

        def __init__(self, cond_var, text):
            threading.Thread.__init__(self)
            self.text = text
            # Daemon thread: never blocks interpreter exit.
            self.setDaemon(True)
            self.cond_var = cond_var
            self.running = False
            self.i = 0
            self._print()
            self.running = True

        def run(self):
            # Redraw roughly four times a second until stop() clears
            # self.running; the condition variable lets stop() wake us early.
            self.cond_var.acquire()
            while True:
                self.cond_var.wait(0.25)
                running = self.running
                if not running:
                    break
                self._print()
            self.cond_var.release()

        def stop(self):
            self.cond_var.acquire()
            self.running = False
            self.cond_var.notify_all()
            self.cond_var.release()

    def _start_ticker(self):
        self.ticker = VerbosePrinter.Ticker(self.cond_var, self.text)
        self.ticker.start()

    def _stop_ticker(self):
        self.ticker.stop()
        self.ticker.join()
        self.ticker = None

    def _format_begin(self, type, name):
        # Pad the name so the trailing [ .. ] status column lines up.
        N = self.width - len(type) - len(' [ ] ')
        fmt = '%%s %%-%ds ' % N
        return fmt % (type, name)

    def __init__(self, use_color):
        self.cond_var = threading.Condition()
        self.ticker = None
        if use_color:
            # Bright red for the FAIL tag, dim red for the error message.
            self.color_RED = '\033[1;31m'
            self.color_red = '\033[0;31m'
            self.color_reset = '\033[0;37m'
        else:
            self.color_RED = ''
            self.color_red = ''
            self.color_reset = ''
        argv = shlex.split('stty size')  # get terminal width
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        if proc.returncode == 0:
            (h, w) = stdout.split()
            self.width = int(w)
        else:
            self.width = 72  # conservative guesstimate

    def begin(self, type, name):
        # Print the task line with an empty status box and start animating.
        self.text = self._format_begin(type, name)
        sys.stdout.write(self.text + '[ ]')
        sys.stdout.flush()
        self._start_ticker()

    def end_pass(self, type, name):
        self._stop_ticker()
        sys.stdout.write('\r' + self.text + '[ OK ]\n')
        sys.stdout.flush()

    def end_fail(self, type, name, msg):
        self._stop_ticker()
        sys.stdout.write('\r' + self.color_RED + self.text + '[FAIL]\n')
        sys.stdout.write(self.color_red)
        sys.stdout.write(msg)
        sys.stdout.write(self.color_reset)
        sys.stdout.flush()
class QuietPrinter:
    """Printer that emits only one-line PASS/FAIL summaries per task."""

    def _report(self, verdict, type, name):
        # Shared formatting for both outcomes; flush so the result appears
        # immediately even when stdout is a pipe.
        sys.stdout.write('%s %s %s\n' % (verdict, type, name))
        sys.stdout.flush()

    def begin(self, type, name):
        # Intentionally silent: quiet mode shows no in-progress state.
        pass

    def end_pass(self, type, name):
        self._report('PASS', type, name)

    def end_fail(self, type, name, msg):
        # msg is discarded in quiet mode; only the verdict line is shown.
        self._report('FAIL', type, name)
class CompilationTask:
    """Build one test project via the Android build system."""

    def __init__(self, makefile):
        self.makefile = makefile

    def get_type(self):
        return TASK_COMPILATION

    def get_name(self):
        return self.makefile

    def execute(self):
        # ONE_SHOT_MAKEFILE limits the build to this project's Android.mk.
        # The main script chdirs to its own directory first, so getcwd()
        # is stable here.
        os.putenv('ONE_SHOT_MAKEFILE', os.getcwd() + "/" + self.makefile)
        argv = shlex.split('make -C "../../../../../" files')
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        return proc.returncode, stdout, stderr


class InstrumentationTask:
    """Run an instrumentation test class on the device via `am instrument`."""

    def __init__(self, instrumentation_class):
        self.instrumentation_class = instrumentation_class

    def get_type(self):
        return TASK_INSTRUMENTATION

    def get_name(self):
        return self.instrumentation_class

    def execute(self):
        # -r requests raw per-test status lines, parsed later by
        # _handle_instrumentation_task_output; -w waits for completion.
        return _adb_shell('am instrument -r -w -e class %s com.android.overlaytest/android.test.InstrumentationTestRunner' % self.instrumentation_class)


class PushTask:
    """Push a file from the local build output ($OUT) to the device."""

    def __init__(self, src, dest):
        # src is relative to $OUT; dest is an absolute device path.
        self.src = src
        self.dest = dest

    def get_type(self):
        return TASK_PUSH

    def get_name(self):
        return "%s -> %s" % (self.src, self.dest)

    def execute(self):
        src = os.getenv('OUT') + "/" + self.src
        argv = shlex.split(adb + ' push %s %s' % (src, self.dest))
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        return proc.returncode, stdout, stderr


class MkdirTask:
    """Create a directory (and any missing parents) on the device."""

    def __init__(self, path):
        self.path = path

    def get_type(self):
        return TASK_MKDIR

    def get_name(self):
        return self.path

    def execute(self):
        return _adb_shell('mkdir -p %s' % self.path)


class RmTask:
    """Recursively remove a device path; a missing path counts as success."""

    def __init__(self, path):
        self.path = path

    def get_type(self):
        return TASK_RM

    def get_name(self):
        return self.path

    def execute(self):
        # Probe first so removing an already-absent path is not an error.
        returncode, stdout, stderr = _adb_shell('ls %s' % self.path)
        if returncode != 0 and stdout.endswith(': No such file or directory\n'):
            return 0, "", ""
        return _adb_shell('rm -r %s' % self.path)


class IdmapPathTask:
    """Invoke `idmap --path` for one target/overlay pair as the system user."""

    def __init__(self, path_target_apk, path_overlay_apk, path_idmap):
        self.path_target_apk = path_target_apk
        self.path_overlay_apk = path_overlay_apk
        self.path_idmap = path_idmap

    def get_type(self):
        return TASK_IDMAP_PATH

    def get_name(self):
        return self.path_idmap

    def execute(self):
        return _adb_shell('su system idmap --path "%s" "%s" "%s"' % (self.path_target_apk, self.path_overlay_apk, self.path_idmap))


class IdmapScanTask:
    """Invoke `idmap --scan` over an overlay directory as the system user."""

    def __init__(self, overlay_dir, target_pkg_name, target_pkg, idmap_dir, symlink_dir):
        self.overlay_dir = overlay_dir
        self.target_pkg_name = target_pkg_name
        self.target_pkg = target_pkg
        self.idmap_dir = idmap_dir
        # NOTE(review): symlink_dir is stored but never used by execute();
        # confirm whether it should be passed to `idmap --scan`.
        self.symlink_dir = symlink_dir

    def get_type(self):
        return TASK_IDMAP_SCAN

    def get_name(self):
        return self.target_pkg_name

    def execute(self):
        return _adb_shell('su system idmap --scan "%s" "%s" "%s" "%s"' % (self.overlay_dir, self.target_pkg_name, self.target_pkg, self.idmap_dir))
class FileExistsTest:
    """Pass iff the given device path exists (checked with `ls`)."""

    def __init__(self, path):
        self.path = path

    def get_type(self):
        return TASK_FILE_EXISTS_TEST

    def get_name(self):
        return self.path

    def execute(self):
        # `ls` exits non-zero when the path is missing.
        return _adb_shell('ls %s' % self.path)


class GrepIdmapTest:
    """Pass iff `idmap --inspect` output contains `pattern` exactly expected_n times."""

    def __init__(self, path_idmap, pattern, expected_n):
        self.path_idmap = path_idmap
        self.pattern = pattern
        self.expected_n = expected_n

    def get_type(self):
        return TASK_GREP_IDMAP_TEST

    def get_name(self):
        return self.pattern

    def execute(self):
        returncode, stdout, stderr = _adb_shell('idmap --inspect %s' % self.path_idmap)
        if returncode != 0:
            return returncode, stdout, stderr
        # Match the pattern only when it ends a line, preceded by whitespace.
        all_matches = re.findall('\s' + self.pattern + '$', stdout, flags=re.MULTILINE)
        if len(all_matches) != self.expected_n:
            return 1, 'pattern=%s idmap=%s expected=%d found=%d\n' % (self.pattern, self.path_idmap, self.expected_n, len(all_matches)), ''
        return 0, "", ""


class Md5Test:
    """Pass iff the md5 of a device file equals that of expected_content."""

    def __init__(self, path, expected_content):
        self.path = path
        # Hash the expected content up front so execute() only compares
        # hex digests. (Python 2 script: hashing a str is fine here.)
        self.expected_md5 = hashlib.md5(expected_content).hexdigest()

    def get_type(self):
        return TASK_MD5_TEST

    def get_name(self):
        return self.path

    def execute(self):
        returncode, stdout, stderr = _adb_shell('md5sum %s' % self.path)
        if returncode != 0:
            return returncode, stdout, stderr
        # md5sum output is "<digest>  <path>"; keep only the digest.
        actual_md5 = stdout.split()[0]
        if actual_md5 != self.expected_md5:
            return 1, 'expected %s, got %s\n' % (self.expected_md5, actual_md5), ''
        return 0, "", ""
class StartTask:
    """Start the Android framework and block until boot completes."""

    def get_type(self):
        return TASK_START

    def get_name(self):
        return ""

    def execute(self):
        (returncode, stdout, stderr) = _adb_shell('start')
        if returncode != 0:
            return returncode, stdout, stderr
        # Poll until the framework reports boot complete.
        while True:
            (returncode, stdout, stderr) = _adb_shell('getprop dev.bootcomplete')
            if returncode != 0:
                return returncode, stdout, stderr
            if stdout.strip() == "1":
                break
            time.sleep(0.5)
        return 0, "", ""


class StopTask:
    """Stop the Android framework and clear the boot-complete flag."""

    def get_type(self):
        return TASK_STOP

    def get_name(self):
        return ""

    def execute(self):
        (returncode, stdout, stderr) = _adb_shell('stop')
        if returncode != 0:
            return returncode, stdout, stderr
        # Reset the flag so a subsequent StartTask can wait on it again.
        return _adb_shell('setprop dev.bootcomplete 0')


class RootTask:
    """Restart adbd with root permissions (no-op if already running as root)."""

    def get_type(self):
        return TASK_ROOT

    def get_name(self):
        return ""

    def execute(self):
        # getprop's second argument is the default if the property is unset.
        (returncode, stdout, stderr) = _adb_shell('getprop service.adb.root 0')
        if returncode != 0:
            return returncode, stdout, stderr
        if stdout.strip() == '1':  # already root
            return 0, "", ""
        # `adb root` restarts adbd; wait for the device to reconnect.
        argv = shlex.split(adb + ' root')
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        if proc.returncode != 0:
            return proc.returncode, stdout, stderr
        argv = shlex.split(adb + ' wait-for-device')
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        return proc.returncode, stdout, stderr


class RemountTask:
    """Remount the device's system partition read-write via `adb remount`."""

    def get_type(self):
        return TASK_REMOUNT

    def get_name(self):
        return ""

    def execute(self):
        argv = shlex.split(adb + ' remount')
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        # adb remount returns 0 even if the operation failed, so check stdout
        if stdout.startswith('remount failed:'):
            return 1, stdout, stderr
        return proc.returncode, stdout, stderr
class CompoundTask:
    """A task built from sub-tasks, executed in order until one fails."""

    def __init__(self, type, tasks):
        self.type = type
        self.tasks = tasks

    def get_type(self):
        return self.type

    def get_name(self):
        return ""

    def execute(self):
        # Run each sub-task in sequence; stop at and propagate the first
        # non-zero result, otherwise report overall success.
        for task in self.tasks:
            result = task.execute()
            if result[0] != 0:
                return result
        return 0, "", ""
def _create_disable_overlays_task():
    """Remove all test overlays and their cached idmaps from the device."""
    tasks = [
        RmTask("/vendor/overlay/framework_a.apk"),
        RmTask("/vendor/overlay/framework_b.apk"),
        RmTask("/data/resource-cache/vendor@overlay@framework_a.apk@idmap"),
        RmTask("/data/resource-cache/vendor@overlay@framework_b.apk@idmap"),
        RmTask("/vendor/overlay/app_a.apk"),
        RmTask("/vendor/overlay/app_b.apk"),
        RmTask("/data/resource-cache/vendor@overlay@app_a.apk@idmap"),
        RmTask("/data/resource-cache/vendor@overlay@app_b.apk@idmap"),
    ]
    return CompoundTask(TASK_DISABLE_OVERLAYS, tasks)


def _create_enable_single_overlay_task():
    """Install one framework overlay and one app overlay (clean slate first)."""
    tasks = [
        _create_disable_overlays_task(),
        MkdirTask('/system/vendor'),
        MkdirTask('/vendor/overlay'),
        PushTask('/data/app/com.android.overlaytest.overlay/com.android.overlaytest.overlay.apk', '/vendor/overlay/framework_a.apk'),
        PushTask('/data/app/com.android.overlaytest.first_app_overlay/com.android.overlaytest.first_app_overlay.apk', '/vendor/overlay/app_a.apk'),
    ]
    return CompoundTask(TASK_ENABLE_SINGLE_OVERLAY, tasks)


def _create_enable_multiple_overlays_task():
    """Install a framework overlay plus two app overlays (clean slate first)."""
    tasks = [
        _create_disable_overlays_task(),
        MkdirTask('/system/vendor'),
        MkdirTask('/vendor/overlay'),
        PushTask('/data/app/com.android.overlaytest.overlay/com.android.overlaytest.overlay.apk', '/vendor/overlay/framework_b.apk'),
        PushTask('/data/app/com.android.overlaytest.first_app_overlay/com.android.overlaytest.first_app_overlay.apk', '/vendor/overlay/app_a.apk'),
        PushTask('/data/app/com.android.overlaytest.second_app_overlay/com.android.overlaytest.second_app_overlay.apk', '/vendor/overlay/app_b.apk'),
    ]
    return CompoundTask(TASK_ENABLE_MULTIPLE_OVERLAYS, tasks)


def _create_setup_idmap_path_task(idmaps, symlinks):
    """Prepare clean idmap/symlink scratch dirs for the `idmap --path` test."""
    tasks = [
        _create_enable_single_overlay_task(),
        RmTask(symlinks),
        RmTask(idmaps),
        MkdirTask(idmaps),
        MkdirTask(symlinks),
    ]
    return CompoundTask(TASK_SETUP_IDMAP_PATH, tasks)


def _create_setup_idmap_scan_task(idmaps, symlinks):
    """Prepare scratch dirs and multiple overlays for the `idmap --scan` test."""
    tasks = [
        _create_enable_single_overlay_task(),
        RmTask(symlinks),
        RmTask(idmaps),
        MkdirTask(idmaps),
        MkdirTask(symlinks),
        _create_enable_multiple_overlays_task(),
    ]
    return CompoundTask(TASK_SETUP_IDMAP_SCAN, tasks)
def _handle_instrumentation_task_output(stdout, printer):
regex_status_code = re.compile(r'^INSTRUMENTATION_STATUS_CODE: -?(\d+)')
regex_name = re.compile(r'^INSTRUMENTATION_STATUS: test=(.*)')
regex_begin_stack = re.compile(r'^INSTRUMENTATION_STATUS: stack=(.*)')
regex_end_stack = re.compile(r'^$')
failed_tests = 0
current_test = None
current_stack = []
mode_stack = False
for line in stdout.split("\n"):
line = line.rstrip() # strip \r from adb output
m = regex_status_code.match(line)
if m:
c = int(m.group(1))
if c == 1:
printer.begin(TASK_INSTRUMENTATION_TEST, current_test)
elif c == 0:
printer.end_pass(TASK_INSTRUMENTATION_TEST, current_test)
else:
failed_tests += 1
current_stack.append("\n")
msg = "\n".join(current_stack)
printer.end_fail(TASK_INSTRUMENTATION_TEST, current_test, msg.rstrip() + '\n')
continue
m = regex_name.match(line)
if m:
current_test = m.group(1)
continue
m = regex_begin_stack.match(line)
if m:
mode_stack = True
current_stack = []
current_stack.append(" " + m.group(1))
continue
m = regex_end_stack.match(line)
if m:
mode_stack = False
continue
if mode_stack:
current_stack.append(" " + line.strip())
return failed_tests
def _set_adb_device(option, opt, value, parser):
    """optparse callback: point the module-global `adb` command at a device.

    -d/--device targets the single USB device, -e/--emulator the single
    running emulator, and -s/--serial a device by serial number.
    """
    global adb
    if opt in ('-d', '--device'):
        adb = 'adb -d'
    if opt in ('-e', '--emulator'):
        adb = 'adb -e'
    if opt in ('-s', '--serial'):
        adb = 'adb -s ' + value
def _create_opt_parser():
    """Build the command line parser for this test runner.

    Device-selection flags are handled by the _set_adb_device callback,
    which mutates the module-global adb command prefix.
    """
    parser = optparse.OptionParser()
    parser.add_option('-d', '--device', action='callback', callback=_set_adb_device,
                      help='pass -d to adb')
    parser.add_option('-e', '--emulator', action='callback', callback=_set_adb_device,
                      help='pass -e to adb')
    # BUG FIX: help text typo "<serical>" -> "<serial>".
    parser.add_option('-s', '--serial', type="str", action='callback', callback=_set_adb_device,
                      help='pass -s <serial> to adb')
    parser.add_option('-C', '--no-color', action='store_false',
                      dest='use_color', default=True,
                      help='disable color escape sequences in output')
    parser.add_option('-q', '--quiet', action='store_true',
                      dest='quiet_mode', default=False,
                      help='quiet mode, output only results')
    parser.add_option('-b', '--no-build', action='store_false',
                      dest='do_build', default=True,
                      help='do not rebuild test projects')
    # BUG FIX: help text was copy-pasted from --no-build; -k actually
    # continues running remaining tasks after a failure (see main loop).
    parser.add_option('-k', '--continue', action='store_true',
                      dest='do_continue', default=False,
                      help='continue executing remaining tasks after a failure')
    # BUG FIX: help text was copy-pasted from --test-single-overlay.
    parser.add_option('-i', '--test-idmap', action='store_true',
                      dest='test_idmap', default=False,
                      help='run idmap (--path and --scan) tests')
    parser.add_option('-0', '--test-no-overlay', action='store_true',
                      dest='test_no_overlay', default=False,
                      help='run tests without any overlay')
    parser.add_option('-1', '--test-single-overlay', action='store_true',
                      dest='test_single_overlay', default=False,
                      help='run tests for single overlay')
    parser.add_option('-2', '--test-multiple-overlays', action='store_true',
                      dest='test_multiple_overlays', default=False,
                      help='run tests for multiple overlays')
    return parser
if __name__ == '__main__':
    # Parse options; with no suite flags given, run every suite.
    opt_parser = _create_opt_parser()
    opts, args = opt_parser.parse_args(sys.argv[1:])
    if not opts.test_idmap and not opts.test_no_overlay and not opts.test_single_overlay and not opts.test_multiple_overlays:
        opts.test_idmap = True
        opts.test_no_overlay = True
        opts.test_single_overlay = True
        opts.test_multiple_overlays = True
    if len(args) > 0:
        opt_parser.error("unexpected arguments: %s" % " ".join(args))
    # will never reach this: opt_parser.error will call sys.exit

    if opts.quiet_mode:
        printer = QuietPrinter()
    else:
        printer = VerbosePrinter(opts.use_color)
    tasks = []

    # must be in the same directory as this script for compilation tasks to work
    script = sys.argv[0]
    dirname = os.path.dirname(script)
    wd = os.path.realpath(dirname)
    os.chdir(wd)

    # build test cases
    if opts.do_build:
        tasks.append(CompilationTask('OverlayTest/Android.mk'))
        tasks.append(CompilationTask('OverlayTestOverlay/Android.mk'))
        tasks.append(CompilationTask('OverlayAppFirst/Android.mk'))
        tasks.append(CompilationTask('OverlayAppSecond/Android.mk'))

    # remount filesystem, install test project
    tasks.append(RootTask())
    tasks.append(RemountTask())
    tasks.append(PushTask('/system/app/OverlayTest/OverlayTest.apk', '/system/app/OverlayTest.apk'))

    # test idmap
    if opts.test_idmap:
        idmaps='/data/local/tmp/idmaps'
        symlinks='/data/local/tmp/symlinks'

        # idmap --path
        # NOTE(review): IdmapPathTask's constructor is
        # (path_target_apk, path_overlay_apk, path_idmap), but this call
        # passes the overlay apk first and framework-res.apk second --
        # verify the intended argument order against the idmap binary.
        tasks.append(StopTask())
        tasks.append(_create_setup_idmap_path_task(idmaps, symlinks))
        tasks.append(StartTask())
        tasks.append(IdmapPathTask('/vendor/overlay/framework_a.apk', '/system/framework/framework-res.apk', idmaps + '/a.idmap'))
        tasks.append(FileExistsTest(idmaps + '/a.idmap'))
        tasks.append(GrepIdmapTest(idmaps + '/a.idmap', 'bool/config_annoy_dianne', 1))

        # idmap --scan
        idmap = idmaps + '/vendor@overlay@framework_b.apk@idmap'
        tasks.append(StopTask())
        tasks.append(_create_setup_idmap_scan_task(idmaps, symlinks))
        tasks.append(StartTask())
        tasks.append(IdmapScanTask('/vendor/overlay', 'android', '/system/framework/framework-res.apk', idmaps, symlinks))
        tasks.append(FileExistsTest(idmap))
        tasks.append(GrepIdmapTest(idmap, 'bool/config_annoy_dianne', 1))

        # overlays.list
        overlays_list_path = idmaps + '/overlays.list'
        expected_content = '''\
/vendor/overlay/framework_b.apk /data/local/tmp/idmaps/vendor@overlay@framework_b.apk@idmap
'''
        tasks.append(FileExistsTest(overlays_list_path))
        tasks.append(Md5Test(overlays_list_path, expected_content))

        # idmap cleanup
        tasks.append(RmTask(symlinks))
        tasks.append(RmTask(idmaps))

    # test no overlay
    if opts.test_no_overlay:
        tasks.append(StopTask())
        tasks.append(_create_disable_overlays_task())
        tasks.append(StartTask())
        tasks.append(InstrumentationTask('com.android.overlaytest.WithoutOverlayTest'))

    # test single overlay
    if opts.test_single_overlay:
        tasks.append(StopTask())
        tasks.append(_create_enable_single_overlay_task())
        tasks.append(StartTask())
        tasks.append(InstrumentationTask('com.android.overlaytest.WithOverlayTest'))

    # test multiple overlays
    if opts.test_multiple_overlays:
        tasks.append(StopTask())
        tasks.append(_create_enable_multiple_overlays_task())
        tasks.append(StartTask())
        tasks.append(InstrumentationTask('com.android.overlaytest.WithMultipleOverlaysTest'))

    # Execute the task list; with -k/--continue, failures are tallied
    # instead of aborting, and the count becomes the exit status.
    ignored_errors = 0
    for t in tasks:
        type = t.get_type()
        name = t.get_name()
        if type == TASK_INSTRUMENTATION:
            # InstrumentationTask will run several tests, but we want it
            # to appear as if each test was run individually. Calling
            # "am instrument" with a single test method is prohibitively
            # expensive, so let's instead post-process the output to
            # emulate individual calls.
            retcode, stdout, stderr = t.execute()
            if retcode != 0:
                printer.begin(TASK_INSTRUMENTATION, name)
                printer.end_fail(TASK_INSTRUMENTATION, name, stderr)
                sys.exit(retcode)
            retcode = _handle_instrumentation_task_output(stdout, printer)
            if retcode != 0:
                if not opts.do_continue:
                    sys.exit(retcode)
                else:
                    ignored_errors += retcode
        else:
            printer.begin(type, name)
            retcode, stdout, stderr = t.execute()
            if retcode == 0:
                printer.end_pass(type, name)
            if retcode != 0:
                if len(stderr) == 0:
                    # hope for output from stdout instead (true for eg adb shell rm)
                    stderr = stdout
                printer.end_fail(type, name, stderr)
                if not opts.do_continue:
                    sys.exit(retcode)
                else:
                    ignored_errors += retcode
    sys.exit(ignored_errors)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections_abc.Mapping):
    """Minimal user-defined Mapping backed by a plain dict.

    Exists so the tests can check that nest handles Mapping subclasses
    other than dict.
    """

    def __init__(self, *args, **kwargs):
        # Accept the same constructor arguments as dict.
        self._wrapped = dict(*args, **kwargs)

    def __getitem__(self, key):
        return self._wrapped[key]

    def __iter__(self):
        return iter(self._wrapped)

    def __len__(self):
        return len(self._wrapped)
class _CustomList(list):
pass
class _CustomSequenceThatRaisesException(collections_abc.Sequence):
  """A Sequence whose item access always raises.

  Used to verify that nest does not eagerly touch the elements of a sequence.

  Note: this inherits from `collections_abc.Sequence` (i.e.
  `collections.abc.Sequence`) rather than `collections.Sequence`; the latter
  alias was deprecated since Python 3.3 and removed in Python 3.10, and the
  sibling fixture `_CustomMapping` already uses `collections_abc`.
  """

  def __len__(self):
    return 1

  def __getitem__(self, item):
    raise ValueError("Cannot get item: %s" % item)
class NestTest(parameterized.TestCase, test.TestCase):
  """Tests for `tensorflow.python.util.nest`."""

  # Simple two-field namedtuple used as a structure leaf-container throughout.
  PointXY = collections.namedtuple("Point", ["x", "y"])  # pylint: disable=invalid-name
  # Expected error-message fragments raised by nest on incompatible packs.
  unsafe_map_pattern = ("nest cannot guarantee that it is safe to map one to "
                        "the other.")
  bad_pack_pattern = ("Attempted to pack value:\n .+\ninto a sequence, but "
                      "found incompatible type `<(type|class) 'str'>` instead.")
  # attrs-decorated fixtures; only defined when the optional `attr` package
  # imported successfully at module load time.
  if attr:

    class BadAttr(object):
      """Class that has a non-iterable __attrs_attrs__."""
      __attrs_attrs__ = None

    @attr.s
    class SampleAttr(object):
      field1 = attr.ib()
      field2 = attr.ib()

    @attr.s
    class UnsortedSampleAttr(object):
      # Declaration order (field3 first) deliberately differs from sorted
      # name order, to check nest honors attrs declaration order.
      field3 = attr.ib()
      field1 = attr.ib()
      field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegex(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testFlattenAndPack(self):
    """flatten yields leaves depth-first; pack_sequence_as inverts it."""
    structure = ((3, 4), 5, (6, 7, (9, 10), 8))
    flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
    self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
    self.assertEqual(
        nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
                                                 ("d", "e", ("f", "g"), "h")))
    # Namedtuples round-trip with field access preserved.
    structure = (NestTest.PointXY(x=4, y=2),
                 ((NestTest.PointXY(x=1, y=0),),))
    flat = [4, 2, 1, 0]
    self.assertEqual(nest.flatten(structure), flat)
    restructured_from_flat = nest.pack_sequence_as(structure, flat)
    self.assertEqual(restructured_from_flat, structure)
    self.assertEqual(restructured_from_flat[0].x, 4)
    self.assertEqual(restructured_from_flat[0].y, 2)
    self.assertEqual(restructured_from_flat[1][0][0].x, 1)
    self.assertEqual(restructured_from_flat[1][0][0].y, 0)
    # Scalars and numpy arrays are single leaves, not sequences.
    self.assertEqual([5], nest.flatten(5))
    self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
    self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
    self.assertEqual(
        np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
    # Mismatched structure / flat-sequence combinations must raise.
    with self.assertRaisesRegex(ValueError, self.unsafe_map_pattern):
      nest.pack_sequence_as("scalar", [4, 5])
    with self.assertRaisesRegex(TypeError, self.bad_pack_pattern):
      nest.pack_sequence_as([4, 5], "bad_sequence")
    with self.assertRaises(ValueError):
      nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPackMappingViews(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
# test flattening
ordered_keys_flat = nest.flatten(ordered.keys())
ordered_values_flat = nest.flatten(ordered.values())
ordered_items_flat = nest.flatten(ordered.items())
self.assertEqual([3, 1, 0, 2], ordered_values_flat)
self.assertEqual(["d", "b", "a", "c"], ordered_keys_flat)
self.assertEqual(["d", 3, "b", 1, "a", 0, "c", 2], ordered_items_flat)
# test packing
self.assertEqual([("d", 3), ("b", 1), ("a", 0), ("c", 2)],
nest.pack_sequence_as(ordered.items(), ordered_items_flat))
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testFlattenAndPack_withDicts(self):
    """Round-trips a mixed nest of tuples, namedtuples, dicts, OrderedDicts."""
    # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
    mess = [
        "z",
        NestTest.Abc(3, 4), {
            "d": _CustomMapping({
                41: 4
            }),
            "c": [
                1,
                collections.OrderedDict([
                    ("b", 3),
                    ("a", 2),
                ]),
            ],
            "b": 5
        }, 17
    ]
    flattened = nest.flatten(mess)
    # Dict values come out in sorted-key order ("b" < "c" < "d").
    self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
    # A template with the same shape but different leaf values.
    structure_of_mess = [
        14,
        NestTest.Abc("a", True),
        {
            "d": _CustomMapping({
                41: 42
            }),
            "c": [
                0,
                collections.OrderedDict([
                    ("b", 9),
                    ("a", 8),
                ]),
            ],
            "b": 3
        },
        "hi everybody",
    ]
    unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
    self.assertEqual(unflattened, mess)
    # Check also that the OrderedDict was created, with the correct key order.
    unflattened_ordered_dict = unflattened[2]["c"][1]
    self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
    self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
    unflattened_custom_mapping = unflattened[2]["d"]
    self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
    self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
  def testPackSequenceAs_notIterableError(self):
    """pack_sequence_as rejects a non-sequence flat argument."""
    with self.assertRaisesRegex(TypeError, self.bad_pack_pattern):
      nest.pack_sequence_as("hi", "bye")
  def testPackSequenceAs_wrongLengthsError(self):
    """pack_sequence_as rejects a flat sequence of mismatched length."""
    with self.assertRaisesRegex(
        ValueError,
        "Structure had 2 elements, but flat_sequence had 3 elements."):
      nest.pack_sequence_as(["hello", "world"],
                            ["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsNested(self):
self.assertFalse(nest.is_nested("1234"))
self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
self.assertTrue(nest.is_nested([]))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.keys()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.values()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.items()))
self.assertFalse(nest.is_nested(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_nested(ones))
self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
self.assertFalse(nest.is_nested(np.ones((4, 5))))
  @parameterized.parameters({"mapping_type": _CustomMapping},
                            {"mapping_type": dict})
  def testFlattenDictItems(self, mapping_type):
    """flatten_dict_items pairs flattened keys with flattened values."""
    dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
    flat = {4: "a", 5: "b", 6: "c", 8: "d"}
    self.assertEqual(nest.flatten_dict_items(dictionary), flat)
    # Non-mapping input is rejected.
    with self.assertRaises(TypeError):
      nest.flatten_dict_items(4)
    # Duplicate flattened keys are rejected.
    bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
    with self.assertRaisesRegex(ValueError, "not unique"):
      nest.flatten_dict_items(bad_dictionary)
    # Key and value structures must flatten to the same number of elements.
    another_bad_dictionary = mapping_type({
        (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
    })
    with self.assertRaisesRegex(
        ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
      nest.flatten_dict_items(another_bad_dictionary)
  # pylint does not correctly recognize these as class names and
  # suggests to use variable style under_score naming.
  # pylint: disable=invalid-name
  # Namedtuple fixtures for the same-structure tests below: pairs with
  # matching or mismatching type names and field names.
  Named0ab = collections.namedtuple("named_0", ("a", "b"))
  Named1ab = collections.namedtuple("named_1", ("a", "b"))
  SameNameab = collections.namedtuple("same_name", ("a", "b"))
  SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
  SameNamexy = collections.namedtuple("same_name", ("x", "y"))
  SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
  SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
  NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
  # pylint: enable=invalid-name

  class SameNamedType1(SameNameab):
    # Subclass of SameNameab: same fields, distinct type.
    pass
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testAssertSameStructure(self):
    """assert_same_structure accepts matching nests and rejects the rest."""
    structure1 = (((1, 2), 3), 4, (5, 6))
    structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
    structure_different_num_elements = ("spam", "eggs")
    structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
    # Matching shapes pass regardless of leaf types.
    nest.assert_same_structure(structure1, structure2)
    nest.assert_same_structure("abc", 1.0)
    nest.assert_same_structure("abc", np.array([0, 1]))
    nest.assert_same_structure("abc", constant_op.constant([0, 1]))
    # Mismatches raise with a detailed, fully-specified message.
    with self.assertRaisesRegex(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         "More specifically: Substructure "
         r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
         'substructure "type=str str=spam" is not\n'
         "Entire first structure:\n"
         r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
         "Entire second structure:\n"
         r"\(\., \.\)")):
      nest.assert_same_structure(structure1, structure_different_num_elements)
    with self.assertRaisesRegex(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         r'More specifically: Substructure "type=list str=\[0, 1\]" '
         r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
         "is not")):
      nest.assert_same_structure([0, 1], np.array([0, 1]))
    with self.assertRaisesRegex(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         r'More specifically: Substructure "type=list str=\[0, 1\]" '
         'is a sequence, while substructure "type=int str=0" '
         "is not")):
      nest.assert_same_structure(0, [0, 1])
    self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
    with self.assertRaisesRegex(ValueError,
                                ("don't have the same nested structure\\.\n\n"
                                 "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure(structure1, structure_different_nesting)
    # Namedtuples only match tuples/namedtuples of the same type.
    self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
                      NestTest.Named0ab("a", "b"))
    nest.assert_same_structure(NestTest.Named0ab(3, 4),
                               NestTest.Named0ab("a", "b"))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
    with self.assertRaisesRegex(ValueError,
                                ("don't have the same nested structure\\.\n\n"
                                 "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure(NestTest.Named0ab(3, 4),
                                 NestTest.Named0ab([3], 4))
    with self.assertRaisesRegex(ValueError,
                                ("don't have the same nested structure\\.\n\n"
                                 "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure([[3], 4], [3, [4]])
    structure1_list = [[[1, 2], 3], 4, [5, 6]]
    with self.assertRaisesRegex(TypeError, "don't have the same sequence type"):
      nest.assert_same_structure(structure1, structure1_list)
    # check_types=False tolerates tuple-vs-list differences.
    nest.assert_same_structure(structure1, structure2, check_types=False)
    nest.assert_same_structure(structure1, structure1_list, check_types=False)
    with self.assertRaisesRegex(ValueError, "don't have the same set of keys"):
      nest.assert_same_structure({"a": 1}, {"b": 1})
    nest.assert_same_structure(NestTest.SameNameab(0, 1),
                               NestTest.SameNameab2(2, 3))
    # This assertion is expected to pass: two namedtuples with the same
    # name and field names are considered to be identical.
    nest.assert_same_structure(
        NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
        NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
    expected_message = "The two structures don't have the same.*"
    with self.assertRaisesRegex(ValueError, expected_message):
      nest.assert_same_structure(
          NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
          NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
    # Different type name, field names, or subclassing all break matching.
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
  def testHeterogeneousComparison(self):
    """dict and a custom Mapping with equal keys count as the same structure."""
    nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
    nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testMapStructure(self):
    """map_structure applies a function leafwise over one or more nests."""
    structure1 = (((1, 2), 3), 4, (5, 6))
    structure2 = (((7, 8), 9), 10, (11, 12))
    structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
    nest.assert_same_structure(structure1, structure1_plus1)
    self.assertAllEqual(
        [2, 3, 4, 5, 6, 7],
        nest.flatten(structure1_plus1))
    structure1_plus_structure2 = nest.map_structure(
        lambda x, y: x + y, structure1, structure2)
    self.assertEqual(
        (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
        structure1_plus_structure2)
    # Scalars map to scalars.
    self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
    self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
    # defaultdicts are mapped like ordinary dicts.
    structure3 = collections.defaultdict(list)
    structure3["a"] = [1, 2, 3, 4]
    structure3["b"] = [2, 3, 4, 5]
    expected_structure3 = collections.defaultdict(list)
    expected_structure3["a"] = [2, 3, 4, 5]
    expected_structure3["b"] = [3, 4, 5, 6]
    self.assertEqual(expected_structure3,
                     nest.map_structure(lambda x: x + 1, structure3))
    # Empty structures
    self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
    self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
    self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
    self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
                                                            NestTest.EmptyNT()))
    # This is checking actual equality of types, empty list != empty tuple
    self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
    # Error cases: non-callable fn, no structures, mismatched shapes/types,
    # and unknown keyword arguments.
    with self.assertRaisesRegex(TypeError, "callable"):
      nest.map_structure("bad", structure1_plus1)
    with self.assertRaisesRegex(ValueError, "at least one structure"):
      nest.map_structure(lambda x: x)
    with self.assertRaisesRegex(ValueError, "same number of elements"):
      nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
    with self.assertRaisesRegex(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, 3, (3,))
    with self.assertRaisesRegex(TypeError, "same sequence type"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
    with self.assertRaisesRegex(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
    structure1_list = [[[1, 2], 3], 4, [5, 6]]
    with self.assertRaisesRegex(TypeError, "same sequence type"):
      nest.map_structure(lambda x, y: None, structure1, structure1_list)
    nest.map_structure(lambda x, y: None, structure1, structure1_list,
                       check_types=False)
    with self.assertRaisesRegex(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
                         check_types=False)
    with self.assertRaisesRegex(ValueError, "Only valid keyword argument.*foo"):
      nest.map_structure(lambda x: None, structure1, foo="a")
    with self.assertRaisesRegex(ValueError, "Only valid keyword argument.*foo"):
      nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
  def testMapStructureOverPlaceholders(self):
    """map_structure builds matching graph ops over placeholder nests."""
    # Test requires placeholders and thus requires graph mode
    with ops.Graph().as_default():
      inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
               array_ops.placeholder(dtypes.float32, shape=[3, 7]))
      inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
               array_ops.placeholder(dtypes.float32, shape=[3, 7]))
      output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
      # Output mirrors the input structure and per-element static shapes.
      nest.assert_same_structure(output, inp_a)
      self.assertShapeEqual(np.zeros((3, 4)), output[0])
      self.assertShapeEqual(np.zeros((3, 7)), output[1])
      feed_dict = {
          inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
          inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
      }
      with self.cached_session() as sess:
        output_np = sess.run(output, feed_dict=feed_dict)
      # Evaluated values equal the elementwise sums of the fed arrays.
      self.assertAllClose(output_np[0],
                          feed_dict[inp_a][0] + feed_dict[inp_b][0])
      self.assertAllClose(output_np[1],
                          feed_dict[inp_a][1] + feed_dict[inp_b][1])
  def testAssertShallowStructure(self):
    """assert_shallow_structure only checks down to the shallow tree's depth."""
    inp_ab = ["a", "b"]
    inp_abc = ["a", "b", "c"]
    # Length mismatch at the shallow level is an error.
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        ValueError,
        nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
            input_length=len(inp_ab),
            shallow_length=len(inp_abc))):
      nest.assert_shallow_structure(inp_abc, inp_ab)
    inp_ab1 = [(1, 1), (2, 2)]
    inp_ab2 = [[1, 1], [2, 2]]
    # Tuple-vs-list mismatch raises unless check_types=False.
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(inp_ab2[0]),
            input_type=type(inp_ab1[0]))):
      nest.assert_shallow_structure(inp_ab2, inp_ab1)
    nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
    inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
    inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
    # Shallow keys absent from the input are reported.
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])):
      nest.assert_shallow_structure(inp_ab2, inp_ab1)
    # Key order in mappings is irrelevant.
    inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
    inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
    nest.assert_shallow_structure(inp_ab, inp_ba)
    # This assertion is expected to pass: two namedtuples with the same
    # name and field names are considered to be identical.
    inp_shallow = NestTest.SameNameab(1, 2)
    inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
    # This assertion is expected to pass: two list-types with same number
    # of fields are considered identical.
    inp_shallow = _CustomList([1, 2])
    inp_deep = [1, 2]
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
  def testFlattenUpTo(self):
    """flatten_up_to truncates flattening at the shallow tree's leaves."""
    # Shallow tree ends at scalar.
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
    self.assertEqual(flattened_shallow_tree, [True, True, False, True])
    # Shallow tree ends at string.
    input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
    shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    input_tree_flattened = nest.flatten(input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
    # Make sure dicts are correctly flattened, yielding values, not keys.
    input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
    shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [1, {"c": 2}, 3, (4, 5)])
    # Namedtuples.
    ab_tuple = NestTest.ABTuple
    input_tree = ab_tuple(a=[0, 1], b=2)
    shallow_tree = ab_tuple(a=0, b=1)
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [[0, 1], 2])
    # Nested dicts, OrderedDicts and namedtuples.
    input_tree = collections.OrderedDict(
        [("a", ab_tuple(a=[0, {"b": 1}], b=2)),
         ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
    shallow_tree = input_tree
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      3,
                      collections.OrderedDict([("f", 4)])])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      {"d": 3, "e": collections.OrderedDict([("f", 4)])}])
    ## Shallow non-list edge-case.
    # Using iterable elements.
    input_tree = ["input_tree"]
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = ["input_tree_0", "input_tree_1"]
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = [0]
    shallow_tree = 9
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = [0, 1]
    shallow_tree = 9
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Both non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = 0
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Input non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree"]
    expected_message = ("If shallow structure is a sequence, input must also "
                        "be a sequence. Input has type: <(type|class) 'str'>.")
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = [9]
    expected_message = ("If shallow structure is a sequence, input must also "
                        "be a sequence. Input has type: <(type|class) 'int'>.")
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = 0
    shallow_tree = [9, 8]
    with self.assertRaisesRegex(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Mismatched shallow length is reported by assert_shallow_structure.
    input_tree = [(1,), (2,), 3]
    shallow_tree = [(1,), (2,)]
    expected_message = nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
        input_length=len(input_tree), shallow_length=len(shallow_tree))
    with self.assertRaisesRegex(ValueError, expected_message):  # pylint: disable=g-error-prone-assert-raises
      nest.assert_shallow_structure(shallow_tree, input_tree)
  def testFlattenWithTuplePathsUpTo(self):
    """flatten_with_tuple_paths_up_to yields (path, value) pairs."""

    def get_paths_and_values(shallow_tree, input_tree):
      # Helper: split the (path, value) pairs into parallel lists.
      path_value_pairs = nest.flatten_with_tuple_paths_up_to(
          shallow_tree, input_tree)
      paths = [p for p, _ in path_value_pairs]
      values = [v for _, v in path_value_pairs]
      return paths, values

    # Shallow tree ends at scalar.
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths,
                     [(0, 0), (0, 1), (1, 0), (1, 1)])
    self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
    self.assertEqual(flattened_shallow_tree_paths,
                     [(0, 0), (0, 1), (1, 0), (1, 1)])
    self.assertEqual(flattened_shallow_tree, [True, True, False, True])
    # Shallow tree ends at string.
    input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
    shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    input_tree_flattened_paths = [p for p, _ in
                                  nest.flatten_with_tuple_paths(input_tree)]
    input_tree_flattened = nest.flatten(input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    self.assertEqual(input_tree_flattened_paths,
                     [(0, 0, 0), (0, 0, 1),
                      (0, 1, 0, 0), (0, 1, 0, 1),
                      (0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
                      (0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
    self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
    # Make sure dicts are correctly flattened, yielding values, not keys.
    input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
    shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("b",), ("d", 0), ("d", 1)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [1, {"c": 2}, 3, (4, 5)])
    # Namedtuples.
    ab_tuple = collections.namedtuple("ab_tuple", "a, b")
    input_tree = ab_tuple(a=[0, 1], b=2)
    shallow_tree = ab_tuple(a=0, b=1)
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("b",)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [[0, 1], 2])
    # Nested dicts, OrderedDicts and namedtuples.
    input_tree = collections.OrderedDict(
        [("a", ab_tuple(a=[0, {"b": 1}], b=2)),
         ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
    shallow_tree = input_tree
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a", "a", 0),
                      ("a", "a", 1, "b"),
                      ("a", "b"),
                      ("c", "d"),
                      ("c", "e", "f")])
    self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",),
                      ("c", "d"),
                      ("c", "e")])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      3,
                      collections.OrderedDict([("f", 4)])])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("c",)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      {"d": 3, "e": collections.OrderedDict([("f", 4)])}])
    ## Shallow non-list edge-case.
    # Using iterable elements.
    input_tree = ["input_tree"]
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = ["input_tree_0", "input_tree_1"]
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Test case where len(shallow_tree) < len(input_tree)
    input_tree = {"a": "A", "b": "B", "c": "C"}
    shallow_tree = {"a": 1, "c": 2}
    with self.assertRaisesWithLiteralMatch(  # pylint: disable=g-error-prone-assert-raises
        ValueError,
        nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
            input_length=len(input_tree),
            shallow_length=len(shallow_tree))):
      get_paths_and_values(shallow_tree, input_tree)
    # Using non-iterable elements.
    input_tree = [0]
    shallow_tree = 9
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = [0, 1]
    shallow_tree = 9
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Both non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = 0
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Input non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree"]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = [9]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = 0
    shallow_tree = [9, 8]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
  """map_structure_up_to maps the fn over leaves of the shallow structure."""
  # Named tuples.
  ab_tuple = collections.namedtuple("ab_tuple", "a, b")
  op_tuple = collections.namedtuple("op_tuple", "add, mul")
  inp_val = ab_tuple(a=2, b=3)
  inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
  out = nest.map_structure_up_to(
      inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
  self.assertEqual(out.a, 6)   # (2 + 1) * 2
  self.assertEqual(out.b, 15)  # (3 + 2) * 3
  # Lists.
  data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
  name_list = ["evens", ["odds", "primes"]]
  out = nest.map_structure_up_to(
      name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
      name_list, data_list)
  self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
  # Dicts.
  inp_val = dict(a=2, b=3)
  inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
  out = nest.map_structure_up_to(
      inp_val,
      lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
  self.assertEqual(out["a"], 6)
  self.assertEqual(out["b"], 15)
  # Non-equal dicts: the shallow tree's extra key "b" must be reported.
  inp_val = dict(a=2, b=3)
  inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
  with self.assertRaisesWithLiteralMatch(
      ValueError,
      nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
    nest.map_structure_up_to(
        inp_val,
        lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
  # Dict+custom mapping.
  inp_val = dict(a=2, b=3)
  inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
  out = nest.map_structure_up_to(
      inp_val,
      lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
  self.assertEqual(out["a"], 6)
  self.assertEqual(out["b"], 15)
  # Non-equal dict/mapping.
  inp_val = dict(a=2, b=3)
  inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
  with self.assertRaisesWithLiteralMatch(
      ValueError,
      nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
    nest.map_structure_up_to(
        inp_val,
        lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
  """get_traverse_shallow_structure mirrors the input with boolean leaves."""
  scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
  scalar_traverse_r = nest.get_traverse_shallow_structure(
      lambda s: not isinstance(s, tuple),
      scalar_traverse_input)
  # Tuples are marked False (do not descend); everything else is traversed.
  self.assertEqual(scalar_traverse_r,
                   [True, True, False, [True, True], {"a": False}, []])
  nest.assert_shallow_structure(scalar_traverse_r,
                                scalar_traverse_input)
  # The traverse fn may itself return a shallow structure of bools.
  structure_traverse_input = [(1, [2]), ([1], 2)]
  structure_traverse_r = nest.get_traverse_shallow_structure(
      lambda s: (True, False) if isinstance(s, tuple) else True,
      structure_traverse_input)
  self.assertEqual(structure_traverse_r,
                   [(True, False), ([True], False)])
  nest.assert_shallow_structure(structure_traverse_r,
                                structure_traverse_input)
  # Anything other than a bool or a depth-1 bool structure is rejected.
  with self.assertRaisesRegex(TypeError, "returned structure"):
    nest.get_traverse_shallow_structure(lambda _: [True], 0)
  with self.assertRaisesRegex(TypeError, "returned a non-bool scalar"):
    nest.get_traverse_shallow_structure(lambda _: 1, [1])
  with self.assertRaisesRegex(TypeError,
                              "didn't return a depth=1 structure of bools"):
    nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
  """yield_flat_paths emits one tuple path per leaf, in flatten order."""
  cases = [
      ([], []),
      (3, [()]),
      ([3], [(0,)]),
      ({"a": 3}, [("a",)]),
      ({"a": {"b": 4}}, [("a", "b")]),
      ([{"a": 2}], [(0, "a")]),
      ([{"a": [2]}], [(0, "a", 0)]),
      ([{"a": [(23, 42)]}], [(0, "a", 0, 0), (0, "a", 0, 1)]),
      ([{"a": ([23], 42)}], [(0, "a", 0, 0), (0, "a", 1)]),
      ({"a": {"a": 2}, "c": [[[4]]]}, [("a", "a"), ("c", 0, 0, 0)]),
      ({"0": [{"1": 23}]}, [("0", 0, "1")]),
  ]
  for inputs, expected in cases:
    self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
# We cannot define namedtuples within @parameterized argument lists.
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])  # used by the path tests below
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
@parameterized.parameters([
    dict(inputs=[], expected=[]),
    dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
    dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
    dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
         expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
    dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
         expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
    dict(inputs=Bar(c=42, d=43),
         expected=[("c", 42), ("d", 43)]),
    dict(inputs=Bar(c=[42], d=43),
         expected=[("c/0", 42), ("d", 43)]),
])
def testFlattenWithStringPaths(self, inputs, expected):
  """Paths are joined with the separator; leaves keep flatten order."""
  self.assertEqual(
      nest.flatten_with_joined_string_paths(inputs, separator="/"),
      expected)
@parameterized.parameters([
    dict(inputs=[], expected=[]),
    dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
    dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
    dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
         expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
    dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
         expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
                   (("b", "d"), "thing")]),
    dict(inputs=Bar(c=42, d=43),
         expected=[(("c",), 42), (("d",), 43)]),
    dict(inputs=Bar(c=[42], d=43),
         expected=[(("c", 0), 42), (("d",), 43)]),
])
def testFlattenWithTuplePaths(self, inputs, expected):
  """Same as the string-path variant, but paths stay as tuples."""
  self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
    ("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
    ("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
     {"a": ("a", 4), "b": ("b", 6)}),
    ("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
    ("nested",
     {"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
     {"a": [("a/0", 10), ("a/1", 12)],
      "b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
  """map_structure_with_paths passes the joined string path as first arg."""

  def format_sum(path, *values):
    # Each mapped leaf becomes (path, sum of the corresponding leaves).
    return (path, sum(values))

  result = nest.map_structure_with_paths(format_sum, s1, s2,
                                         check_types=check_types)
  self.assertEqual(expected, result)
@parameterized.named_parameters(
    ("tuples", (1, 2, 3), (4, 5), ValueError),
    ("dicts", {"a": 1}, {"b": 2}, ValueError),
    ("mixed", (1, 2), [3, 4], TypeError),
    ("nested",
     {"a": [2, 3, 4], "b": [1, 3]},
     {"b": [5, 6], "a": [8, 9]},
     ValueError
    ))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
  """Mismatched structures raise before the mapping function is applied."""
  with self.assertRaises(error_type):
    nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
@parameterized.named_parameters([
    dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
         check_types=True, expected=(((0,), 4), ((1,), 6))),
    dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
         check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
    dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
         check_types=False, expected=(((0,), 4), ((1,), 6))),
    dict(testcase_name="Nested",
         s1={"a": [2, 3], "b": [1, 2, 3]},
         s2={"b": [5, 6, 7], "a": [8, 9]},
         check_types=True,
         expected={"a": [(("a", 0), 10), (("a", 1), 12)],
                   "b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
])
def testMapWithTuplePathsCompatibleStructures(
    self, s1, s2, check_types, expected):
  """Like map_structure_with_paths, but paths are tuples, not joined strings."""

  def path_and_sum(path, *values):
    return path, sum(values)

  result = nest.map_structure_with_tuple_paths(
      path_and_sum, s1, s2, check_types=check_types)
  self.assertEqual(expected, result)
@parameterized.named_parameters([
    dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
         error_type=ValueError),
    dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
         error_type=ValueError),
    dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
    dict(testcase_name="Nested",
         s1={"a": [2, 3, 4], "b": [1, 3]},
         s2={"b": [5, 6], "a": [8, 9]},
         error_type=ValueError)
])
def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
  """Mismatched structures raise before the mapping function is applied."""
  with self.assertRaises(error_type):
    nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
def testFlattenCustomSequenceThatRaisesException(self):  # b/140746865
  """Exceptions raised by a custom sequence's item access must propagate."""
  seq = _CustomSequenceThatRaisesException()
  with self.assertRaisesRegex(ValueError, "Cannot get item"):
    nest.flatten(seq)
def testListToTuple(self):
  """list_to_tuple converts nested lists to tuples, other types untouched."""
  input_sequence = [1, (2, {3: [4, 5, (6,)]}, None, 7, [[[8]]])]
  expected = (1, (2, {3: (4, 5, (6,))}, None, 7, (((8,),),)))
  nest.assert_same_structure(
      nest.list_to_tuple(input_sequence),
      expected,
  )
def testInvalidCheckTypes(self):
  """Tensor-valued flag arguments to assert_same_structure are rejected."""
  with self.assertRaises((ValueError, TypeError)):
    nest.assert_same_structure(
        nest1=array_ops.zeros((1)),
        nest2=array_ops.ones((1, 1, 1)),
        check_types=array_ops.ones((2)))
  with self.assertRaises((ValueError, TypeError)):
    nest.assert_same_structure(
        nest1=array_ops.zeros((1)),
        nest2=array_ops.ones((1, 1, 1)),
        expand_composites=array_ops.ones((2)))
class NestBenchmark(test.Benchmark):
  """Micro-benchmarks for nest.assert_same_structure."""

  def run_and_report(self, s1, s2, name):
    """Time assert_same_structure(s1, s2) and report the mean wall time."""
    warmup_iterations = 100
    timed_iterations = 30000
    # Warm up before timing so one-off setup costs are excluded.
    for _ in xrange(warmup_iterations):
      nest.assert_same_structure(s1, s2)
    start = time.time()
    for _ in xrange(timed_iterations):
      nest.assert_same_structure(s1, s2)
    elapsed = time.time() - start
    self.report_benchmark(iters=timed_iterations,
                          wall_time=elapsed / timed_iterations,
                          name=name)

  def benchmark_assert_structure(self):
    small_1 = (((1, 2), 3), 4, (5, 6))
    small_2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
    self.run_and_report(small_1, small_2, "assert_same_structure_6_elem")
    self.run_and_report(small_1 * 10, small_2 * 10,
                        "assert_same_structure_60_elem")
if __name__ == "__main__":
  test.main()  # run the test classes defined above
| |
#Copyright (c) 2012, Carnegie Mellon University.
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
import os
from glob import glob
import rrdtool
import re
import unittest
"""
Collect metadata about RRD contents
"""
class RRDInfo:
    """
    Collect metadata from an RRD file.

    Dataset attributes reported by rrdtool (keys like "ds[name].index")
    are grouped per dataset name: dsets[name][attribute] = value.
    """
    def __init__(self, fname=None):
        """
        if fname is not None, build from the given RRD file
        """
        self.dsets = dict()
        # `is not None` rather than `!= None`: identity test for the singleton.
        if fname is not None:
            self.build_from_file(fname)
        else:
            fname = "unknown"
        self.fname = fname
    def build_from_file(self, fname):
        """
        Build RRD info from the given filename
        """
        info = rrdtool.info(fname)
        # .items() (not the py2-only .iteritems()) works on Python 2 and 3.
        for (key, val) in info.items():
            #ignore RRAs, and only examine ds[***] entries
            self.push_item(key, val)
    def push_item(self, key, val):
        """
        File one "ds[<name>].<attr>" entry under its dataset name.
        Keys that do not look like dataset entries (e.g. "rra[0].rows")
        are silently ignored.
        """
        #extract the dataset name
        keypath = key.split(".")
        first = keypath[0]
        # raw string avoids invalid-escape-sequence warnings for \[
        match = re.match(r"ds\[(.+)\]", first)
        if match:
            ikey = match.group(1)
            if ikey not in self.dsets:
                self.dsets[ikey] = {}
            self.dsets[ikey][".".join(keypath[1:])] = val
    def get_dset_names(self):
        """Return the dataset names as a list (stable across py2/py3)."""
        return list(self.dsets.keys())
    def get_name(self):
        """Return the source filename, or "unknown" for an empty info."""
        return self.fname
class RRDGroup:
    """A collection of RRDInfo objects compared as a set of datasets."""
    def __init__(self, dirname=None):
        """If dirname is given, load every *.rrd file found in it."""
        self.infos = []
        # identity test against None, per PEP 8
        if dirname is not None:
            self.add_dir(dirname)
        else:
            dirname = "unknown"
        self.dirname = dirname
    def add_dir(self, fold):
        """
        Add a directory of RRDinfos
        (add all *.rrd files)
        """
        for fname in os.listdir(fold):
            fullpath = os.path.join(fold, fname)
            if os.path.splitext(fullpath)[-1] == ".rrd":
                self.infos.append(RRDInfo(fullpath))
    def add_info(self, info):
        """
        Add an RRDinfo to this group of RRDs
        """
        self.infos.append(info)
    def get_shared(self):
        """
        All dataset names that are shared by keys
        (the intersection across every member RRD)
        """
        keylst = [set(i.get_dset_names()) for i in self.infos]
        return set.intersection(*keylst)
    def get_unshared(self):
        """
        Non-shared keys, by dataset name
        """
        shared = self.get_shared()
        diffs = dict()
        for info in self.infos:
            name = info.get_name()
            # NOTE(review): with three or more RRDs sharing a name, the
            # third's entry overwrites the second's ("name+" collides).
            # Left as-is: test_group below pins this behavior.
            if name in diffs:
                name += "+"
            diffs[name] = set.difference(set(info.get_dset_names()), shared)
        return diffs
    def includable(self, basedir=""):
        """
        Get an "include"-able representation of shared names and attributes
        @param basedir: root dir of comp names
        @return: (sorted component names, sorted shared attribute names)
        """
        compnames = []
        for info in self.infos:
            n = os.path.splitext(info.fname)[0]
            if not n.startswith(basedir):
                # print() call form is valid under both Python 2 and 3.
                print("WARNING: name %s does not start with basedir %s" % (n, basedir))
            else:
                n = n[len(basedir):]
                if n.startswith("/"):
                    n = n[1:]
                compnames.append(n)
        compnames.sort()
        attrnames = list(self.get_shared())
        attrnames.sort()
        return (compnames, attrnames)
class TestRRDIndex(unittest.TestCase):
    """Unit tests for RRDInfo.push_item and RRDGroup shared/unshared sets."""
    def test_push(self):
        """push_item files a ds[...] entry under its dataset name."""
        blank = RRDInfo()
        blank.push_item("ds[a].index", 4)
        self.assertEqual(len(blank.dsets), 1)
        self.assertTrue(blank.dsets["a"])
        self.assertEqual(blank.dsets["a"]["index"], 4)
    def test_group(self):
        """Shared/unshared dataset names across a three-member group."""
        group = RRDGroup()
        i1 = RRDInfo()
        i1.push_item("ds[a].index", 0)
        i1.push_item("ds[b].index", 1)
        group.add_info(i1)
        i2 = RRDInfo()
        i2.push_item("ds[a].index", 1)
        group.add_info(i2)
        i3 = RRDInfo()
        i3.push_item("ds[a].index", 1)
        i3.push_item("ds[c].index", 1)
        group.add_info(i3)
        shared = group.get_shared()
        self.assertEqual(len(shared), 1)
        self.assertTrue("a" in shared)
        unshared = group.get_unshared()
        self.assertEqual(len(unshared), 2)
        # list() so the values are indexable under Python 3 as well,
        # where dict.values() returns a view rather than a list.
        vals = list(unshared.values())
        self.assertEqual(len(vals[0]), 1)
        self.assertEqual(len(vals[1]), 1)
def run_individual(rrdpath):
    """Print every dataset name and its attribute dict for one RRD file."""
    info = RRDInfo(rrdpath)
    # .items() plus a single formatted print() keep this runnable under
    # both Python 2 and 3; "%s %s" matches the old `print ds, dct` output.
    for (ds, dct) in info.dsets.items():
        print("%s %s" % (ds, dct))
def run_folder(dirpath):
    """Print include-able component/attribute name lists for dirpath/dco."""
    group = RRDGroup()
    group.add_dir(os.path.join(dirpath, "dco"))
    (compnames, attrnames) = group.includable(basedir=dirpath)
    def format_list(lst):
        # One quoted name per line, comma-separated.
        return ",\n".join(["\"%s\"" % i for i in lst])
    # print() call form produces identical output under Python 2 and 3.
    print("//Node names >>>\n")
    print(format_list(compnames))
    print("//Attribute names >>>\n")
    print(format_list(attrnames))
if __name__ == '__main__':
    #unittest.main()
    #run_individual("/home/bigubuntu/rrd/dco/dco-n100.rrd")
    # NOTE(review): hard-coded machine-specific path; adjust before running.
    run_folder("/home/bigubuntu/rrd")
| |
"""Collection of functions to coerce conversion of types with an intelligent guess."""
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from itertools import chain
from re import IGNORECASE, compile
from enum import Enum
from .compat import NoneType, integer_types, isiterable, iteritems, string_types, text_type
from .decorators import memoizedproperty
from .exceptions import AuxlibError
__all__ = ["boolify", "typify", "maybecall", "listify", "numberify"]

# Lower-cased string forms that boolify() recognizes as true/false.
BOOLISH_TRUE = ("true", "yes", "on", "y")
BOOLISH_FALSE = ("false", "off", "n", "no", "non", "none", "")
# String forms treated as null when boolify() is called with nullable=True.
NULL_STRINGS = ("none", "~", "null", "\0")
# Types that bool() coerces directly, without any string parsing.
BOOL_COERCEABLE_TYPES = integer_types + (bool, float, complex, list, set, dict, tuple)
NUMBER_TYPES = integer_types + (float, complex)
NUMBER_TYPES_SET = set(NUMBER_TYPES)
STRING_TYPES_SET = set(string_types)
# Sentinel returned by _Regex._convert when no pattern matched.
NO_MATCH = object()
class TypeCoercionError(AuxlibError, ValueError):
    """Raised when a value cannot be coerced to the requested type."""

    def __init__(self, value, msg, *args, **kwargs):
        # Keep the offending value around so callers can inspect it.
        self.value = value
        super(TypeCoercionError, self).__init__(msg, *args, **kwargs)
class _Regex(object):
    """Lazily-compiled pattern/result pairs used to guess a string's type.

    Each property yields a ``(compiled_pattern, result)`` pair where
    ``result`` is either a literal value (True/False/None) or a callable
    converter (int, float, ...) applied to the matching string.
    """

    @memoizedproperty
    def BOOLEAN_TRUE(self):
        return compile(r'^true$|^yes$|^on$', IGNORECASE), True

    @memoizedproperty
    def BOOLEAN_FALSE(self):
        return compile(r'^false$|^no$|^off$', IGNORECASE), False

    @memoizedproperty
    def NONE(self):
        return compile(r'^none$|^null$', IGNORECASE), None

    @memoizedproperty
    def INT(self):
        return compile(r'^[-+]?\d+$'), int

    @memoizedproperty
    def BIN(self):
        return compile(r'^[-+]?0[bB][01]+$'), bin

    @memoizedproperty
    def OCT(self):
        return compile(r'^[-+]?0[oO][0-7]+$'), oct

    @memoizedproperty
    def HEX(self):
        return compile(r'^[-+]?0[xX][0-9a-fA-F]+$'), hex

    @memoizedproperty
    def FLOAT(self):
        return compile(r'^[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?$'), float

    @memoizedproperty
    def COMPLEX(self):
        return (compile(r'^(?:[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)?'  # maybe first float
                        r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?j$'),  # second float with j
                complex)

    @property
    def numbers(self):
        # Order matters: INT is tried before FLOAT so "3" yields 3, not 3.0.
        yield self.INT
        yield self.FLOAT
        yield self.BIN
        yield self.OCT
        yield self.HEX
        yield self.COMPLEX

    @property
    def boolean(self):
        yield self.BOOLEAN_TRUE
        yield self.BOOLEAN_FALSE

    @property
    def none(self):
        yield self.NONE

    def convert_number(self, value_string):
        """Try only the numeric patterns; returns NO_MATCH if none apply."""
        return self._convert(value_string, (self.numbers, ))

    def convert(self, value_string):
        """Try boolean, none, then numeric patterns; NO_MATCH if none apply."""
        return self._convert(value_string, (self.boolean, self.none, self.numbers, ))

    def _convert(self, value_string, type_list):
        # First matching pattern wins; call the converter when it is
        # callable, otherwise return the literal (True/False/None).
        return next((typish(value_string) if callable(typish) else typish
                     for regex, typish in chain.from_iterable(type_list)
                     if regex.match(value_string)),
                    NO_MATCH)
_REGEX = _Regex()  # module-level singleton; patterns compile lazily on first use
def numberify(value):
    """Coerce *value* into an int, float, or complex number.

    Raises:
        TypeCoercionError: if no numeric interpretation exists.

    Examples:
        >>> [numberify(x) for x in ('1234', 1234, '0755', 0o0755, False, 0, '0', True, 1, '1')]
        [1234, 1234, 755, 493, 0, 0, 0, 1, 1, 1]
        >>> [numberify(x) for x in ('12.34', 12.34, 1.2+3.5j, '1.2+3.5j')]
        [12.34, 12.34, (1.2+3.5j), (1.2+3.5j)]

    """
    if isinstance(value, bool):
        # bools are ints, but normalize True/False to 1/0 explicitly
        return int(value)
    if isinstance(value, NUMBER_TYPES):
        return value
    converted = _REGEX.convert_number(value)
    if converted is NO_MATCH:
        raise TypeCoercionError(value, "Cannot convert {0} to a number.".format(value))
    return converted
def boolify(value, nullable=False, return_string=False):
    """Convert a number, string, or sequence type into a pure boolean.

    Args:
        value (number, string, sequence): pretty much anything
        nullable (bool): when True, null-ish strings return None
        return_string (bool): when True, an unconvertible string is
            returned unchanged instead of raising

    Returns:
        bool: boolean representation of the given value

    Raises:
        TypeCoercionError: when the value cannot be boolified (and
            return_string does not apply)

    Examples:
        >>> [boolify(x) for x in ('yes', 'no')]
        [True, False]
        >>> [boolify(x) for x in (0.1, 0+0j, True, '0', '0.0', '0.1', '2')]
        [True, False, True, False, False, True, True]
        >>> [boolify(x) for x in ("true", "yes", "on", "y")]
        [True, True, True, True]
        >>> [boolify(x) for x in ("no", "non", "none", "off", "")]
        [False, False, False, False, False]
        >>> [boolify(x) for x in ([], set(), dict(), tuple())]
        [False, False, False, False]
        >>> [boolify(x) for x in ([1], set([False]), dict({'a': 1}), tuple([2]))]
        [True, True, True, True]

    """
    # cast number types naturally
    if isinstance(value, BOOL_COERCEABLE_TYPES):
        return bool(value)
    # try to coerce string into number; stripping one '.' lets "0.0"-style
    # strings pass the isnumeric() test below
    val = text_type(value).strip().lower().replace('.', '', 1)
    if val.isnumeric():
        try:
            return bool(float(val))
        except ValueError:
            # isnumeric() accepts unicode numerics (e.g. u'\u00b2') that
            # float() cannot parse; fall through to the remaining checks
            # instead of leaking a bare ValueError to the caller.
            pass
    if val in BOOLISH_TRUE:
        return True
    if nullable and val in NULL_STRINGS:
        return None
    if val in BOOLISH_FALSE:
        return False
    # last resort: maybe the string parses as a complex number
    try:
        return bool(complex(val))
    except ValueError:
        if isinstance(value, string_types) and return_string:
            return value
        raise TypeCoercionError(value, "The value %r cannot be boolified." % value)
def boolify_truthy_string_ok(value):
    """Like boolify(), but any string that fails coercion counts as True."""
    try:
        result = boolify(value)
    except ValueError:
        # TypeCoercionError subclasses ValueError; only strings may fail here.
        assert isinstance(value, string_types), repr(value)
        result = True
    return result
def typify_str_no_hint(value):
    """Best-effort typify of a string without a hint; returns the string
    unchanged when no boolean/none/number pattern matches."""
    converted = _REGEX.convert(value)
    if converted is NO_MATCH:
        return value
    return converted
def typify(value, type_hint=None):
    """Take a primitive value, usually a string, and try to make a more relevant type out of it.
    An optional type_hint will try to coerce the value to that type.

    Args:
        value (Any): Usually a string, not a sequence
        type_hint (type or Tuple[type]): a single type, an Enum class, or an
            iterable of acceptable types

    Raises:
        TypeCoercionError: when the hinted coercion fails

    Examples:
        >>> typify('32')
        32
        >>> typify('32', float)
        32.0
        >>> typify('32.0')
        32.0
        >>> typify('32.0.0')
        '32.0.0'
        >>> [typify(x) for x in ('true', 'yes', 'on')]
        [True, True, True]
        >>> [typify(x) for x in ('no', 'FALSe', 'off')]
        [False, False, False]
        >>> [typify(x) for x in ('none', 'None', None)]
        [None, None, None]

    """
    # value must be a string, or there at least needs to be a type hint
    if isinstance(value, string_types):
        value = value.strip()
    elif type_hint is None:
        # can't do anything because value isn't a string and there's no type hint
        return value
    # now we either have a stripped string, a type hint, or both
    # use the hint if it exists
    if isiterable(type_hint):
        # NOTE: Enum classes are themselves iterable, which is how a single
        # Enum type hint lands inside this branch.
        if isinstance(type_hint, type) and issubclass(type_hint, Enum):
            try:
                return type_hint(value)
            except ValueError as e:
                # fall back to member lookup by name
                try:
                    return type_hint[value]
                except KeyError:
                    raise TypeCoercionError(value, text_type(e))
        type_hint = set(type_hint)
        # An empty set difference means the hint is a subset of that group.
        if not (type_hint - NUMBER_TYPES_SET):
            return numberify(value)
        elif not (type_hint - STRING_TYPES_SET):
            return text_type(value)
        elif not (type_hint - {bool, NoneType}):
            return boolify(value, nullable=True)
        elif not (type_hint - (STRING_TYPES_SET | {bool})):
            return boolify(value, return_string=True)
        elif not (type_hint - (STRING_TYPES_SET | {NoneType})):
            value = text_type(value)
            return None if value.lower() == 'none' else value
        elif not (type_hint - {bool, int}):
            return typify_str_no_hint(text_type(value))
        else:
            raise NotImplementedError()
    elif type_hint is not None:
        # coerce using the type hint, or use boolify for bool
        try:
            return boolify(value) if type_hint == bool else type_hint(value)
        except ValueError as e:
            # ValueError: invalid literal for int() with base 10: 'nope'
            raise TypeCoercionError(value, text_type(e))
    else:
        # no type hint, but we know value is a string, so try to match with the regex patterns
        # if there's still no match, `typify_str_no_hint` will return `value`
        return typify_str_no_hint(value)
def typify_data_structure(value, type_hint=None):
    """Recursively apply typify() to each element of a mapping or iterable.

    Scalars fall through to typify(). A string paired with a string type
    hint is converted directly, because typify() strips surrounding
    whitespace and here we want to preserve it.
    """
    container_type = type(value)
    if isinstance(value, Mapping):
        return container_type((k, typify(v, type_hint)) for k, v in iteritems(value))
    if isiterable(value):
        return container_type(typify(v, type_hint) for v in value)
    hinted_string = (isinstance(value, string_types)
                     and isinstance(type_hint, type)
                     and issubclass(type_hint, string_types))
    if hinted_string:
        return type_hint(value)
    return typify(value, type_hint)
def maybecall(value):
    """Call *value* if it is callable; otherwise return it unchanged."""
    if callable(value):
        return value()
    return value
def listify(val, return_type=tuple):
    """Wrap *val* in a sequence of *return_type* (tuple by default).

    None becomes an empty sequence, an iterable is converted element-wise,
    and any other value becomes a one-element sequence.

    Examples:
        >>> listify('abc', return_type=list)
        ['abc']
        >>> listify(None)
        ()
        >>> listify(False)
        (False,)
        >>> listify(('a', 'b', 'c'), return_type=list)
        ['a', 'b', 'c']

    """
    # TODO: flatlistify((1, 2, 3), 4, (5, 6, 7))
    if val is None:
        return return_type()
    if isiterable(val):
        return return_type(val)
    return return_type((val, ))
| |
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
# (script name without extension; reused below as an output directory name)
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG to know that whether this is traning process or not.
FLAG = 'train'
# Hidden-unit count used by Config below.
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
# NOTE(review): `f` is opened and written here but never closed in this
# part of the script — confirm it is closed later, or use a `with` block.
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
    """Load and stack the per-signal text files into one float32 ndarray.

    Args:
        X_signals_paths: list of text files, one per signal; each line holds
            space-separated float samples for one window.

    Returns:
        np.ndarray of shape [n_windows, n_timesteps, n_signals].
    """
    X_signals = []
    for signal_type_path in X_signals_paths:
        # Text mode ('r', not 'rb'): rows are parsed as text below, and
        # bytes rows would break str.replace on Python 3.
        # `with` guarantees the file is closed even if parsing fails.
        with open(signal_type_path, 'r') as fin:
            # Read dataset from disk, dealing with text files' syntax:
            # the UCI HAR files pad columns with double spaces, so collapse
            # them first so split(' ') yields no empty fields.
            X_signals.append(
                [np.array(serie, dtype=np.float32) for serie in [
                    row.replace('  ', ' ').strip().split(' ') for row in fin
                ]]
            )
    # Stack per-signal arrays [signal, window, timestep], then reorder the
    # axes to [window, timestep, signal].
    return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
    """Convert a dense label vector into a one-hot matrix.

    Args:
        label: ndarray of dense labels, shape [sample_num, 1]

    Returns:
        ndarray one-hot matrix, shape [sample_num, n_class], where n_class
        is max(label) + 1.
    """
    flat = label.reshape(len(label))
    # One column per class; the maximum label value fixes the width
    # (e.g. max label 5 gives 6 columns).
    num_classes = np.max(flat) + 1
    return np.eye(num_classes)[np.array(flat, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
    """Load the label file and shift classes to 0-based indexing.

    Args:
        y_path: text file with one 1-based integer class label per line.

    Returns:
        np.ndarray of shape [n_windows, 1], int32, 0-based labels.
    """
    # Text mode ('r') + `with`: the file holds ASCII integers, and this
    # also guarantees the handle is closed on any parse error.
    with open(y_path, 'r') as fin:
        # Collapse the dataset's double-space padding before splitting.
        y_ = np.array(
            [elem for elem in [
                row.replace('  ', ' ').strip().split(' ') for row in fin
            ]],
            dtype=np.int32
        )
    # Subtract 1 to each output class for friendly 0-based indexing
    return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
    """
    define a class to store parameters,
    the input should be feature mat of training and testing
    """

    def __init__(self, X_train, X_test):
        # Input data
        self.train_count = len(X_train)  # 7352 training series
        self.test_data_count = len(X_test)  # 2947 testing series
        self.n_steps = len(X_train[0])  # 128 time_steps per series
        # Training
        self.learning_rate = 0.0025
        # presumably the L2 penalty coefficient — confirm at the loss site
        self.lambda_loss_amount = 0.0015
        self.training_epochs = 300
        self.batch_size = 1000
        # LSTM structure
        self.n_inputs = len(X_train[0][0])  # Features count is of 9: three 3D sensors features over time
        self.n_hidden = N_HIDDEN_CONFIG  # nb of neurons inside the neural network
        self.n_classes = 6  # Final output classes
        # Random-normal weights for the hidden projection and output layer.
        self.W = {
            'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),  # [9, 32]
            'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))  # [32, 6]
        }
        self.biases = {
            'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [32]
            'output': tf.Variable(tf.random_normal([self.n_classes]))  # [6]
        }
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
    """Create a tf.Variable initialized from a truncated normal (std 0.1)."""
    # (translated comment) returns a random tensor drawn with the
    # truncated-normal algorithm.
    return tf.Variable(
        tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32))
def bias_varibale(shape):
    """Create a bias tf.Variable filled with the constant 0.1.

    (The "varibale" spelling is kept: callers use this exact name.)
    """
    return tf.Variable(tf.constant(0.1, shape=shape, name='Bias'))
# Convolution and Pooling
def conv2d(x, W):
    """Stride-1 SAME convolution: output spatial size equals input size."""
    # Must have `strides[0] = strides[3] = 1 `.
    # For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `.
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
    # NOTE(review): 2x2 window but stride 1 with SAME padding, so this does
    # NOT downsample — the output keeps the input's spatial size. Confirm
    # the stride of 1 is intentional given the "2x2" name.
    return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
                          strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
    """model a LSTM Network,
    it stacks 2 LSTM layers, each layer has n_hidden=32 cells
    and 1 output layer, it is a full connet layer
    argument:
    feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
    config: class containing config of network
    return:
    : matrix output shape [batch_size,n_classes]
    """
    # --- CNN front-end: four 3x3 conv layers over the 32x36 "image" view ---
    # conv1: 1 input channel -> 64 feature maps.
    W_conv1 = weight_variable([3, 3, 1, 64])
    b_conv1 = bias_varibale([64])
    # x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
    # Add a singleton channel dimension so conv2d can be applied.
    feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
    print("----feature_mat_image-----")
    print(feature_mat_image.get_shape())
    h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
    # max_pool_2x2 uses stride 1, so spatial size stays 32x36 throughout.
    h_pool1 = max_pool_2x2(h_conv1)
    # Second Convolutional Layer
    W_conv2 = weight_variable([3, 3, 64, 128])
    # NOTE(review): b_conv2/b_conv3/b_conv4 are created with weight_variable
    # (truncated normal) while b_conv1 uses bias_varibale (constant 0.1) --
    # looks like an unintended inconsistency; confirm before changing.
    b_conv2 = weight_variable([128])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Third Convolutional Layer
    W_conv3 = weight_variable([3, 3, 128, 256])
    b_conv3 = weight_variable([256])
    h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
    h_pool3 = max_pool_2x2(h_conv3)
    # Forth Convolutional Layer: collapse back down to a single channel.
    W_conv4 = weight_variable([3, 3, 256, 1])
    b_conv4 = weight_variable([1])
    h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
    h_pool4 = h_conv4
    # Drop the channel dimension: back to [batch, 32, 36] for the LSTM part.
    h_pool4 = tf.reshape(h_pool4, shape=[-1, 32, 36])
    feature_mat = h_pool4
    print("----feature_mat-----")
    print(feature_mat)
    # exit()
    # W_fc1 = weight_variable([8 * 9 * 1, 1024])
    # b_fc1 = bias_varibale([1024])
    # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
    # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # print("----h_fc1_drop-----")
    # print(h_fc1)
    # exit()
    #
    # # keep_prob = tf.placeholder(tf.float32)
    # keep_prob = tf.placeholder(1.0)
    # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
    # print("----h_fc1_drop-----")
    # print(h_fc1_drop)
    # exit()
    #
    # W_fc2 = weight_variable([1024, 10])
    # b_fc2 = bias_varibale([10])
    #
    # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    # print("----y_conv-----")
    # print(y_conv)
    # exit()
    # Exchange dim 1 and dim 0
    # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
    feature_mat = tf.transpose(feature_mat, [1, 0, 2])
    # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
    print("----feature_mat-----")
    print(feature_mat)
    # exit()
    # Temporarily crush the feature_mat's dimensions
    # NOTE(review): reshapes by config.n_inputs -- assumes this matches the
    # 36-wide rows produced above; confirm the config values.
    feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # 9
    # New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
    # Linear activation, reshaping inputs to the LSTM's number of hidden:
    hidden = tf.nn.relu(tf.matmul(
        feature_mat, config.W['hidden']
    ) + config.biases['hidden'])
    # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
    print("--n_steps--")
    print(config.n_steps)
    print("--hidden--")
    print(hidden)
    # Split the series because the rnn cell needs time_steps features, each of shape:
    hidden = tf.split(0, config.n_steps, hidden)  # (0, 128, [128*batch_size, 32])
    # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
    # Define LSTM cell of first hidden layer:
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
    # Stack two LSTM layers, both layers has the same shape
    lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
    # Get LSTM outputs, the states are internal to the LSTM cells, they are not our attention here
    outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
    # outputs' shape: a list of lenght "time_step" containing tensors of shape [batch_size, n_hidden]
    print("------------------list-------------------")
    print(outputs)
    # Get last time step's output feature for a "many to one" style classifier,
    # as in the image describing RNNs at the top of this page
    lstm_last_output = outputs[-1]  # Get the last element of the array: [?, 32]
    print("------------------last outputs-------------------")
    print (lstm_last_output)
    # Linear activation
    return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
# Build the network: logits over the 6 classes.
pred_Y = LSTM_Network(X, config)  # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
# L2 regularisation over every trainable variable, scaled by lambda_loss_amount.
l2 = config.lambda_loss_amount * \
    sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
# NOTE(review): old-TF positional argument order (logits, labels) -- pred_Y
# are the logits, Y the one-hot labels; confirm against the installed TF version.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
    learning_rate=config.learning_rate).minimize(cost)
# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if FLAG == 'train':  # Training mode: fit the network, then save a checkpoint.
    with tf.Session() as sess:
        sess.run(init)
        f.write("---Save model \n")
        # Start training for each batch and loop epochs
        for i in range(config.training_epochs):
            for start, end in zip(range(0, config.train_count, config.batch_size),  # (0, 7352, 1500)
                                  range(config.batch_size, config.train_count + 1,
                                        config.batch_size)):  # (1500, 7353, 1500)
                print(start)
                print(end)
                sess.run(train_step, feed_dict={X: X_train[start:end],
                                                Y: y_train[start:end]})
            # Test completely at every epoch: calculate accuracy
            pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
                X: X_test, Y: y_test})
            print("traing iter: {},".format(i) +
                  " test accuracy : {},".format(accuracy_out) +
                  " loss : {}".format(loss_out))
            best_accuracy = max(best_accuracy, accuracy_out)
            # Save the model in this session
            save_path = saver.save(sess, file_name + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
        print("")
        # BUG FIX: was `print("final loss: {}").format(loss_out)` -- print()
        # returns None, so .format on its result raised AttributeError.
        # Format first, then print.
        print("final loss: {}".format(loss_out))
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")
        # Write all output to file
        f.write("final loss:" + str(format(loss_out)) + " \n")
        f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
        f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else:
    # Evaluation mode: restore the saved checkpoint and score the test set.
    print("Starting 2nd session...")
    with tf.Session() as sess:
        # Initialize variables
        sess.run(init)
        f.write("---Restore model \n")
        # Restore model weights from previously saved model
        saver.restore(sess, file_name + "/model.ckpt")
        # NOTE(review): save_path_name is not defined in this chunk --
        # presumably set earlier in the file; confirm before relying on it.
        print("Model restored from file: %s" % save_path_name)
        # Test completely at every epoch: calculate accuracy
        pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
            X: X_test, Y: y_test})
        best_accuracy = max(best_accuracy, accuracy_out)
        print("")
        # BUG FIX: same print/.format inversion as in the training branch.
        print("final loss: {}".format(loss_out))
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")
        # Write all output to file
        f.write("final loss:" + str(format(loss_out)) + " \n")
        f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
        f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
# Footer: record the finishing timestamp and close the log file.
f.write("Ended at \n")
# NOTE(review): datetime.datetime.now() requires `import datetime` (the
# module) at file top; `from datetime import datetime` would break this -- confirm.
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close()
| |
import collections
import logging
import time
import sys
# Module-level logger.
_logger = logging.getLogger(__name__)
# True when running under Python 3.
PY3 = sys.version_info.major == 3
# Plotting is optional: matplotlib/numpy are only imported on Python 2, and
# NO_PLOTTING stays True when they are unavailable (plotter refuses to build).
NO_PLOTTING = True
if not PY3:  # my version of matplotlib doesn't support python 3
    try:
        from matplotlib import pyplot as plt
        import numpy as np
        NO_PLOTTING = False
    except ImportError:
        _logger.info(
            'Unable to import matplotlib.pyplot or numpy: plotting disabled')
from morfessor import evaluation
from .exception import UnsupportedConfigurationError
class TimeHistogram(object):
    """Per-group histograms collected over a sequence of time slices.

    add() increments the bin for a value in the current slice; step()
    closes the current slice and opens a fresh one.  When *bins* is an
    iterable, the bin edges are fixed up front; when it is an int, that
    many edges are derived lazily from the values buffered before the
    first step() call.
    """
    # FIXME: this could be refactored with numpy

    def __init__(self, groups, bins=50, outliers=True):
        self.groups = groups
        self._buffer = dict((group, []) for group in groups)
        self.data = dict((group, []) for group in groups)
        try:
            # Explicit edges: open the first time slice immediately.
            self.bins = tuple(bins)
            self.step()
        except TypeError:
            # An int: edges get computed from buffered data in step().
            self.bins = None
            self._num_bins = bins
        self._outliers = outliers

    def add(self, group, value):
        """Record *value* for *group* in the current slice (or buffer it)."""
        if self.bins is None:
            self._buffer[group].append(value)
        else:
            self.data[group][-1][self._bin(value)] += 1

    def step(self):
        """Close the current time slice and start an empty one."""
        if self.bins is None:
            # First step with lazily computed edges: fix the edges, then
            # replay everything buffered so far into the first slice.
            self._set_bins()
            for group in self._buffer:
                self.data[group].append([0] * (len(self.bins) + 1))
                for buffered in self._buffer[group]:
                    self.add(group, buffered)
            del self._buffer
        width = len(self.bins) + 1
        for group in self.data:
            self.data[group].append([0] * width)

    def _set_bins(self):
        """Derive _num_bins equally spaced edges from the buffered values."""
        top = 0
        for group in self._buffer:
            ordered = sorted(self._buffer[group])
            if not ordered:
                continue
            if self._outliers:
                # Drop roughly the top 1/_num_bins fraction as outliers.
                idx = int(len(ordered) * (1.0 - (1.0 / float(self._num_bins))))
            else:
                idx = len(ordered) - 1
            top = max(top, ordered[idx])
        self.bins = [top * ((1.0 + j) / float(self._num_bins))
                     for j in range(self._num_bins)]

    def _bin(self, value):
        """Index of the bin holding *value*; overflow goes to the last slot."""
        for (idx, edge) in enumerate(self.bins):
            if value < edge:
                return idx
        return len(self.bins)
class IterationStatistics(object):
    """Collects per-iteration statistics from a (FlatCat-style) Morfessor model.

    callback() is intended to be invoked by the trainer after each iteration;
    it snapshots costs, tag counts, morph counts, timing and (when a gold
    standard has been supplied) boundary precision/recall.
    """

    def __init__(self, title=None):
        # Parallel lists: one entry is appended per callback() invocation.
        self.epoch_numbers = []
        self.operation_numbers = []
        self.iteration_numbers = []
        self.costs = []
        self.cost_parts = []
        self.tag_counts = []
        self.morph_types = []
        self.morph_tokens = []
        # Seeded with 0 so durations stay index-aligned with the other lists
        # (the first callback has no previous timestamp to diff against).
        self.durations = [0]
        self.morph_lengths = []
        self.changes = []
        self.changes_op = []
        self.violated_annots = []
        self.gold_bpr = []  # (precision, recall, f-score) triples
        self._reference = None  # gold-standard annotations, if provided
        self._me = None  # MorfessorEvaluation over the reference
        self.t_prev = None  # wall-clock time of the previous callback
        self.word_tokens = 1.0  # denominator for morphs/word; updated per callback
        self.categories = None  # set by set_names()
        # Time histograms of morph length and left/right perplexity computed
        # from the model's own corpus segmentations...
        self.corpus_ths = {
            'len_th': TimeHistogram(
                ('STM', 'other', 'longest', 'non-longest'),
                bins=range(1, 25),
                outliers=False),
            'rppl_th': TimeHistogram(
                ('PRE', 'other', 'first', 'non-first'),
                50),
            'lppl_th': TimeHistogram(
                ('SUF', 'other', 'last', 'non-last'),
                50)}
        # ...and the same histograms computed from gold-standard analyses.
        self.gold_ths = {
            'len_th': TimeHistogram(
                ('STM', 'other', 'longest', 'non-longest'),
                bins=range(1, 25),
                outliers=False),
            'rppl_th': TimeHistogram(
                ('PRE', 'other', 'first', 'non-first'),
                50),
            'lppl_th': TimeHistogram(
                ('SUF', 'other', 'last', 'non-last'),
                50)}
        if title is None:
            self.title = 'epoch statistics {}'.format(
                time.strftime("%a, %d.%m.%Y %H:%M:%S"))
        else:
            self.title = title
        self.ops = None  # training operation names, set by set_names()

    def set_names(self, model, training_operations):
        """Store operation/category names and reset the model's change trackers."""
        self.ops = training_operations
        self.categories = model.get_categories()
        # NOTE(review): pokes private attributes of the model; the model is
        # expected to maintain these sets during training.
        model._changed_segmentations = set()
        model._changed_segmentations_op = set()

    def set_gold_standard(self, reference):
        """Provide gold-standard annotations; enables boundary P/R tracking."""
        self._reference = reference
        self._me = evaluation.MorfessorEvaluation(reference)

    def callback(self, model, iteration_number=0):
        """Snapshot the model's state; call once per training iteration."""
        t_cur = time.time()
        self.epoch_numbers.append(model._epoch_number)
        self.operation_numbers.append(model._operation_number)
        self.iteration_numbers.append(iteration_number)
        self.costs.append(model.get_cost())
        # Cost components: corpus coding, lexicon coding and (if supervised)
        # annotation coding -- both unweighted and weighted variants.
        ccc = model._corpus_coding.get_cost()
        lcc = model._lexicon_coding.get_cost()
        if model._supervised:
            acc_unscaled = model._annot_coding.get_cost()
            acc = acc_unscaled / model._annot_coding.weight
        else:
            acc_unscaled = 0
            acc = 0
        self.cost_parts.append([(ccc / model._corpus_coding.weight),
                                (lcc / model._lexicon_coding.weight),
                                acc,
                                ccc,
                                lcc,
                                acc_unscaled
                                ])
        tcounts = self._extract_tag_counts(model)
        self.tag_counts.append(tcounts)
        self.morph_types.append(len(model._morph_usage.seen_morphs()))
        self.morph_tokens.append(sum(tcounts))
        self.word_tokens = float(model.word_tokens)
        self.changes.append(len(model._changed_segmentations))
        self.changes_op.append(len(model._changed_segmentations_op))
        if model._supervised:
            # sum expression gives length of the generator
            self.violated_annots.append(
                sum(1 for _ in model.violated_annotations()))
        else:
            self.violated_annots.append(0)
        if self._reference is not None:
            # Gold-standard evaluation: boundary precision/recall plus the
            # conditional-probability histograms over the gold analyses.
            tmp = self._reference.items()
            wlist, annotations = zip(*tmp)
            segments = [model.viterbi_analyze(w)[0] for w in wlist]
            mer = self._me.evaluate_model(
                model,
                configuration=evaluation.EvaluationConfig(1, len(segments)))
            self.gold_bpr.append((
                mer.precision[0],
                mer.recall[0],
                mer.fscore[0]
            ))
            self._condprob_timehistograms(
                self.gold_ths, segments, model)
        # Corpus-side histograms are updated regardless of a gold standard.
        self._condprob_timehistograms(
            self.corpus_ths,
            (x.analysis for x in model.segmentations),
            model)
        if self.t_prev is not None:
            self.durations.append(t_cur - self.t_prev)
        self.t_prev = t_cur

    def _condprob_timehistograms(self, ths, source, model):
        """Update one set of TimeHistograms from an iterable of analyses
        (each a sequence of categorized morphs), then advance their time step."""
        for word in source:
            lengths = []
            if len(word) == 1:
                # single-morph words are not counted in these stats
                continue
            for (i, cmorph) in enumerate(word):
                measures = model._morph_usage._contexts[cmorph.morph]
                # Position-based groups: first/last morph vs the rest.
                if i == 0:
                    ths['rppl_th'].add('first', measures.right_perplexity)
                else:
                    ths['rppl_th'].add('non-first', measures.right_perplexity)
                if i == len(word) - 1:
                    ths['lppl_th'].add('last', measures.left_perplexity)
                else:
                    ths['lppl_th'].add('non-last', measures.left_perplexity)
                # Category-based groups: STM/PRE/SUF vs everything else.
                if cmorph.category == 'STM':
                    ths['len_th'].add('STM', len(cmorph))
                else:
                    ths['len_th'].add('other', len(cmorph))
                if cmorph.category == 'PRE':
                    ths['rppl_th'].add('PRE', measures.right_perplexity)
                else:
                    ths['rppl_th'].add('other', measures.right_perplexity)
                if cmorph.category == 'SUF':
                    ths['lppl_th'].add('SUF', measures.left_perplexity)
                else:
                    ths['lppl_th'].add('other', measures.left_perplexity)
                lengths.append(len(cmorph))
            # Longest morph of the word vs all the others.
            lengths.sort(reverse=True)
            ths['len_th'].add('longest', lengths[0])
            for length in lengths[1:]:
                ths['len_th'].add('non-longest', length)
        for th in ths.values():
            th.step()

    def _extract_tag_counts(self, model):
        """Return per-category tag counts, ordered like self.categories."""
        out = []
        counter = model._corpus_coding._cat_tagcount
        for cat in self.categories:
            out.append(counter[cat])
        return out
class IterationStatisticsPlotter(object):
    """Renders the data collected by an IterationStatistics with matplotlib."""

    def __init__(self, stats):
        # Fail fast when matplotlib/numpy could not be imported at module load.
        if NO_PLOTTING:
            raise UnsupportedConfigurationError(
                'Unable to import library matplotlib')
        self.stats = stats

    def show(self, style):
        """Entry point: 'stacked' for the compact grid, anything else for all()."""
        if style == 'stacked':
            self.stacked()
        else:
            self.all()

    def all(self):
        # One figure per plot type.
        plt.figure()
        self.costs()
        self._title()
        plt.figure()
        self.basecosts()
        self._title()
        plt.figure()
        self.violated_annots()
        self._title()
        plt.figure()
        self.tag_counts()
        self._title()
        plt.figure()
        self.avg_morphs()
        self._title()
        plt.figure()
        self.durations()
        plt.figure()
        self.types_and_tokens()
        self._title()
        plt.figure()
        self.changes()
        # Gold-standard plots require a reference to have been set.
        if self.stats._reference is not None:
            plt.figure()
            self.gold_bpr()
            self._title()
            self.condprobparams(data='gold')
            self.condprobparams(data='corpus')
        plt.show()

    def stacked(self):
        # Compact 3x2 grid of the most informative plots.
        plt.figure(figsize=(5.5 * 2, 5.5 * 2))
        plt.subplot(3, 2, 1)
        self.costs(xlabel=False, zoom=True)
        plt.subplot(3, 2, 2)
        self.types(xlabel=False)
        plt.subplot(3, 2, 3)
        self.violated_annots(xlabel=False)
        if self.stats._reference is not None:
            plt.subplot(3, 2, 4)
            self.gold_bpr(xlabel=False)
        plt.subplot(3, 2, 5)
        self.changes(both=False)
        plt.subplot(3, 2, 6)
        self.tag_counts()
        plt.subplots_adjust(left=0.123, bottom=0.06, right=0.98, top=0.97,
                            wspace=None, hspace=0)
        if self.stats._reference is not None:
            self.condprobparams(data='gold')
            self.condprobparams(data='corpus')
        plt.show()

    def costs(self, xlabel=True, zoom=False):
        # Total model cost per iteration; zoom clips the y-range so the
        # (typically much larger) initial cost does not flatten the curve.
        plt.plot(self.stats.costs, marker='+')
        self._epoch_grid(xlabel=xlabel)
        if zoom:
            plt.ylim(min(self.stats.costs[1:]), max(self.stats.costs))
        if xlabel:
            plt.xlabel('iteration number')
        plt.ylabel('Model cost')

    def violated_annots(self, xlabel=True):
        # Count of violated annotations per iteration (0 when unsupervised).
        plt.plot(self.stats.violated_annots, marker='+')
        self._epoch_grid(xlabel=xlabel)
        if xlabel:
            plt.xlabel('iteration number')
        plt.ylabel('Violated annotations')

    def basecosts(self):
        # The six cost components recorded in cost_parts; skip gracefully if
        # the data was recorded by an older version with a different layout.
        if (len(self.stats.cost_parts) == 0 or
                len(self.stats.cost_parts[0]) != 6):
            _logger.info('Not plotting cost components: ' +
                         'wrong number of variables (old data?)')
            return
        plt.plot(self.stats.cost_parts, marker='+')
        self._epoch_grid()
        plt.xlabel('iteration number')
        plt.ylabel('Cost component')
        plt.legend(['U Corp', 'U Lexi', 'U Anno',
                    'W Corp', 'W Lexi', 'W Anno'], loc='best')

    def tag_counts(self, xlabel=True):
        # Occurrence count of each morph category over iterations.
        plt.plot(self.stats.tag_counts, marker='+')
        plt.gca().yaxis.get_major_formatter().set_powerlimits((-3, 4))
        #unzipped = zip(*self.stats.tag_counts)
        #for (i, series) in enumerate(unzipped):
        #    plt.plot(series, color=plt.cm.jet(float(i) /
        #             float(len(self.stats.categories))), marker='+')
        self._epoch_grid(xlabel=xlabel)
        if xlabel:
            plt.xlabel('iteration number')
        plt.ylabel('Category occurence count')
        if self.stats.categories is not None:
            plt.legend(self.stats.categories, loc='best',
                       prop={'size': 11}, labelspacing=0.2)

    def avg_morphs(self):
        # Morph tokens normalised by word-token count.
        normalized = [x / self.stats.word_tokens
                      for x in self.stats.morph_tokens]
        plt.plot(normalized, marker='+')
        self._epoch_grid()
        plt.xlabel('iteration number')
        plt.ylabel('Avg number of morphs per word token')

    def types(self, xlabel=True):
        plt.plot(self.stats.morph_types, color="blue", marker='+')
        self._epoch_grid(xlabel=xlabel)
        if xlabel:
            plt.xlabel('iteration number')
        plt.ylabel('Count of morph types')

    def types_and_tokens(self):
        plt.plot(self.stats.morph_tokens, color="red", marker='+')
        plt.plot(self.stats.morph_types, color="blue", marker='+')
        plt.legend(['Tokens', 'Types'], loc='best')
        self._epoch_grid()
        plt.xlabel('iteration number')
        plt.ylabel('Count of morph tokens / types')

    def durations(self):
        # Aggregate iteration durations by epoch, by operation and by
        # iteration index; shown as a 2x2 grid of plots.
        by_epoch = [0.0] * (max(self.stats.epoch_numbers) + 1)
        by_op = [0.0] * (max(self.stats.operation_numbers) + 1)
        by_iteration = [0.0] * (max(self.stats.iteration_numbers) + 1)
        for i in range(len(self.stats.epoch_numbers)):
            by_epoch[self.stats.epoch_numbers[i]] += self.stats.durations[i]
            by_op[self.stats.operation_numbers[i]] += self.stats.durations[i]
            by_iteration[
                self.stats.iteration_numbers[i]] += self.stats.durations[i]
        plt.subplot(2, 2, 1)
        plt.plot(self.stats.durations, marker='+')
        self._epoch_grid()
        plt.xlabel('iteration number')
        plt.ylabel('iteration duration [s]')
        self._title()
        plt.subplot(2, 2, 2)
        plt.bar(range(len(by_epoch)), by_epoch)
        plt.ylabel('Total epoch duration [s]')
        xls = range(len(by_epoch))
        xs = [x + 0.5 for x in xls]
        plt.xticks(xs, xls)
        plt.subplot(2, 2, 3)
        plt.bar(range(len(by_op)), by_op)
        plt.ylabel('Total operation duration [s]')
        xls = range(len(by_op))
        xs = [x + 0.5 for x in xls]
        if self.stats.ops is not None:
            # Label operations by name when the names are known.
            xls = self.stats.ops
        plt.xticks(xs, xls)
        plt.subplot(2, 2, 4)
        plt.bar(range(len(by_iteration)), by_iteration)
        plt.ylabel('Total iteration duration [s]')
        xls = range(len(by_iteration))
        xs = [x + 0.5 for x in xls]
        plt.xticks(xs, xls)

    def gold_bpr(self, xlabel=True):
        # Boundary precision/recall/F-measure against the gold standard.
        plt.plot(self.stats.gold_bpr, marker='+')
        self._epoch_grid(xlabel=xlabel)
        if xlabel:
            plt.xlabel('iteration number')
        plt.ylabel('Boundary precision recall score')
        plt.legend(['Precision', 'Recall', 'F-measure'], loc='best',
                   prop={'size': 11}, labelspacing=0.2)

    def changes(self, xlabel=True, both=True):
        # Changed segmentations: per-iteration, optionally with the
        # cumulative within-epoch series as well.
        if both:
            plt.plot(self.stats.changes, color='blue', marker='+')
        plt.plot(self.stats.changes_op, color='red', marker='+')
        if both:
            plt.legend(['cumulative w/in epoch', 'in iteration'], loc='best')
        self._epoch_grid(xlabel=xlabel)
        if xlabel:
            plt.xlabel('iteration number')
        plt.ylabel('Changed segmentations')

    def condprobparams(self, data='corpus'):
        # 3x4 grid of TimeHistogram images: morph length (row 1), right
        # perplexity (row 2) and left perplexity (row 3) for either the
        # corpus or the gold-standard histogram set.
        if data == 'gold':
            ths = self.stats.gold_ths
        else:
            ths = self.stats.corpus_ths
        plt.figure(figsize=(5.5 * 2, 5.5 * 2))
        plt.subplot(3, 4, 1)
        self._time_histogram(ths['len_th'], 'STM')
        plt.ylabel('morph length')
        plt.subplot(3, 4, 2)
        self._time_histogram(ths['len_th'], 'other', yticks=False)
        plt.subplot(3, 4, 3)
        self._time_histogram(ths['len_th'], 'longest', yticks=False)
        plt.subplot(3, 4, 4)
        self._time_histogram(ths['len_th'], 'non-longest', yticks=False)
        plt.subplot(3, 4, 5)
        self._time_histogram(ths['rppl_th'], 'PRE')
        plt.ylabel('right perplexity')
        plt.subplot(3, 4, 6)
        self._time_histogram(ths['rppl_th'], 'other', yticks=False)
        plt.subplot(3, 4, 7)
        self._time_histogram(ths['rppl_th'], 'first', yticks=False)
        plt.subplot(3, 4, 8)
        self._time_histogram(ths['rppl_th'], 'non-first', yticks=False)
        plt.subplot(3, 4, 9)
        self._time_histogram(ths['lppl_th'], 'SUF')
        plt.ylabel('left perplexity')
        plt.subplot(3, 4, 10)
        self._time_histogram(ths['lppl_th'], 'other', yticks=False)
        plt.subplot(3, 4, 11)
        self._time_histogram(ths['lppl_th'], 'last', yticks=False)
        plt.xlabel('iteration number ({})'.format(data))
        plt.subplot(3, 4, 12)
        self._time_histogram(ths['lppl_th'], 'non-last', yticks=False)
        plt.subplots_adjust(left=0.1, bottom=0.06, right=0.98, top=0.97,
                            wspace=0.06, hspace=0.08)

    def _epoch_grid(self, xlabel=True):
        # Vertical guides at epoch boundaries (solid grey) and at operation
        # changes (dotted, slightly darker).
        num_ticks = len(self.stats.epoch_numbers) - 1
        for i in range(num_ticks):
            if (self.stats.epoch_numbers[i] !=
                    self.stats.epoch_numbers[i + 1]):
                plt.axvline(x=(i + 0.5), color=[.6, .6, .6])
            if (self.stats.operation_numbers[i] <
                    self.stats.operation_numbers[i + 1]):
                plt.axvline(x=(i + 0.5), color=[.5, .5, .5], linestyle=':')
        if not xlabel:
            plt.xticks(range(num_ticks), [''] * num_ticks)

    def _title(self):
        plt.title(self.stats.title)

    def _time_histogram(self, th, group, xlabel=True, yticks=True):
        # Render one TimeHistogram group as a greyscale image:
        # x = time slice, y = bin index.
        arr = np.array(th.data[group]).transpose()
        if arr.size == 0:
            return
        plt.imshow(arr,
                   origin='lower',
                   interpolation='nearest',
                   cmap=plt.cm.gray)
        if yticks:
            # Thin out the bin labels when there are many bins.
            if len(th.bins) > 48:
                step = 3
            elif len(th.bins) > 23:
                step = 2
            else:
                step = 1
            ts = [(i + .5, '{}'.format(int(x)))
                  for (i, x) in enumerate(th.bins)]
            ts = ts[::step]
            plt.yticks(*zip(*ts))
        else:
            plt.yticks([])
        self._epoch_grid(xlabel=xlabel)
        plt.title(group)
| |
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of gRPC Python interceptors."""
import collections
import itertools
import threading
import unittest
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
def __init__(self, control):
self._control = control
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
class _MethodHandler(grpc.RpcMethodHandler):
    # Plain bag of RPC-handler attributes for one of the four cardinalities;
    # exactly one of the four behavior slots is expected to be non-None.

    def __init__(self, request_streaming, response_streaming,
                 request_deserializer, response_serializer, unary_unary,
                 unary_stream, stream_unary, stream_stream):
        # Streaming flags for the request and response sides.
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        # Optional custom (de)serializers; None means raw bytes pass through.
        self.request_deserializer = request_deserializer
        self.response_serializer = response_serializer
        # The four possible behaviors; the caller fills in exactly one.
        self.unary_unary = unary_unary
        self.unary_stream = unary_stream
        self.stream_unary = stream_unary
        self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
    """Routes the four test method paths to the wrapped _Handler's behaviors."""

    def __init__(self, handler):
        self._handler = handler

    def service(self, handler_call_details):
        # Guard-style dispatch on the requested method path; unknown
        # methods yield None so gRPC reports UNIMPLEMENTED.
        method = handler_call_details.method
        if method == _UNARY_UNARY:
            return _MethodHandler(False, False, None, None,
                                  self._handler.handle_unary_unary, None, None,
                                  None)
        if method == _UNARY_STREAM:
            return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
                                  _SERIALIZE_RESPONSE, None,
                                  self._handler.handle_unary_stream, None, None)
        if method == _STREAM_UNARY:
            return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
                                  _SERIALIZE_RESPONSE, None, None,
                                  self._handler.handle_stream_unary, None)
        if method == _STREAM_STREAM:
            return _MethodHandler(True, True, None, None, None, None, None,
                                  self._handler.handle_stream_stream)
        return None
def _unary_unary_multi_callable(channel):
    # Unary-unary callable for _UNARY_UNARY; no serializers, raw bytes pass through.
    return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
    # Unary-stream callable with the test (de)serializers attached.
    return channel.unary_stream(
        _UNARY_STREAM,
        request_serializer=_SERIALIZE_REQUEST,
        response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
    # Stream-unary callable with the test (de)serializers attached.
    return channel.stream_unary(
        _STREAM_UNARY,
        request_serializer=_SERIALIZE_REQUEST,
        response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
    # Stream-stream callable for _STREAM_STREAM; no serializers.
    return channel.stream_stream(_STREAM_STREAM)
class _ClientCallDetails(
        collections.namedtuple('_ClientCallDetails',
                               ('method', 'timeout', 'metadata',
                                'credentials')), grpc.ClientCallDetails):
    # Immutable value object that interceptors use to rewrite call details
    # (method path, timeout, metadata, credentials) before continuing an RPC.
    pass
class _GenericClientInterceptor(
        grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor,
        grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor):
    """Client interceptor driven by one function.

    The wrapped function receives (call_details, request_iterator,
    request_streaming, response_streaming) and returns (new_details,
    new_request_iterator, postprocess-or-None).
    """

    def __init__(self, interceptor_function):
        self._fn = interceptor_function

    def _run(self, continuation, client_call_details, requests,
             request_streaming, response_streaming):
        # Delegate to the wrapped function, invoke the continuation with the
        # (possibly rewritten) details/requests, then apply any postprocessor.
        details, request_iterator, postprocess = self._fn(
            client_call_details, requests, request_streaming,
            response_streaming)
        # Unary-response continuations take a single request; stream-response
        # continuations take the iterator (matches the original four methods).
        if response_streaming:
            outcome = continuation(details, request_iterator)
        else:
            outcome = continuation(details, next(request_iterator))
        return postprocess(outcome) if postprocess else outcome

    def intercept_unary_unary(self, continuation, client_call_details, request):
        return self._run(continuation, client_call_details, iter((request,)),
                         False, False)

    def intercept_unary_stream(self, continuation, client_call_details,
                               request):
        return self._run(continuation, client_call_details, iter((request,)),
                         False, True)

    def intercept_stream_unary(self, continuation, client_call_details,
                               request_iterator):
        return self._run(continuation, client_call_details, request_iterator,
                         True, False)

    def intercept_stream_stream(self, continuation, client_call_details,
                                request_iterator):
        return self._run(continuation, client_call_details, request_iterator,
                         True, True)
class _LoggingInterceptor(
        grpc.ServerInterceptor, grpc.UnaryUnaryClientInterceptor,
        grpc.UnaryStreamClientInterceptor, grpc.StreamUnaryClientInterceptor,
        grpc.StreamStreamClientInterceptor):
    """Interceptor that appends '<tag>:<hook name>' to a shared record list
    each time one of its hooks fires, then continues the call unchanged."""

    def __init__(self, tag, record):
        self.tag = tag
        self.record = record

    def _note(self, hook):
        # Record that this hook fired, tagged with this interceptor's name.
        self.record.append(self.tag + ':' + hook)

    def intercept_service(self, continuation, handler_call_details):
        self._note('intercept_service')
        return continuation(handler_call_details)

    def intercept_unary_unary(self, continuation, client_call_details, request):
        self._note('intercept_unary_unary')
        return continuation(client_call_details, request)

    def intercept_unary_stream(self, continuation, client_call_details,
                               request):
        self._note('intercept_unary_stream')
        return continuation(client_call_details, request)

    def intercept_stream_unary(self, continuation, client_call_details,
                               request_iterator):
        self._note('intercept_stream_unary')
        return continuation(client_call_details, request_iterator)

    def intercept_stream_stream(self, continuation, client_call_details,
                                request_iterator):
        self._note('intercept_stream_stream')
        return continuation(client_call_details, request_iterator)
class _DefectiveClientInterceptor(grpc.UnaryUnaryClientInterceptor):
    # Interceptor that always raises test_control.Defect -- used to verify
    # that interceptor failures surface on the call (e.g. as INTERNAL).

    def intercept_unary_unary(self, ignored_continuation,
                              ignored_client_call_details, ignored_request):
        raise test_control.Defect()
def _wrap_request_iterator_stream_interceptor(wrapper):
    """Build an interceptor that passes streaming request iterators through
    *wrapper*; non-streaming requests are forwarded untouched."""

    def intercept_call(client_call_details, request_iterator, request_streaming,
                       ignored_response_streaming):
        requests = (wrapper(request_iterator)
                    if request_streaming else request_iterator)
        return client_call_details, requests, None

    return _GenericClientInterceptor(intercept_call)
def _append_request_header_interceptor(header, value):
    """Build an interceptor that appends (header, value) to the outgoing
    invocation metadata of every call."""

    def intercept_call(client_call_details, request_iterator,
                       ignored_request_streaming, ignored_response_streaming):
        # Copy any existing metadata, then append the extra header.
        metadata = (list(client_call_details.metadata)
                    if client_call_details.metadata else [])
        metadata.append((header, value,))
        details = _ClientCallDetails(
            client_call_details.method, client_call_details.timeout, metadata,
            client_call_details.credentials)
        return details, request_iterator, None

    return _GenericClientInterceptor(intercept_call)
class _GenericServerInterceptor(grpc.ServerInterceptor):
    # Server interceptor that delegates entirely to a supplied function
    # with signature (continuation, handler_call_details) -> handler.

    def __init__(self, fn):
        self._fn = fn

    def intercept_service(self, continuation, handler_call_details):
        return self._fn(continuation, handler_call_details)
def _filter_server_interceptor(condition, interceptor):
    """Apply *interceptor* only to calls for which condition(details) is
    true; all other calls continue uninterrupted."""

    def intercept_service(continuation, handler_call_details):
        if not condition(handler_call_details):
            return continuation(handler_call_details)
        return interceptor.intercept_service(continuation,
                                             handler_call_details)

    return _GenericServerInterceptor(intercept_service)
class InterceptorTest(unittest.TestCase):
    """End-to-end tests for client- and server-side gRPC interceptors.

    Every test appends interceptor invocations to self._record and asserts
    on the resulting order: client interceptors first ('c1', 'c2'), then
    server interceptors ('s1', optionally 's3', 's2').  's3' is wrapped in
    a conditional interceptor and only fires when the call carries the
    ('secret', '42') metadata entry.
    """
    def setUp(self):
        # Real in-process server on an ephemeral port; interceptors are
        # installed server-side here, client-side per test via
        # grpc.intercept_channel.
        self._control = test_control.PauseFailControl()
        self._handler = _Handler(self._control)
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._record = []
        conditional_interceptor = _filter_server_interceptor(
            lambda x: ('secret', '42') in x.invocation_metadata,
            _LoggingInterceptor('s3', self._record))
        self._server = grpc.server(
            self._server_pool,
            interceptors=(_LoggingInterceptor('s1', self._record),
                          conditional_interceptor,
                          _LoggingInterceptor('s2', self._record),))
        port = self._server.add_insecure_port('[::]:0')
        self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
        self._server.start()
        self._channel = grpc.insecure_channel('localhost:%d' % port)
    def tearDown(self):
        # Stop immediately (grace=None) and drain the worker pool.
        self._server.stop(None)
        self._server_pool.shutdown(wait=True)
    def testTripleRequestMessagesClientInterceptor(self):
        """A request-mutating interceptor triples each streamed request."""
        def triple(request_iterator):
            # Re-yield every incoming request three times.
            while True:
                try:
                    item = next(request_iterator)
                    yield item
                    yield item
                    yield item
                except StopIteration:
                    break
        interceptor = _wrap_request_iterator_stream_interceptor(triple)
        channel = grpc.intercept_channel(self._channel, interceptor)
        requests = tuple(b'\x07\x08'
                         for _ in range(test_constants.STREAM_LENGTH))
        multi_callable = _stream_stream_multi_callable(channel)
        response_iterator = multi_callable(
            iter(requests),
            metadata=(
                ('test',
                 'InterceptedStreamRequestBlockingUnaryResponseWithCall'),))
        responses = tuple(response_iterator)
        # Intercepted channel: three responses per original request.
        self.assertEqual(len(responses), 3 * test_constants.STREAM_LENGTH)
        multi_callable = _stream_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            iter(requests),
            metadata=(
                ('test',
                 'InterceptedStreamRequestBlockingUnaryResponseWithCall'),))
        responses = tuple(response_iterator)
        # Plain channel: unchanged response count.
        self.assertEqual(len(responses), test_constants.STREAM_LENGTH)
    def testDefectiveClientInterceptor(self):
        """An interceptor that raises surfaces as an INTERNAL error future."""
        interceptor = _DefectiveClientInterceptor()
        defective_channel = grpc.intercept_channel(self._channel, interceptor)
        request = b'\x07\x08'
        multi_callable = _unary_unary_multi_callable(defective_channel)
        call_future = multi_callable.future(
            request,
            metadata=(
                ('test', 'InterceptedUnaryRequestBlockingUnaryResponse'),))
        self.assertIsNotNone(call_future.exception())
        self.assertEqual(call_future.code(), grpc.StatusCode.INTERNAL)
    def testInterceptedHeaderManipulationWithServerSideVerification(self):
        """A header-appending interceptor triggers the conditional 's3'."""
        request = b'\x07\x08'
        channel = grpc.intercept_channel(
            self._channel, _append_request_header_interceptor('secret', '42'))
        channel = grpc.intercept_channel(
            channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        self._record[:] = []
        multi_callable = _unary_unary_multi_callable(channel)
        multi_callable.with_call(
            request,
            metadata=(
                ('test',
                 'InterceptedUnaryRequestBlockingUnaryResponseWithCall'),))
        # 's3' appears because the appended ('secret', '42') header is seen
        # by the server-side filter.
        self.assertSequenceEqual(self._record, [
            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
            's1:intercept_service', 's3:intercept_service',
            's2:intercept_service'
        ])
    def testInterceptedUnaryRequestBlockingUnaryResponse(self):
        """Unary-unary blocking call passes through c1, c2, s1, s2 in order."""
        request = b'\x07\x08'
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _unary_unary_multi_callable(channel)
        multi_callable(
            request,
            metadata=(
                ('test', 'InterceptedUnaryRequestBlockingUnaryResponse'),))
        self.assertSequenceEqual(self._record, [
            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedUnaryRequestBlockingUnaryResponseWithCall(self):
        """Same ordering when using the with_call variant."""
        request = b'\x07\x08'
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        self._record[:] = []
        multi_callable = _unary_unary_multi_callable(channel)
        multi_callable.with_call(
            request,
            metadata=(
                ('test',
                 'InterceptedUnaryRequestBlockingUnaryResponseWithCall'),))
        self.assertSequenceEqual(self._record, [
            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedUnaryRequestFutureUnaryResponse(self):
        """Same ordering when the unary response is consumed via a future."""
        request = b'\x07\x08'
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _unary_unary_multi_callable(channel)
        response_future = multi_callable.future(
            request,
            metadata=(('test', 'InterceptedUnaryRequestFutureUnaryResponse'),))
        response_future.result()
        self.assertSequenceEqual(self._record, [
            'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedUnaryRequestStreamResponse(self):
        """Unary-stream call invokes the unary_stream interceptor hooks."""
        request = b'\x37\x58'
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _unary_stream_multi_callable(channel)
        response_iterator = multi_callable(
            request,
            metadata=(('test', 'InterceptedUnaryRequestStreamResponse'),))
        tuple(response_iterator)
        self.assertSequenceEqual(self._record, [
            'c1:intercept_unary_stream', 'c2:intercept_unary_stream',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedStreamRequestBlockingUnaryResponse(self):
        """Stream-unary blocking call invokes the stream_unary hooks."""
        requests = tuple(b'\x07\x08'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _stream_unary_multi_callable(channel)
        multi_callable(
            request_iterator,
            metadata=(
                ('test', 'InterceptedStreamRequestBlockingUnaryResponse'),))
        self.assertSequenceEqual(self._record, [
            'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedStreamRequestBlockingUnaryResponseWithCall(self):
        """Same ordering when using the with_call variant."""
        requests = tuple(b'\x07\x08'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _stream_unary_multi_callable(channel)
        multi_callable.with_call(
            request_iterator,
            metadata=(
                ('test',
                 'InterceptedStreamRequestBlockingUnaryResponseWithCall'),))
        self.assertSequenceEqual(self._record, [
            'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedStreamRequestFutureUnaryResponse(self):
        """Same ordering when the unary response is consumed via a future."""
        requests = tuple(b'\x07\x08'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _stream_unary_multi_callable(channel)
        response_future = multi_callable.future(
            request_iterator,
            metadata=(('test', 'InterceptedStreamRequestFutureUnaryResponse'),))
        response_future.result()
        self.assertSequenceEqual(self._record, [
            'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
            's1:intercept_service', 's2:intercept_service'
        ])
    def testInterceptedStreamRequestStreamResponse(self):
        """Stream-stream call invokes the stream_stream hooks."""
        requests = tuple(b'\x77\x58'
                         for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        self._record[:] = []
        channel = grpc.intercept_channel(
            self._channel,
            _LoggingInterceptor('c1', self._record),
            _LoggingInterceptor('c2', self._record))
        multi_callable = _stream_stream_multi_callable(channel)
        response_iterator = multi_callable(
            request_iterator,
            metadata=(('test', 'InterceptedStreamRequestStreamResponse'),))
        tuple(response_iterator)
        self.assertSequenceEqual(self._record, [
            'c1:intercept_stream_stream', 'c2:intercept_stream_stream',
            's1:intercept_service', 's2:intercept_service'
        ])
# Run the interceptor test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
from __future__ import (absolute_import, print_function, division)
import collections
import os
import re
from OpenSSL import SSL
from netlib import certutils, tcp
from netlib.http import authentication
from netlib.tcp import Address, sslversion_choices
from .. import utils, platform
CONF_BASENAME = "mitmproxy"
CA_DIR = "~/.mitmproxy"
# We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default.
# https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=apache-2.2.15&openssl=1.0.2&hsts=yes&profile=old
DEFAULT_CLIENT_CIPHERS = "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
class HostMatcher(object):
    """Matches "host:port" strings against a list of regular expressions.

    A matcher built from an empty pattern list matches nothing and
    evaluates as falsy in boolean context.
    """
    def __init__(self, patterns=tuple()):
        self.patterns = list(patterns)
        # Compile once up front; matching is case-insensitive.
        self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
    def __call__(self, address):
        """Return True if address (anything Address.wrap accepts) matches."""
        if not address:
            return False
        address = tcp.Address.wrap(address)
        host = "%s:%s" % (address.host, address.port)
        return any(rex.search(host) for rex in self.regexes)
    def __nonzero__(self):
        return bool(self.patterns)
    # Fix: __nonzero__ is the Python 2 truth-test hook only; without this
    # alias, bool(matcher) is always True under Python 3.
    __bool__ = __nonzero__
# (scheme, address) pair describing an upstream/reverse proxy target.
ServerSpec = collections.namedtuple("ServerSpec", "scheme address")
class ProxyConfig:
    """Aggregated runtime configuration for the proxy.

    Mostly a bag of attributes consumed by the proxy server and connection
    handlers; construction normalizes paths, wraps the upstream server
    spec, compiles host matchers, and resolves SSL method/option choices.
    """
    def __init__(
            self,
            host='',
            port=8080,
            cadir=CA_DIR,
            clientcerts=None,
            no_upstream_cert=False,
            body_size_limit=None,
            mode="regular",
            upstream_server=None,
            authenticator=None,
            ignore_hosts=tuple(),
            tcp_hosts=tuple(),
            http2=False,
            rawtcp=False,
            ciphers_client=DEFAULT_CLIENT_CIPHERS,
            ciphers_server=None,
            certs=tuple(),
            ssl_version_client="secure",
            ssl_version_server="secure",
            ssl_verify_upstream_cert=False,
            ssl_verify_upstream_trusted_cadir=None,
            ssl_verify_upstream_trusted_ca=None,
    ):
        self.host = host
        self.port = port
        self.ciphers_client = ciphers_client
        self.ciphers_server = ciphers_server
        self.clientcerts = clientcerts
        self.no_upstream_cert = no_upstream_cert
        self.body_size_limit = body_size_limit
        # mode is one of: regular, transparent, socks5, reverse, upstream.
        self.mode = mode
        if upstream_server:
            # upstream_server arrives as a (scheme, address) pair.
            self.upstream_server = ServerSpec(upstream_server[0], Address.wrap(upstream_server[1]))
        else:
            self.upstream_server = None
        # Host matchers for traffic to pass through untouched / as raw TCP.
        self.check_ignore = HostMatcher(ignore_hosts)
        self.check_tcp = HostMatcher(tcp_hosts)
        self.http2 = http2
        self.rawtcp = rawtcp
        self.authenticator = authenticator
        self.cadir = os.path.expanduser(cadir)
        # CA/cert store backing on-the-fly certificate generation.
        self.certstore = certutils.CertStore.from_store(
            self.cadir,
            CONF_BASENAME
        )
        # certs is a sequence of (spec, cert_path) overrides.
        for spec, cert in certs:
            self.certstore.add_cert_file(spec, cert)
        # Map the symbolic version names to (method, options) pairs.
        self.openssl_method_client, self.openssl_options_client = \
            sslversion_choices[ssl_version_client]
        self.openssl_method_server, self.openssl_options_server = \
            sslversion_choices[ssl_version_server]
        if ssl_verify_upstream_cert:
            self.openssl_verification_mode_server = SSL.VERIFY_PEER
        else:
            self.openssl_verification_mode_server = SSL.VERIFY_NONE
        self.openssl_trusted_cadir_server = ssl_verify_upstream_trusted_cadir
        self.openssl_trusted_ca_server = ssl_verify_upstream_trusted_ca
def process_proxy_options(parser, options):
    """Build a ProxyConfig from parsed command-line options.

    Invalid combinations are reported via parser.error().  The four mode
    flags (transparent, SOCKS5, reverse, upstream) are mutually exclusive.
    """
    body_size_limit = utils.parse_size(options.body_size_limit)
    # c counts how many mutually exclusive mode flags were supplied.
    c = 0
    mode, upstream_server = "regular", None
    if options.transparent_proxy:
        c += 1
        if not platform.resolver:
            return parser.error("Transparent mode not supported on this platform.")
        mode = "transparent"
    if options.socks_proxy:
        c += 1
        mode = "socks5"
    if options.reverse_proxy:
        c += 1
        mode = "reverse"
        upstream_server = options.reverse_proxy
    if options.upstream_proxy:
        c += 1
        mode = "upstream"
        upstream_server = options.upstream_proxy
    if c > 1:
        return parser.error(
            "Transparent, SOCKS5, reverse and upstream proxy mode "
            "are mutually exclusive. Read the docs on proxy modes to understand why."
        )
    if options.clientcerts:
        options.clientcerts = os.path.expanduser(options.clientcerts)
        if not os.path.exists(options.clientcerts):
            return parser.error(
                "Client certificate path does not exist: %s" % options.clientcerts
            )
    # Proxy authentication: exactly one of the three auth options builds a
    # password manager; otherwise authentication is a no-op.
    if options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd:
        if options.transparent_proxy:
            return parser.error("Proxy Authentication not supported in transparent mode.")
        if options.socks_proxy:
            return parser.error(
                "Proxy Authentication not supported in SOCKS mode. "
                "https://github.com/mitmproxy/mitmproxy/issues/738"
            )
        if options.auth_singleuser:
            if len(options.auth_singleuser.split(':')) != 2:
                return parser.error(
                    "Invalid single-user specification. Please use the format username:password"
                )
            username, password = options.auth_singleuser.split(':')
            password_manager = authentication.PassManSingleUser(username, password)
        elif options.auth_nonanonymous:
            password_manager = authentication.PassManNonAnon()
        elif options.auth_htpasswd:
            try:
                password_manager = authentication.PassManHtpasswd(
                    options.auth_htpasswd)
            except ValueError as v:
                return parser.error(v.message)
        authenticator = authentication.BasicProxyAuth(password_manager, "mitmproxy")
    else:
        authenticator = authentication.NullProxyAuth(None)
    # Certificates come in as "spec=path" or bare "path" (wildcard spec).
    certs = []
    for i in options.certs:
        parts = i.split("=", 1)
        if len(parts) == 1:
            parts = ["*", parts[0]]
        parts[1] = os.path.expanduser(parts[1])
        if not os.path.exists(parts[1]):
            parser.error("Certificate file does not exist: %s" % parts[1])
        certs.append(parts)
    return ProxyConfig(
        host=options.addr,
        port=options.port,
        cadir=options.cadir,
        clientcerts=options.clientcerts,
        no_upstream_cert=options.no_upstream_cert,
        body_size_limit=body_size_limit,
        mode=mode,
        upstream_server=upstream_server,
        ignore_hosts=options.ignore_hosts,
        tcp_hosts=options.tcp_hosts,
        http2=options.http2,
        rawtcp=options.rawtcp,
        authenticator=authenticator,
        ciphers_client=options.ciphers_client,
        ciphers_server=options.ciphers_server,
        certs=tuple(certs),
        ssl_version_client=options.ssl_version_client,
        ssl_version_server=options.ssl_version_server,
        ssl_verify_upstream_cert=options.ssl_verify_upstream_cert,
        ssl_verify_upstream_trusted_cadir=options.ssl_verify_upstream_trusted_cadir,
        ssl_verify_upstream_trusted_ca=options.ssl_verify_upstream_trusted_ca
    )
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google DisplayVideo hook."""
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.discovery import Resource, build
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleDisplayVideo360Hook(GoogleBaseHook):
    """Hook for Google Display & Video 360."""
    # Cached discovery service, built lazily by get_conn /
    # get_conn_to_display_video (whichever is called first wins the cache).
    _conn = None  # type: Optional[Any]
    def __init__(
        self,
        api_version: str = "v1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        self.api_version = api_version
    def get_conn(self) -> Resource:
        """Retrieves connection to DisplayVideo."""
        # NOTE: despite the name, this builds the DoubleClick Bid Manager
        # service, which backs the queries/lineitems methods below.
        if not self._conn:
            http_authorized = self._authorize()
            self._conn = build(
                "doubleclickbidmanager",
                self.api_version,
                http=http_authorized,
                cache_discovery=False,
            )
        return self._conn
    def get_conn_to_display_video(self) -> Resource:
        """Retrieves connection to DisplayVideo."""
        if not self._conn:
            http_authorized = self._authorize()
            self._conn = build(
                "displayvideo",
                self.api_version,
                http=http_authorized,
                cache_discovery=False,
            )
        return self._conn
    @staticmethod
    def erf_uri(partner_id, entity_type) -> List[str]:
        """
        Return URI for all Entity Read Files in bucket.
        For example, if you were generating a file name to retrieve the entity read file
        for partner 123 accessing the line_item table from April 2, 2013, your filename
        would look something like this:
        gdbm-123/entity/20130402.0.LineItem.json
        More information:
        https://developers.google.com/bid-manager/guides/entity-read/overview
        :param partner_id: The numeric ID of your Partner.
        :type partner_id: int
        :param entity_type: The type of file Partner, Advertiser, InsertionOrder,
        LineItem, Creative, Pixel, InventorySource, UserList, UniversalChannel, and summary.
        :type entity_type: str
        """
        # The double-braces keep a literal '{{ ds_nodash }}' Jinja template
        # placeholder in the produced URI.
        return [f"gdbm-{partner_id}/entity/{{{{ ds_nodash }}}}.*.{entity_type}.json"]
    def create_query(self, query: Dict[str, Any]) -> dict:
        """
        Creates a query.
        :param query: Query object to be passed to request body.
        :type query: Dict[str, Any]
        """
        response = self.get_conn().queries().createquery(body=query).execute(num_retries=self.num_retries)
        return response
    def delete_query(self, query_id: str) -> None:
        """
        Deletes a stored query as well as the associated stored reports.
        :param query_id: Query ID to delete.
        :type query_id: str
        """
        (self.get_conn().queries().deletequery(queryId=query_id).execute(num_retries=self.num_retries))
    def get_query(self, query_id: str) -> dict:
        """
        Retrieves a stored query.
        :param query_id: Query ID to retrieve.
        :type query_id: str
        """
        response = self.get_conn().queries().getquery(queryId=query_id).execute(num_retries=self.num_retries)
        return response
    def list_queries(
        self,
    ) -> List[Dict]:
        """Retrieves stored queries."""
        response = self.get_conn().queries().listqueries().execute(num_retries=self.num_retries)
        return response.get('queries', [])
    def run_query(self, query_id: str, params: Dict[str, Any]) -> None:
        """
        Runs a stored query to generate a report.
        :param query_id: Query ID to run.
        :type query_id: str
        :param params: Parameters for the report.
        :type params: Dict[str, Any]
        """
        (
            self.get_conn()
            .queries()
            .runquery(queryId=query_id, body=params)
            .execute(num_retries=self.num_retries)
        )
    def upload_line_items(self, line_items: Any) -> List[Dict[str, Any]]:
        """
        Uploads line items in CSV format.
        :param line_items: downloaded data from GCS and passed to the body request
        :type line_items: Any
        :return: response body.
        :rtype: List[Dict[str, Any]]
        """
        request_body = {
            "lineItems": line_items,
            "dryRun": False,
            "format": "CSV",
        }
        response = (
            self.get_conn()
            .lineitems()
            .uploadlineitems(body=request_body)
            .execute(num_retries=self.num_retries)
        )
        return response
    def download_line_items(self, request_body: Dict[str, Any]) -> List[Any]:
        """
        Retrieves line items in CSV format.
        :param request_body: dictionary with parameters that should be passed into.
            More information about it can be found here:
            https://developers.google.com/bid-manager/v1.1/lineitems/downloadlineitems
        :type request_body: Dict[str, Any]
        """
        response = (
            self.get_conn()
            .lineitems()
            .downloadlineitems(body=request_body)
            .execute(num_retries=self.num_retries)
        )
        return response["lineItems"]
    def create_sdf_download_operation(self, body_request: Dict[str, Any]) -> Dict[str, Any]:
        """
        Creates an SDF Download Task and Returns an Operation.
        :param body_request: Body request.
        :type body_request: Dict[str, Any]
        More information about body request n be found here:
        https://developers.google.com/display-video/api/reference/rest/v1/sdfdownloadtasks/create
        """
        result = (
            self.get_conn_to_display_video()
            .sdfdownloadtasks()
            .create(body=body_request)
            .execute(num_retries=self.num_retries)
        )
        return result
    def get_sdf_download_operation(self, operation_name: str):
        """
        Gets the latest state of an asynchronous SDF download task operation.
        :param operation_name: The name of the operation resource.
        :type operation_name: str
        """
        result = (
            self.get_conn_to_display_video()
            .sdfdownloadtasks()
            .operations()
            .get(name=operation_name)
            .execute(num_retries=self.num_retries)
        )
        return result
    def download_media(self, resource_name: str):
        """
        Downloads media.
        :param resource_name: of the media that is being downloaded.
        :type resource_name: str
        """
        # Returns the prepared HttpRequest; the caller executes the download.
        request = self.get_conn_to_display_video().media().download_media(resource_name=resource_name)
        return request
| |
"""
===============================================
Local Binary Pattern for texture classification
===============================================
In this example, we will see how to classify textures based on LBP (Local
Binary Pattern). LBP looks at points surrounding a central point and tests
whether the surrounding points are greater than or less than the central point
(i.e. gives a binary result).
Before trying out LBP on an image, it helps to look at a schematic of LBPs.
The below code is just used to plot the schematic.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
METHOD = 'uniform'
plt.rcParams['font.size'] = 9
def plot_circle(ax, center, radius, color):
    """Add a filled circle patch with a gray edge to the axes *ax*."""
    patch = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
    ax.add_patch(patch)
def plot_lbp_model(ax, binary_values):
    """Draw the schematic for a local binary pattern."""
    # Geometry of the schematic: angular step between neighbor dots, ring
    # radius, dot radius and grid half-width.
    step = np.deg2rad(45)
    ring_radius = 1
    dot_radius = 0.15
    half_width = 1.5
    gray = '0.5'
    # Central pixel.
    plot_circle(ax, (0, 0), radius=dot_radius, color=gray)
    # Surrounding pixels, one every 45 degrees; each binary value selects
    # the fill color ('0' black, '1' white).
    for idx, facecolor in enumerate(binary_values):
        position = (ring_radius * np.cos(idx * step),
                    ring_radius * np.sin(idx * step))
        plot_circle(ax, position, radius=dot_radius, color=str(facecolor))
    # Pixel grid lines.
    for coord in np.linspace(-half_width, half_width, 4):
        ax.axvline(coord, color=gray)
        ax.axhline(coord, color=gray)
    # Layout tweaks.
    ax.axis('image')
    ax.axis('off')
    size = half_width + 0.2
    ax.set_xlim(-size, size)
    ax.set_ylim(-size, size)
# Render one schematic per LBP pattern class to illustrate the categories.
fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
binary_patterns = [np.zeros(8),
                   np.ones(8),
                   np.hstack([np.ones(4), np.zeros(4)]),
                   np.hstack([np.zeros(3), np.ones(5)]),
                   [1, 0, 0, 1, 1, 1, 0, 0]]
for ax, values, name in zip(axes, binary_patterns, titles):
    plot_lbp_model(ax, values)
    ax.set_title(name)
"""
.. image:: PLOT2RST.current_figure
The figure above shows example results with black (or white) representing
pixels that are less (or more) intense than the central pixel. When surrounding
pixels are all black or all white, then that image region is flat (i.e.
featureless). Groups of continuous black or white pixels are considered
"uniform" patterns that can be interpreted as corners or edges. If pixels
switch back-and-forth between black and white pixels, the pattern is considered
"non-uniform".
When using LBP to detect texture, you measure a collection of LBPs over an
image patch and look at the distribution of these LBPs. Let's apply LBP to
a brick texture.
"""
from skimage.transform import rotate
from skimage.feature import local_binary_pattern
from skimage import data
from skimage.color import label2rgb
# settings for LBP
radius = 3
# Sample 8 neighbor points per unit of radius.
n_points = 8 * radius
def overlay_labels(image, lbp, labels):
    """Color the pixels whose LBP code is in *labels* on top of *image*."""
    per_label_masks = [lbp == code for code in labels]
    mask = np.logical_or.reduce(per_label_masks)
    return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
def highlight_bars(bars, indexes):
    """Paint the histogram bars at the given *indexes* red."""
    for bar_index in indexes:
        bars[bar_index].set_facecolor('r')
# Load the sample brick texture and compute its LBP code image.
image = data.load('brick.png')
lbp = local_binary_pattern(image, n_points, radius, METHOD)
def hist(ax, lbp):
    """Plot the normalized histogram of an LBP code image on *ax*.

    Returns the (counts, bins, patches) triple from ``Axes.hist``.
    """
    # Fix: LBP codes come back as a float array, so lbp.max() + 1 is a
    # float; force an integer bin count (one bin per code).
    n_bins = int(lbp.max() + 1)
    return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
                   facecolor='0.5')
# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()
titles = ('edge', 'flat', 'corner')
# Width of the code band selected around each characteristic LBP value.
w = width = radius - 1
# Edge codes cluster around n_points/2; flat codes sit at the extremes.
edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
i_14 = n_points // 4 # 1/4th of the histogram
i_34 = 3 * (n_points // 4) # 3/4th of the histogram
corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
                 list(range(i_34 - w, i_34 + w + 1)))
label_sets = (edge_labels, flat_labels, corner_labels)
# Top row: image with the selected LBP codes overlaid.
for ax, labels in zip(ax_img, label_sets):
    ax.imshow(overlay_labels(image, lbp, labels))
# Bottom row: LBP histogram with the same codes highlighted in red.
for ax, labels, name in zip(ax_hist, label_sets, titles):
    counts, _, bars = hist(ax, lbp)
    highlight_bars(bars, labels)
    ax.set_ylim(ymax=np.max(counts[:-1]))
    ax.set_xlim(xmax=n_points + 2)
    ax.set_title(name)
ax_hist[0].set_ylabel('Percentage')
for ax in ax_img:
    ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The above plot highlights flat, edge-like, and corner-like regions of the
image.
The histogram of the LBP result is a good measure to classify textures. Here,
we test the histogram distributions against each other using the
Kullback-Leibler-Divergence.
"""
# settings for LBP
# Smaller radius for the texture-classification experiment below.
radius = 2
n_points = 8 * radius
def kullback_leibler_divergence(p, q):
    """Return the base-2 KL divergence between distributions *p* and *q*.

    Bins where either distribution is zero are ignored.
    """
    p = np.asarray(p)
    q = np.asarray(q)
    keep = (p != 0) & (q != 0)
    p, q = p[keep], q[keep]
    return np.sum(p * np.log2(p / q))
def match(refs, img):
    """Classify *img* by comparing its LBP histogram against *refs*.

    :param refs: dict mapping texture name -> reference LBP code image.
    :param img: grayscale image to classify.
    :return: name of the closest reference (smallest KL divergence), or
        None when no score beats the initial threshold of 10.
    """
    best_score = 10
    best_name = None
    lbp = local_binary_pattern(img, n_points, radius, METHOD)
    # Fix: force an integer bin count (lbp is a float array) and use
    # density=True -- np.histogram's `normed` argument is deprecated and
    # removed in modern NumPy; for equal-width bins it is equivalent.
    n_bins = int(lbp.max() + 1)
    hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
    for name, ref in refs.items():
        ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,
                                   range=(0, n_bins))
        score = kullback_leibler_divergence(hist, ref_hist)
        if score < best_score:
            best_score = score
            best_name = name
    return best_name
# Reference LBP code images for each known texture class.
brick = data.load('brick.png')
grass = data.load('grass.png')
wall = data.load('rough-wall.png')
refs = {
    'brick': local_binary_pattern(brick, n_points, radius, METHOD),
    'grass': local_binary_pattern(grass, n_points, radius, METHOD),
    'wall': local_binary_pattern(wall, n_points, radius, METHOD)
}
# classify rotated textures
# NOTE(review): matching rotated copies back to their own reference relies
# on the chosen LBP METHOD being rotation invariant -- confirm against the
# skimage local_binary_pattern docs.
print('Rotated images matched against references using LBP:')
print('original: brick, rotated: 30deg, match result: ',
      match(refs, rotate(brick, angle=30, resize=False)))
print('original: brick, rotated: 70deg, match result: ',
      match(refs, rotate(brick, angle=70, resize=False)))
print('original: grass, rotated: 145deg, match result: ',
      match(refs, rotate(grass, angle=145, resize=False)))
# plot histograms of LBP of textures
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
                                                       figsize=(9, 6))
plt.gray()
ax1.imshow(brick)
ax1.axis('off')
hist(ax4, refs['brick'])
ax4.set_ylabel('Percentage')
ax2.imshow(grass)
ax2.axis('off')
hist(ax5, refs['grass'])
ax5.set_xlabel('Uniform LBP values')
ax3.imshow(wall)
ax3.axis('off')
hist(ax6, refs['wall'])
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrapper functions around the megacli command."""
import os
import re
from subprocess import PIPE
from subprocess import Popen
import sys
from hardware import detect_utils
SEP_REGEXP = re.compile(r'\s*:\s*')
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Return the path to an executable, or None if there is none.

    Given a command, a mode, and a PATH string, return the first path on
    PATH which conforms to the given mode.  `mode` defaults to
    os.F_OK | os.X_OK.  `path` defaults to os.environ.get("PATH") and can
    be overridden with a custom search path.
    """
    def _usable(candidate):
        # A usable match exists, is accessible with `mode`, and is not a
        # directory (directories pass os.access on Windows).
        return (os.path.exists(candidate) and os.access(candidate, mode)
                and not os.path.isdir(candidate))
    # A command containing a directory component is resolved directly
    # rather than against PATH; this covers relative forms like ./script.
    if os.path.dirname(cmd):
        return cmd if _usable(cmd) else None
    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    checked = set()
    for directory in path.split(os.pathsep):
        # Skip directories already examined (case-normalized).
        normalized = os.path.normcase(directory)
        if normalized in checked:
            continue
        checked.add(normalized)
        candidate = os.path.join(directory, cmd)
        if _usable(candidate):
            return candidate
    return None
def search_exec(possible_names):
    """Return the path of the first name in *possible_names* found on PATH.

    Returns None when none of the candidates resolves to an executable.
    """
    for candidate in possible_names:
        found = which(candidate)
        if found is not None:
            return found
    return None
def parse_output(output):
    """Parse megacli output into an associative array.

    Each "key : value" line becomes one entry keyed by the title-cased,
    space-stripped key.  Values convert to int when possible; a trailing
    '.' is dropped first.
    """
    parsed = {}
    for raw_line in output.split('\n'):
        fields = re.split(SEP_REGEXP, raw_line.strip())
        if len(fields) != 2:
            continue
        key, value = fields
        # Strip the sentence-final period megacli prints on some values.
        if len(value) > 1 and value[-1] == '.':
            value = value[:-1]
        normalized_key = key.title().replace(' ', '')
        try:
            parsed[normalized_key] = int(value)
        except ValueError:
            parsed[normalized_key] = value
    return parsed
def split_parts(sep, output):
    """Split *output* into sections at lines matching the regexp *sep*.

    Each returned element starts with a matching line.  As in the original
    implementation, the line immediately before every subsequent match is
    dropped (megacli emits a separator line there).  If no line matches,
    the whole output is returned as a single element.
    """
    regexp = re.compile(sep)
    lines = output.split('\n')
    idx = []
    num = 0
    for line in lines:
        if regexp.search(line):
            idx.append(num)
        num = num + 1
    # Fix: previously output with no matching line crashed with an
    # IndexError on idx[0]; treat it as a single unsplit section instead.
    if not idx:
        return [output]
    arr = []
    start = idx[0]
    for num in idx[1:]:
        arr.append('\n'.join(lines[start:num - 1]))
        start = num
    arr.append('\n'.join(lines[start:]))
    return arr
def run_megacli(*args):
    """Run the megacli command in a subprocess and return its output.

    Returns an empty string (after a message on stderr) when no megacli
    binary can be found on the system.
    """
    prog_exec = search_exec(["megacli", "MegaCli", "MegaCli64"])
    if prog_exec:
        # Fix: megacli sub-commands are dash-prefixed with no intervening
        # space (e.g. "MegaCli64 -adpCount"); the previous ' - ' separator
        # produced the broken command line "MegaCli64 - adpCount".
        cmd = prog_exec + ' -' + ' '.join(args)
        proc = Popen(cmd, shell=True, stdout=PIPE, universal_newlines=True)
        return proc.communicate()[0]
    sys.stderr.write('Cannot find megacli on the system\n')
    return ""
def run_and_parse(*args):
    """Run megacli with *args* and return its output as an associative array."""
    raw = run_megacli(*args)
    return parse_output(raw)
def adp_count():
    """Return the number of MegaRAID adapters (controllers), 0 if unknown."""
    info = run_and_parse('adpCount')
    return int(info.get('ControllerCount', 0))
def adp_all_info(ctrl):
    """Return adapter info for controller *ctrl* as an associative array."""
    info = run_and_parse('adpallinfo -a%d' % ctrl)
    # These fields are comma-separated lists; split them for the caller.
    for list_key in ('RaidLevelSupported', 'SupportedDrives'):
        if list_key in info:
            info[list_key] = info[list_key].split(', ')
    return info
def pd_get_num(ctrl):
    """Return the number of physical drives on controller *ctrl* (0 if unknown)."""
    info = run_and_parse('PDGetNum -a%d' % ctrl)
    return info.get('NumberOfPhysicalDrivesOnAdapter%d' % ctrl, 0)
def enc_info(ctrl):
    """Return the list of enclosure info dicts for controller *ctrl*."""
    sections = split_parts(' +Enclosure [0-9]+:',
                           run_megacli('EncInfo -a%d' % ctrl))
    enclosures = [parse_output(section) for section in sections]
    for entry in enclosures:
        # Replace the synthetic 'EnclosureN' key produced by parsing the
        # section header with a numeric 'Enclosure' entry.
        for key in list(entry.keys()):
            if re.search(r"Enclosure\d+", key):
                entry['Enclosure'] = int(key[len('Enclosure'):])
                del entry[key]
                break
    return enclosures
def pdinfo(ctrl, encl, disk):
    """Get info about a physical drive on an enclosure and a controller."""
    # [encl:disk] addresses the drive slot; -a selects the controller.
    return run_and_parse('pdinfo -PhysDrv[%d:%d] -a%d' % (encl, disk, ctrl))
def ld_get_num(ctrl):
    """Return the number of logical drives on controller *ctrl* (0 if unknown)."""
    info = run_and_parse('LDGetNum -a%d' % ctrl)
    return info.get('NumberOfVirtualDrivesConfiguredOnAdapter%d' % ctrl, 0)
def ld_get_info(ctrl, ldrv):
    """Get info about a logical drive on a controller."""
    # -L selects the logical drive, -a the controller.
    return run_and_parse('LDInfo -L%d -a%d' % (ldrv, ctrl))
def detect():
    """Detect LSI MegaRAID controller configuration.

    Returns a list of 4-tuples (category, item, attribute, value)
    describing controllers, enclosures, physical and logical drives.
    """
    hw_lst = []
    ctrl_num = adp_count()
    # No adapter found: nothing to report.
    if ctrl_num == 0:
        return hw_lst
    disk_count = 0
    global_pdisk_size = 0
    for ctrl in range(ctrl_num):
        # Export every adapter-level attribute verbatim.
        ctrl_info = adp_all_info(ctrl)
        for entry in ctrl_info.keys():
            hw_lst.append(('megaraid', 'Controller_%d' % ctrl, '%s' % entry,
                           '%s' % ctrl_info[entry]))
        for enc in enc_info(ctrl):
            if "Enclosure" in enc.keys():
                for key in enc.keys():
                    # 'Enclosure' is the synthetic key added by enc_info and
                    # 'ExitCode' is megacli noise; neither is a real attribute.
                    ignore_list = ["ExitCode", "Enclosure"]
                    if key in ignore_list:
                        continue
                    hw_lst.append(('megaraid',
                                   'Controller_%d/Enclosure_%s' %
                                   (ctrl, enc["Enclosure"]),
                                   '%s' % key, '%s' % enc[key]))
            # NOTE(review): assumes 'NumberOfSlots' was parsed as an int --
            # verify parse_output; range() would raise on a string.
            for slot_num in range(enc['NumberOfSlots']):
                disk = 'disk%d' % slot_num
                info = pdinfo(ctrl, enc['DeviceId'], slot_num)
                # If no PdType, it means that's not a disk
                if 'PdType' not in info.keys():
                    continue
                disk_count += 1
                # NOTE(review): this records the controller *count*, not the
                # controller index 'ctrl' -- looks suspicious, confirm with
                # the consumers of the 'ctrl' field before changing.
                hw_lst.append(('pdisk', disk, 'ctrl', str(ctrl_num)))
                hw_lst.append(('pdisk', disk, 'type', info['PdType']))
                hw_lst.append(('pdisk', disk, 'id',
                               '%s:%d' % (info['EnclosureDeviceId'],
                                          slot_num)))
                # CoercedSize looks like '<number> <unit>'; re-join the two
                # tokens and normalize to gigabytes.
                disk_size = detect_utils.size_in_gb(
                    "%s %s" % (info['CoercedSize'].split()[0],
                               info['CoercedSize'].split()[1]))
                global_pdisk_size = global_pdisk_size + float(disk_size)
                hw_lst.append(('pdisk', disk, 'size', disk_size))
                for key in info.keys():
                    # Skip the fields already exported above in normalized
                    # form, plus megacli's exit status.
                    ignore_list = ['PdType', 'EnclosureDeviceId',
                                   'CoercedSize', 'ExitCode']
                    if key not in ignore_list:
                        if "DriveTemperature" in key:
                            # Value such as '30C (86.00 F)': keep the number
                            # before the 'C' and record the unit separately.
                            if "C" in str(info[key].split()[0]):
                                pdisk = info[key].split()[0].split("C")[0]
                                hw_lst.append(('pdisk', disk, key,
                                               str(pdisk).strip()))
                                hw_lst.append(('pdisk', disk,
                                               "%s_units" % key,
                                               "Celsius"))
                            else:
                                # No Celsius marker: export the raw value.
                                hw_lst.append(('pdisk', disk, key,
                                               str(info[key]).strip()))
                        elif "InquiryData" in key:
                            # Split the inquiry string into indexed tokens.
                            count = 0
                            for mystring in info[key].split():
                                hw_lst.append(('pdisk', disk,
                                               "%s[%d]" % (key, count),
                                               str(mystring.strip())))
                                count = count + 1
                        else:
                            hw_lst.append(('pdisk', disk, key,
                                           str(info[key]).strip()))
        # Cumulative size of all physical disks seen so far.
        if global_pdisk_size > 0:
            hw_lst.append(('pdisk', 'all', 'size',
                           "%.2f" % global_pdisk_size))
        for ld_num in range(ld_get_num(ctrl)):
            disk = 'disk%d' % ld_num
            info = ld_get_info(ctrl, ld_num)
            ignore_list = ['Size']
            for item in info.keys():
                if item not in ignore_list:
                    hw_lst.append(('ldisk', disk, item,
                                   str(info[item])))
            # Size is exported separately, normalized to gigabytes.
            if 'Size' in info:
                hw_lst.append(('ldisk', disk, 'Size',
                               detect_utils.size_in_gb(info['Size'])))
    hw_lst.append(('disk', 'megaraid', 'count', str(disk_count)))
    return hw_lst
| |
#!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Michael Koval <mkoval@cs.cmu.edu>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import logging, numpy, openravepy, time
from ..util import SetTrajectoryTags
from base import (BasePlanner, PlanningError, UnsupportedPlanningError,
PlanningMethod, Tags)
logger = logging.getLogger(__name__)
def DoNothing(robot):
    """Nullspace objective that requests no motion (all-zero joint velocity)."""
    dof_count = robot.GetActiveDOF()
    return numpy.zeros(dof_count)
# Based on Moslem Kazemi's code from ARM-S.
def JointLimitAvoidance(robot, limit_tolerance=0.2, gain=100):
    """Nullspace objective that pushes joints away from their limits.

    For each active DOF within limit_tolerance (rad) of a limit, apply a
    quadratic repulsive velocity scaled by gain; joints comfortably inside
    their range get zero velocity.

    @param robot robot whose active DOFs are inspected
    @param limit_tolerance distance from a limit at which repulsion starts
    @param gain scale factor of the quadratic repulsion
    @return joint velocity vector, one entry per active DOF
    """
    q = robot.GetActiveDOFValues()
    q_min, q_max = robot.GetDOFLimits(robot.GetActiveDOFIndices())
    num_dofs = robot.GetActiveDOF()
    q_dot = numpy.zeros(num_dofs)
    # range instead of xrange: identical behavior here and also works on
    # Python 3 (xrange is Python-2 only).
    for i in range(num_dofs):
        max_limit_dist = q_max[i] - q[i]
        min_limit_dist = q_min[i] - q[i]
        if max_limit_dist < limit_tolerance:
            # Close to (or past) the upper limit: push downward.
            q_dot[i] = -gain * (max_limit_dist - limit_tolerance) ** 2
        elif min_limit_dist > -limit_tolerance:
            # Close to (or past) the lower limit: push upward.
            q_dot[i] = gain * (min_limit_dist + limit_tolerance) ** 2
        else:
            q_dot[i] = 0
    return q_dot
class MKPlanner(BasePlanner):
    """Planner that moves the end-effector along a straight line with a
    Jacobian pseudo-inverse velocity controller."""

    def __init__(self):
        super(MKPlanner, self).__init__()

    def __str__(self):
        return 'MKPlanner'

    # Calculates a change of joint angles to maintain the same orientation,
    # and move the position forward slightly.
    # NOTE: The sign_flipper is a hack. Sometimes, changing the direction of
    # the orientation error term causes the controller to succeed.
    # sign_flipper is monitored by the planner: if the orientation error
    # starts increasing, it flips the sign of the error term.
    def GetStraightVelocity(self, manip, velocity, initial_hand_pose, nullspace_fn, step_size, sign_flipper=1):
        """Compute one joint-velocity step toward a straight-line goal.

        @param manip manipulator being controlled
        @param velocity unit direction of the straight-line motion
        @param initial_hand_pose end-effector transform at the start
        @param nullspace_fn callable(robot) giving a secondary objective
        @param step_size distance (m) to advance the virtual position goal
        @param sign_flipper +1 or -1, flips the orientation error term
        @return joint-space step for the active DOFs
        """
        robot = manip.GetRobot()
        current_hand_pose = manip.GetEndEffectorTransform()
        initial_position = initial_hand_pose[0:3, 3]
        current_position = current_hand_pose[0:3, 3]
        # Simulate a position goal step_size distance further along the
        # velocity vector than the progress made so far.
        moved_already = velocity * numpy.dot(current_position - initial_position, velocity)
        desired_position = initial_position + moved_already + velocity * step_size
        error_pos = desired_position - current_position
        # Append the desired quaternion to create the error vector. There is a
        # sign ambiguity on quaternions, so we'll always choose the shortest path.
        initial_ori = openravepy.quatFromRotationMatrix(initial_hand_pose)
        current_ori = openravepy.quatFromRotationMatrix(current_hand_pose)
        choices_ori = [current_ori - initial_ori, current_ori + initial_ori]
        error_ori = sign_flipper * min(choices_ori, key=lambda q: numpy.linalg.norm(q))
        # Jacobian pseudo-inverse.
        jacobian_spatial = manip.CalculateJacobian()
        jacobian_angular = manip.CalculateRotationJacobian()  # this function seems very buggy/wrong
        jacobian = numpy.vstack((jacobian_spatial, jacobian_angular))
        jacobian_pinv = numpy.linalg.pinv(jacobian)
        # Null-space projector: biases the solution toward nullspace_goal
        # without disturbing the end-effector motion.
        nullspace_projector = numpy.eye(jacobian.shape[1]) - numpy.dot(jacobian_pinv, jacobian)
        nullspace_goal = nullspace_fn(robot)
        pose_error = numpy.hstack((error_pos, error_ori))
        return numpy.dot(jacobian_pinv, pose_error) + numpy.dot(nullspace_projector, nullspace_goal)

    @PlanningMethod
    def PlanToEndEffectorOffset(self, robot, direction, distance, max_distance=None,
                                nullspace=JointLimitAvoidance, timelimit=5.0, step_size=0.001,
                                position_tolerance=0.01, angular_tolerance=0.15, **kw_args):
        """
        Plan to a desired end-effector offset with move-hand-straight
        constraint. Movement less than distance will return failure. The
        motion will not move further than max_distance.

        @param robot
        @param direction unit vector in the direction of motion
        @param distance minimum distance in meters
        @param max_distance maximum distance in meters
        @param nullspace secondary objective callable(robot) -> q_dot
        @param timelimit timeout in seconds
        @param step_size step size in meters for the Jacobian pseudoinverse controller
        @param position_tolerance constraint tolerance in meters
        @param angular_tolerance constraint tolerance in radians
        @return traj
        """
        if distance < 0:
            raise ValueError('Distance must be non-negative.')
        elif numpy.linalg.norm(direction) == 0:
            raise ValueError('Direction must be non-zero')
        elif max_distance is not None and max_distance < distance:
            raise ValueError('Max distance is less than minimum distance.')
        elif step_size <= 0:
            raise ValueError('Step size must be positive.')
        elif position_tolerance < 0:
            raise ValueError('Position tolerance must be non-negative.')
        elif angular_tolerance < 0:
            raise ValueError('Angular tolerance must be non-negative.')

        # Save all active bodies so we only check collision with those.
        active_bodies = []
        for body in self.env.GetBodies():
            if body.IsEnabled():
                active_bodies.append(body)

        # Normalize the direction vector.
        direction = numpy.array(direction, dtype='float')
        direction /= numpy.linalg.norm(direction)

        # Default to moving an exact distance.
        if max_distance is None:
            max_distance = distance

        with robot:
            manip = robot.GetActiveManipulator()
            traj = openravepy.RaveCreateTrajectory(self.env, '')
            traj.Init(manip.GetArmConfigurationSpecification())

            active_dof_indices = manip.GetArmIndices()
            limits_lower, limits_upper = robot.GetDOFLimits(active_dof_indices)
            initial_pose = manip.GetEndEffectorTransform()
            q = robot.GetDOFValues(active_dof_indices)
            traj.Insert(0, q)

            start_time = time.time()
            current_distance = 0.0
            sign_flipper = 1
            last_rot_error = 9999999999.0
            try:
                while current_distance < max_distance:
                    # Check for a timeout.
                    current_time = time.time()
                    if timelimit is not None and current_time - start_time > timelimit:
                        raise PlanningError('Reached time limit.')

                    # Compute joint velocities using the Jacobian pseudoinverse.
                    q_dot = self.GetStraightVelocity(manip, direction, initial_pose, nullspace, step_size, sign_flipper=sign_flipper)
                    q += q_dot
                    robot.SetDOFValues(q, active_dof_indices)

                    # Check for collisions with the enabled bodies only.
                    for body in active_bodies:
                        if self.env.CheckCollision(robot, body):
                            raise PlanningError('Encountered collision.')
                    if robot.CheckSelfCollision():
                        raise PlanningError('Encountered self-collision.')
                    # Check for joint limits.
                    elif not (limits_lower < q).all() or not (q < limits_upper).all():
                        raise PlanningError('Encountered joint limit during Jacobian move.')

                    # Check our distance from the straight-line constraint.
                    current_pose = manip.GetEndEffectorTransform()
                    a = initial_pose[0:3, 3]
                    p = current_pose[0:3, 3]
                    orthogonal_proj = (a - p) - numpy.dot(a - p, direction) * direction
                    if numpy.linalg.norm(orthogonal_proj) > position_tolerance:
                        raise PlanningError('Deviated from a straight line constraint.')

                    # Check our orientation against the constraint; flip the
                    # error-term sign if the orientation error is growing.
                    offset_pose = numpy.dot(numpy.linalg.inv(current_pose), initial_pose)
                    offset_angle = openravepy.axisAngleFromRotationMatrix(offset_pose)
                    offset_angle_norm = numpy.linalg.norm(offset_angle)
                    if offset_angle_norm > last_rot_error + 0.0005:
                        sign_flipper *= -1
                    last_rot_error = offset_angle_norm
                    if offset_angle_norm > angular_tolerance:
                        raise PlanningError('Deviated from orientation constraint.')

                    traj.Insert(traj.GetNumWaypoints(), q)

                    # Check if we've exceeded the maximum distance by projecting
                    # our displacement along the direction.
                    hand_pose = manip.GetEndEffectorTransform()
                    displacement = hand_pose[0:3, 3] - initial_pose[0:3, 3]
                    current_distance = numpy.dot(displacement, direction)
            except PlanningError as e:
                # Throw an error if we haven't reached the minimum distance.
                if current_distance < distance:
                    raise
                # Otherwise we'll gracefully terminate.
                else:
                    # str(e) instead of e.message: BaseException.message was
                    # removed in Python 3 (deprecated since 2.6).
                    logger.warning('Terminated early at distance %f < %f: %s',
                                   current_distance, max_distance, str(e))

        # Tag the trajectory we built. The original referenced an undefined
        # name 'output_traj' here, raising NameError on every success.
        SetTrajectoryTags(traj, {Tags.CONSTRAINED: True}, append=True)
        return traj
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class RulesOperations(object):
    """RulesOperations operations.

    Auto-generated by AutoRest; manual edits will be lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-04-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version sent with every request.
        self.api_version = "2017-04-01"
        self.config = config

    def list_by_subscriptions(
            self, resource_group_name, namespace_name, topic_name, subscription_name, custom_headers=None, raw=False, **operation_config):
        """List all the rules within given topic-subscription.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Rule
        :rtype:
         ~azure.mgmt.servicebus.models.RulePaged[~azure.mgmt.servicebus.models.Rule]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the URL from path/query parameters.
            # Subsequent pages: the service returns a complete next_link.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}/rules'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
                    'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
                    'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response into a lazily-fetching paged collection.
        deserialized = models.RulePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.RulePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, resource_group_name, namespace_name, topic_name, subscription_name, rule_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates a new rule or updates an existing rule.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param rule_name: The rule name.
        :type rule_name: str
        :param parameters: Parameters supplied to create a rule.
        :type parameters: ~azure.mgmt.servicebus.models.Rule
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Rule or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.servicebus.models.Rule or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}/rules/{ruleName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'Rule')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Rule', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_group_name, namespace_name, topic_name, subscription_name, rule_name, custom_headers=None, raw=False, **operation_config):
        """Deletes an existing rule.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param rule_name: The rule name.
        :type rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}/rules/{ruleName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request; 204 means the rule did not exist.
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 204]:
            raise models.ErrorResponseException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def get(
            self, resource_group_name, namespace_name, topic_name, subscription_name, rule_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves the description for the specified rule.

        :param resource_group_name: Name of the Resource group within the
         Azure subscription.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param topic_name: The topic name.
        :type topic_name: str
        :param subscription_name: The subscription name.
        :type subscription_name: str
        :param rule_name: The rule name.
        :type rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Rule or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.servicebus.models.Rule or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}/rules/{ruleName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
            'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1),
            'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', max_length=50, min_length=1),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str', max_length=50, min_length=1),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Rule', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
| |
import dateutil.parser
import hashlib
import json
import logging
import os
import re
import string
import sys
import time
import bs4
import elasticsearch
import elasticsearch.helpers
import nltk
import pyzmail
from nltk.stem.snowball import SnowballStemmer
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
# Python 2-only workaround: reload(sys) re-exposes setdefaultencoding (which
# site.py removes at startup) so implicit str<->unicode conversions of
# non-ASCII text default to UTF-8 instead of raising UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding("utf8")
class Importer(object):
    """Parses email messages from disk and prepares them for Elasticsearch.

    Each message file is converted into a JSON-ready dict keyed by the
    MD5 of the raw file contents (see get_json/process_folder).
    """

    # Runs once at class-definition time, configuring the root logger.
    logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)

    def __init__(self, arg_document_count_limit=sys.maxint, arg_process_text_part=True, arg_process_html_part=False,
                 arg_process_both_empty=False):
        # Stop indexing after this many files (default: effectively unlimited).
        self.document_count_limit = arg_document_count_limit
        # Flags selecting which MIME parts become the 'body' field.
        self.process_text_part = arg_process_text_part
        self.process_html_part = arg_process_html_part
        self.process_both_empty = arg_process_both_empty
        self.stemmer = SnowballStemmer("english")
        pass

    # http://brandonrose.org/clustering (with some modifications)
    @staticmethod
    def strip_proppers(arg_text):
        """Keep only lowercase tokens and rejoin them into a single string."""
        # first tokenize by sentence, then by word to ensure that punctuation
        # is caught as its own token
        tokens = [word for sent in nltk.sent_tokenize(arg_text) for word in nltk.word_tokenize(sent)
                  if word.islower()]
        # todo get the startswiths and endswiths right here
        return "".join(
            [" " + i if not i.startswith("'") and not i.startswith("/") and not i.endswith(
                "=") and i not in string.punctuation else i for i in tokens]).strip()

    # http://brandonrose.org/clustering
    def tokenize_and_stem(self, arg_text):
        """Tokenize arg_text and return Snowball stems of alphabetic tokens."""
        # first tokenize by sentence, then by word to ensure that punctuation
        # is caught as its own token
        tokens = [current_word for sent in nltk.sent_tokenize(arg_text) for current_word in nltk.word_tokenize(sent)]
        filtered_tokens = []
        # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
        for token in tokens:
            if re.search('[a-zA-Z]', token):
                filtered_tokens.append(token)
        stems = [self.stemmer.stem(token) for token in filtered_tokens]
        return stems

    def process_folder(self, arg_folder, arg_bulk_upload, arg_document_type, arg_buffer_limit, arg_server,
                       arg_index_name, arg_kmeans_dictionary):
        """Walk arg_folder and index every file, singly or in bulk batches.

        When arg_bulk_upload is true, documents are buffered and flushed to
        Elasticsearch every arg_buffer_limit documents (and once at the end).
        """
        document_count = 0
        document_buffer = []
        indexed_count = 0
        error_count = 0
        for root, subdirectories, files in os.walk(arg_folder):
            for current in files:
                if document_count < self.document_count_limit:
                    current_full_file_name = os.path.join(root, current)
                    # logging.debug("%d %s", document_count, current_full_file_name)
                    current_json, document_id = self.get_json(current_full_file_name,
                                                              arg_process_text_part=self.process_text_part,
                                                              arg_process_html_part=self.process_html_part,
                                                              arg_process_both_empty=self.process_both_empty,
                                                              arg_kmeans_cluster_dictionary=arg_kmeans_dictionary)
                    # logging.debug(current_json)
                    document_count += 1
                    try:
                        if arg_bulk_upload:
                            wrapped = {'_type': arg_document_type, '_source': current_json}
                            document_buffer.append(wrapped)
                            if len(document_buffer) == arg_buffer_limit:
                                try:
                                    index_result = elasticsearch.helpers.bulk(arg_server, document_buffer,
                                                                              index=arg_index_name,
                                                                              request_timeout=1000)
                                    logging.debug(index_result)
                                    indexed_count += len(document_buffer)
                                    document_buffer = []
                                except elasticsearch.exceptions.ConnectionTimeout as connectionTimeout:
                                    # On timeout the batch is dropped, not retried.
                                    logging.warn(connectionTimeout)
                                    document_buffer = []
                        else:
                            index_result = arg_server.index(index=arg_index_name, doc_type=arg_document_type,
                                                            body=current_json, id=document_id)
                            indexed_count += 1
                            logging.debug("id: %s, result: %s", document_id, index_result)
                    except elasticsearch.exceptions.SerializationError as serializationError:
                        logging.warn(serializationError)
                        error_count += 1
        # need to flush the pending buffer
        if arg_bulk_upload and len(document_buffer) > 0:
            index_result = elasticsearch.helpers.bulk(arg_server, document_buffer, index=arg_index_name)
            logging.debug(index_result)

    # Encoding applied to every text field written into the result dicts.
    target_encoding = 'utf-8'

    # MIME encoded-word fragments to scrub out of address strings.
    # https://groups.google.com/forum/#!topic/microsoft.public.outlookexpress.general/oig7-xNFISg
    clean_address_tokens = ['=?us-ascii?Q?', '=0D=0A_=28', '=?utf-8?Q?', '=29?=', '=0D=0A']

    def clean_address(self, argvalue):
        """Strip known MIME artifacts from an address and lowercase it."""
        result = str(argvalue)
        for token in self.clean_address_tokens:
            if token in result:
                result = result.replace(token, ' ')
        return result.lower().strip()

    @staticmethod
    def get_references(current_file):
        """Return the Message-Id and References headers of a message file."""
        result = {}
        with open(current_file, 'rb') as fp:
            message = pyzmail.message_from_file(fp)
            # Header name case varies between producers; accept both spellings.
            if 'Message-Id' in message.keys():
                result['message-id'] = message['Message-Id']
            elif 'Message-ID' in message.keys():
                result['message-id'] = message['Message-ID']
            else:
                logging.warn('no message id in file %s', current_file)
            if 'References' in message.keys():
                references = message['References'].split(' ')
                result['references'] = references
        return result

    def get_json(self, current_file, arg_process_text_part, arg_process_html_part, arg_process_both_empty,
                 arg_kmeans_cluster_dictionary):
        """Parse one message file into an indexable dict.

        Returns (result_dict, md5_of_raw_file); the hash is used as the
        Elasticsearch document id.
        """
        result = {'original_file': current_file}
        with open(current_file, 'rb') as fp:
            message = pyzmail.message_from_file(fp)
            # todo clean up internal whitespace
            senders = message.get_addresses('from')
            # Flattens (name, address) pairs: all names first, then all addresses.
            result['sender'] = [item[i] for i in [0, 1] for item in senders]
            result['short_sender'] = [item.split('@')[0] for item in result['sender']]
            clean_senders = [self.clean_address(item[1]) for item in senders]
            result['clean_sender'] = clean_senders
            # todo clean up internal whitespace
            recipients = message.get_addresses('to') + message.get_addresses('cc') + message.get_addresses('bcc')
            result['recipient'] = recipients
            result['party'] = list(
                ['{name} = {address}'.format(name=item[0], address=item[1]) for item in senders + recipients])
            result['clean_recipient'] = [self.clean_address(item[1]) for item in recipients]
            # Overwrites the raw pairs stored above with the flattened form.
            result['recipient'] = [item[i] for i in [0, 1] for item in recipients]
            result['short_recipient'] = [item.split('@')[0] for item in result['clean_recipient']]
            # NOTE(review): subject is always decoded as iso-8859-1 regardless
            # of the declared charset -- confirm this is intentional.
            subject = message.get('subject')
            result['subject'] = '' if subject is None else subject.decode('iso-8859-1').encode(self.target_encoding)
            raw_date = message.get('date')
            if raw_date is not None:
                try:
                    result['date'] = dateutil.parser.parse(raw_date)
                except ValueError as valueError:
                    # todo find a way to deal with these special cases?
                    # we occasionally get a string the parser won't parse e.g.
                    # Wed, 17 Dec 2008 12:35:42 -0700 (GMT-07:00)
                    # and we need to drop off the trailing time zone and try to parse again
                    logging.warn('%s %s %s', raw_date, valueError, current_file)
                    pieces = str(raw_date).split('(')
                    result['date'] = dateutil.parser.parse(pieces[0])
            else:
                # todo add special code to handle these?
                logging.warn('no date: %s ', message)
            text_part = message.text_part
            if text_part is not None and arg_process_text_part:
                charset = text_part.charset
                payload = text_part.get_payload()
                if charset is not None:
                    try:
                        body = payload.decode(charset, 'ignore').encode(self.target_encoding)
                    except LookupError as lookupError:
                        # iso-8859-8-i is not a codec Python knows; fall back
                        # to its base codec, otherwise assume utf-8.
                        if text_part.charset == 'iso-8859-8-i':
                            body = payload.decode('iso-8859-8', 'ignore').encode(self.target_encoding)
                        else:
                            body = payload.decode('utf-8', 'ignore').encode(self.target_encoding)
                            logging.warn('lookup error %s', lookupError)
                else:
                    body = payload.decode('utf-8', 'ignore').encode(self.target_encoding)
                result['body'] = body
                # K-means cluster id is looked up by the message's base filename.
                short_file_name = os.path.basename(current_file)
                result['kmeans_cluster'] = arg_kmeans_cluster_dictionary[short_file_name]
            elif message.html_part is not None and arg_process_html_part:
                payload = message.html_part.part.get_payload()
                payload_text = bs4.BeautifulSoup(payload, 'lxml').get_text().strip()
                charset = message.html_part.charset if message.html_part.charset is not None else 'utf-8'
                result['body'] = payload_text.decode(charset, 'ignore').encode(self.target_encoding)
            elif arg_process_both_empty:
                logging.warn('both text_part and html_part are None: %s', current_file)
            else:
                logging.warn('not processing %s', current_file)
            if 'body' in result.keys():
                if len(result['body']) == 0:
                    result['empty_body'] = True
            if 'Message-Id' in message.keys():
                result['message-id'] = message['Message-Id']
            if 'In-Reply-To' in message.keys():
                result['in-reply-to'] = message['In-Reply-To']
            if 'References' in message.keys():
                result['references'] = message['References'].split(' ')
        # Document id: MD5 of the raw file bytes (re-read from disk).
        md5 = hashlib.md5()
        with open(current_file, 'rb') as fp:
            md5.update(fp.read())
        return result, md5.hexdigest()
def run():
    """Drive the end-to-end mail import.

    Reads configuration from 'real-settings.json' in the working directory,
    rebuilds the Elasticsearch index and its mapping from scratch, then runs
    the Importer over the configured input folder and logs the elapsed time.
    """
    start_time = time.time()
    with open('real-settings.json') as data_file:
        data = json.load(data_file)
    logging.debug(data)
    input_folder = data['input_folder']
    document_count_limit = data['document_count_limit']
    if document_count_limit == -1:
        # -1 in the settings file means "no limit" (Python 2 sys.maxint).
        document_count_limit = sys.maxint
    process_text_part = data['process_text_part']
    process_html_part = data['process_html_part']
    process_both_empty = data['process_both_empty']
    elasticsearch_host = data['elasticsearch_host']
    elasticsearch_port = data['elasticsearch_port']
    elasticsearch_index_name = data['elasticsearch_index_name']
    elasticsearch_document_type = data['elasticsearch_document_type']
    elasticsearch_batch_size = data['elasticsearch_batch_size']
    kmeans_cluster_file_name = data['kmeans_cluster_file_name']
    # get the connection to elasticsearch
    elasticsearch_server = elasticsearch.Elasticsearch([{'host': elasticsearch_host, 'port': elasticsearch_port}])
    # Start from a clean index on every run.
    if elasticsearch_server.indices.exists(elasticsearch_index_name):
        elasticsearch_server.indices.delete(elasticsearch_index_name)
    elasticsearch_server.indices.create(elasticsearch_index_name)
    # Use a context manager so the cluster file handle is closed promptly
    # (the previous bare open() call leaked the handle).
    with open(kmeans_cluster_file_name, 'r') as kmeans_file:
        kmeans_cluster_dictionary = json.load(kmeans_file)

    def raw_string_field():
        # Analyzed string field with a 'raw' not_analyzed sub-field so the
        # same value can be used for both full-text search and exact
        # matching/aggregations.
        return {
            'type': 'string',
            'fields': {
                'raw': {
                    'type': 'string',
                    'index': 'not_analyzed'
                }
            }
        }

    mapping = {
        elasticsearch_document_type: {
            'properties': {
                field_name: raw_string_field()
                for field_name in ('subject', 'sender', 'sender_clean',
                                   'party', 'recipient_clean')
            }
        }
    }
    elasticsearch_server.indices.put_mapping(index=elasticsearch_index_name, doc_type=elasticsearch_document_type,
                                             body=mapping)
    instance = Importer(arg_document_count_limit=document_count_limit, arg_process_text_part=process_text_part,
                        arg_process_html_part=process_html_part, arg_process_both_empty=process_both_empty)
    instance.process_folder(input_folder, True, elasticsearch_document_type, elasticsearch_batch_size,
                            elasticsearch_server, elasticsearch_index_name, kmeans_cluster_dictionary)
    finish_time = time.time()
    elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
    elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
    logging.info(
        "Elapsed time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
# Allow running the importer directly as a script.
if __name__ == '__main__':
    run()
| |
"""Test the TcEx App Feature Advance Request Module."""
# standard library
import json
class TestAdvancedRequest:
    """Test the TcEx App Feature Advance Request Module.

    All tests drive real HTTP requests against an httpbin instance
    (https://httpbin.tci.ninja), so they require network access.
    """

    # properties
    # Populated in setup_class; the playbook output variables the App is
    # expected to write to the KV store.
    tc_playbook_out_variables = None

    @staticmethod
    def _load_data(tcex: object, context: str):
        """Load data from Redis into a dict.

        Args:
            tcex (object): The TcEx object.
            context (str): The KV store context.
        """
        data = {}
        for k, v in tcex.redis_client.hgetall(context).items():
            if k.decode() == '#App:0001:pytest.request.headers!String':
                # The headers variable is stored double-JSON-encoded in the
                # KV store, so it needs two decode passes.
                data[k.decode()] = json.loads(json.loads(v.decode()))
            else:
                data[k.decode()] = json.loads(v.decode())
        return data

    def setup_class(self):
        """Configure setup before all tests."""
        self.tc_playbook_out_variables = [
            '#App:0001:pytest.request.headers!String',
            '#App:0001:pytest.request.ok!String',
            '#App:0001:pytest.request.reason!String',
            '#App:0001:pytest.request.status_code!String',
            '#App:0001:pytest.request.url!String',
            '#App:0001:pytest.request.content!String',
            '#App:0001:pytest.request.content.binary!Binary',
        ]

    @staticmethod
    def test_advanced_request_get_standard(playbook_app: callable):
        """Test advanced request feature: a plain GET with query params.

        Args:
            playbook_app (callable, fixture): The playbook_app fixture.
        """
        tcex = playbook_app(
            config_data={
                'tc_adv_req_exclude_null_params': False,
                'tc_adv_req_fail_on_error': False,
                'tc_adv_req_urlencode_body': False,
                'tc_adv_req_body': None,
                'tc_adv_req_headers': {'key': 'pytest', 'value': 'pytest'},
                'tc_adv_req_http_method': 'GET',
                'tc_adv_req_params': [{'key': 'one', 'value': '1'}, {'key': 'two', 'value': ''}],
                'tc_adv_req_path': '/anything',
            }
        ).tcex
        se = tcex.session_external
        se.base_url = 'https://httpbin.tci.ninja'
        se.verify = False
        ar = tcex.advanced_request(session=se, timeout=60)
        r = ar.request()
        data = r.json()
        assert r.request.url == data.get('url')
        assert r.status_code == 200
        # assert headers
        assert data.get('headers', {}).get('Pytest') == 'pytest'
        assert data.get('headers', {}).get('User-Agent') == 'TcEx App: Pytest - 1.0.0'
        # assert params (empty-string param is kept because
        # exclude_null_params is False)
        assert data.get('args', {}).get('one') == '1'
        assert data.get('args', {}).get('two') == ''

    def test_advanced_request_get_500(self, playbook_app: callable):
        """Test advanced request feature: a 500 response with
        fail_on_error=True, validated through the KV store outputs.

        Args:
            playbook_app (callable, fixture): The playbook_app fixture.
        """
        tcex = playbook_app(
            config_data={
                'tc_playbook_out_variables': self.tc_playbook_out_variables,
                'tc_adv_req_exclude_null_params': False,
                'tc_adv_req_fail_on_error': True,
                'tc_adv_req_urlencode_body': False,
                'tc_adv_req_body': None,
                'tc_adv_req_headers': {'key': 'pytest', 'value': 'pytest'},
                'tc_adv_req_http_method': 'GET',
                'tc_adv_req_params': [{'key': 'one', 'value': '1'}, {'key': 'two', 'value': ''}],
                'tc_adv_req_path': '/status/500',
            }
        ).tcex
        se = tcex.session_external
        se.base_url = 'https://httpbin.tci.ninja'
        se.verify = False
        ar = tcex.advanced_request(session=se, timeout=60)
        try:
            ar.request()
        except RuntimeError:
            # fail_on_error=True makes the 500 raise; swallow it because this
            # test validates the written outputs instead.
            pass
        try:
            # the write_output method is called in exit method
            tcex.playbook.exit(1)
        except SystemExit:
            pass
        # load output data from KV store to validate
        data = self._load_data(tcex, tcex.args.tc_playbook_db_context)
        assert data.get('#App:0001:pytest.request.reason!String') == 'INTERNAL SERVER ERROR'
        assert data.get('#App:0001:pytest.request.content!String') == ''
        assert data.get('#App:0001:pytest.request.headers!String').get('Server') == 'nginx'
        assert data.get('#App:0001:pytest.request.status_code!String') == '500'
        assert data.get('#App:0001:pytest.request.content.binary!Binary') == ''
        assert data.get('#App:0001:pytest.request.ok!String') == 'false'
        assert (
            data.get('#App:0001:pytest.request.url!String')
            == 'https://httpbin.tci.ninja/status/500?one=1&two='
        )

    @staticmethod
    def test_advanced_request_get_exclude_null_params(playbook_app: callable):
        """Test advanced request feature: null params are dropped when
        exclude_null_params is True.

        Args:
            playbook_app (callable, fixture): The playbook_app fixture.
        """
        tcex = playbook_app(
            config_data={
                'tc_adv_req_exclude_null_params': True,
                'tc_adv_req_fail_on_error': False,
                'tc_adv_req_urlencode_body': False,
                'tc_adv_req_body': None,
                'tc_adv_req_headers': {'key': 'pytest', 'value': 'pytest'},
                'tc_adv_req_http_method': 'GET',
                'tc_adv_req_params': [{'key': 'one', 'value': '1'}, {'key': 'two', 'value': None}],
                'tc_adv_req_path': '/anything',
            }
        ).tcex
        se = tcex.session_external
        se.base_url = 'https://httpbin.tci.ninja'
        se.verify = False
        ar = tcex.advanced_request(session=se, timeout=60)
        r = ar.request()
        data = r.json()
        assert r.request.url == data.get('url')
        assert r.status_code == 200
        # assert headers
        assert data.get('headers', {}).get('Pytest') == 'pytest'
        assert data.get('headers', {}).get('User-Agent') == 'TcEx App: Pytest - 1.0.0'
        # assert params (the None-valued param must not be sent at all)
        assert data.get('args', {}).get('one') == '1'
        assert data.get('args', {}).get('two') is None

    @staticmethod
    def test_advanced_request_post_str(playbook_app: callable):
        """Test advanced request feature: POST with a str body.

        Args:
            playbook_app (callable, fixture): The playbook_app fixture.
        """
        tcex = playbook_app(
            config_data={
                'tc_adv_req_exclude_null_params': False,
                'tc_adv_req_fail_on_error': False,
                'tc_adv_req_urlencode_body': False,
                'tc_adv_req_body': 'pytest',
                'tc_adv_req_headers': {'key': 'pytest', 'value': 'pytest'},
                'tc_adv_req_http_method': 'POST',
                'tc_adv_req_params': {'key': 'one', 'value': '1'},
                'tc_adv_req_path': '/anything',
            }
        ).tcex
        se = tcex.session_external
        se.base_url = 'https://httpbin.tci.ninja'
        se.verify = False
        ar = tcex.advanced_request(session=se, timeout=60)
        r = ar.request()
        data = r.json()
        assert r.request.url == data.get('url')
        assert r.status_code == 200
        # assert data (body must round-trip unchanged)
        assert data.get('data') == tcex.args.tc_adv_req_body
        # assert headers
        assert data.get('headers', {}).get('Pytest') == 'pytest'
        assert data.get('headers', {}).get('User-Agent') == 'TcEx App: Pytest - 1.0.0'
        # assert params
        assert data.get('args', {}).get('one') == '1'

    @staticmethod
    def test_advanced_request_post_bytes(playbook_app: callable):
        """Test advanced request feature: POST with a bytes body.

        Args:
            playbook_app (callable, fixture): The playbook_app fixture.
        """
        tcex = playbook_app(
            config_data={
                'tc_adv_req_exclude_null_params': False,
                'tc_adv_req_fail_on_error': False,
                'tc_adv_req_urlencode_body': False,
                'tc_adv_req_body': b'pytest',
                'tc_adv_req_headers': {'key': 'pytest', 'value': 'pytest'},
                'tc_adv_req_http_method': 'POST',
                'tc_adv_req_params': {'key': 'one', 'value': '1'},
                'tc_adv_req_path': '/anything',
            }
        ).tcex
        se = tcex.session_external
        se.base_url = 'https://httpbin.tci.ninja'
        se.verify = False
        ar = tcex.advanced_request(session=se, timeout=60)
        r = ar.request()
        data = r.json()
        assert r.request.url == data.get('url')
        assert r.status_code == 200
        # assert data (httpbin echoes text, so compare against decoded bytes)
        assert data.get('data') == tcex.args.tc_adv_req_body.decode()
        # assert headers
        assert data.get('headers', {}).get('Pytest') == 'pytest'
        assert data.get('headers', {}).get('User-Agent') == 'TcEx App: Pytest - 1.0.0'
        # assert params
        assert data.get('args', {}).get('one') == '1'

    @staticmethod
    def test_advanced_request_post_urlencode(playbook_app: callable):
        """Test advanced request feature: a JSON body is converted to
        form-encoded data when urlencode_body is True.

        Args:
            playbook_app (callable, fixture): The playbook_app fixture.
        """
        tcex = playbook_app(
            config_data={
                'tc_adv_req_exclude_null_params': False,
                'tc_adv_req_fail_on_error': False,
                'tc_adv_req_urlencode_body': True,
                'tc_adv_req_body': json.dumps({'one': '1', 'two': '2'}),
                'tc_adv_req_headers': {'key': 'pytest', 'value': 'pytest'},
                'tc_adv_req_http_method': 'POST',
                'tc_adv_req_params': {'key': 'one', 'value': '1'},
                'tc_adv_req_path': '/anything',
            }
        ).tcex
        se = tcex.session_external
        se.base_url = 'https://httpbin.tci.ninja'
        se.verify = False
        ar = tcex.advanced_request(session=se, timeout=60)
        r = ar.request()
        data = r.json()
        assert r.request.url == data.get('url')
        assert r.status_code == 200
        # assert form (the JSON body arrives as form fields)
        assert data.get('form', {}).get('one') == '1'
        assert data.get('form', {}).get('two') == '2'
        # assert headers
        assert data.get('headers', {}).get('Pytest') == 'pytest'
        assert data.get('headers', {}).get('User-Agent') == 'TcEx App: Pytest - 1.0.0'
        # assert params
        assert data.get('args', {}).get('one') == '1'
| |
from datetime import datetime, timedelta
from functools import wraps
from mock import Mock
from pyparsing import ParseException
from celery import task
from celery.app import app_or_default
from celery.task import task as task_dec
from celery.exceptions import RetryTaskError
from celery.execute import send_task
from celery.result import EagerResult
from celery.schedules import crontab, crontab_parser
from celery.utils import gen_unique_id
from celery.utils.timeutils import parse_iso8601
from celery.tests.utils import with_eager_tasks, unittest, StringIO
def return_True(*args, **kwargs):
    """Accept any arguments and always return True.

    Defined as a plain module-level function (not a closure or lambda)
    because task run functions are pickled.
    """
    return True
# Function-style task used by the TaskSet tests below; wrapping with the
# task decorator makes return_True behave like a registered celery task.
return_True_task = task_dec()(return_True)
def raise_exception(self, **kwargs):
    """Unconditionally raise Exception, embedding self's class in the message."""
    message = "%s error" % self.__class__
    raise Exception(message)
class MockApplyTask(task.Task):
    """Task whose apply_async is stubbed out so nothing is ever dispatched."""

    def run(self, x, y):
        """Return the product of *x* and *y*."""
        return x * y

    @classmethod
    def apply_async(cls, *args, **kwargs):
        # Intentionally a no-op: tests use this class to exercise code paths
        # that must not actually enqueue anything.  Note the first parameter
        # of a classmethod is conventionally named `cls`, not `self`.
        pass
class IncrementCounterTask(task.Task):
    """Task that bumps a class-level counter and returns the new total."""

    name = "c.unittest.increment_counter_task"
    count = 0  # shared across all instances; reset explicitly by the tests

    def run(self, increment_by=1, **kwargs):
        """Add *increment_by* to the counter (falsy values count as 1)."""
        step = increment_by or 1
        cls = self.__class__
        cls.count += step
        return cls.count
class RaisingTask(task.Task):
    """Task that always fails; used to test error propagation."""

    name = "c.unittest.raising_task"

    def run(self, **kwargs):
        """Unconditionally raise KeyError("foo")."""
        raise KeyError("foo")
class RetryTask(task.Task):
    """Task that retries itself until its retry budget is spent."""

    max_retries = 3
    iterations = 0  # number of times run() was entered; reset by the tests

    def run(self, arg1, arg2, kwarg=1, max_retries=None, care=True):
        """Retry with countdown=0 until the retry limit, then return arg1.

        A per-call *max_retries* overrides the class attribute.  When *care*
        is False the limit check is skipped, so the task keeps retrying
        until celery itself gives up.
        """
        self.__class__.iterations += 1
        effective_max = max_retries if max_retries is not None else self.max_retries
        if care and self.request.retries >= effective_max:
            return arg1
        return self.retry(countdown=0, max_retries=max_retries)
class RetryTaskNoArgs(task.Task):
    """Retrying task driven purely by the magic task_retries keyword."""

    max_retries = 3
    iterations = 0  # run() entry counter, reset by the tests

    def run(self, **kwargs):
        """Return 42 once three retries have happened, otherwise retry."""
        self.__class__.iterations += 1
        if kwargs["task_retries"] >= 3:
            return 42
        return self.retry(kwargs=kwargs, countdown=0)
class RetryTaskMockApply(task.Task):
    """Retrying task whose apply_async only records that it was called."""

    max_retries = 3
    iterations = 0  # run() entry counter, reset by the tests
    applied = 0  # flag: set to 1 when apply_async is invoked

    def run(self, arg1, arg2, kwarg=1, **kwargs):
        """Retry (through the mocked apply_async) until task_retries >= 3,
        then return arg1."""
        self.__class__.iterations += 1
        retries = kwargs["task_retries"]
        if retries >= 3:
            return arg1
        else:
            # Carry the keyword argument forward through the retry.
            kwargs.update({"kwarg": kwarg})
            return self.retry(args=[arg1, arg2], kwargs=kwargs, countdown=0)

    @classmethod
    def apply_async(cls, *args, **kwargs):
        # Record the call instead of enqueueing anything.  The first
        # parameter of a classmethod is conventionally `cls`, not `self`.
        cls.applied = 1
class MyCustomException(Exception):
    """Application-specific exception used to exercise custom-exc retries."""
class RetryTaskCustomExc(task.Task):
    """Retrying task that attaches a custom exception to each retry."""

    max_retries = 3
    iterations = 0  # run() entry counter, reset by the tests

    def run(self, arg1, arg2, kwarg=1, **kwargs):
        """Return arg1 + kwarg after three retries; before that, retry with
        MyCustomException passed as the `exc` argument so it is re-raised
        when the retry budget is exhausted."""
        self.__class__.iterations += 1
        if kwargs["task_retries"] >= 3:
            return arg1 + kwarg
        try:
            raise MyCustomException("Elaine Marie Benes")
        except MyCustomException as exc:
            kwargs.update({"kwarg": kwarg})
            return self.retry(args=[arg1, arg2], kwargs=kwargs,
                              countdown=0, exc=exc)
class TestTaskRetries(unittest.TestCase):
    """Exercise Task.retry() behaviour via eager apply()."""

    def test_retry(self):
        """arg1 is returned once retries stop; iterations == 1 + retries."""
        RetryTask.max_retries = 3
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF])
        self.assertEqual(result.get(), 0xFF)
        # one initial run plus three retries
        self.assertEqual(RetryTask.iterations, 4)

        RetryTask.max_retries = 3
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF], {"max_retries": 10})
        self.assertEqual(result.get(), 0xFF)
        # the per-call max_retries overrides the class attribute
        self.assertEqual(RetryTask.iterations, 11)

    def test_retry_no_args(self):
        RetryTaskNoArgs.max_retries = 3
        RetryTaskNoArgs.iterations = 0
        result = RetryTaskNoArgs.apply()
        self.assertEqual(result.get(), 42)
        self.assertEqual(RetryTaskNoArgs.iterations, 4)

    def test_retry_kwargs_can_be_empty(self):
        # retry() outside a task run (kwargs=None) still raises RetryTaskError
        self.assertRaises(RetryTaskError, RetryTaskMockApply.retry,
                          args=[4, 4], kwargs=None)

    def test_retry_not_eager(self):
        """throw=False only re-applies; throw=True raises RetryTaskError."""
        exc = Exception("baz")
        try:
            RetryTaskMockApply.retry(args=[4, 4], kwargs={"task_retries": 0},
                                     exc=exc, throw=False)
            self.assertTrue(RetryTaskMockApply.applied)
        finally:
            RetryTaskMockApply.applied = 0

        try:
            self.assertRaises(RetryTaskError, RetryTaskMockApply.retry,
                              args=[4, 4], kwargs={"task_retries": 0},
                              exc=exc, throw=True)
            self.assertTrue(RetryTaskMockApply.applied)
        finally:
            RetryTaskMockApply.applied = 0

    def test_retry_with_kwargs(self):
        RetryTaskCustomExc.max_retries = 3
        RetryTaskCustomExc.iterations = 0
        result = RetryTaskCustomExc.apply([0xFF, 0xFFFF], {"kwarg": 0xF})
        self.assertEqual(result.get(), 0xFF + 0xF)
        self.assertEqual(RetryTaskCustomExc.iterations, 4)

    def test_retry_with_custom_exception(self):
        """When retries run out, the exc passed to retry() is raised."""
        RetryTaskCustomExc.max_retries = 2
        RetryTaskCustomExc.iterations = 0
        result = RetryTaskCustomExc.apply([0xFF, 0xFFFF], {"kwarg": 0xF})
        self.assertRaises(MyCustomException,
                          result.get)
        self.assertEqual(RetryTaskCustomExc.iterations, 3)

    def test_max_retries_exceeded(self):
        """With care=False the task retries past the limit and celery raises."""
        RetryTask.max_retries = 2
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF], {"care": False})
        self.assertRaises(RetryTask.MaxRetriesExceededError,
                          result.get)
        self.assertEqual(RetryTask.iterations, 3)

        RetryTask.max_retries = 1
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF], {"care": False})
        self.assertRaises(RetryTask.MaxRetriesExceededError,
                          result.get)
        self.assertEqual(RetryTask.iterations, 2)
class TestCeleryTasks(unittest.TestCase):
    """Behavioural tests for task.Task: pickling, publishing, consuming,
    state updates and logger access."""

    def test_unpickle_task(self):
        """A decorated task unpickles to the very same task instance."""
        import pickle

        @task_dec
        def xxx():
            pass
        self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx)

    def createTaskCls(self, cls_name, task_name=None):
        """Create a Task subclass named *cls_name* whose run() returns True.

        If *task_name* is given it becomes the explicit task name; otherwise
        celery derives one from the module and class name.
        """
        attrs = {"__module__": self.__module__}
        if task_name:
            attrs["name"] = task_name
        cls = type(cls_name, (task.Task, ), attrs)
        cls.run = return_True
        return cls

    def test_AsyncResult(self):
        task_id = gen_unique_id()
        result = RetryTask.AsyncResult(task_id)
        self.assertEqual(result.backend, RetryTask.backend)
        self.assertEqual(result.task_id, task_id)

    @with_eager_tasks
    def test_ping(self):
        self.assertEqual(task.ping(), 'pong')

    def assertNextTaskDataEqual(self, consumer, presult, task_name,
                                test_eta=False, test_expires=False, **kwargs):
        """Fetch the next message from *consumer* and assert it matches the
        published task: id, name, kwargs and, optionally, that eta/expires
        were serialized as ISO8601 strings."""
        next_task = consumer.fetch()
        task_data = next_task.decode()
        self.assertEqual(task_data["id"], presult.task_id)
        self.assertEqual(task_data["task"], task_name)
        task_kwargs = task_data.get("kwargs", {})
        if test_eta:
            self.assertIsInstance(task_data.get("eta"), basestring)
            to_datetime = parse_iso8601(task_data.get("eta"))
            self.assertIsInstance(to_datetime, datetime)
        if test_expires:
            self.assertIsInstance(task_data.get("expires"), basestring)
            to_datetime = parse_iso8601(task_data.get("expires"))
            self.assertIsInstance(to_datetime, datetime)
        for arg_name, arg_value in kwargs.items():
            self.assertEqual(task_kwargs.get(arg_name), arg_value)

    def test_incomplete_task_cls(self):
        """A Task subclass without run() raises NotImplementedError."""
        class IncompleteTask(task.Task):
            name = "c.unittest.t.itask"
        self.assertRaises(NotImplementedError, IncompleteTask().run)

    def test_task_kwargs_must_be_dictionary(self):
        self.assertRaises(ValueError, IncrementCounterTask.apply_async,
                          [], "str")

    def test_task_args_must_be_list(self):
        self.assertRaises(ValueError, IncrementCounterTask.apply_async,
                          "str", {})

    def test_regular_task(self):
        """End-to-end publish/consume cycle for a regular task."""
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        self.assertIsInstance(T1(), T1)
        self.assertTrue(T1().run())
        self.assertTrue(callable(T1()),
                        "Task class is callable()")
        self.assertTrue(T1()(),
                        "Task class runs run() when called")

        # task name generated out of class module + name.
        T2 = self.createTaskCls("T2")
        self.assertTrue(T2().name.endswith("test_task.T2"))

        t1 = T1()
        consumer = t1.get_consumer()
        self.assertRaises(NotImplementedError, consumer.receive, "foo", "foo")
        consumer.discard_all()
        self.assertIsNone(consumer.fetch())

        # Without arguments.
        presult = t1.delay()
        self.assertNextTaskDataEqual(consumer, presult, t1.name)

        # With arguments.
        presult2 = t1.apply_async(kwargs=dict(name="George Costanza"))
        self.assertNextTaskDataEqual(consumer, presult2, t1.name,
                                     name="George Costanza")

        # send_task
        sresult = send_task(t1.name, kwargs=dict(name="Elaine M. Benes"))
        self.assertNextTaskDataEqual(consumer, sresult, t1.name,
                                     name="Elaine M. Benes")

        # With eta.
        presult2 = t1.apply_async(kwargs=dict(name="George Costanza"),
                                  eta=datetime.now() + timedelta(days=1),
                                  expires=datetime.now() + timedelta(days=2))
        self.assertNextTaskDataEqual(consumer, presult2, t1.name,
                                     name="George Costanza", test_eta=True, test_expires=True)

        # With countdown.
        presult2 = t1.apply_async(kwargs=dict(name="George Costanza"),
                                  countdown=10, expires=12)
        self.assertNextTaskDataEqual(consumer, presult2, t1.name,
                                     name="George Costanza", test_eta=True, test_expires=True)

        # Discarding all tasks.
        consumer.discard_all()
        t1.apply_async()
        self.assertEqual(consumer.discard_all(), 1)
        self.assertIsNone(consumer.fetch())

        self.assertFalse(presult.successful())
        t1.backend.mark_as_done(presult.task_id, result=None)
        self.assertTrue(presult.successful())

        publisher = t1.get_publisher()
        self.assertTrue(publisher.exchange)

    def test_context_get(self):
        request = self.createTaskCls("T1", "c.unittest.t.c.g").request
        request.foo = 32
        self.assertEqual(request.get("foo"), 32)
        self.assertEqual(request.get("bar", 36), 36)

    def test_task_class_repr(self):
        task = self.createTaskCls("T1", "c.unittest.t.repr")
        self.assertIn("class Task of", repr(task.app.Task))

    def test_after_return(self):
        """after_return notifies the backend about chord membership."""
        task = self.createTaskCls("T1", "c.unittest.t.after_return")()
        task.backend = Mock()
        task.request.chord = 123
        task.after_return("SUCCESS", 1.0, "foobar", (), {}, None)
        task.backend.on_chord_part_return.assert_called_with(task)

    def test_send_task_sent_event(self):
        """When CELERY_SEND_TASK_SENT_EVENT is on, publishing supplies an
        event dispatcher to the publisher."""
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        conn = T1.app.broker_connection()
        chan = conn.channel()
        T1.app.conf.CELERY_SEND_TASK_SENT_EVENT = True
        dispatcher = [None]

        class Pub(object):
            channel = chan

            def delay_task(self, *args, **kwargs):
                dispatcher[0] = kwargs.get("event_dispatcher")

        try:
            T1.apply_async(publisher=Pub())
        finally:
            # always restore the global setting and release the channel
            T1.app.conf.CELERY_SEND_TASK_SENT_EVENT = False
            chan.close()
            conn.close()
        self.assertTrue(dispatcher[0])

    def test_get_publisher(self):
        connection = app_or_default().broker_connection()
        p = IncrementCounterTask.get_publisher(connection, auto_declare=False,
                                               exchange="foo")
        self.assertEqual(p.exchange.name, "foo")
        p = IncrementCounterTask.get_publisher(connection, auto_declare=False,
                                               exchange_type="fanout")
        self.assertEqual(p.exchange.type, "fanout")

    def test_update_state(self):
        """update_state works both with an explicit task id and with the
        id taken from the current request."""
        @task_dec
        def yyy():
            pass
        tid = gen_unique_id()
        yyy.update_state(tid, "FROBULATING", {"fooz": "baaz"})
        self.assertEqual(yyy.AsyncResult(tid).status, "FROBULATING")
        self.assertDictEqual(yyy.AsyncResult(tid).result, {"fooz": "baaz"})

        yyy.request.id = tid
        yyy.update_state(state="FROBUZATING", meta={"fooz": "baaz"})
        self.assertEqual(yyy.AsyncResult(tid).status, "FROBUZATING")
        self.assertDictEqual(yyy.AsyncResult(tid).result, {"fooz": "baaz"})

    def test_repr(self):
        @task_dec
        def task_test_repr():
            pass
        self.assertIn("task_test_repr", repr(task_test_repr))

    def test_has___name__(self):
        @task_dec
        def yyy2():
            pass
        self.assertTrue(yyy2.__name__)

    def test_get_logger(self):
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        t1 = T1()
        logfh = StringIO()
        logger = t1.get_logger(logfile=logfh, loglevel=0)
        self.assertTrue(logger)

        # loglevel=None falls back to the level stored on the request
        T1.request.loglevel = 3
        logger = t1.get_logger(logfile=logfh, loglevel=None)
        self.assertTrue(logger)
class TestTaskSet(unittest.TestCase):
    """Tests for task.TaskSet grouping, publishing and result joining."""

    @with_eager_tasks
    def test_function_taskset(self):
        """A TaskSet of function tasks joins to one result per subtask."""
        subtasks = [return_True_task.subtask([i]) for i in range(1, 6)]
        ts = task.TaskSet(subtasks)
        res = ts.apply_async()
        self.assertListEqual(res.join(), [True, True, True, True, True])

    def test_counter_taskset(self):
        """Publishing a TaskSet pushes one correctly-tagged message per
        subtask, and running them all produces the expected total."""
        IncrementCounterTask.count = 0
        ts = task.TaskSet(tasks=[
            IncrementCounterTask.subtask((), {}),
            IncrementCounterTask.subtask((), {"increment_by": 2}),
            IncrementCounterTask.subtask((), {"increment_by": 3}),
            IncrementCounterTask.subtask((), {"increment_by": 4}),
            IncrementCounterTask.subtask((), {"increment_by": 5}),
            IncrementCounterTask.subtask((), {"increment_by": 6}),
            IncrementCounterTask.subtask((), {"increment_by": 7}),
            IncrementCounterTask.subtask((), {"increment_by": 8}),
            IncrementCounterTask.subtask((), {"increment_by": 9}),
        ])
        self.assertEqual(ts.total, 9)

        # start from an empty queue
        consumer = IncrementCounterTask().get_consumer()
        consumer.purge()
        consumer.close()
        taskset_res = ts.apply_async()
        subtasks = taskset_res.subtasks
        taskset_id = taskset_res.taskset_id
        consumer = IncrementCounterTask().get_consumer()
        for subtask in subtasks:
            m = consumer.fetch().payload
            self.assertDictContainsSubset({"taskset": taskset_id,
                                           "task": IncrementCounterTask.name,
                                           "id": subtask.task_id}, m)
            # execute each fetched message by hand
            IncrementCounterTask().run(
                increment_by=m.get("kwargs", {}).get("increment_by"))
        self.assertEqual(IncrementCounterTask.count, sum(xrange(1, 10)))

    def test_named_taskset(self):
        """An explicit taskset_id is honoured by apply()."""
        prefix = "test_named_taskset-"
        ts = task.TaskSet([return_True_task.subtask([1])])
        res = ts.apply(taskset_id=prefix + gen_unique_id())
        self.assertTrue(res.taskset_id.startswith(prefix))
class TestTaskApply(unittest.TestCase):
    """Tests for eager task execution via Task.apply()."""

    def test_apply_throw(self):
        """throw=True propagates the task's exception to the caller."""
        self.assertRaises(KeyError, RaisingTask.apply, throw=True)

    def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self):
        """The global propagate setting makes apply() raise, too."""
        RaisingTask.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
        try:
            self.assertRaises(KeyError, RaisingTask.apply)
        finally:
            RaisingTask.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = False

    def test_apply(self):
        """apply() returns an EagerResult reflecting success or failure."""
        IncrementCounterTask.count = 0

        e = IncrementCounterTask.apply()
        self.assertIsInstance(e, EagerResult)
        self.assertEqual(e.get(), 1)

        e = IncrementCounterTask.apply(args=[1])
        self.assertEqual(e.get(), 2)

        e = IncrementCounterTask.apply(kwargs={"increment_by": 4})
        self.assertEqual(e.get(), 6)

        self.assertTrue(e.successful())
        self.assertTrue(e.ready())
        self.assertTrue(repr(e).startswith("<EagerResult:"))

        # a raising task yields a ready-but-failed result
        f = RaisingTask.apply()
        self.assertTrue(f.ready())
        self.assertFalse(f.successful())
        self.assertTrue(f.traceback)
        self.assertRaises(KeyError, f.get)
class MyPeriodic(task.PeriodicTask):
    """Periodic task scheduled once per hour (fixture for TestPeriodicTask)."""
    run_every = timedelta(hours=1)
class TestPeriodicTask(unittest.TestCase):
    """Tests for PeriodicTask scheduling with a plain timedelta run_every."""

    def test_must_have_run_every(self):
        """Subclassing PeriodicTask without run_every raises at class
        creation time."""
        self.assertRaises(NotImplementedError, type, "Foo",
                          (task.PeriodicTask, ), {"__module__": __name__})

    def test_remaining_estimate(self):
        self.assertIsInstance(
            MyPeriodic().remaining_estimate(datetime.now()),
            timedelta)

    def test_is_due_not_due(self):
        due, remaining = MyPeriodic().is_due(datetime.now())
        self.assertFalse(due)
        # This assertion may fail if executed in the
        # first minute of an hour, thus 59 instead of 60
        self.assertGreater(remaining, 59)

    def test_is_due(self):
        """A last-run one full interval ago is due again immediately."""
        p = MyPeriodic()
        due, remaining = p.is_due(datetime.now() - p.run_every.run_every)
        self.assertTrue(due)
        self.assertEqual(remaining,
                         p.timedelta_seconds(p.run_every.run_every))

    def test_schedule_repr(self):
        p = MyPeriodic()
        self.assertTrue(repr(p.run_every))
class EveryMinutePeriodic(task.PeriodicTask):
    """Crontab-scheduled task with the default spec (every minute)."""
    run_every = crontab()
class QuarterlyPeriodic(task.PeriodicTask):
    """Crontab-scheduled task firing every 15 minutes."""
    run_every = crontab(minute="*/15")
class HourlyPeriodic(task.PeriodicTask):
    """Crontab-scheduled task firing at minute 30 of every hour."""
    run_every = crontab(minute=30)
class DailyPeriodic(task.PeriodicTask):
    """Crontab-scheduled task firing daily at 07:30."""
    run_every = crontab(hour=7, minute=30)
class WeeklyPeriodic(task.PeriodicTask):
    """Crontab-scheduled task firing Thursdays at 07:30."""
    run_every = crontab(hour=7, minute=30, day_of_week="thursday")
def patch_crontab_nowfun(cls, retval):
    """Decorator factory that freezes *cls*'s notion of "now".

    While the decorated function runs, ``cls.run_every.nowfun`` is replaced
    with a callable returning *retval*; the original is restored afterwards,
    even if the wrapped function raises.
    """
    def create_patcher(fun):
        @wraps(fun)
        def __inner(*args, **kwargs):
            schedule = cls.run_every
            original_nowfun = schedule.nowfun
            schedule.nowfun = lambda: retval
            try:
                return fun(*args, **kwargs)
            finally:
                schedule.nowfun = original_nowfun
        return __inner
    return create_patcher
class test_crontab_parser(unittest.TestCase):
    """Tests for crontab_parser: star, ranges, groups, steps, composites
    and error cases."""

    def test_parse_star(self):
        """'*' expands to the full range for the given period length."""
        self.assertEquals(crontab_parser(24).parse('*'), set(range(24)))
        self.assertEquals(crontab_parser(60).parse('*'), set(range(60)))
        self.assertEquals(crontab_parser(7).parse('*'), set(range(7)))

    def test_parse_range(self):
        """'a-b' is inclusive of both endpoints."""
        self.assertEquals(crontab_parser(60).parse('1-10'),
                          set(range(1, 10 + 1)))
        self.assertEquals(crontab_parser(24).parse('0-20'),
                          set(range(0, 20 + 1)))
        self.assertEquals(crontab_parser().parse('2-10'),
                          set(range(2, 10 + 1)))

    def test_parse_groups(self):
        """Comma-separated values yield exactly those values."""
        self.assertEquals(crontab_parser().parse('1,2,3,4'),
                          set([1, 2, 3, 4]))
        self.assertEquals(crontab_parser().parse('0,15,30,45'),
                          set([0, 15, 30, 45]))

    def test_parse_steps(self):
        """'*/n' yields every n-th value of the period."""
        self.assertEquals(crontab_parser(8).parse('*/2'),
                          set([0, 2, 4, 6]))
        self.assertEquals(crontab_parser().parse('*/2'),
                          set(i * 2 for i in xrange(30)))
        self.assertEquals(crontab_parser().parse('*/3'),
                          set(i * 3 for i in xrange(20)))

    def test_parse_composite(self):
        """Ranges with steps, and comma-joined mixed expressions."""
        self.assertEquals(crontab_parser(8).parse('*/2'), set([0, 2, 4, 6]))
        self.assertEquals(crontab_parser().parse('2-9/5'), set([5]))
        self.assertEquals(crontab_parser().parse('2-10/5'), set([5, 10]))
        self.assertEquals(crontab_parser().parse('2-11/5,3'), set([3, 5, 10]))
        self.assertEquals(crontab_parser().parse('2-4/3,*/5,0-21/4'),
                          set([0, 3, 4, 5, 8, 10, 12, 15, 16,
                               20, 25, 30, 35, 40, 45, 50, 55]))

    def test_parse_errors_on_empty_string(self):
        self.assertRaises(ParseException, crontab_parser(60).parse, '')

    def test_parse_errors_on_empty_group(self):
        self.assertRaises(ParseException, crontab_parser(60).parse, '1,,2')

    def test_parse_errors_on_empty_steps(self):
        self.assertRaises(ParseException, crontab_parser(60).parse, '*/')

    def test_parse_errors_on_negative_number(self):
        self.assertRaises(ParseException, crontab_parser(60).parse, '-20')

    def test_expand_cronspec_eats_iterables(self):
        self.assertEqual(crontab._expand_cronspec(iter([1, 2, 3]), 100),
                         set([1, 2, 3]))

    def test_expand_cronspec_invalid_type(self):
        self.assertRaises(TypeError, crontab._expand_cronspec, object(), 100)

    def test_repr(self):
        self.assertIn("*", repr(crontab("*")))

    def test_eq(self):
        """crontab equality is by expanded value, not by spec string."""
        self.assertEqual(crontab(day_of_week="1, 2"),
                         crontab(day_of_week="1-2"))
        self.assertEqual(crontab(minute="1", hour="2", day_of_week="5"),
                         crontab(minute="1", hour="2", day_of_week="5"))
        self.assertNotEqual(crontab(minute="1"), crontab(minute="2"))
        self.assertFalse(object() == crontab(minute="1"))
        self.assertFalse(crontab(minute="1") == object())
class test_crontab_remaining_estimate(unittest.TestCase):
    """Check crontab.remaining_estimate against fixed reference datetimes."""

    def next_ocurrance(self, crontab, now):
        """Return the next datetime at which *crontab* fires after *now*."""
        crontab.nowfun = lambda: now
        return now + crontab.remaining_estimate(now)

    def test_next_minute(self):
        next = self.next_ocurrance(crontab(),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 11, 14, 31))

    def test_not_next_minute(self):
        """Rolls over into the next hour."""
        next = self.next_ocurrance(crontab(),
                                   datetime(2010, 9, 11, 14, 59, 15))
        self.assertEquals(next, datetime(2010, 9, 11, 15, 0))

    def test_this_hour(self):
        next = self.next_ocurrance(crontab(minute=[5, 42]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 11, 14, 42))

    def test_not_this_hour(self):
        """All candidate minutes have passed; first minute of next hour."""
        next = self.next_ocurrance(crontab(minute=[5, 10, 15]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 11, 15, 5))

    def test_today(self):
        next = self.next_ocurrance(crontab(minute=[5, 42], hour=[12, 17]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 11, 17, 5))

    def test_not_today(self):
        """All candidate hours have passed; first slot tomorrow."""
        next = self.next_ocurrance(crontab(minute=[5, 42], hour=[12]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 12, 12, 5))

    def test_weekday(self):
        next = self.next_ocurrance(crontab(minute=30,
                                           hour=14,
                                           day_of_week="sat"),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 18, 14, 30))

    def test_not_weekday(self):
        next = self.next_ocurrance(crontab(minute=[5, 42],
                                           day_of_week="mon-fri"),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEquals(next, datetime(2010, 9, 13, 0, 5))
class test_crontab_is_due(unittest.TestCase):
def setUp(self):
self.now = datetime.now()
self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond
def test_default_crontab_spec(self):
c = crontab()
self.assertEquals(c.minute, set(range(60)))
self.assertEquals(c.hour, set(range(24)))
self.assertEquals(c.day_of_week, set(range(7)))
def test_simple_crontab_spec(self):
c = crontab(minute=30)
self.assertEquals(c.minute, set([30]))
self.assertEquals(c.hour, set(range(24)))
self.assertEquals(c.day_of_week, set(range(7)))
def test_crontab_spec_minute_formats(self):
c = crontab(minute=30)
self.assertEquals(c.minute, set([30]))
c = crontab(minute='30')
self.assertEquals(c.minute, set([30]))
c = crontab(minute=(30, 40, 50))
self.assertEquals(c.minute, set([30, 40, 50]))
c = crontab(minute=set([30, 40, 50]))
self.assertEquals(c.minute, set([30, 40, 50]))
def test_crontab_spec_invalid_minute(self):
self.assertRaises(ValueError, crontab, minute=60)
self.assertRaises(ValueError, crontab, minute='0-100')
def test_crontab_spec_hour_formats(self):
c = crontab(hour=6)
self.assertEquals(c.hour, set([6]))
c = crontab(hour='5')
self.assertEquals(c.hour, set([5]))
c = crontab(hour=(4, 8, 12))
self.assertEquals(c.hour, set([4, 8, 12]))
def test_crontab_spec_invalid_hour(self):
self.assertRaises(ValueError, crontab, hour=24)
self.assertRaises(ValueError, crontab, hour='0-30')
def test_crontab_spec_dow_formats(self):
c = crontab(day_of_week=5)
self.assertEquals(c.day_of_week, set([5]))
c = crontab(day_of_week='5')
self.assertEquals(c.day_of_week, set([5]))
c = crontab(day_of_week='fri')
self.assertEquals(c.day_of_week, set([5]))
c = crontab(day_of_week='tuesday,sunday,fri')
self.assertEquals(c.day_of_week, set([0, 2, 5]))
c = crontab(day_of_week='mon-fri')
self.assertEquals(c.day_of_week, set([1, 2, 3, 4, 5]))
c = crontab(day_of_week='*/2')
self.assertEquals(c.day_of_week, set([0, 2, 4, 6]))
def test_crontab_spec_invalid_dow(self):
self.assertRaises(ValueError, crontab, day_of_week='fooday-barday')
self.assertRaises(ValueError, crontab, day_of_week='1,4,foo')
self.assertRaises(ValueError, crontab, day_of_week='7')
self.assertRaises(ValueError, crontab, day_of_week='12')
def test_every_minute_execution_is_due(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertTrue(due)
self.assertAlmostEquals(remaining, self.next_minute, 1)
    def test_every_minute_execution_is_not_due(self):
        """not due when it already ran within the current minute."""
        last_ran = self.now - timedelta(seconds=self.now.second)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertFalse(due)
        self.assertAlmostEquals(remaining, self.next_minute, 1)
    # 29th of May 2010 is a saturday
    # NOTE(review): the decorator patches HourlyPeriodic's nowfun while the
    # test drives EveryMinutePeriodic — confirm this is intentional.
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 29, 10, 30))
    def test_execution_is_due_on_saturday(self):
        """every-minute entries are still due on saturdays."""
        last_ran = self.now - timedelta(seconds=61)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertTrue(due)
        self.assertAlmostEquals(remaining, self.next_minute, 1)
    # 30th of May 2010 is a sunday
    # NOTE(review): the decorator patches HourlyPeriodic's nowfun while the
    # test drives EveryMinutePeriodic — confirm this is intentional.
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 30, 10, 30))
    def test_execution_is_due_on_sunday(self):
        """every-minute entries are still due on sundays."""
        last_ran = self.now - timedelta(seconds=61)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertTrue(due)
        self.assertAlmostEquals(remaining, self.next_minute, 1)
    # 31st of May 2010 is a monday
    # NOTE(review): the decorator patches HourlyPeriodic's nowfun while the
    # test drives EveryMinutePeriodic — confirm this is intentional.
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 31, 10, 30))
    def test_execution_is_due_on_monday(self):
        """every-minute entries are still due on mondays."""
        last_ran = self.now - timedelta(seconds=61)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertTrue(due)
        self.assertAlmostEquals(remaining, self.next_minute, 1)
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 30))
    def test_every_hour_execution_is_due(self):
        """due at :30 when the last run was hours ago; next in one hour."""
        due, remaining = HourlyPeriodic().is_due(datetime(2010, 5, 10, 6, 30))
        self.assertTrue(due)
        self.assertEquals(remaining, 60 * 60)
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 29))
    def test_every_hour_execution_is_not_due(self):
        """not due one minute before the scheduled :30; re-check in 60s."""
        due, remaining = HourlyPeriodic().is_due(datetime(2010, 5, 10, 9, 30))
        self.assertFalse(due)
        self.assertEquals(remaining, 60)
    @patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 15))
    def test_first_quarter_execution_is_due(self):
        """due at :15 when the last run is hours old; next in 15 minutes."""
        due, remaining = QuarterlyPeriodic().is_due(
                            datetime(2010, 5, 10, 6, 30))
        self.assertTrue(due)
        self.assertEquals(remaining, 15 * 60)
    @patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 30))
    def test_second_quarter_execution_is_due(self):
        """due at :30 when the last run is hours old; next in 15 minutes."""
        due, remaining = QuarterlyPeriodic().is_due(
                            datetime(2010, 5, 10, 6, 30))
        self.assertTrue(due)
        self.assertEquals(remaining, 15 * 60)
    @patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 14))
    def test_first_quarter_execution_is_not_due(self):
        """not due at :14 after a :00 run; re-check in 60s."""
        due, remaining = QuarterlyPeriodic().is_due(
                            datetime(2010, 5, 10, 10, 0))
        self.assertFalse(due)
        self.assertEquals(remaining, 60)
    @patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 29))
    def test_second_quarter_execution_is_not_due(self):
        """not due at :29 after a :15 run; re-check in 60s."""
        due, remaining = QuarterlyPeriodic().is_due(
                            datetime(2010, 5, 10, 10, 15))
        self.assertFalse(due)
        self.assertEquals(remaining, 60)
    @patch_crontab_nowfun(DailyPeriodic, datetime(2010, 5, 10, 7, 30))
    def test_daily_execution_is_due(self):
        """due when the last run was exactly a day ago; next in 24h."""
        due, remaining = DailyPeriodic().is_due(datetime(2010, 5, 9, 7, 30))
        self.assertTrue(due)
        self.assertEquals(remaining, 24 * 60 * 60)
    @patch_crontab_nowfun(DailyPeriodic, datetime(2010, 5, 10, 10, 30))
    def test_daily_execution_is_not_due(self):
        """not due three hours after today's run; 21h remaining."""
        due, remaining = DailyPeriodic().is_due(datetime(2010, 5, 10, 7, 30))
        self.assertFalse(due)
        self.assertEquals(remaining, 21 * 60 * 60)
    @patch_crontab_nowfun(WeeklyPeriodic, datetime(2010, 5, 6, 7, 30))
    def test_weekly_execution_is_due(self):
        """due when the last run was near a week ago; next in 7 days."""
        due, remaining = WeeklyPeriodic().is_due(datetime(2010, 4, 30, 7, 30))
        self.assertTrue(due)
        self.assertEquals(remaining, 7 * 24 * 60 * 60)
    @patch_crontab_nowfun(WeeklyPeriodic, datetime(2010, 5, 7, 10, 30))
    def test_weekly_execution_is_not_due(self):
        """not due the day after this week's run; ~6 days remaining."""
        due, remaining = WeeklyPeriodic().is_due(datetime(2010, 5, 6, 7, 30))
        self.assertFalse(due)
        self.assertEquals(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by Attila Csipa <web2py@csipa.in.rs>
Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu>
"""
import sys
import os
import threading
import logging
import time
import sched
import re
import datetime
import platform
import portalocker
import fileutils
import cPickle
from settings import global_settings
logger = logging.getLogger("web2py.cron")
_cron_stopping = False
def absolute_path_link(path):
    """
    Return an absolute path for the destination of a symlink.

    Non-links are simply made absolute; relative link targets are
    resolved against the directory that contains the link.
    """
    if not os.path.islink(path):
        return os.path.abspath(path)
    target = os.readlink(path)
    if os.path.isabs(target):
        return target
    return os.path.join(os.path.dirname(path), target)
def stopcron():
    "graceful shutdown of cron: flips the module flag that all cron threads poll"
    global _cron_stopping
    _cron_stopping = True
class extcron(threading.Thread):
    """External cron: a non-daemon thread that runs the cron dance once
    per invocation (triggered from outside web2py)."""
    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.setDaemon(False)
        self.path = applications_parent
        # startup=True processes @reboot tasks as soon as we are constructed
        crondance(self.path, 'external', startup=True)
    def run(self):
        if not _cron_stopping:
            logger.debug('external cron invocation')
            crondance(self.path, 'external', startup=False)
class hardcron(threading.Thread):
    """Hard cron: a daemon thread that fires the cron dance once per
    minute, aligned to wall-clock minute boundaries."""
    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.path = applications_parent
        # startup=True processes @reboot tasks immediately
        crondance(self.path, 'hard', startup=True)
    def launch(self):
        if not _cron_stopping:
            logger.debug('hard cron invocation')
            crondance(self.path, 'hard', startup = False)
    def run(self):
        s = sched.scheduler(time.time, time.sleep)
        logger.info('Hard cron daemon started')
        while not _cron_stopping:
            now = time.time()
            # schedule the next launch at the upcoming minute boundary
            s.enter(60 - now % 60, 1, self.launch, ())
            s.run()
class softcron(threading.Thread):
    """Soft cron: runs the cron dance once per thread, piggy-backing on
    incoming requests (no dedicated scheduler)."""
    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.path = applications_parent
        # startup=True processes @reboot tasks immediately
        crondance(self.path, 'soft', startup=True)
    def run(self):
        if not _cron_stopping:
            logger.debug('soft cron invocation')
            crondance(self.path, 'soft', startup=False)
class Token(object):
    """
    Cross-process cron mutex built on the 'cron.master' lock file.

    The file holds a pickled (start, stop) pair of timestamps; see
    acquire() for the protocol details.
    """
    def __init__(self,path):
        self.path = os.path.join(path, 'cron.master')
        if not os.path.exists(self.path):
            # create an empty master file on first use
            fileutils.write_file(self.path, '', 'wb')
        self.master = None  # open handle to cron.master while lock is held
        self.now = time.time()
    def acquire(self,startup=False):
        """
        returns the time when the lock is acquired or
        None if cron already running

        lock is implemented by writing a pickle (start, stop) in cron.master
        start is time when cron job starts and stop is time when cron completed
        stop == 0 if job started but did not yet complete
        if a cron job started within less than 60 seconds, acquire returns None
        if a cron job started before 60 seconds and did not stop,
        a warning is issue "Stale cron.master detected"
        """
        if portalocker.LOCK_EX is None:
            # platform without file locking support: cron stays disabled
            logger.warning('WEB2PY CRON: Disabled because no file locking')
            return None
        self.master = open(self.path,'rb+')
        try:
            ret = None
            portalocker.lock(self.master,portalocker.LOCK_EX)
            try:
                (start, stop) = cPickle.load(self.master)
            except:
                # empty/corrupt master file: treat as an old, completed run
                (start, stop) = (0, 1)
            if startup or self.now - start > 59.99:
                ret = self.now
                if not stop:
                    # this happens if previous cron job longer than 1 minute
                    logger.warning('WEB2PY CRON: Stale cron.master detected')
                logger.debug('WEB2PY CRON: Acquiring lock')
                self.master.seek(0)
                cPickle.dump((self.now,0),self.master)
        finally:
            portalocker.unlock(self.master)
        if not ret:
            # do this so no need to release
            self.master.close()
        return ret
    def release(self):
        """
        this function writes into cron.master the time when cron job
        was completed
        """
        if not self.master.closed:
            portalocker.lock(self.master,portalocker.LOCK_EX)
            logger.debug('WEB2PY CRON: Releasing cron lock')
            self.master.seek(0)
            (start, stop) = cPickle.load(self.master)
            if start == self.now: # if this is my lock
                self.master.seek(0)
                cPickle.dump((self.now,time.time()),self.master)
            portalocker.unlock(self.master)
            self.master.close()
def rangetolist(s, period='min'):
    """
    Expand a crontab 'a-b/step' expression into a list of ints.

    A leading '*' is first rewritten to the full range for `period`.
    Only values divisible by the step are kept; strings that do not
    match 'a-b/step' yield an empty list.
    """
    full_ranges = {'min': '0-59', 'hr': '0-23', 'dom': '1-31',
                   'mon': '1-12', 'dow': '0-6'}
    if s.startswith('*') and period in full_ranges:
        s = s.replace('*', full_ranges[period], 1)
    match = re.compile(r'(\d+)-(\d+)/(\d+)').match(s)
    if not match:
        return []
    start = int(match.group(1))
    stop = int(match.group(2))
    step = int(match.group(3))
    return [i for i in range(start, stop + 1) if i % step == 0]
def parsecronline(line):
    """
    Parse one crontab line into a task dict.

    @reboot/@yearly/.../@hourly shortcuts are rewritten to their
    five-field equivalents (@reboot uses minute == -1 as a
    startup-only marker).  Returns a dict with keys 'min', 'hr',
    'dom', 'mon', 'dow' (each a list of matching ints; a missing key
    means '*'), plus 'user' and 'cmd'.  Returns None when the line
    has fewer than 7 whitespace-separated fields.
    """
    task = {}
    if line.startswith('@reboot'):
        line=line.replace('@reboot', '-1 * * * *')
    elif line.startswith('@yearly'):
        line=line.replace('@yearly', '0 0 1 1 *')
    elif line.startswith('@annually'):
        line=line.replace('@annually', '0 0 1 1 *')
    elif line.startswith('@monthly'):
        line=line.replace('@monthly', '0 0 1 * *')
    elif line.startswith('@weekly'):
        line=line.replace('@weekly', '0 0 * * 0')
    elif line.startswith('@daily'):
        line=line.replace('@daily', '0 0 * * *')
    elif line.startswith('@midnight'):
        line=line.replace('@midnight', '0 0 * * *')
    elif line.startswith('@hourly'):
        line=line.replace('@hourly', '0 * * * *')
    params = line.strip().split(None, 6)
    if len(params) < 7:
        return None
    daysofweek={'sun':0,'mon':1,'tue':2,'wed':3,'thu':4,'fri':5,'sat':6}
    for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']):
        if not s in [None, '*']:
            task[id] = []
            vals = s.split(',')
            for val in vals:
                if val != '-1' and '-' in val and '/' not in val:
                    # a plain range 'a-b' is treated as 'a-b/1'
                    val = '%s/1' % val
                if '/' in val:
                    task[id] += rangetolist(val, id)
                elif val.isdigit() or val=='-1':
                    task[id].append(int(val))
                elif id=='dow' and val[:3].lower() in daysofweek:
                    # BUG FIX: daysofweek is a dict and must be indexed,
                    # not called — the old daysofweek(...) raised
                    # TypeError for named weekdays like 'mon' or 'friday'.
                    task[id].append(daysofweek[val[:3].lower()])
    task['user'] = params[5]
    task['cmd'] = params[6]
    return task
class cronlauncher(threading.Thread):
    """
    Runs a single cron command in a subprocess from a worker thread and
    logs its combined output when it finishes.
    """
    def __init__(self, cmd, shell=True):
        threading.Thread.__init__(self)
        if platform.system() == 'Windows':
            shell = False
        elif isinstance(cmd,list):
            # POSIX shell=True expects a single command string
            cmd = ' '.join(cmd)
        self.cmd = cmd
        self.shell = shell
    def run(self):
        import subprocess
        proc = subprocess.Popen(self.cmd,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=self.shell)
        # block until the job completes, capturing stdout/stderr
        (stdoutdata,stderrdata) = proc.communicate()
        if proc.returncode != 0:
            logger.warning(
                'WEB2PY CRON Call returned code %s:\n%s' % \
                (proc.returncode, stdoutdata+stderrdata))
        else:
            logger.debug('WEB2PY CRON Call returned success:\n%s' \
                % stdoutdata)
def crondance(applications_parent, ctype='soft', startup=False):
    """
    Run one cron pass: acquire the cron.master lock, then for every
    application with a cron/crontab file launch the tasks whose time
    fields match the current local time.

    ctype is 'soft', 'hard' or 'external' (used only for logging);
    startup=True runs only/also the @reboot (minute == -1) tasks.
    """
    apppath = os.path.join(applications_parent,'applications')
    cron_path = os.path.join(applications_parent)
    token = Token(cron_path)
    cronmaster = token.acquire(startup=startup)
    if not cronmaster:
        # another process ran cron within the last minute
        return
    now_s = time.localtime()
    # pairs of (task key, current time value) used to match each task
    checks=(('min',now_s.tm_min),
            ('hr',now_s.tm_hour),
            ('mon',now_s.tm_mon),
            ('dom',now_s.tm_mday),
            ('dow',(now_s.tm_wday+1)%7))
    apps = [x for x in os.listdir(apppath)
            if os.path.isdir(os.path.join(apppath, x))]
    full_apath_links = set()
    for app in apps:
        if _cron_stopping:
            break;
        apath = os.path.join(apppath,app)
        # if app is a symbolic link to other app, skip it
        full_apath_link = absolute_path_link(apath)
        if full_apath_link in full_apath_links:
            continue
        else:
            full_apath_links.add(full_apath_link)
        cronpath = os.path.join(apath, 'cron')
        crontab = os.path.join(cronpath, 'crontab')
        if not os.path.exists(crontab):
            continue
        try:
            # ignore blank lines and '#' comments
            cronlines = fileutils.readlines_file(crontab, 'rt')
            lines = [x.strip() for x in cronlines if x.strip() and not x.strip().startswith('#')]
            tasks = [parsecronline(cline) for cline in lines]
        except Exception, e:
            logger.error('WEB2PY CRON: crontab read error %s' % e)
            continue
        for task in tasks:
            if _cron_stopping:
                break;
            commands = [sys.executable]
            w2p_path = fileutils.abspath('web2py.py', gluon=True)
            if os.path.exists(w2p_path):
                commands.append(w2p_path)
            if global_settings.applications_parent != global_settings.gluon_parent:
                commands.extend(('-f', global_settings.applications_parent))
            # citems[i] is True when the task constrains field i and the
            # current time does NOT match it
            citems = [(k in task and not v in task[k]) for k,v in checks]
            task_min= task.get('min',[])
            if not task:
                continue
            elif not startup and task_min == [-1]:
                # @reboot task outside of startup
                continue
            elif task_min != [-1] and reduce(lambda a,b: a or b, citems):
                # some time field does not match now
                continue
            logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s' \
                % (ctype, app, task.get('cmd'),
                   os.getcwd(), datetime.datetime.now()))
            # '**cmd' = run script without models, '*cmd' = with models (-M)
            action, command, models = False, task['cmd'], ''
            if command.startswith('**'):
                (action,models,command) = (True,'',command[2:])
            elif command.startswith('*'):
                (action,models,command) = (True,'-M',command[1:])
            else:
                action=False
            if action and command.endswith('.py'):
                commands.extend(('-J',                # cron job
                                 models,             # import models?
                                 '-S', app,          # app name
                                 '-a', '"<recycle>"',# password
                                 '-R', command))     # command
                shell = True
            elif action:
                commands.extend(('-J',                    # cron job
                                 models,                 # import models?
                                 '-S', app+'/'+command,  # app name
                                 '-a', '"<recycle>"'))   # password
                shell = True
            else:
                # plain external command, run as-is
                commands = command
                shell = False
            try:
                cronlauncher(commands, shell=shell).start()
            except Exception, e:
                logger.warning(
                    'WEB2PY CRON: Execution error for %s: %s' \
                        % (task.get('cmd'), e))
    token.release()
| |
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import os
import re
from optparse import OptionParser
from django.utils.six.moves import input
from jinja2 import Environment, PackageLoader
from reviewboard import get_version_string
env = Environment(
loader=PackageLoader(
'reviewboard', '../contrib/tools/templates/extensions'))
options = None
def get_confirmation(question):
    """
    Ask `question` repeatedly until the user's answer starts with 'y'
    or 'n'; return True for a 'y' answer.
    """
    while True:
        answer = input("%s (y/n): " % question).lower()
        if re.match(r'^[yn]', answer):
            return answer.startswith('y')
        print("Incorrect option '%s'" % answer)
class NamingConvention(object):
    """
    Base class for naming conventions: a membership test (formatted)
    and a converter that coerces a string into the convention.
    """

    # Characters that are never part of a valid name in any convention.
    ILLEGAL_CHARACTERS = re.compile(r'[^A-Za-z0-9 ]')

    def formatted(self, string):
        """The base convention accepts nothing; subclasses override."""
        return False

    def convert(self, string):
        """Identity conversion; subclasses override."""
        return string
class CamelCase(NamingConvention):
    """
    This represents the camel case naming convention and is typically used
    for class names. All tokens are one of the following:
    1) Alphabetic and starting with a capital
    2) Numeric
    3) Alphanumeric and starting with a capital letter
    There must be at least one token, and the first character must be a
    capital letter.
    """
    REGEX = re.compile(r'^[A-Z][a-z0-9]*(([0-9]+)|([A-Z][a-z0-9]*))*$')

    def formatted(self, string):
        """Return True when `string` already is camel case."""
        return self.REGEX.match(string) is not None

    def convert(self, string):
        """Camel-case `string`: strip illegal chars, split on word
        boundaries, then capitalize and join the words."""
        cleaned = self.ILLEGAL_CHARACTERS.sub(" ", string)
        spaced = re.sub(r'([0-9a-zA-Z])([A-Z])', r'\1 \2', cleaned)
        return ''.join(word.capitalize() for word in spaced.split())
class LowerCaseWithUnderscores(NamingConvention):
    """
    This represents the case typically used for module/package names (and
    perhaps functions). All tokens are one of the following separated by
    an underscore:
    1) Alphabetic lower case
    2) Numeric
    3) Alphanumeric lower case and starting with a letter
    There must be at least one token, and the first character must be a letter.
    """
    REGEX = re.compile(r'^[a-z][a-z0-9]*(_+(([0-9]+)|([a-z][a-z0-9]*)))*_*$')

    def formatted(self, string):
        """Return True when `string` already is lower_case_with_underscores."""
        return self.REGEX.match(string) is not None

    def convert(self, string):
        """Convert: strip illegal chars, split on word boundaries, then
        lowercase and join the words with underscores."""
        cleaned = self.ILLEGAL_CHARACTERS.sub(" ", string)
        spaced = re.sub(r'([0-9a-zA-Z])([A-Z])', r'\1 \2', cleaned)
        return '_'.join(spaced.lower().split())
def get_formatted_string(string_type, string, fallback, case):
    """
    Return a string conforming to `case`.

    If `string` is provided and already valid it is returned unchanged.
    If it is missing, a suggestion is auto-generated from `fallback` and
    the user may accept it or type a replacement.  Any value that is
    still invalid is re-prompted for until it conforms.
    """
    if string is not None:
        if case.formatted(string):
            return string
    else:
        # no value supplied: derive a suggestion from the fallback name
        string = case.convert(fallback)
        question = "Do you wish to use %s as the %s?" % \
                   (string, string_type)
        if not get_confirmation(question):
            string = input("Please input a %s: " % string_type)
    # loop until the (supplied or typed) value matches the convention
    while not case.formatted(string):
        print("'%s' is not a valid %s." % (string, string_type))
        string = input("Please input a valid %s: " % string_type)
    return string
def parse_options():
    """
    Parses the options and stores them in the global options variable.

    Expects exactly one positional argument (the extension name);
    exits with a usage message otherwise.  Missing derived options are
    then filled in by autofill_unprovided_options().
    """
    parser = OptionParser(usage="%prog name [options]",
                          version="Review Board " + get_version_string())
    parser.add_option("--class-name",
                      dest="class_name", default=None,
                      help="class name of extension (capitalized no spaces)")
    parser.add_option("--package-name",
                      dest="package_name", default=None,
                      help="package name of extension (lower case with "
                           "underscores)")
    parser.add_option("--description",
                      dest="description", default=None,
                      help="description of extension")
    parser.add_option("--author",
                      dest="author", default=None,
                      help="author of the extension")
    parser.add_option("--is-configurable",
                      dest="is_configurable", action="store_true",
                      default=False,
                      help="whether this extension is configurable")
    # store directly into the module-level `options` global
    (globals()["options"], args) = parser.parse_args()
    if len(args) != 1:
        print("Error: incorrect number of arguments")
        parser.print_help()
        exit(-1)
    options.extension_name = args[0]
    autofill_unprovided_options()
def autofill_unprovided_options():
    """
    This will autofill all the empty 'necessary' options that can be auto-
    generated from the necessary fields.

    package_name and class_name are derived from extension_name via the
    matching naming convention (confirming interactively when needed);
    description gets a simple default.
    """
    options.package_name = get_formatted_string("package name",
                                                options.package_name,
                                                options.extension_name,
                                                LowerCaseWithUnderscores())
    options.class_name = get_formatted_string("class name",
                                              options.class_name,
                                              options.extension_name,
                                              CamelCase())
    if options.description is None:
        options.description = "Extension %s" % options.extension_name
class TemplateBuilder(object):
    """
    A builder that handles the creation of directories for the registered
    template files in addition to creating the output files by filling
    in the templates with the values from options.
    """

    def __init__(self, package_name, options):
        self.package_name = package_name
        self.options = vars(options)  # template context: option name -> value
        self.templates = {}           # template path -> rendered target path
        self.directories = set()      # directories build() must create

    def add_template(self, template, target):
        """Register `template` to be rendered at `target`; the target may
        contain the {{PACKAGE}} placeholder."""
        # Raw string for the pattern: the old non-raw "\{\{PACKAGE\}\}"
        # relied on invalid escape sequences, which modern Python flags
        # with a DeprecationWarning/SyntaxWarning.
        target = re.sub(r"\{\{PACKAGE\}\}", self.package_name, target)
        self.templates[template] = target
        directory = os.path.dirname(target)
        self.add_directory(os.path.join(self.package_name, directory))

    def add_directory(self, dir_name):
        """Register a directory to be created by build()."""
        self.directories.add(dir_name)

    def build(self):
        """Create all registered directories, then render every template."""
        self._build_directories()
        self._fill_templates()

    def _build_directories(self):
        # Building into an existing package directory may overwrite files,
        # so ask for confirmation first.
        if os.path.exists(self.package_name):
            question = "Directory '%s' already exists. " \
                       "Do you wish to continue?" \
                       % self.package_name
            if not get_confirmation(question):
                print("Exiting...")
                exit(-1)
        for directory in self.directories:
            if not os.path.exists(directory):
                os.makedirs(directory)

    def _fill_templates(self):
        # items() instead of the Python-2-only iteritems(): this script
        # already targets Python 3 compatibility (print_function,
        # unicode_literals, six's input), where iteritems() raises
        # AttributeError.
        for template, target in self.templates.items():
            self._write_file(template, target, self.options)

    def _write_file(self, template, target, file_opts):
        """Render one template into its target file under the package."""
        filepath = os.path.join(self.package_name, target)
        # Context manager guarantees the file is closed even if the
        # template lookup or rendering raises.
        with open(filepath, "w") as f:
            f.writelines(env.get_template(template).render(file_opts))
def main():
    """Entry point: parse options, then generate the extension skeleton."""
    parse_options()
    builder = TemplateBuilder(options.package_name, options)
    # templates every extension gets
    builder.add_template("setup.py", "setup.py")
    builder.add_template("extension/extension.py",
                         "{{PACKAGE}}/extension.py")
    builder.add_template("extension/__init__.py",
                         "{{PACKAGE}}/__init__.py")
    builder.add_template("extension/admin_urls.py",
                         "{{PACKAGE}}/admin_urls.py")
    # configurable extensions also get a settings view and its template
    if options.is_configurable:
        builder.add_template("extension/templates/extension/configure.html",
                             "{{PACKAGE}}/templates/{{PACKAGE}}/configure.html"
                             )
        builder.add_template("extension/views.py",
                             "{{PACKAGE}}/views.py")
    builder.build()
if __name__ == "__main__":
main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
  """Reverses ndarray `x` along `axis` (negative axes allowed)."""
  length = len(x.shape)
  if axis < 0:
    axis = length + axis
  ix = [
      slice(None, None, -1) if i == axis else slice(None) for i in range(length)
  ]
  # Index with a tuple: indexing with a plain list of slices was
  # deprecated in NumPy 1.15 and is an error in recent versions.
  return x[tuple(ix)]
def handle_options(func, x, axis, exclusive, reverse):
  """Adds tf options to numpy scan ops.

  Args:
    func: np.cumsum or np.cumprod.
    x: input ndarray.
    axis: scan axis (may be negative).
    exclusive: if True, element i of the result excludes x[i].
    reverse: if True, scan from the end toward the beginning.

  Returns:
    The scanned ndarray, matching TensorFlow cumsum/cumprod semantics.

  Raises:
    ValueError: if func is neither np.cumsum nor np.cumprod.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis
  if reverse:
    x = numpy_reverse(x, axis)
  if exclusive:
    # Slice lists must be converted to tuples before indexing: a plain
    # (non-tuple) sequence of slices was deprecated in NumPy 1.15 and is
    # rejected by recent versions.
    ix_head = tuple(
        slice(0, 1) if i == axis else slice(None) for i in range(length))
    ix_init = tuple(
        slice(0, -1) if i == axis else slice(None) for i in range(length))
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])
    else:
      raise ValueError("Unknown scan function.")
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)
  if reverse:
    x = numpy_reverse(x, axis)
  return x
class CumsumTest(xla_test.XLATestCase):
  """Compares XLA math_ops.cumsum against np.cumsum for all option combos."""

  # dtypes exercised by every shape test below
  valid_dtypes = [np.float32, np.int32, np.int64]

  def axis_dtypes(self):
    """Integer dtypes supported by this backend for the axis argument."""
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Checks one (axis, exclusive, reverse) combination against numpy."""
    np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
          feed_dict={p: x})
    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    """Checks all four exclusive/reverse combinations for one axis."""
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # Only checks the op builds/evals with each supported axis dtype.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  @test_util.disable_mlir_bridge("Error handling")
  def testInvalidAxis(self):
    # Out-of-range and non-scalar axes must raise InvalidArgumentError.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
  """Compares XLA math_ops.cumprod against np.cumprod for all option combos."""

  # dtypes exercised by every shape test below
  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    """Integer dtypes supported by this backend for the axis argument."""
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    """Checks one (axis, exclusive, reverse) combination against numpy."""
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})
    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    """Checks all four exclusive/reverse combinations for one axis."""
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # Only checks the op builds/evals with each supported axis dtype.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          # BUG FIX: build the op on the placeholder `p`, not on the numpy
          # array `x`; the old code left the placeholder unused, so the
          # feed was dead and the placeholder path was never exercised
          # (CumsumTest.testAxisType already did this correctly).
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  @test_util.disable_mlir_bridge("Error handling")
  def testInvalidAxis(self):
    # Out-of-range and non-scalar axes must raise InvalidArgumentError.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
test.main()
| |
import logging
import random
from datetime import timedelta
from annotatetext.views import post_annotation as annotatetext_post_annotation
from django.conf import settings
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.http import (
HttpResponseForbidden, HttpResponseRedirect)
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from django.views.generic import TemplateView, DetailView, ListView
from okscraper_django.models import ScraperRun
from committees.models import CommitteeMeeting
from events.models import Event
from laws.models import Vote, Bill
from mks.models import Member
from .forms import TidbitSuggestionForm, FeedbackSuggestionForm
from .models import Tidbit
class MainScraperStatusView(ListView):
    """Lists the last month's scraper runs with an aggregated status.

    A run is SUCCESS unless it has non-INFO logs, in which case the status
    of its most recent offending log is shown.
    """

    # NOTE(review): evaluated once at import time — the 30-day window is
    # anchored at process start, not at request time; confirm intended.
    queryset = ScraperRun.objects.all().filter(start_time__gt=timezone.now() - timedelta(days=30)).order_by(
        '-start_time')
    template_name = 'auxiliary/main_scraper_status.html'

    def get_context_data(self, *args, **kwargs):
        # Anchor super() at this class, not at ListView: the old
        # super(ListView, self) started the MRO lookup *after* ListView,
        # silently skipping any override between this class and ListView.
        context = super(MainScraperStatusView, self).get_context_data(
            *args, **kwargs)
        for run in context['object_list']:
            status = 'SUCCESS'
            failed_logs = run.logs.exclude(status='INFO')
            if failed_logs.count() > 0:
                status = failed_logs.order_by('-id')[0].status
            run.status = status
        return context
class ScraperRunDetailView(DetailView):
    """Detail page for a single ScraperRun."""
    model = ScraperRun
    template_name = 'auxiliary/scraper_run_detail.html'
logger = logging.getLogger("open-knesset.auxiliary.views")
def help_page(request):
    """
    Render the localized help page with a random current MK, a random
    controversial vote, a random bill and a sample of the tags cloud.
    The assembled context is cached for five minutes.
    """
    context = cache.get('help_page_context')
    if not context:
        context = {}
        context['title'] = _('Help')
        # a random member of the current knesset
        context['member'] = Member.current_knesset.all()[random.randrange(Member.current_knesset.count())]
        votes = Vote.objects.filter_and_order(order='controversy')
        context['vote'] = votes[random.randrange(votes.count())]
        context['bill'] = Bill.objects.all()[random.randrange(Bill.objects.count())]
        tags_cloud = cache.get('tags_cloud', None)
        if not tags_cloud:
            # TODO: ugly hack, remove this import later, when I figure out why this is even needed here
            from ok_tag.views import calculate_cloud_from_models
            tags_cloud = calculate_cloud_from_models(Vote, Bill, CommitteeMeeting)
            tags_cloud.sort(key=lambda x: x.name)
            cache.set('tags_cloud', tags_cloud, settings.LONG_CACHE_TIME)
        # up to 8 random tags (None when the cloud is empty)
        context['tags'] = random.sample(tags_cloud,
                                        min(len(tags_cloud), 8)
                                        ) if tags_cloud else None
        context['has_search'] = False # NOTE(review): False — presumably hides the base template search; original comment said "enable", confirm
        cache.set('help_page_context', context, 300) # 5 Minutes
    template_name = '%s.%s%s' % ('help_page', settings.LANGUAGE_CODE, '.html')
    return render_to_response(template_name, context, context_instance=RequestContext(request))
def add_previous_comments(comments):
    """
    Attach to each comment the older comments on the same object (as
    c.previous_comments) and drop from the returned list any comment
    that already appears as another comment's predecessor.
    """
    previous_comments = set()
    for c in comments:
        # older comments on the same object, with their authors prefetched
        c.previous_comments = Comment.objects.filter(
            object_pk=c.object_pk,
            content_type=c.content_type,
            submit_date__lt=c.submit_date).select_related('user')
        previous_comments.update(c.previous_comments)
        # marker consumed downstream (presumably templates) to tell
        # comments apart from annotations
        c.is_comment = True
    comments = [c for c in comments if c not in previous_comments]
    return comments
def get_annotations(comments, annotations):
    """
    Merge comments into the annotations list, newest first.

    Each annotation gets a submit_date (copied from its timestamp) so
    both kinds of objects sort on the same attribute.
    """
    for annotation in annotations:
        annotation.submit_date = annotation.timestamp
    annotations.extend(add_previous_comments(comments))
    annotations.sort(key=lambda item: item.submit_date, reverse=True)
    return annotations
def main(request):
    """
    Homepage view: handles TidbitSuggestionForm POSTs and renders the
    localized main template with tidbits and upcoming events.

    Note on annotations:
    Old:
    Return annotations by concatenating Annotation last 10 and Comment last
    10, adding all related comments (comments on same item that are older).
    annotations_old = get_annotations(
    annotations=list(Annotation.objects.all().order_by('-timestamp')[:10]),
    comments=Comment.objects.all().order_by('-submit_date')[:10])
    New:
    Return annotations by Action filtered to include only:
    annotation-added (to meeting), ignore annotated (by user)
    comment-added
    """
    # context = cache.get('main_page_context')
    # if not context:
    #     context = {
    #         'title': _('Home'),
    #         'hide_crumbs': True,
    #     }
    #     actions = list(main_actions()[:10])
    #
    #     annotations = get_annotations(
    #         annotations=[a.target for a in actions if a.verb != 'comment-added'],
    #         comments=[x.target for x in actions if x.verb == 'comment-added'])
    #     context['annotations'] = annotations
    #     b = get_debated_bills()
    #     if b:
    #         context['bill'] = get_debated_bills()[0]
    #     else:
    #         context['bill'] = None
    #     public_agenda_ids = Agenda.objects.filter(is_public=True
    #                                               ).values_list('id',flat=True)
    #     if len(public_agenda_ids) > 0:
    #         context['agenda_id'] = random.choice(public_agenda_ids)
    #     context['topics'] = Topic.objects.filter(status__in=PUBLIC_TOPIC_STATUS)\
    #                                      .order_by('-modified')\
    #                                      .select_related('creator')[:10]
    #     cache.set('main_page_context', context, 300)  # 5 Minutes
    # did we post the TidbitSuggest form ?
    if request.method == 'POST':
        # only logged-in users can suggest
        # BUG FIX: on the Django versions this project targets
        # (django.contrib.comments implies Django <= 1.7) is_authenticated
        # is a *method*; the bare attribute is always truthy, so the check
        # never rejected anonymous users.
        if not request.user.is_authenticated():
            return HttpResponseForbidden()
        form = TidbitSuggestionForm(request.POST)
        if form.is_valid():
            form.save(request)
        return form.get_response()
    NUMOF_EVENTS = 8
    events = Event.objects.get_upcoming()
    # Reduce the number of sql queries, by prefetching the objects and setting
    # them on the objects
    upcoming = list(events[:NUMOF_EVENTS])
    generics = {}
    for item in upcoming:
        if item.which_pk:
            generics.setdefault(item.which_type_id, set()).add(item.which_pk)
    content_types = ContentType.objects.in_bulk(generics.keys())
    relations = {}
    for ct, fk_list in generics.items():
        ct_model = content_types[ct].model_class()
        relations[ct] = ct_model.objects.in_bulk(list(fk_list))
    for item in upcoming:
        if item.which_pk:
            # prime the generic-FK cache so templates don't hit the DB
            setattr(item, '_which_object_cache',
                    relations[item.which_type_id].get(item.which_pk))
    context = {
        'title': _('Home'),
        'hide_crumbs': True,
        'is_index': True,
        'tidbits': Tidbit.active.all().order_by('?'),
        'suggestion_forms': {'tidbit': TidbitSuggestionForm()},
        'events': upcoming,
        'INITIAL_EVENTS': NUMOF_EVENTS,
        'events_more': events.count() > NUMOF_EVENTS,
    }
    template_name = '%s.%s%s' % ('main', settings.LANGUAGE_CODE, '.html')
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
@require_http_methods(['POST'])
def post_feedback(request):
    """Handle a submitted feedback suggestion form.

    Anonymous visitors are rejected with 403.  For logged-in users the
    form is validated and saved when valid; either way the form's own
    response (which reports validation errors itself) is returned.
    """
    if not request.user.is_authenticated:
        return HttpResponseForbidden()
    suggestion_form = FeedbackSuggestionForm(request.POST)
    if suggestion_form.is_valid():
        suggestion_form.save(request)
    return suggestion_form.get_response()
def post_annotation(request):
    """Forward an annotation POST to annotatetext, enforcing the permission."""
    if not request.user.has_perm('annotatetext.add_annotation'):
        return HttpResponseForbidden(_("Sorry, you do not have the permission to annotate."))
    return annotatetext_post_annotation(request)
def search(request, lang='he'):
    """Render the Google custom-search results page.

    The 'cof' GET variable is stripped from the query string so that the
    page linked to by the javascript fallback doesn't think it is inside
    an iframe.
    """
    params = request.GET.copy()
    params.pop('cof', None)
    context = {
        'query': request.GET.get('q'),
        'query_string': params.urlencode(),
        'has_search': True,
        'lang': lang,
        'cx': settings.GOOGLE_CUSTOM_SEARCH,
    }
    return render_to_response('search/search.html',
                              RequestContext(request, context))
def post_details(request, post_id):
    '''Patch django-planet's post_detail view so it updates the hit
    count and then redirects to the post's own url.

    Raises Http404 (via get_object_or_404) when the post does not exist.
    '''
    # Imports are local on purpose: these apps are only needed here.
    from hitcount.views import _update_hit_count
    from hitcount.models import HitCount
    from planet.models import Post
    # Update the hit count.  _update_hit_count is called for its side
    # effect only, so its return value is deliberately discarded; the
    # get_or_create "created" flag is likewise unused.
    ctype = ContentType.objects.get(app_label="planet", model="post")
    hitcount, _created = HitCount.objects.get_or_create(content_type=ctype,
                                                        object_pk=post_id)
    _update_hit_count(request, hitcount)
    post = get_object_or_404(Post, pk=post_id)
    return HttpResponseRedirect(post.url)
class RobotsView(TemplateView):
    """Serve robots.txt, rendered as plain text."""

    template_name = 'robots.txt'

    def render_to_response(self, context, **kwargs):
        # Force text/plain instead of the TemplateView default.
        return super(RobotsView, self).render_to_response(
            context, content_type='text/plain', **kwargs)
class AboutView(TemplateView):
    """Render the static 'about' page."""

    template_name = 'about.html'
class CommentsView(ListView):
    """Paginated index of comments, newest submissions first."""

    model = Comment
    queryset = Comment.objects.order_by("-submit_date")
    paginate_by = 20
| |
import scriptparsers
import re
class AlignmentParser(scriptparsers.AbstractParser):
    """Parser that uses alignment (indentation/padding) and <b> bold
    markup to split a movie script into speaker/quote pairs.

    Confidence is tracked by counting *why* quotes were terminated: a
    quote ending because the padding changed bumps __confPaddBreaks, one
    ending because a new bold speaker appeared bumps __confBoldBreaks.
    """

    class ScriptDocument:
        """Ordered list of ScriptRow objects for one script file.

        Tracks whether the previous line ended inside a <b>...</b> span
        so the bold state carries across physical lines.
        """

        def __init__(self):
            # True when the previously added line ended while still bold.
            self.__lastLineBold = False
            self.__rows = []

        def addLine(self, line):
            """Strip line terminators, tokenize the line, append a row."""
            line = line.replace("\n","").replace("\r","")
            row = AlignmentParser.ScriptRow(line, self.__lastLineBold)
            self.__lastLineBold = row.endsBold()
            self.__rows += [row]

        def getRows(self):
            """Return the list of ScriptRow objects added so far."""
            return self.__rows

    class ScriptRow:
        """One physical line, split into word tokens and whitespace runs.

        Only the literal tags '<b>' and '</b>' are recognised; any other
        '<' is treated as ordinary word content.
        """

        # NOTE(review): this compiled pattern is never used anywhere in
        # the class -- tokenization below is done character by character.
        patternWhitespace = re.compile('[^\s]')

        def __init__(self, text, bold):
            # ``bold``: bold state carried in from the previous row.
            self.__tokens = []
            self.__length = 0
            self.__emptyRow = True
            self.__firstContentToken = None
            i = 0
            lastType = 'u'  # unknown
            lastIndex = 0
            newBold = bold
            # Scan character by character; whenever the character class
            # ('w' word / 's' space / 'u' tag-or-unknown) changes, flush
            # the finished run as a token.
            while i < len(text):
                newType = 'u'
                if text[i].isspace():
                    newType = 's'  # space
                elif text[i] == '<':
                    if text[i:i+3] == '<b>':
                        newBold = True
                        newType = 'u'
                        i += 2  # skip the rest of the tag
                    elif text[i:i+4] == '</b>':
                        newBold = False
                        newType = 'u'
                        i += 3  # skip the rest of the tag
                    else:
                        newType = 'w'  # a lone '<' is word content
                else:
                    newType = 'w'
                if newType != lastType:
                    if lastType == 'w':
                        t = AlignmentParser.ScriptToken(text[lastIndex:i], bold)
                        self.__tokens += [t]
                        self.__emptyRow = False
                        if self.__firstContentToken is None:
                            self.__firstContentToken = t
                    elif lastType == 's':
                        self.__tokens += [AlignmentParser.ScriptWhitespace(i - lastIndex)]
                    # unknown needs no processing (only tags or other garbage)
                    lastType = newType
                    lastIndex = i
                bold = newBold
                i += 1  # next character
            # Flush the trailing run, if any
            if lastType == 'w':
                t = AlignmentParser.ScriptToken(text[lastIndex:], bold)
                self.__tokens += [t]
                self.__emptyRow = False
                if self.__firstContentToken is None:
                    self.__firstContentToken = t
            elif lastType == 's':
                self.__tokens += [AlignmentParser.ScriptWhitespace(len(text) - lastIndex)]
            # Row properties
            self.__endBold = bold
            # Row length excludes whitespace tokens at the beginning and
            # end; the leading run's width is kept as the row's padding.
            self.__initialPadding = 0
            for token in self.__tokens:
                self.__length += len(token)
            if len(self.__tokens) > 0:
                if isinstance(self.__tokens[0], AlignmentParser.ScriptWhitespace):
                    self.__length -= len(self.__tokens[0])
                    self.__initialPadding = len(self.__tokens[0])
                if len(self.__tokens) > 1 and isinstance(self.__tokens[-1], AlignmentParser.ScriptWhitespace):
                    self.__length -= len(self.__tokens[-1])

        def __str__(self):
            s = "Row(pad=" + str(self.__initialPadding)
            s += ", len=" + str(self.__length) + ", tokens=["
            for i in range(len(self.__tokens)):
                token = self.__tokens[i]
                if i > 0:
                    s += ", "
                s += str(token)
            return s + "])"

        def __len__(self):
            return self.__length

        def endsBold(self):
            """True when the row ends inside an unclosed <b> span."""
            return self.__endBold

        def getTokens(self):
            return self.__tokens

        def getInitialPadding(self):
            """Width of the leading whitespace run (0 if none)."""
            return self.__initialPadding

        def isEmptyRow(self):
            """True when the row contains no word tokens at all."""
            return self.__emptyRow

        def getFirstContentToken(self):
            """First word (non-whitespace) token, or None."""
            return self.__firstContentToken

    class ScriptWhitespace:
        """A run of consecutive whitespace characters."""

        def __init__(self, length):
            self.__length = length

        def __len__(self):
            return self.__length

        def __str__(self):
            return "WhiteSpace("+str(self.__length)+")"

        def getContent(self):
            # Whitespace runs are normalised to a single space.
            return " "

    class ScriptToken:
        """A word token together with its bold state."""

        def __init__(self, content, bold):
            self.__bold = bold
            self.__content = content

        def __len__(self):
            return len(self.__content)

        def __str__(self):
            return "Token(\""+self.__content+("\", bold" if self.__bold else "\"")+")"

        def isBold(self):
            return self.__bold

        def getContent(self):
            return self.__content

    def parseScript(self, filename, movieId):
        """Parse the script file *filename* and return its quotes.

        ``movieId`` is part of the parser interface but is not used by
        this implementation.
        """
        # Reset confidence information.
        # Confidence calculation is based on the reason for breaking the
        # input apart: if it's mostly done because the indentation
        # changes, it might be a good idea to use this parser.
        self.__confBoldBreaks = 1.0
        self.__confPaddBreaks = 1.0
        # Generate document structure.  Use a context manager so the
        # file handle is closed deterministically (the previous version
        # leaked it).
        doc = AlignmentParser.ScriptDocument()
        with open(filename, 'r') as script_file:
            for line in script_file:
                doc.addLine(line)
        return self.__getQuotes(doc)

    def __getQuotes(self, doc):
        """Walk the tokenized document and assemble speaker quotes."""
        quotes = []
        padding = -1  # -1 means: the next non-bold token defines the padding to use
        speaker = None  # None: no active speaker, create no quotes
        tokens = []  # list of quote fragments for the current speaker
        bold = False
        # Iterate over rows and their tokens
        for row in doc.getRows():
            # Check for a non-empty line with different padding
            rowPadding = row.getInitialPadding()
            firstContentToken = row.getFirstContentToken()
            if not row.isEmptyRow() and rowPadding != padding:
                if len(tokens) > 0 and speaker is not None and len(speaker) > 0 and padding >= 0:
                    # The padding changed, the quote ends here
                    quotes += self.__createQuote(speaker, tokens)
                    self.__confPaddBreaks += 1.0
                    speaker = None
                    tokens = []
                    padding = -1
            # Check tokens in this row
            for token in row.getTokens():
                if isinstance(token, AlignmentParser.ScriptToken):
                    # Lock in the quote padding on the first non-bold
                    # token, unless the row opens a "(...)" parenthetical.
                    if padding == -1 and not token.isBold() and firstContentToken.getContent()[0] != "(":
                        padding = rowPadding
                    # If the previous token wasn't bold and this is: new speaker
                    if not bold and token.isBold():
                        # If there is a previous speaker and the quote has
                        # tokens it's time to save it.
                        if len(tokens) > 0 and speaker is not None and len(speaker) > 0:
                            quotes += self.__createQuote(speaker, tokens)
                            self.__confBoldBreaks += 1.0
                        tokens = []
                        speaker = ""
                        padding = -1
                    # Update state
                    bold = token.isBold()
                    # Append token to the speaker name or the quote body
                    if bold and speaker is not None:
                        speaker += token.getContent()
                    elif not bold:
                        tokens += [token.getContent()]
        return quotes

    def __createQuote(self, speaker, tokens):
        """Join quote fragments; returns [] for whitespace-only quotes."""
        quote = "".join(tokens).strip()
        if len(quote) > 0:
            return self.findAttributesInQuote(speaker.strip(), quote)
        else:
            return []

    def calculateConfidence(self, quotes):
        """Fraction of quote breaks that were caused by padding changes.

        ``quotes`` is accepted for interface compatibility but unused.
        """
        return self.__confPaddBreaks / (self.__confBoldBreaks + self.__confPaddBreaks)
| |
# Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import signal
from subprocess import CalledProcessError
from unittest import TestCase
from unittest.mock import call, patch, MagicMock
from ona_service.hostname_resolver import (
ENV_HOSTNAME_DNS,
ENV_HOSTNAME_NETBIOS,
gethostbyaddr,
HostnameResolver,
nmblookup,
resolve_host_names,
)
# Template for patching names inside the module under test.
PATCH_PATH = 'ona_service.hostname_resolver.{}'
# Canned lookup tables used as stand-ins for the DNS / NetBIOS resolvers.
DNS_RESOLUTIONS = {'10.1.1.1': 'test_1', '192.168.1.12': 'test_2'}
NETBIOS_RESOLUTIONS = {'192.0.2.1': 'test_3', '192.168.1.12': 'test_2.bogus'}
class HostnameResolverTest(TestCase):
    """Tests for ona_service.hostname_resolver helpers and service."""

    def test_gethostbyaddr(self):
        # The loopback address resolves; malformed or bogus input
        # returns None instead of raising.
        self.assertEqual(gethostbyaddr('127.0.0.1'), 'localhost')
        self.assertIsNone(gethostbyaddr('127.0.0.256'))
        self.assertIsNone(gethostbyaddr('bogus'))

    @patch(PATCH_PATH.format('subprocess.check_output'), autospec=True)
    def test_nmblookup(self, mock_check_output):
        # Expected: the ACTIVE, non-GROUP <00> entry is picked and
        # lower-cased (OFFLINE and <GROUP> entries are skipped).
        mock_check_output.return_value = (
            'Ignoring unknown parameter "server role"\n'
            'Looking up status of 192.0.2.1\n'
            '\tWRONG <00> - M <OFFLINE>\n'
            '\tWKSOBSR01 <00> - M <ACTIVE> \n'
            '\tON <00> - <GROUP> M <ACTIVE> \n'
            '\tON <1c> - <GROUP> M <ACTIVE> \n'
            '\tWKSOBSR01 <20> - M <ACTIVE> \n'
            '\n\tMAC Address = 02-04-01-01-04-02\n'
            '\n'
        )
        self.assertEqual(nmblookup('192.0.2.1'), 'wksobsr01')
        # The lookup shells out through `timeout` with a 1s limit.
        mock_check_output.assert_called_once_with(
            'timeout 1s nmblookup -A 192.0.2.1'.split(),
            encoding='utf-8',
            errors='ignore',
        )

    @patch(PATCH_PATH.format('subprocess.check_output'), autospec=True)
    def test_nmblookup_fail(self, mock_check_output):
        # "No reply" output and a CalledProcessError both yield None.
        mock_check_output.return_value = (
            'Ignoring unknown parameter "server role"\n'
            'Looking up status of 192.0.2.1\n'
            'No reply from 192.0.2.1\n\n'
        )
        self.assertIsNone(nmblookup('192.0.2.1'))
        mock_check_output.side_effect = CalledProcessError(None, None)
        self.assertIsNone(nmblookup('192.0.2.1'))

    @patch(PATCH_PATH.format('sleep'), autospec=True)
    def test_resolve_host_names(self, mock_sleep):
        # Resolvers are consulted in order; unresolvable IPs map to None.
        resolvers = [DNS_RESOLUTIONS.get, NETBIOS_RESOLUTIONS.get]
        ips = ['10.1.1.1', '192.168.1.12', '192.0.2.1', '198.51.100.1']
        actual = resolve_host_names(ips, resolvers)
        expected = {
            '10.1.1.1': 'test_1',
            '192.168.1.12': 'test_2',
            '192.0.2.1': 'test_3',
            '198.51.100.1': None,
        }
        self.assertEqual(actual, expected)
        # One 0.1s throttling sleep per address.
        self.assertEqual(mock_sleep.call_args_list, [call(0.1)] * len(ips))

    @patch(PATCH_PATH.format('gethostbyaddr'), DNS_RESOLUTIONS.get)
    def test_execute(self):
        """DNS-only resolution: execute() uploads the resolutions file
        and signals its remote path."""
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        # Set up mock for api.get_data - what we are to retrieve
        ips = ['10.1.1.1', '192.168.1.12']
        self.inst.api.get_data.return_value.json.return_value = ips
        # Set up mock for api.send_file
        remote_path = 'file:///tmp/obsrvbl/hostnames/resolutions.json'
        output = {}

        def _send_file(data_type, path, now, suffix=None):
            # ``index`` is a free variable assigned below (before
            # execute() runs); the uploaded file's contents are captured
            # so they can be inspected afterwards.
            with open(path) as infile:
                output[index] = infile.read()
            return remote_path
        self.inst.api.send_file.side_effect = _send_file
        # Do the deed
        index = 0
        self.inst.execute()
        self.assertEqual(self.inst.api.send_file.call_count, 1)
        call_args, call_kwargs = self.inst.api.send_file.call_args
        self.assertEqual(call_args[0], 'hostnames')
        self.assertEqual(call_kwargs['suffix'], 'hosts')
        self.assertEqual(output[0], json.dumps(DNS_RESOLUTIONS))
        self.inst.api.send_signal.assert_called_once_with(
            'hostnames', {'path': remote_path}
        )

    @patch.dict(
        'os.environ',
        {ENV_HOSTNAME_DNS: 'false', ENV_HOSTNAME_NETBIOS: 'true'}
    )
    @patch(PATCH_PATH.format('subprocess.check_output'), autospec=True)
    def test_execute_netbios(self, mock_check_output):
        """NetBIOS-only resolution (DNS disabled via environment)."""
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        # Set up mock for api.get_data - what we are to retrieve
        ips = ['192.0.2.1', '192.168.1.12']
        self.inst.api.get_data.return_value.json.return_value = ips
        # Set up mock for api.send_file
        remote_path = 'file:///tmp/obsrvbl/hostnames/resolutions.json'
        output = {}

        def _send_file(data_type, path, now, suffix=None):
            # Capture the uploaded file's contents (see test_execute).
            with open(path) as infile:
                output[index] = infile.read()
            return remote_path
        self.inst.api.send_file.side_effect = _send_file
        # Set up the resolver: fake nmblookup output per IP.
        def _check_output(*popenargs, **kwargs):
            ip = popenargs[0][-1]
            if ip == '192.0.2.1':
                return '\tTEST_3 <00> - M <ACTIVE> \n'
            elif ip == '192.168.1.12':
                return '\tTEST_2.BOGUS <00> - M <ACTIVE> \n'
            raise CalledProcessError(None, None)
        mock_check_output.side_effect = _check_output
        # Do the deed
        index = 0
        self.inst.execute()
        self.assertEqual(self.inst.api.send_file.call_count, 1)
        call_args, call_kwargs = self.inst.api.send_file.call_args
        self.assertEqual(call_args[0], 'hostnames')
        self.assertEqual(call_kwargs['suffix'], 'hosts')
        self.assertEqual(output[0], json.dumps(NETBIOS_RESOLUTIONS))
        self.inst.api.send_signal.assert_called_once_with(
            'hostnames', {'path': remote_path}
        )

    @patch.dict('os.environ', {ENV_HOSTNAME_NETBIOS: 'true'})
    @patch(PATCH_PATH.format('gethostbyaddr'), DNS_RESOLUTIONS.get)
    @patch(PATCH_PATH.format('subprocess.check_output'), autospec=True)
    def test_execute_both(self, mock_check_output):
        """DNS and NetBIOS enabled together; per the expected dict,
        NetBIOS results win for IPs both resolvers know."""
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        # Set up mock for api.get_data - what we are to retrieve
        ips = ['192.0.2.1', '192.168.1.12', '10.1.1.1', '198.51.100.1']
        self.inst.api.get_data.return_value.json.return_value = ips
        # Set up mock for api.send_file
        remote_path = 'file:///tmp/obsrvbl/hostnames/resolutions.json'
        output = {}

        def _send_file(data_type, path, now, suffix=None):
            # Capture the uploaded file's contents (see test_execute).
            with open(path) as infile:
                output[index] = infile.read()
            return remote_path
        self.inst.api.send_file.side_effect = _send_file
        # Set up the resolver: fake nmblookup output per IP.
        def _check_output(*popenargs, **kwargs):
            ip = popenargs[0][-1]
            if ip == '192.0.2.1':
                return '\tTEST_3 <00> - M <ACTIVE> \n'
            elif ip == '192.168.1.12':
                return '\tTEST_2.BOGUS <00> - M <ACTIVE> \n'
            raise CalledProcessError(None, None)
        mock_check_output.side_effect = _check_output
        expected_resolutions = {
            '192.0.2.1': NETBIOS_RESOLUTIONS['192.0.2.1'],
            '192.168.1.12': NETBIOS_RESOLUTIONS['192.168.1.12'],
            '10.1.1.1': DNS_RESOLUTIONS['10.1.1.1'],
            '198.51.100.1': None,
        }
        # Do the deed
        index = 0
        self.inst.execute()
        self.assertEqual(self.inst.api.send_file.call_count, 1)
        call_args, call_kwargs = self.inst.api.send_file.call_args
        self.assertEqual(call_args[0], 'hostnames')
        self.assertEqual(call_kwargs['suffix'], 'hosts')
        self.assertEqual(output[0], json.dumps(expected_resolutions))
        self.inst.api.send_signal.assert_called_once_with(
            'hostnames', {'path': remote_path}
        )

    @patch.dict(
        'os.environ',
        {ENV_HOSTNAME_DNS: 'false', ENV_HOSTNAME_NETBIOS: 'false'}
    )
    def test_execute_no_resolvers(self):
        # With every resolver disabled, nothing is uploaded or signalled.
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        self.inst.api.get_data.return_value.json.return_value = []
        self.inst.execute()
        self.assertEqual(self.inst.api.send_file.call_count, 0)
        self.assertEqual(self.inst.api.send_signal.call_count, 0)

    def test_execute_no_ips(self):
        # An empty IP list also produces no upload and no signal.
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        self.inst.api.get_data.return_value.json.return_value = []
        self.inst.execute()
        self.assertEqual(self.inst.api.send_file.call_count, 0)
        self.assertEqual(self.inst.api.send_signal.call_count, 0)

    def test_execute_error(self):
        # A malformed (unparseable) response is swallowed: no upload,
        # no signal, and no exception escapes execute().
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        self.inst.api.get_data.return_value.json.side_effect = ValueError
        self.inst.execute()
        self.assertEqual(self.inst.api.send_file.call_count, 0)
        self.assertEqual(self.inst.api.send_signal.call_count, 0)

    def test_service(self):
        # Smoke-test the service loop: SIGALRM fires after one second
        # and calls stop(), so run() is expected to return rather than
        # hang.  (NOTE: relies on SIGALRM, i.e. POSIX only.)
        self.inst = HostnameResolver()
        self.inst.api = MagicMock()
        self.inst.poll_seconds = 0

        def killer(signum, frame):
            self.inst.stop()
        signal.signal(signal.SIGALRM, killer)
        signal.alarm(1)
        self.inst.run()
| |
import json
import os
from django.contrib.auth.models import Group, User, Permission
from django.conf import settings
from django.core.files import File
from funfactory.urlresolvers import reverse
from nose.tools import eq_, ok_
from airmozilla.main.models import (
Event,
Tag,
Channel,
EventRevision,
RecruitmentMessage,
Picture,
)
from airmozilla.base.tests.testbase import DjangoTestCase
class TestEventEdit(DjangoTestCase):
# Fixture images used to replace an event's placeholder image in the
# upload/conflict tests below (self.main_image comes from DjangoTestCase).
other_image = 'airmozilla/manage/tests/other_logo.png'
third_image = 'airmozilla/manage/tests/other_logo_reversed.png'
def _event_to_dict(self, event):
    """Serialize *event* the same way the edit view does (used to build
    the 'previous' hidden field for conflict detection)."""
    from airmozilla.main.views import EventEditView
    return EventEditView.event_to_dict(event)
def test_link_to_edit(self):
    """The edit link appears on the event page only after logging in."""
    event = Event.objects.get(title='Test event')
    response = self.client.get(reverse('main:event', args=(event.slug,)))
    eq_(response.status_code, 200)
    url = reverse('main:event_edit', args=(event.slug,))
    # Anonymous: page renders but without the edit link.
    ok_(url not in response.content)
    self._login()
    response = self.client.get(reverse('main:event', args=(event.slug,)))
    eq_(response.status_code, 200)
    ok_(url in response.content)
def test_cant_view(self):
    """Anonymous GET and POST to the edit view both redirect back to
    the event page instead of rendering the form."""
    event = Event.objects.get(title='Test event')
    url = reverse('main:event_edit', args=(event.slug,))
    response = self.client.get(url)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    response = self.client.post(url, {})
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
def test_edit_title(self):
    """Saving a new title persists it and records two EventRevision
    rows: the pre-edit state (no user) and the edited state."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    url = reverse('main:event_edit', args=(event.slug,))
    # Anonymous GET is redirected away.
    response = self.client.get(url)
    eq_(response.status_code, 302)
    user = self._login()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    data = {
        'event_id': event.id,
        'previous': previous,
        'title': 'Different title',
        'short_description': event.short_description,
        'description': event.description,
        'additional_links': event.additional_links,
        'tags': ', '.join(x.name for x in event.tags.all()),
        'channels': [x.pk for x in event.channels.all()]
    }
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # this should have created 2 EventRevision objects.
    initial, current = EventRevision.objects.all().order_by('created')
    eq_(initial.event, event)
    eq_(current.event, event)
    eq_(initial.user, None)
    eq_(current.user, user)
    eq_(initial.title, 'Test event')
    eq_(current.title, 'Different title')
    # reload the event
    event = Event.objects.get(pk=event.pk)
    eq_(event.title, 'Different title')
def test_edit_channel(self):
    """The channel multi-select offers visible channels, pre-selects the
    event's current ones, and hides never_show channels the event is
    not already in."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    main_channel = Channel.objects.get(
        slug=settings.DEFAULT_CHANNEL_SLUG
    )
    assert main_channel in event.channels.all()
    url = reverse('main:event_edit', args=(event.slug,))
    # 'Old' is never_show but attached to the event; 'Bad' is
    # never_show and unattached; 'Good' is a regular channel.
    old_channel = Channel.objects.create(
        name='Old', slug='old', never_show=True
    )
    bad_channel = Channel.objects.create(
        name='Bad', slug='bad', never_show=True
    )
    good_channel = Channel.objects.create(
        name='Good', slug='good',
    )
    event.channels.add(old_channel)
    self._login()
    response = self.client.get(url)
    eq_(response.status_code, 200)
    # the Good channel should be a choice
    html = '<option value="{0}">{1}</option>'.format(
        good_channel.id, good_channel.name
    )
    ok_(html in response.content)
    # the Main channel should be in there and already selected
    html = '<option value="{0}" selected="selected">{1}</option>'.format(
        main_channel.id, main_channel.name
    )
    ok_(html in response.content)
    # the Old channel should be in there and already selected
    html = '<option value="{0}" selected="selected">{1}</option>'.format(
        old_channel.id, old_channel.name
    )
    ok_(html in response.content)
    # the bad channel shouldn't even be a choice
    html = '<option value="{0}">{1}</option>'.format(
        bad_channel.id, bad_channel.name
    )
    ok_(html not in response.content)
def test_edit_nothing(self):
    """Basically pressing save without changing anything: the post
    succeeds but no EventRevision objects are created."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    url = reverse('main:event_edit', args=(event.slug,))
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    data = {
        'event_id': event.id,
        'previous': previous,
        'title': event.title,
        'short_description': event.short_description,
        'description': event.description,
        'additional_links': event.additional_links,
        'tags': ', '.join(x.name for x in event.tags.all()),
        'channels': [x.pk for x in event.channels.all()]
    }
    # Anonymous post: redirected away without saving.
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    self._login()
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    # Nothing changed, so no revisions were recorded.
    ok_(not EventRevision.objects.all())
def test_edit_no_image(self):
    """Saving an event that has neither a placeholder image nor a
    picture fails form validation with a picture-required error."""
    event = Event.objects.get(title='Test event')
    event.placeholder_img = None
    event.save()
    url = reverse('main:event_edit', args=(event.slug,))
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    data = {
        'event_id': event.id,
        'previous': previous,
        'title': event.title,
        'short_description': event.short_description,
        'description': event.description,
        'additional_links': event.additional_links,
        'tags': ', '.join(x.name for x in event.tags.all()),
        'channels': [x.pk for x in event.channels.all()]
    }
    # Anonymous post: redirected away without saving.
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    self._login()
    response = self.client.post(url, data)
    eq_(response.status_code, 200)
    ok_('Events needs to have a picture' in
        response.context['form'].errors['__all__'])
    ok_('Events needs to have a picture' in response.content)
def test_bad_edit_title(self):
    """Posting an empty title re-renders the form with a
    field-required validation error."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    url = reverse('main:event_edit', args=(event.slug,))
    self._login()
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    data = {
        'event_id': event.id,
        'previous': previous,
        'title': '',
        'short_description': event.short_description,
        'description': event.description,
        'additional_links': event.additional_links,
        'tags': ', '.join(x.name for x in event.tags.all()),
        'channels': [x.pk for x in event.channels.all()]
    }
    response = self.client.post(url, data)
    eq_(response.status_code, 200)
    ok_('This field is required' in response.content)
def test_edit_on_bad_url(self):
    """Editing via an unknown slug 404s; editing via an event's *old*
    slug redirects to the event page (both GET and POST)."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    url = reverse('main:event_edit', args=('xxx',))
    response = self.client.get(url)
    eq_(response.status_code, 404)
    # Rename the slug so the old one only resolves via redirect.
    old_slug = event.slug
    event.slug = 'new-slug'
    event.save()
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    # NOTE: no 'event_id' in this payload (unlike the other tests).
    data = {
        'previous': previous,
        'title': event.title,
        'short_description': event.short_description,
        'description': event.description,
        'additional_links': event.additional_links,
        'tags': ', '.join(x.name for x in event.tags.all()),
        'channels': [x.pk for x in event.channels.all()]
    }
    url = reverse('main:event_edit', args=(old_slug,))
    response = self.client.get(url)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    url = reverse('main:event_edit', args=(event.slug,))
    response = self.client.get(url)
    # because you're not allowed to view it
    eq_(response.status_code, 302)
    url = reverse('main:event_edit', args=(event.slug,))
    response = self.client.post(url, data)
    # because you're not allowed to view it, still
    eq_(response.status_code, 302)
def test_edit_all_simple_fields(self):
    """similar to test_edit_title() but changing all fields
    other than the placeholder_img
    """
    event = Event.objects.get(title='Test event')
    event.tags.add(Tag.objects.create(name='testing'))
    self._attach_file(event, self.main_image)
    assert event.tags.all()
    assert event.channels.all()
    url = reverse('main:event_edit', args=(event.slug,))
    self._login()
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    new_channel = Channel.objects.create(
        name='New Stuff',
        slug='new-stuff'
    )
    new_channel2 = Channel.objects.create(
        name='New Stuff II',
        slug='new-stuff-2'
    )
    data = {
        'event_id': event.id,
        'previous': previous,
        'title': 'Different title',
        'short_description': 'new short description',
        'description': 'new description',
        'additional_links': 'new additional_links',
        'tags': 'newtag',
        'channels': [new_channel.pk, new_channel2.pk]
    }
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # this should have created 2 EventRevision objects.
    initial, current = EventRevision.objects.all().order_by('created')
    eq_(initial.event, event)
    eq_(initial.title, 'Test event')
    eq_(current.title, 'Different title')
    # reload the event and check that every field was replaced,
    # including a full swap of tags and channels.
    event = Event.objects.get(pk=event.pk)
    eq_(event.title, 'Different title')
    eq_(event.description, 'new description')
    eq_(event.short_description, 'new short description')
    eq_(event.additional_links, 'new additional_links')
    eq_(
        sorted(x.name for x in event.tags.all()),
        ['newtag']
    )
    eq_(
        sorted(x.name for x in event.channels.all()),
        ['New Stuff', 'New Stuff II']
    )
def test_edit_recruitmentmessage(self):
    """Change the revision message from nothing, to something
    to another one.

    Also checks that the field is only offered to users with the
    change_recruitmentmessage permission and that inactive messages
    are not listed.
    """
    event = Event.objects.get(title='Test event')
    event.tags.add(Tag.objects.create(name='testing'))
    self._attach_file(event, self.main_image)
    assert event.tags.all()
    assert event.channels.all()
    url = reverse('main:event_edit', args=(event.slug,))
    user = self._login()
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    msg1 = RecruitmentMessage.objects.create(
        text='Web Developer',
        url='http://careers.mozilla.com/123',
        active=True
    )
    msg2 = RecruitmentMessage.objects.create(
        text='C++ Developer',
        url='http://careers.mozilla.com/456',
        active=True
    )
    msg3 = RecruitmentMessage.objects.create(
        text='Fortran Developer',
        url='http://careers.mozilla.com/000',
        active=False  # Note!
    )
    # if you don't have the right permission, you can't see this choice
    response = self.client.get(url)
    eq_(response.status_code, 200)
    ok_('Recruitment message' not in response.content)
    # give the user the necessary permission
    recruiters = Group.objects.create(name='Recruiters')
    permission = Permission.objects.get(
        codename='change_recruitmentmessage'
    )
    recruiters.permissions.add(permission)
    user.groups.add(recruiters)
    response = self.client.get(url)
    eq_(response.status_code, 200)
    ok_('Recruitment message' in response.content)
    ok_(msg1.text in response.content)
    ok_(msg2.text in response.content)
    ok_(msg3.text not in response.content)  # not active
    with open('airmozilla/manage/tests/firefox.png') as fp:
        picture = Picture.objects.create(file=File(fp))
    data = {
        'event_id': event.id,
        'previous': previous,
        'recruitmentmessage': msg1.pk,
        'title': event.title,
        'picture': picture.id,
        'description': event.description,
        'short_description': event.short_description,
        'channels': [x.id for x in event.channels.all()],
        'tags': [x.name for x in event.tags.all()],
    }
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # this should have created 2 EventRevision objects.
    initial, current = EventRevision.objects.all().order_by('created')
    eq_(initial.event, event)
    ok_(not initial.recruitmentmessage)
    eq_(current.recruitmentmessage, msg1)
    # reload the event
    event = Event.objects.get(pk=event.pk)
    eq_(event.recruitmentmessage, msg1)
    # now change it to another message
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    data['recruitmentmessage'] = msg2.pk
    data['previous'] = previous
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # reload the event
    event = Event.objects.get(pk=event.pk)
    eq_(event.recruitmentmessage, msg2)
    initial, __, current = (
        EventRevision.objects.all().order_by('created')
    )
    eq_(current.recruitmentmessage, msg2)
    # lastly, change it to blank
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    data['recruitmentmessage'] = ''
    data['previous'] = previous
    response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # reload the event
    event = Event.objects.get(pk=event.pk)
    eq_(event.recruitmentmessage, None)
    initial, __, __, current = (
        EventRevision.objects.all().order_by('created')
    )
    eq_(current.recruitmentmessage, None)
def test_edit_placeholder_img(self):
    """Uploading a new placeholder image stores a new file (the old one
    is kept on disk) and records revisions with images."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    url = reverse('main:event_edit', args=(event.slug,))
    self._login()
    old_placeholder_img_path = event.placeholder_img.path
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    # The file object must stay open for the multipart upload.
    with open(self.other_image) as fp:
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': event.title,
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()],
            'placeholder_img': fp,
        }
        response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # this should have created 2 EventRevision objects.
    initial, current = EventRevision.objects.all().order_by('created')
    ok_(initial.placeholder_img)
    ok_(current.placeholder_img)
    # reload the event
    event = Event.objects.get(pk=event.pk)
    new_placeholder_img_path = event.placeholder_img.path
    ok_(old_placeholder_img_path != new_placeholder_img_path)
    ok_(os.path.isfile(old_placeholder_img_path))
    ok_(os.path.isfile(new_placeholder_img_path))
def test_edit_placeholder_img_to_unselect_picture(self):
    """Uploading a placeholder image clears a previously selected
    Picture, even though the picture id is still posted."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    # also, let's pretend the event has a picture already selected
    with open(self.main_image) as fp:
        picture = Picture.objects.create(file=File(fp))
    event.picture = picture
    event.save()
    url = reverse('main:event_edit', args=(event.slug,))
    self._login()
    old_placeholder_img_path = event.placeholder_img.path
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    # The file object must stay open for the multipart upload.
    with open(self.other_image) as fp:
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': event.title,
            'short_description': event.short_description,
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()],
            'placeholder_img': fp,
            # this is a hidden field you can't not send
            'picture': picture.id,
        }
        response = self.client.post(url, data)
    eq_(response.status_code, 302)
    self.assertRedirects(
        response,
        reverse('main:event', args=(event.slug,))
    )
    # this should have created 2 EventRevision objects.
    initial, current = EventRevision.objects.all().order_by('created')
    ok_(initial.placeholder_img)
    ok_(current.placeholder_img)
    ok_(not current.picture)
    # reload the event
    event = Event.objects.get(pk=event.pk)
    ok_(not event.picture)
    new_placeholder_img_path = event.placeholder_img.path
    ok_(old_placeholder_img_path != new_placeholder_img_path)
    ok_(os.path.isfile(old_placeholder_img_path))
    ok_(os.path.isfile(new_placeholder_img_path))
def test_edit_conflict(self):
    """You can't edit the title if someone else edited it since the
    'previous' JSON dump was taken."""
    event = Event.objects.get(title='Test event')
    self._attach_file(event, self.main_image)
    url = reverse('main:event_edit', args=(event.slug,))
    self._login()
    data = self._event_to_dict(event)
    previous = json.dumps(data)
    # Simulate a concurrent edit after 'previous' was captured.
    event.title = 'Sneak Edit'
    event.save()
    data = {
        'event_id': event.id,
        'previous': previous,
        'title': 'Different title',
        'short_description': event.short_description,
        'description': event.description,
        'additional_links': event.additional_links,
        'tags': ', '.join(x.name for x in event.tags.all()),
        'channels': [x.pk for x in event.channels.all()]
    }
    response = self.client.post(url, data)
    eq_(response.status_code, 200)
    ok_('Conflict error!' in response.content)
    def test_edit_conflict_on_placeholder_img(self):
        """You can't save an edit if someone else changed the placeholder
        image since the 'previous' JSON dump was taken."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()
        # Snapshot the event exactly as the edit form would have seen it.
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        # Concurrent change: somebody else swaps the placeholder image.
        self._attach_file(event, self.other_image)
        with open(self.third_image) as fp:
            data = {
                'event_id': event.id,
                'previous': previous,
                'title': event.title,
                'short_description': event.short_description,
                'description': event.description,
                'additional_links': event.additional_links,
                'tags': ', '.join(x.name for x in event.tags.all()),
                'channels': [x.pk for x in event.channels.all()],
                'placeholder_img': fp
            }
            response = self.client.post(url, data)
        # 200 (form re-rendered with an error), not a 302 redirect:
        # the save was rejected because of the conflict.
        eq_(response.status_code, 200)
        ok_('Conflict error!' in response.content)
    def test_edit_conflict_near_miss(self):
        """If the event changes between the time you load the edit page
        and you pressing 'Save' it shouldn't be a problem as long as
        you're changing something different."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        self._login()
        # Snapshot the event exactly as the edit form would have seen it.
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        # Concurrent edit touches only the title.
        event.title = 'Sneak Edit'
        event.save()
        data = {
            'event_id': event.id,
            'previous': previous,
            # Title identical to the snapshot; only the short description
            # differs, so there is no overlap and thus no conflict.
            'title': 'Test event',
            'short_description': 'new short description',
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        # 302 redirect: the save succeeded.
        eq_(response.status_code, 302)
        # The concurrent title edit survives, and our new short
        # description has been applied on top of it.
        event = Event.objects.get(pk=event.pk)
        eq_(event.title, 'Sneak Edit')
        eq_(event.short_description, 'new short description')
    def test_view_revision_change_links(self):
        """After an edit, the edit page links to each revision's 'change'
        or 'difference' page, but only where there is something to show."""
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)
        url = reverse('main:event_edit', args=(event.slug,))
        user = self._login()
        # Snapshot the event exactly as the edit form would have seen it.
        data = self._event_to_dict(event)
        previous = json.dumps(data)
        data = {
            'event_id': event.id,
            'previous': previous,
            'title': 'Test event',
            'short_description': 'new short description',
            'description': event.description,
            'additional_links': event.additional_links,
            'tags': ', '.join(x.name for x in event.tags.all()),
            'channels': [x.pk for x in event.channels.all()]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        # Saving creates two revisions: a "base" revision (no user) of the
        # state before the edit, and one attributed to the editing user.
        eq_(EventRevision.objects.filter(event=event).count(), 2)
        base_revision = EventRevision.objects.get(
            event=event,
            user__isnull=True
        )
        user_revision = EventRevision.objects.get(
            event=event,
            user=user
        )
        # reload the event edit page
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # because there's no difference between this and the event now
        # we should NOT have a link to see the difference for the user_revision
        ok_(
            reverse('main:event_difference',
                    args=(event.slug, user_revision.pk))
            not in response.content
        )
        # but there should be a link to the change
        ok_(
            reverse('main:event_change',
                    args=(event.slug, user_revision.pk))
            in response.content
        )
        # since the base revision doesn't have any changes there shouldn't
        # be a link to it
        ok_(
            reverse('main:event_change',
                    args=(event.slug, base_revision.pk))
            not in response.content
        )
        # but there should be a link to its difference from the current event
        ok_(
            reverse('main:event_difference',
                    args=(event.slug, base_revision.pk))
            in response.content
        )
    def test_cant_view_all_revision_changes(self):
        """The revision 'change' and 'difference' pages require being
        signed in; anonymous requests get redirected."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        # base revision
        base_revision = EventRevision.objects.create_from_event(event)
        # change the event without saving so we can make a new revision
        event.title = 'Different title'
        user = User.objects.create_user(
            'mary', 'mary@mozilla.com', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        change_url = reverse(
            'main:event_change',
            args=(event.slug, user_revision.pk)
        )
        difference_url = reverse(
            'main:event_difference',
            args=(event.slug, base_revision.pk)
        )
        # you're not allowed to view these if you're not signed in;
        # note: no self._login() here, so the client is anonymous.
        response = self.client.get(change_url)
        eq_(response.status_code, 302)
        response = self.client.get(difference_url)
        eq_(response.status_code, 302)
    def test_view_revision_change(self):
        """The 'change' page shows what a user's revision changed, and the
        'difference' page shows how the base revision differs from the
        current event."""
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)
        # base revision
        base_revision = EventRevision.objects.create_from_event(event)
        # change the event without saving so we can make a new revision
        event.title = 'Different title'
        event.description = 'New description'
        event.short_description = 'New short description'
        event.additional_links = 'New additional links'
        event.save()
        user = User.objects.create_user(
            'bob', 'bob@mozilla.com', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        # The user revision additionally changes tags, channels and the
        # placeholder image.
        user_revision.tags.add(Tag.objects.create(name='newtag'))
        user_revision.channels.remove(Channel.objects.get(name='Main'))
        user_revision.channels.add(
            Channel.objects.create(name='Web dev', slug='webdev')
        )
        with open(self.other_image, 'rb') as f:
            img = File(f)
            user_revision.placeholder_img.save(
                os.path.basename(self.other_image),
                img
            )
        # view the change
        url = reverse('main:event_change', args=(event.slug, user_revision.pk))
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Different title' in response.content)
        ok_('New description' in response.content)
        ok_('New short description' in response.content)
        ok_('New additional links' in response.content)
        ok_('Web dev' in response.content)
        ok_('newtag, testing' in response.content)
        # Bring the event itself up to date with the tag and channel
        # changes so the base revision's difference page shows them too.
        event.tags.add(Tag.objects.create(name='newtag'))
        event.channels.remove(Channel.objects.get(name='Main'))
        event.channels.add(
            Channel.objects.get(name='Web dev')
        )
        # view the difference
        url = reverse(
            'main:event_difference',
            args=(event.slug, base_revision.pk))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Different title' in response.content)
        ok_('New description' in response.content)
        ok_('New short description' in response.content)
        ok_('New additional links' in response.content)
        ok_('Web dev' in response.content)
        ok_('newtag, testing' in response.content)
    def test_view_revision_change_on_recruitmentmessage(self):
        """The revision 'change' page shows a changed recruitment
        message."""
        event = Event.objects.get(title='Test event')
        self._attach_file(event, self.main_image)
        # base revision
        EventRevision.objects.create_from_event(event)
        user = User.objects.create_user(
            'bob', 'bob@mozilla.com', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        msg1 = RecruitmentMessage.objects.create(
            text='Web Developer',
            url='http://careers.mozilla.com/123',
            active=True
        )
        # Attach the recruitment message on the user's revision only,
        # so it differs from the event.
        user_revision.recruitmentmessage = msg1
        user_revision.save()
        # view the change
        url = reverse('main:event_change', args=(event.slug, user_revision.pk))
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_(msg1.text in response.content)
| |
# Copyright (C) 2015 by Per Unneberg
import re
import os
from itertools import groupby
from snakemakelib.utils import isoformat
from snakemakelib.log import LoggerManager
# Module-level logger for this module.
smllogger = LoggerManager().getLogger(__name__)
# Optional leading-path prefix: matches a (possibly empty) path of
# word/dot characters followed by a final slash.
REGEXP_DOT_MATCH = r"(?:[\.\w\/]+)?\/"
# Generic non-capturing spacer: any sequence of >= 0 characters.
REGEXP_SPACER_MATCH = r"(?:.*)"
class MissingRequiredKeyException(Exception):
    """Raised when a required regexp group key is missing."""
class FormatException(Exception):
    """Raised for a malformatted entry."""
class UnimplementedException(Exception):
    """Raised when an unimplemented method is called."""
class DisallowedKeyException(Exception):
    """Raised when a regexp group key is not in the allowed key set."""
class RegexpDict(dict):
    """Dictionary populated by parsing strings with a named-group regexp.

    Subclasses declare which named groups are required/allowed via the
    class attributes below.  Group names may carry a numeric suffix
    (e.g. ``PU1``, ``PU2``) purely to keep them unique within one
    pattern; after parsing, such indexed groups are collapsed into
    their base key with the values joined by ``concat``.
    """
    # Keys that must appear among the pattern's named groups.
    _required_keys = []
    # Keys allowed as named groups in the pattern.
    _group_keys = []
    # Keys set by post-processing rather than by the regexp itself.
    _extra_keys = []
    def __init__(self, regexp=None, concat="_", *args, **kwargs):
        """Compile ``regexp`` and derive the formatting string.

        Args:
          regexp (str): regular expression with named groups
          concat (str): separator used when joining indexed group values
        """
        super(RegexpDict, self).__init__()
        # Seed values for known group keys passed as keyword arguments.
        self.update({k: v for k, v in kwargs.items() if k in self._group_keys})
        self._concat = concat
        self._init_regexp(regexp)
        self._init_format()
    def _init_regexp(self, regexp):
        """Compile the pattern and build the map base key -> list of
        (possibly index-suffixed) group names, then validate the keys."""
        self._regexp = re.compile(regexp)
        # Group keys according to prefix, e.g. PU=[PU1, PU2, PU3], and
        # SM = [SM]
        keymap = sorted([(re.sub("[0-9]+$", "", k), k)
                         if re.search("[0-9]+$", k) else (k, k)
                         for k in list(self.re.groupindex.keys())])
        self._keymap = {k: [y[1] for y in list(v)]
                        for (k, v) in groupby(keymap, lambda x: x[0])}
        self._validate_keys()
    @property
    def re(self):
        # The compiled regular expression object.
        return self._regexp
    @property
    def pattern(self):
        # The raw pattern string of the compiled regexp.
        return self.re.pattern
    @property
    def basename_pattern(self):
        """Return the basename pattern

        Return the basename pattern, replacing ?P=
        expressions with the corresponding group expression
        """
        # Map group index -> group name; remap collects, per group name,
        # the group's inner expression text.
        invmap = {v: k for k, v in self.re.groupindex.items()}
        remap = {k: "" for k, v in self.re.groupindex.items()}
        i = 1
        # NOTE(review): r"\(?P<...>" makes the opening paren *optional*
        # instead of escaping the "(?P<" sequence -- presumably intended
        # to match named groups; confirm against test cases.
        for m in re.finditer(r"\(?P<[A-Za-z0-9]+>([^\)]+)\)", self.pattern):
            remap[invmap[i]] = m.group(1)
            i += 1
        # NOTE(review): the replacement "P<\\1>{\\1})" looks asymmetric
        # (no opening paren) -- verify the produced pattern is as intended.
        fmt = re.sub(r"\(?P=([A-Za-z0-9]+)\)", "P<\\1>{\\1})", self.pattern)
        return os.path.basename(fmt.format(**remap))
    @property
    def fmt(self):
        # Formatting string derived from the pattern by _init_format().
        return self._fmt
    @property
    def allowed_keys(self):
        # Union of required, group and extra keys.
        return list(set(self._required_keys + self._group_keys
                        + self._extra_keys))
    def _init_format(self):
        """Initialize formatting string. Find groups defined by (?P<GROUP>) as
        well as constant expressions and concatenate"""
        m = re.findall("(\(\?P[<=](\w+)>?|({sep})|(?:[\[\]A-Za-z0-9\-\+\_]+\))|([A-Za-z0-9]+))".format(sep=os.sep), self.pattern)
        fmtlist = []
        for x in m:
            if x[1]:
                # Named group or back-reference -> "{NAME}" placeholder.
                fmtlist.append("{" + x[1] + "}")
            elif x[2]:
                # Path separator.
                fmtlist.append(x[2])
            elif x[3]:
                # Constant literal text.
                fmtlist.append(x[3])
        # Join with "_" and collapse "_<sep>_" back to a bare separator.
        self._fmt = re.sub("_{sep}_".format(sep=os.sep), os.sep, ("_".join(fmtlist)))
    def _validate_keys(self):
        """Validate keys. Importantly, make sure keys with indices, i.e. keys
        KEY1, KEY2, etc, stripped of indices are in the allowed keys.

        The sole purpose of the indices is to make the keys unique
        """
        seen_required = False
        for key, group in self._keymap.items():
            if key not in self.allowed_keys:
                raise DisallowedKeyException("key {key} not in allowed key set {allowed}".format(key=key, allowed=self.allowed_keys))
            if key in self._required_keys:
                seen_required = True
        # NOTE(review): one present required key satisfies this check even
        # if several are declared -- confirm that is the intended contract.
        if self._required_keys and not seen_required:
            raise MissingRequiredKeyException("one or several of the required keys '{keys}' is/are missing".format(keys=self._required_keys))
    def _parse_str(self, s, suffix):
        """Parse string and set read group dict"""
        pattern = self.pattern
        # Allow a leading relative/absolute path before the pattern proper.
        if s.startswith(os.curdir) or s.startswith(os.sep):
            pattern = REGEXP_DOT_MATCH + pattern
        if suffix:
            pattern = pattern + REGEXP_SPACER_MATCH + suffix
        m = re.match(pattern, s)
        if m is None:
            # Parse failure is not fatal: warn and leave the dict empty.
            smllogger.warn("Unable to parse string {s} with regexp {re}"
                           .format(s=s, re=self.re.pattern))
            return
        # Regular keys
        self.update({k: v for (k, v) in m.groupdict().items()
                     if k not in self._extra_keys})
        self._process_indexed_keys(m)
        self._post_process_keys(m)
    def _process_indexed_keys(self, m):
        """Process indexed keys.

        Process indexed keys to unindexed version,
        e.g. collect PU1, PU2 to PU.
        """
        # Add indexed keys
        for key, group in self._keymap.items():
            if key in self._extra_keys:
                continue
            self.update({key: self._concat.join(m.group(mkey)
                                                for mkey in group)})
    def _post_process_keys(self, m):
        # Hook for subclasses; the default does nothing.
        pass
    def parse(self, s, suffix=""):
        """Parse string and return string representation.

        Args:
          s (string): string to parse
          suffix (string): suffix to add to regular expression in search

        Returns:
          Regexp object
        """
        self.clear()
        self._parse_str(s, suffix)
        return self
    def __repr__(self):
        return str(type(self))
class SampleRegexp(RegexpDict):
    """Regexp dict for sample names; requires an SM (sample) group."""
    _required_keys = ['SM']
    _group_keys = ['PU']
    _extra_keys = ['PATH']
    def __init__(self, regexp=None, *args, **kwargs):
        super(SampleRegexp, self).__init__(regexp, *args, **kwargs)
    def _post_process_keys(self, m):
        # Record the directory part of the parsed string.
        self['PATH'] = os.path.dirname(m.string)
class RunRegexp(RegexpDict):
    """Regexp dict for run names; no required keys."""
    _group_keys = ['SM', 'PU', 'DT']
    _extra_keys = ['PATH']
    def __init__(self, regexp=None, *args, **kwargs):
        super(RunRegexp, self).__init__(regexp, *args, **kwargs)
    def _post_process_keys(self, m):
        # Record the directory part of the parsed string.
        self['PATH'] = os.path.dirname(m.string)
# SAM format specification
# @RG Read group. Unordered multiple @RG lines are allowed.
# ID* Read group identifier. Each @RG line must have a unique ID. The
# value of ID is used in the RG tags of alignment records. Must be
# unique among all read groups in header section. Read group IDs may
# be modified when merging SAM files in order to handle collisions.
# CN Name of sequencing center producing the read.
# DS Description.
# DT Date the run was produced (ISO8601 date or date/time).
# FO Flow order. The array of nucleotide bases that correspond to the
# nucleotides used for each flow of each read. Multi-base flows are
# encoded in IUPAC format, and non-nucleotide flows by various other
# characters. Format: /\*|[ACMGRSVTWYHKDBN]+/
# KS The array of nucleotide bases that correspond to the key sequence
# of each read.
# LB Library.
# PG Programs used for processing the read group.
# PI Predicted median insert size.
# PL Platform/technology used to produce the reads. Valid values:
# CAPILLARY, LS454, ILLUMINA, SOLID, HELICOS, IONTORRENT, ONT, and
# PACBIO.
# PM Platform model. Free-form text providing further details of the platform/technology used.
# PU Platform unit (e.g. flowcell-barcode.lane for Illumina or slide for SOLiD). Unique identifier.
# SM Sample. Use pool name where a pool is being sequenced.
class ReadGroup(RunRegexp):
    """Adds formatting function for generating read group option string"""
    _group_keys = ['ID', 'CN', 'DS', 'DT', 'FO', 'KS',
                   'LB', 'PG', 'PI', 'PL', 'PU', 'SM']
    _extra_keys = ['PATH']
    # Maps SAM @RG tag -> long command line option name.
    _group_dict = {'ID': 'identifier', 'CN': 'center', 'DS': 'description',
                   'DT': 'date', 'FO': 'floworder', 'KS': 'keysequence',
                   'LB': 'library', 'PG': 'program', 'PI': 'insertsize',
                   'PL': 'platform', 'PU': 'platform-unit', 'SM': 'sample'}
    def __init__(self, regexp=None, opt_prefix="--", *args, **kwargs):
        """Initialize read group regexp.

        Args:
          regexp (str): regular expression with named groups
          opt_prefix (str): option prefix used by __str__, e.g. "--"
        """
        super(ReadGroup, self).__init__(regexp, *args, **kwargs)
        self._opt_prefix = opt_prefix
    def _post_process_keys(self, m):
        self['PATH'] = os.path.dirname(m.string)
        if 'ID' not in self.keys() or not self.get('ID', ""):
            # No explicit ID group: derive one from the formatted basename.
            self['ID'] = os.path.basename(self.fmt.format(**self))
    def _fmt_string(self, k):
        """Take care of date string"""
        if k == 'DT':
            # Dates are normalized via isoformat().
            return isoformat(self[k])
        return self[k]
    def __str__(self):
        """Return a generic program option string, e.g.
        "--sample foo --platform-unit bar", for all set group keys."""
        # PEP 8 idiom: "x is not None" instead of "not x is None".
        return " ".join([
            "{dash}{key} {value}".format(dash=self._opt_prefix,
                                         key=self._group_dict[k],
                                         value=self._fmt_string(k))
            for k in sorted(list(self.keys())) if self[k] is not None
            and k in self._group_keys])
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like the identity matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Public API of this module.
__all__ = [
    "LinearOperatorIdentity",
    "LinearOperatorScaledIdentity",
]
class BaseLinearOperatorIdentity(linear_operator.LinearOperator):
  """Base class for Identity operators."""

  def _check_num_rows_possibly_add_asserts(self):
    """Static check of init arg `num_rows`, possibly add asserts."""
    # Possibly add asserts.
    if self._assert_proper_shapes:
      self._num_rows = control_flow_ops.with_dependencies(
          [
              check_ops.assert_rank(
                  self._num_rows,
                  0,
                  message="Argument num_rows must be a 0-D Tensor."),
              check_ops.assert_non_negative(
                  self._num_rows,
                  message="Argument num_rows must be non-negative."),
          ],
          self._num_rows)

    # Static checks.
    if not self._num_rows.dtype.is_integer:
      raise TypeError("Argument num_rows must be integer type. Found:"
                      " %s" % self._num_rows)

    num_rows_static = self._num_rows_static
    if num_rows_static is None:
      return  # Cannot do any other static checks.

    if num_rows_static.ndim != 0:
      raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
                       " %s" % num_rows_static)

    if num_rows_static < 0:
      raise ValueError("Argument num_rows must be non-negative. Found:"
                       " %s" % num_rows_static)

  def _ones_diag(self):
    """Returns the diagonal of this operator as all ones."""
    if self.shape.is_fully_defined():
      # Fully static shape: the diagonal length is min(M, N).
      d_shape = self.batch_shape.concatenate(
          [min(self.domain_dimension.value, self.range_dimension.value)])
    else:
      # Dynamic shape: compute the diagonal length at graph run time.
      d_shape = array_ops.concat(
          [self.batch_shape_tensor(),
           [math_ops.reduce_min(self.shape_tensor()[-2:])]], axis=0)

    return array_ops.ones(shape=d_shape, dtype=self.dtype)
class LinearOperatorIdentity(BaseLinearOperatorIdentity):
  """`LinearOperator` acting like a [batch] square identity matrix.

  This operator acts like a [batch] identity matrix `A` with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x N` matrix. This matrix `A` is not materialized, but for
  purposes of broadcasting this shape will be relevant.

  `LinearOperatorIdentity` is initialized with `num_rows`, and optionally
  `batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this
  operator efficiently passes through all arguments. If `batch_shape` is
  provided, broadcasting may occur, which will require making copies.

  ```python
  # Create a 2 x 2 identity matrix.
  operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32)

  operator.to_dense()
  ==> [[1., 0.]
       [0., 1.]]

  operator.shape
  ==> [2, 2]

  operator.log_determinant()
  ==> 0.

  x = ... Shape [2, 4] Tensor
  operator.apply(x)
  ==> Shape [2, 4] Tensor, same as x.

  y = tf.random_normal(shape=[3, 2, 4])
  # Note that y.shape is compatible with operator.shape because operator.shape
  # is broadcast to [3, 2, 2].
  # This broadcast does NOT require copying data, since we can infer that y
  # will be passed through without changing shape. We are always able to infer
  # this if the operator has no batch_shape.
  x = operator.solve(y)
  ==> Shape [3, 2, 4] Tensor, same as y.

  # Create a 2-batch of 2x2 identity matrices
  operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2])
  operator.to_dense()
  ==> [[[1., 0.]
        [0., 1.]],
       [[1., 0.]
        [0., 1.]]]

  # Here, even though the operator has a batch shape, the input is the same as
  # the output, so x can be passed through without a copy. The operator is able
  # to detect that no broadcast is necessary because both x and the operator
  # have statically defined shape.
  x = ... Shape [2, 2, 3]
  operator.apply(x)
  ==> Shape [2, 2, 3] Tensor, same as x

  # Here the operator and x have different batch_shape, and are broadcast.
  # This requires a copy, since the output is different size than the input.
  x = ... Shape [1, 2, 3]
  operator.apply(x)
  ==> Shape [2, 2, 3] Tensor, equal to [x, x]
  ```

  ### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `apply` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, N],  with b >= 0
  x.shape =   [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  ### Performance

  If `batch_shape` initialization arg is `None`:

  * `operator.apply(x)` is `O(1)`
  * `operator.solve(x)` is `O(1)`
  * `operator.determinant()` is `O(1)`

  If `batch_shape` initialization arg is provided, and static checks cannot
  rule out the need to broadcast:

  * `operator.apply(x)` is `O(D1*...*Dd*N*R)`
  * `operator.solve(x)` is `O(D1*...*Dd*N*R)`
  * `operator.determinant()` is `O(B1*...*Bb)`

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite`.
  These have the following meaning

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               num_rows,
               batch_shape=None,
               dtype=None,
               is_non_singular=True,
               is_self_adjoint=True,
               is_positive_definite=True,
               assert_proper_shapes=False,
               name="LinearOperatorIdentity"):
    r"""Initialize a `LinearOperatorIdentity`.

    The `LinearOperatorIdentity` is initialized with arguments defining `dtype`
    and shape.

    This operator is able to broadcast the leading (batch) dimensions, which
    sometimes requires copying data. If `batch_shape` is `None`, the operator
    can take arguments of any batch shape without copying. See examples.

    Args:
      num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
        corresponding identity matrix.
      batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
        dimensions. If `None`, this operator has no leading dimensions.
      dtype: Data type of the matrix that this operator represents.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix\
            #Extension_for_non_symmetric_matrices
      assert_proper_shapes: Python `bool`. If `False`, only perform static
        checks that initialization and method arguments have proper shape.
        If `True`, and static checks are inconclusive, add asserts to the graph.
      name: A name for this `LinearOperator`

    Raises:
      ValueError: If `num_rows` is determined statically to be non-scalar, or
        negative.
      ValueError: If `batch_shape` is determined statically to not be 1-D, or
        negative.
      ValueError: If any of the following is not `True`:
        `{is_self_adjoint, is_non_singular, is_positive_definite}`.
    """
    dtype = dtype or dtypes.float32
    self._assert_proper_shapes = assert_proper_shapes

    with ops.name_scope(name):
      dtype = dtypes.as_dtype(dtype)
      # The identity matrix has these properties unconditionally; reject
      # callers that claim otherwise.
      if not is_self_adjoint:
        raise ValueError("An identity operator is always self adjoint.")
      if not is_non_singular:
        raise ValueError("An identity operator is always non-singular.")
      if not is_positive_definite:
        raise ValueError("An identity operator is always positive-definite.")

      super(LinearOperatorIdentity, self).__init__(
          dtype=dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          name=name)

      self._num_rows = linear_operator_util.shape_tensor(
          num_rows, name="num_rows")
      self._num_rows_static = tensor_util.constant_value(self._num_rows)
      # NOTE: _check_num_rows_possibly_add_asserts is inherited from
      # BaseLinearOperatorIdentity.
      self._check_num_rows_possibly_add_asserts()

      if batch_shape is None:
        self._batch_shape_arg = None
      else:
        self._batch_shape_arg = linear_operator_util.shape_tensor(
            batch_shape, name="batch_shape_arg")
        self._batch_shape_static = tensor_util.constant_value(
            self._batch_shape_arg)
        self._check_batch_shape_possibly_add_asserts()

  def _shape(self):
    matrix_shape = tensor_shape.TensorShape(
        (self._num_rows_static, self._num_rows_static))
    if self._batch_shape_arg is None:
      return matrix_shape

    batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
    return batch_shape.concatenate(matrix_shape)

  def _shape_tensor(self):
    matrix_shape = array_ops.stack(
        (self._num_rows, self._num_rows), axis=0)
    if self._batch_shape_arg is None:
      return matrix_shape

    return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)

  def _assert_non_singular(self):
    # Always true for the identity; nothing to check.
    return control_flow_ops.no_op("assert_non_singular")

  def _assert_positive_definite(self):
    # Always true for the identity; nothing to check.
    return control_flow_ops.no_op("assert_positive_definite")

  def _assert_self_adjoint(self):
    # Always true for the identity; nothing to check.
    return control_flow_ops.no_op("assert_self_adjoint")

  def _possibly_broadcast_batch_shape(self, x):
    """Return 'x', possibly after broadcasting the leading dimensions."""
    # If we have no batch shape, our batch shape broadcasts with everything!
    if self._batch_shape_arg is None:
      return x

    # Static attempt:
    #   If we determine that no broadcast is necessary, pass x through
    #   If we need a broadcast, add to an array of zeros.
    #
    # special_shape is the shape that, when broadcast with x's shape, will give
    # the correct broadcast_shape. Note that
    #   We have already verified the second to last dimension of self.shape
    #   matches x's shape in assert_compatible_matrix_dimensions.
    #   Also, the final dimension of 'x' can have any shape.
    #   Therefore, the final two dimensions of special_shape are 1's.
    special_shape = self.batch_shape.concatenate([1, 1])
    bshape = array_ops.broadcast_static_shape(x.get_shape(), special_shape)
    if special_shape.is_fully_defined():
      # bshape.is_fully_defined iff special_shape.is_fully_defined.
      if bshape == x.get_shape():
        return x
      # Use the built in broadcasting of addition.
      zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
      return x + zeros

    # Dynamic broadcast:
    #   Always add to an array of zeros, rather than using a "cond", since a
    #   cond would require copying data from GPU --> CPU.
    special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
    zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
    return x + zeros

  def _apply(self, x, adjoint=False):
    # Note that adjoint has no effect since this matrix is self-adjoint.
    if self._assert_proper_shapes:
      aps = linear_operator_util.assert_compatible_matrix_dimensions(
          self, x)
      x = control_flow_ops.with_dependencies([aps], x)
    return self._possibly_broadcast_batch_shape(x)

  def _determinant(self):
    # det(I) = 1 for every batch member.
    return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _log_abs_determinant(self):
    # log|det(I)| = 0 for every batch member.
    return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _solve(self, rhs, adjoint=False):
    # I^{-1} rhs = rhs; adjoint is irrelevant (self-adjoint operator).
    return self._apply(rhs)

  def _diag_part(self):
    return self._ones_diag()

  def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`. Equiv to `I + mat`.

    Args:
      mat:  `Tensor` with same `dtype` and shape broadcastable to `self`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
      mat = ops.convert_to_tensor(mat, name="mat")
      # Adding I amounts to adding 1 along the main diagonal.
      mat_diag = array_ops.matrix_diag_part(mat)
      new_diag = 1 + mat_diag
      return array_ops.matrix_set_diag(mat, new_diag)

  def _check_batch_shape_possibly_add_asserts(self):
    """Static check of init arg `batch_shape`, possibly add asserts."""
    if self._batch_shape_arg is None:
      return

    # Possibly add asserts
    if self._assert_proper_shapes:
      self._batch_shape_arg = control_flow_ops.with_dependencies(
          [
              check_ops.assert_rank(
                  self._batch_shape_arg,
                  1,
                  message="Argument batch_shape must be a 1-D Tensor."),
              check_ops.assert_non_negative(
                  self._batch_shape_arg,
                  message="Argument batch_shape must be non-negative."),
          ],
          self._batch_shape_arg)

    # Static checks
    if not self._batch_shape_arg.dtype.is_integer:
      raise TypeError("Argument batch_shape must be integer type. Found:"
                      " %s" % self._batch_shape_arg)

    if self._batch_shape_static is None:
      return  # Cannot do any other static checks.

    if self._batch_shape_static.ndim != 1:
      raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
                       " %s" % self._batch_shape_static)

    if np.any(self._batch_shape_static < 0):
      raise ValueError("Argument batch_shape must be non-negative. Found:"
                       "%s" % self._batch_shape_static)
class LinearOperatorScaledIdentity(BaseLinearOperatorIdentity):
"""`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`.
This operator acts like a scaled [batch] identity matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
a scaled version of the `N x N` identity matrix.
`LinearOperatorIdentity` is initialized with `num_rows`, and a `multiplier`
(a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the
`multiplier` determines the scale for each batch member.
```python
# Create a 2 x 2 scaled identity matrix.
operator = LinearOperatorIdentity(num_rows=2, multiplier=3.)
operator.to_dense()
==> [[3., 0.]
[0., 3.]]
operator.shape
==> [2, 2]
operator.log_determinant()
==> 2 * Log[3]
x = ... Shape [2, 4] Tensor
operator.apply(x)
==> 3 * x
y = tf.random_normal(shape=[3, 2, 4])
# Note that y.shape is compatible with operator.shape because operator.shape
# is broadcast to [3, 2, 2].
x = operator.solve(y)
==> 3 * x
# Create a 2-batch of 2x2 identity matrices
operator = LinearOperatorIdentity(num_rows=2, multiplier=5.)
operator.to_dense()
==> [[[5., 0.]
[0., 5.]],
[[5., 0.]
[0., 5.]]]
x = ... Shape [2, 2, 3]
operator.apply(x)
==> 5 * x
# Here the operator and x have different batch_shape, and are broadcast.
x = ... Shape [1, 2, 3]
operator.apply(x)
==> 5 * x
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `apply` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
### Performance
* `operator.apply(x)` is `O(D1*...*Dd*N*R)`
* `operator.solve(x)` is `O(D1*...*Dd*N*R)`
* `operator.determinant()` is `O(D1*...*Dd)`
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite`.
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
             num_rows,
             multiplier,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             assert_proper_shapes=False,
             name="LinearOperatorScaledIdentity"):
    r"""Initialize a `LinearOperatorScaledIdentity`.

    The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which
    determines the size of each identity matrix, and a `multiplier`,
    which defines `dtype`, batch shape, and scale of each matrix.

    This operator is able to broadcast the leading (batch) dimensions.

    Args:
      num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
        corresponding identity matrix.
      multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix\
            #Extension_for_non_symmetric_matrices
      assert_proper_shapes: Python `bool`. If `False`, only perform static
        checks that initialization and method arguments have proper shape.
        If `True`, and static checks are inconclusive, add asserts to the
        graph.
      name: A name for this `LinearOperator`

    Raises:
      ValueError: If `num_rows` is determined statically to be non-scalar, or
        negative.
    """
    self._assert_proper_shapes = assert_proper_shapes

    with ops.name_scope(name, values=[multiplier, num_rows]):
        self._multiplier = ops.convert_to_tensor(multiplier, name="multiplier")
        # The multiplier alone determines the operator's dtype.
        super(LinearOperatorScaledIdentity, self).__init__(
            dtype=self._multiplier.dtype,
            is_non_singular=is_non_singular,
            is_self_adjoint=is_self_adjoint,
            is_positive_definite=is_positive_definite,
            name=name)

        # Shape [B1,...Bb, 1, 1]: two size-1 trailing dims are appended so the
        # multiplier broadcasts against [..., N, R] matrices in apply/solve.
        self._multiplier_matrix = array_ops.expand_dims(
            array_ops.expand_dims(self.multiplier, -1), -1)
        # Cache conjugate and absolute value once; reused by _apply/_solve
        # (adjoint=True) and _log_abs_determinant.
        self._multiplier_matrix_conj = math_ops.conj(
            self._multiplier_matrix)
        self._abs_multiplier = math_ops.abs(self.multiplier)

        self._num_rows = linear_operator_util.shape_tensor(
            num_rows, name="num_rows")
        # Graph-construction-time value of num_rows, if statically known.
        self._num_rows_static = tensor_util.constant_value(self._num_rows)
        self._check_num_rows_possibly_add_asserts()
        # Pre-cast copies of num_rows used by the determinant computations.
        self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype)
        self._num_rows_cast_to_real_dtype = math_ops.cast(
            self._num_rows, self.dtype.real_dtype)
def _shape(self):
    """Static shape: `multiplier` batch shape concatenated with `[N, N]`."""
    n = self._num_rows_static
    square_part = tensor_shape.TensorShape((n, n))
    return self.multiplier.get_shape().concatenate(square_part)
def _shape_tensor(self):
    """Dynamic shape: `multiplier` batch shape concatenated with `[N, N]`."""
    square_part = array_ops.stack(
        (self._num_rows, self._num_rows), axis=0)
    return array_ops.concat(
        (array_ops.shape(self.multiplier), square_part), 0)
def _assert_non_singular(self):
    """Op asserting the multiplier is nonzero (the only singular case)."""
    abs_mult = math_ops.abs(self.multiplier)
    return check_ops.assert_positive(
        abs_mult, message="LinearOperator was singular")
def _assert_positive_definite(self):
    """Op asserting `Re(multiplier) > 0`, i.e. `cI` is positive definite."""
    real_mult = math_ops.real(self.multiplier)
    return check_ops.assert_positive(
        real_mult, message="LinearOperator was not positive definite.")
def _assert_self_adjoint(self):
    """Op asserting the multiplier has no imaginary part (cI Hermitian)."""
    imag_part = math_ops.imag(self.multiplier)
    zeros = array_ops.zeros_like(imag_part)
    return check_ops.assert_equal(
        zeros,
        imag_part,
        message="LinearOperator was not self-adjoint")
def _apply(self, x, adjoint=False):
    """Return `c * x` (or `conj(c) * x` when `adjoint`), broadcasting."""
    scale = (self._multiplier_matrix_conj if adjoint
             else self._multiplier_matrix)
    if self._assert_proper_shapes:
        shape_check = linear_operator_util.assert_compatible_matrix_dimensions(
            self, x)
        x = control_flow_ops.with_dependencies([shape_check], x)
    return x * scale
def _determinant(self):
    # det(c * I_N) = c**N.  `_num_rows_cast_to_dtype` was pre-cast in
    # __init__ to the multiplier's dtype, so the power needs no implicit cast.
    return self.multiplier ** self._num_rows_cast_to_dtype
def _log_abs_determinant(self):
    # log|det(c * I_N)| = N * log|c|; computed in the real dtype since |c|
    # is real even for complex multipliers.
    return self._num_rows_cast_to_real_dtype * math_ops.log(
        self._abs_multiplier)
def _solve(self, rhs, adjoint=False):
    """Return `rhs / c` (or `rhs / conj(c)` when `adjoint`), broadcasting."""
    scale = (self._multiplier_matrix_conj if adjoint
             else self._multiplier_matrix)
    if self._assert_proper_shapes:
        shape_check = linear_operator_util.assert_compatible_matrix_dimensions(
            self, rhs)
        rhs = control_flow_ops.with_dependencies([shape_check], rhs)
    return rhs / scale
def _diag_part(self):
    # Diagonal of c*I is the multiplier broadcast along a length-N axis.
    # `_ones_diag()` is defined on the class outside this view; presumably a
    # [batch] ones vector of length N -- confirm against the class body.
    return self._ones_diag() * self.multiplier[..., array_ops.newaxis]
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.  Equiv to `cI + mat`.

    Args:
      mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
      name: A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
        # Shape [C1,...,Cc, M, M]
        mat = ops.convert_to_tensor(mat, name="mat")
        # Shape [B1,...,Bb, 1], so it broadcasts against the diagonal below.
        mult_vector = array_ops.expand_dims(self.multiplier, -1)
        # Shape [C1,...,Cc, M]
        diag = array_ops.matrix_diag_part(mat)
        # Only the diagonal changes: write back diag + c.
        return array_ops.matrix_set_diag(mat, mult_vector + diag)
@property
def multiplier(self):
    """The [batch] scalar `Tensor`, `c` in `cI`.

    Shape `[B1,...,Bb]` (or `[]`); the tensor produced by
    `convert_to_tensor` on the constructor's `multiplier` argument.
    """
    return self._multiplier
| |
#!/usr/bin/env python
import sys
from datetime import datetime, date, timedelta, time
import subprocess
import pprint
from socket import socket
pp = pprint.PrettyPrinter(indent=4)  # debug pretty-printer (used when uncommenting pp.pprint below)
#
# SETTINGS
#
CARBON_SERVER = '127.0.0.1'  # Graphite carbon host stats are sent to
CARBON_PORT = 2003  # Carbon plaintext-protocol port
LOG_STATS=1  # 1 = also append the stats sent to graphite to a text file
log_path="/tmp/haproxy_stats"  # Directory the per-schema stat log is written to
test=1  # 1 = test mode: print instead of sending to graphite
one_levels=['creative', 'TOPLEVEL']  # First URL path components to report on; 'TOPLEVEL' means '/'.  All others are ignored.
#################################
################################################################
# SUBS
################################################################
def rollup():
lines=[]
sock = socket()
f = open("%s/%s" % (log_path, schema), 'a')
try:
sock.connect( (CARBON_SERVER,CARBON_PORT) )
except:
print "Couldn't connect to %(server)s on port %(port)d, is carbon-agent.py running?" % { 'server':CARBON_SERVER, 'port':CARBON_PORT }
sys.exit(1)
for key in server:
#
# Aborted
#
if key == 'aborted':
lines.append("%s.aborted %s %d" % (schema, server[key], unix_time))
continue
#
# hits per sec or min
#
if key == 'hps':
if int(delta) == 60:
key='hpm'
num=server['hps']
lines.append("%s.hpm %s %d" % (schema, num, unix_time))
continue
else:
num=server[key] / int(delta)
lines.append("%s.hps %s %d" % (schema, num, unix_time))
continue
#
# one level URL
#
if key[0] == 'one_level':
num=server[key[0], key[1]] / int(delta)
lines.append("%s.%s %s %d" % (schema, key[1], num, unix_time))
continue
if key[1] == 'lines':
hps=server[key[0],'lines'] / int(delta)
name=key[0] + '.hps'
lines.append("%s.%s %s %d" % (schema, name , hps, unix_time))
if key[1] == 'total_Tq':
avg=server[key[0], 'total_Tq'] / server[key[0], 'lines']
lines.append("%s.%s.avgTq %s %d" % (schema, key[0], avg, unix_time))
if key[1] == 'total_Tw':
avg=server[key[0], 'total_Tw'] / server[key[0], 'lines']
lines.append("%s.%s.avgTw %s %d" % (schema, key[0], avg, unix_time))
if key[1] == 'total_Tc':
avg=server[key[0], 'total_Tc'] / server[key[0], 'lines']
lines.append("%s.%s.avgTc %s %d" % (schema, key[0], avg, unix_time))
if key[1] == 'total_Tr':
avg=server[key[0], 'total_Tr'] / server[key[0], 'lines']
lines.append("%s.%s.avgTr %s %d" % (schema, key[0], avg, unix_time))
if key[1] == 'total_Tt':
avg=server[key[0], 'total_Tt'] / server[key[0], 'lines']
lines.append("%s.%s.avgTt %s %d" % (schema, key[0], avg, unix_time))
if key[1] == 'max_Tq':
lines.append("%s.%s.maxTq %s %d" % (schema, key[0], server[key], unix_time))
if key[1] == 'max_Tw':
lines.append("%s.%s.maxTw %s %d" % (schema, key[0], server[key], unix_time))
if key[1] == 'max_Tc':
lines.append("%s.%s.max_Tc %s %d" % (schema, key[0], server[key], unix_time))
if key[1] == 'max_Tr':
lines.append("%s.%s.maxTr %s %d" % (schema, key[0], server[key], unix_time))
if key[1] == 'max_Tt':
lines.append("%s.%s.maxTt %s %d" % (schema, key[0], server[key], unix_time))
message = '\n'.join(lines) + '\n' #all lines must end in a newline
if test == 0:
if LOG_STATS == 1:
f.write("sending message.....")
f.write(message)
sock.sendall(message)
#print message
else:
print message
#f.write(message)
f.write("sent.\n")
def run_total(total, new):
    # Accumulate int(new) into server[(backend_srv, total)].
    # Uses globals `server` and `backend_srv` set by the main loop.
    slot = (backend_srv, total)
    server[slot] = server.get(slot, 0) + int(new)
def max(max, new):
    # Track the window maximum for server[(backend_srv, max)].
    # NOTE: shadows the builtin max(); name kept for call-site compatibility.
    val = int(new)
    slot = (backend_srv, max)
    if slot not in server or val > server[slot]:
        server[slot] = val
################################################################################
#MAIN
################################################################################
try:
only_for=sys.argv[1]
delta=sys.argv[2]
check_vip=1
except:
pass
td=timedelta(seconds=+int(delta))
dtFirstLine=None
server={}
for line in sys.stdin:
#
# Parse Line
#
if 'GET' not in line and 'POST' not in line:
print "Not a GET or a POST? "
print line
continue
split_array=line.split() #split_array -- original split of line
log_time=split_array[6] #time -- the time of the event
backend=split_array[8].split("/") #the backend -- creative_backend/creative01
backend_srv=backend[1] #the adserver -- creative01
stats=split_array[9].split("/") #the stats -- 26/0/0/125/255
url_array=line.split("}") #the URL element
url=url_array[1].split()
ols=url[1].split("?")
one_level=ols[0].replace('/', '')
if one_level == '':
one_level='TOPLEVEL'
host=split_array[17].replace('{',' ').replace('|',' ').split()[0].split('.')
#print host
if len(host) > 3 or len(host) < 2: #has to be a hostname not 209.81
print 'ship: ' + host
continue
schema=host[0] + '.' + host[1] #graphite schema
try:
if check_vip == 1:
#print schema, only_for
if schema != only_for:
print "skipping " + schema, only_for
continue
except:
pass
#
#Get details of connections
#
Tq=stats[0]
Tw=stats[1]
Tc=stats[2]
Tr=stats[3]
Tt=stats[4]
if (Tw == "-1" or Tq == '-1' or Tc == '-1' or Tr == '-1' or Tt =='-1'):
if 'aborted' in server:
#f.write("ABORTED in SERVER %s" % server['aborted'])
server['aborted']= server['aborted'] + 1
else:
server['aborted']=1
continue
dtLine = datetime.strptime(log_time, "[%d/%b/%Y:%H:%M:%S.%f]")
int_unix_time=dtLine.strftime("%s")
unix_time=int(int_unix_time)
#
# Running Total
#
run_total('total_Tq', Tq)
run_total('total_Tc', Tc)
run_total('total_Tw', Tw)
run_total('total_Tr', Tr)
run_total('total_Tt', Tt)
#
# MAX's
#
max('max_Tq', Tq)
max('max_Tc', Tc)
max('max_Tw', Tw)
max('max_Tr', Tr)
max('max_Tt', Tt)
#
# Everything total
#
if 'hps' in server:
server['hps']=server['hps'] + 1
else:
server['hps']=server['hps'] = 1
if (backend_srv,'lines') in server:
server[(backend_srv,'lines')]=server[(backend_srv,'lines')] + 1
else:
server[(backend_srv,'lines')]= 1
if one_level in one_levels:
try:
server['one_level', one_level] = server['one_level',one_level] + 1
except KeyError:
server['one_level', one_level] = 1
#else:
#print "not reporting on: " + line
if dtFirstLine is None:
dtFirstLine=dtLine
dtStop=dtFirstLine + td
if dtLine > dtStop:
dtFirstLine=None
import time
now=int(time.time())
#pp.pprint(server)
rollup()
server={}
## 6 Tq '/' Tw '/' Tc '/' Tr '/' Tt* 10/0/30/69/109
#"Tq" total time in milliseconds spent waiting for the client to send a full HTTP request, not counting data.
#"Tc" total time in milliseconds spent waiting for the connection to establish to the final server, including retries.
#"Tw" total time in milliseconds spent waiting in the various queues.
#"Tr" total time in milliseconds spent waiting for the server to send a full HTTP response, not counting data.
#"Tt" is the total time in milliseconds elapsed between the accept and the last close
# 12 actconn '/' feconn '/' beconn '/' srv_conn '/' retries* 1/1/1/1/0
#f.write (server + Tq + "\n")
#0[ 'Aug',
#1 '28',
#2 '12:49:37',
#3 '10.10.106.202',
#4 'haproxy[28233]:',
#5 '174.62.109.81:54907',
#6 '[28/Aug/2012:12:49:37.487]',
#7 'frontend name',
#8 'backend name',
#9 '26/0/0/125/255',
#10 '200',
#11 '287',
#12 '-',
#13 '-',
#14 '----',
#15 '12/12/10/10/0',
# '0/0',
# 'acl name',
# '(iPad;',
# 'CPU',
# 'OS',
# '5_1_1',
# 'like',
# 'Mac',
# 'OS',
# 'X)',
# 'App}',
# '"GET',
# 'url'
# 'HTTP/1.1"']
| |
#pylint: disable=missing-docstring,invalid-name
from app import models
from app import utils
from app.constants import STUDENT_ROLE, STAFF_ROLE, VALID_ROLES
import json
# Canonical offering string for the demo course created by seed().
SEED_OFFERING = "cal/cs61a/sp15"
def is_seeded():
    """Return True if a course with the seed offering already exists."""
    offering_matches = models.Course.offering == SEED_OFFERING
    first_match = models.Course.query(offering_matches).get()
    return bool(first_match)
def seed():
    """Populate the datastore with demo data for the SEED_OFFERING course.

    Creates admin/staff/student users, a course, assignments, groups,
    submissions with scores, client versions, and grading queues.
    """
    # Fix: removed unused `import os` and `from google.appengine.ext import
    # ndb` (neither name was referenced in this function).
    import datetime
    import random

    def make_seed_course(creator):
        # The single demo course, taught by `creator`.
        return models.Course(
            display_name="CS 61A",
            institution="UC Soumya",
            offering=SEED_OFFERING,
            instructor=[creator.key])

    def make_future_assignment(course, creator):
        # Assignment due a year from now.
        date = (datetime.datetime.now() + datetime.timedelta(days=365))
        with open('app/seed/hog_template.py') as fp:
            templates = {}
            templates['hog.py'] = fp.read()
            # NOTE(review): the file is already exhausted here, so this is
            # always '' -- confirm whether hogq.scm should share the template.
            templates['hogq.scm'] = fp.read()
        return models.Assignment(
            name='cal/CS61A/sp15/proj1',
            display_name="Hog",
            points=20,
            templates=json.dumps(templates),
            creator=creator.key,
            course=course.key,
            max_group_size=4,
            due_date=date,
            lock_date=date,
        )

    # Will reject all scheme submissions
    def make_past_assignment(course, creator):
        # Assignment that was due a year ago.
        date = (datetime.datetime.now() - datetime.timedelta(days=365))
        with open('app/seed/scheme_templates/scheme.py') as sc, \
                open('app/seed/scheme_templates/scheme_reader.py') as sr, \
                open('app/seed/scheme_templates/tests.scm') as tests, \
                open('app/seed/scheme_templates/questions.scm') as quest:
            templates = {}
            # Fix: trailing commas previously wrapped every value in a
            # 1-tuple, so json.dumps stored lists instead of strings.
            templates['scheme.py'] = sc.read()
            templates['scheme_reader.py'] = sr.read()
            templates['tests.scm'] = tests.read()
            # Fix: key was misspelled 'questsions.scm'.
            templates['questions.scm'] = quest.read()
        return models.Assignment(
            name='cal/61A/fa14/proj4',
            points=20,
            display_name="Scheme",
            templates=json.dumps(templates),
            course=course.key,
            creator=creator.key,
            max_group_size=4,
            due_date=date)

    def make_hw_assignment(course, creator):
        # Homework due in two days.
        date = (datetime.datetime.now() + datetime.timedelta(days=2))
        with open('app/seed/scheme_templates/scheme.py') as sc:
            templates = {}
            # Fix: trailing comma previously made this a 1-tuple.
            templates['scheme.py'] = sc.read()
        return models.Assignment(
            name='cal/CS61A/sp15/hw1',
            points=2,
            display_name="Homework 1",
            templates=json.dumps(templates),
            course=course.key,
            creator=creator.key,
            max_group_size=4,
            due_date=date)

    def make_group(assign, members):
        # A fully formed group: every member has accepted.
        return models.Group(
            member=[m.key for m in members],
            assignment=assign.key
        )

    def make_invited_group(assign, members):
        # A pending group: first member active, second only invited.
        return models.Group(
            member=[members[0].key],
            invited=[members[1].key],
            assignment=assign.key
        )

    def random_date():
        """Return a random datetime within roughly the last 12 days."""
        days, seconds = random.randint(0, 12), random.randint(0, 86399)
        delta = datetime.timedelta(days=days, seconds=seconds)
        sdate = (datetime.datetime.now() - delta)
        # Fix: the computed date was previously dropped (implicit None), so
        # every Backup was stored with client_time=None.
        return sdate

    def make_seed_submission(assignment, submitter, final=False):
        # A Hog submission with a score attached.
        with open('app/seed/hog_modified.py') as fp:
            messages = {}
            messages['file_contents'] = {
                'hog.py': fp.read(),
                'hogq.scm': 'Blank Stuff',
                'submit': final
            }
        # NOTE(review): a fresh admin user and an unused tag='test' score are
        # created on every call; preserved as-is for seed-data parity.
        g = models.User(
            email=["test@example.com"],
            is_admin=True
        )
        g.put()
        score = models.Score(
            score=88,
            tag='test',
            grader=g.key
        )
        score.put()
        messages = [models.Message(kind=kind, contents=contents)
                    for kind, contents in messages.items()]
        score = models.Score(
            score=10
        )
        score.put()
        backup = models.Backup(
            messages=messages,
            assignment=assignment.key,
            submitter=submitter.key,
            client_time=random_date())
        backup.put()
        return models.Submission(backup=backup.key, score=[score])

    def make_seed_scheme_submission(assignment, submitter, final=False):
        # A Scheme submission (no score).
        with open('app/seed/scheme.py') as sc, \
                open('app/seed/scheme_reader.py') as sr, \
                open('app/seed/tests.scm') as tests, \
                open('app/seed/questions.scm') as quest:
            messages = {}
            messages['file_contents'] = {
                'scheme.py': sc.read(),
                'scheme_reader.py': sr.read(),
                'tests.scm': tests.read(),
                # Fix: key was misspelled 'questsions.scm'.
                'questions.scm': quest.read(),
                'submit': final
            }
        messages = [models.Message(kind=kind, contents=contents)
                    for kind, contents in messages.items()]
        backup = models.Backup(
            messages=messages,
            assignment=assignment.key,
            submitter=submitter.key,
            client_time=random_date())
        backup.put()
        return models.Submission(backup=backup.key)

    def make_version(current_version):
        # ok-client version record pointing at the GitHub releases page.
        return models.Version(
            name='ok',
            id='ok',
            base_url='https://github.com/Cal-CS-61A-Staff/ok-client/releases/download',
            versions=[current_version],
            current_version=current_version
        )

    def make_queue(assignment, submissions, assignee):
        # Fix: parameter was misspelled 'asignee' (internal helper only).
        # Creates a grading queue plus one FinalSubmission per submission.
        queue = models.Queue(
            assignment=assignment.key,
            assigned_staff=[assignee.key],
            owner=assignee.key)
        queue = queue.put()
        for subm in submissions:
            backup = subm.backup.get()
            group = None
            if backup.submitter.get().get_group(assignment.key):
                group = backup.submitter.get().get_group(assignment.key).key
            fs = models.FinalSubmission(
                assignment=assignment.key,
                group=group,
                submission=subm.key,
                submitter=backup.submitter,
                queue=queue)
            fs.put()

    def make_final_with_group(subm, assign, submitter, group):
        # Mark `subm` as the final submission for a whole group.
        fs = models.FinalSubmission(submission=subm.key,
                                    assignment=assign.key,
                                    submitter=submitter.key,
                                    group=group.key)
        fs.put()
        return fs

    def make_final(subm, assign, submitter):
        # Mark `subm` as an individual's final submission.
        fs = models.FinalSubmission(submission=subm.key,
                                    assignment=assign.key,
                                    submitter=submitter.key)
        fs.put()
        return fs

    def score_seed_submission(final, score, msg, grader):
        """ Add composition score """
        score = models.Score(
            tag='composition',
            score=score,
            message=msg,
            grader=grader.key)
        score.put()
        subm = final.submission.get()
        subm.score.append(score)
        subm.put()

    # Start putting things in the DB.
    c = models.User(
        email=["test@example.com"],
        is_admin=True
    )
    c.put()
    # Create a course
    course = make_seed_course(c)
    course.put()
    a = models.User(
        email=["dummy@admin.com"],
    )
    a.put()
    models.Participant.add_role(a.key, course.key, STAFF_ROLE)
    students = []
    group_members = []
    staff = []
    for i in range(6):
        s = models.User(
            email=["partner" + str(i) + "@teamwork.com"],
        )
        s.put()
        models.Participant.add_role(s.key, course.key, STUDENT_ROLE)
        group_members += [s]
    for i in range(9):
        s = models.User(
            email=["student" + str(i) + "@student.com"],
        )
        s.put()
        models.Participant.add_role(s.key, course.key, STUDENT_ROLE)
        students += [s]
    for i in range(9):
        s = models.User(
            email=["grader" + str(i) + "@staff.com"],
        )
        s.put()
        models.Participant.add_role(s.key, course.key, STAFF_ROLE)
        staff += [s]
    k = models.User(
        email=["dummy2@admin.com"],
    )
    k.put()
    models.Participant.add_role(k.key, course.key, STAFF_ROLE)
    version = make_version('v1.3.0')
    version.put()
    version = make_version('v1.3.2')
    version.put()
    version = make_version('v1.3.15')
    version.put()
    # Put a few members on staff
    course.instructor.append(c.key)
    course.put()
    course.instructor.append(a.key)
    course.put()
    # Create a few assignments
    assignments = []
    for _ in range(4):
        assignments += [
            make_future_assignment(course, c),
            make_past_assignment(course, c),
            make_hw_assignment(course, c)
        ]
    for assign in assignments:
        assign.put()
    # NOTE: `assign` deliberately leaks from the loop above -- everything
    # below targets the last assignment created.
    # Create submissions
    subms = []
    # Group submission
    team1 = group_members[0:2]
    g1 = make_group(assign, team1)
    g1.put()
    team2 = group_members[2:4]
    g2 = make_invited_group(assign, team2)
    g2.put()
    team3 = group_members[4:6]
    g3 = make_group(assign, team3)
    g3.put()
    # Have each member in the group submit one
    for member in group_members:
        subm = make_seed_submission(assign, member)
        subm.put()
    # for member in group_members:
    #     subm = make_seed_scheme_submission(assign2, member)
    #     subm.put()
    group1_subm = make_seed_submission(assign, group_members[1])
    group1_subm.put()
    # Make team 1's submission final and score it.
    final = make_final_with_group(group1_subm, assign, group_members[1], g1)
    score_seed_submission(final, 2, "Nice job, group 1!", staff[8])
    subms.append(group1_subm)
    group3_subm = make_seed_submission(assign, group_members[5])
    group3_subm.put()
    # Make team 3's submission final and score it.
    final3 = make_final_with_group(group3_subm, assign, group_members[5], g3)
    score_seed_submission(final3, 1, "Awesome job, group 3!", staff[8])
    subms.append(group3_subm)
    # Make this one be a final submission though.
    # subm = make_seed_submission(assign, group_members[1], True)
    # subm.put()
    # subms.append(subm)
    # scheme final
    # subm = make_seed_scheme_submission(assign2, group_members[1], True)
    # subm.put()
    # Now create individual submissions
    for i in range(9):
        subm = make_seed_submission(assign, students[i])
        subm.put()
        # subms.append(subm)
        subm = make_seed_submission(assign, students[i], True)
        subm.put()
        subms.append(subm)
        # Make each individual submission final and score it.
        final = make_final(subm, assign, students[i])
        score_seed_submission(final, i, "Good job, student %s" % str(i), staff[i])
    # Seed a queue. This should be auto-generated.
    make_queue(assign, subms[:len(subms)//2], c)
    make_queue(assign, subms[len(subms)//2:], k)
    utils.add_to_grading_queues(assign.key)
| |
import pytest
from FeedUnit42v2 import Client, fetch_indicators, get_indicators_command, handle_multiple_dates_in_one_field, \
get_indicator_publication, get_attack_id_and_value_from_name, parse_indicators, parse_campaigns, \
parse_reports_and_report_relationships, create_attack_pattern_indicator, create_course_of_action_indicators, \
get_ioc_type, get_ioc_value, create_list_relationships, get_ioc_value_from_ioc_name, \
change_attack_pattern_to_stix_attack_pattern, DemistoException
from test_data.feed_data import INDICATORS_DATA, ATTACK_PATTERN_DATA, MALWARE_DATA, RELATIONSHIP_DATA, REPORTS_DATA, \
REPORTS_INDICATORS, ID_TO_OBJECT, INDICATORS_RESULT, CAMPAIGN_RESPONSE, CAMPAIGN_INDICATOR, COURSE_OF_ACTION_DATA, \
PUBLICATIONS, ATTACK_PATTERN_INDICATOR, COURSE_OF_ACTION_INDICATORS, RELATIONSHIP_OBJECTS, INTRUSION_SET_DATA, \
DUMMY_INDICATOR_WITH_RELATIONSHIP_LIST, STIX_ATTACK_PATTERN_INDICATOR, SUB_TECHNIQUE_INDICATOR, \
SUB_TECHNIQUE_DATA, INVALID_ATTACK_PATTERN_STRUCTURE
@pytest.mark.parametrize('command, args, response, length', [
    (get_indicators_command, {'limit': 2, 'indicators_type': 'indicator'}, INDICATORS_DATA, 2),
    (get_indicators_command, {'limit': 5, 'indicators_type': 'indicator'}, INDICATORS_DATA, 5)
])  # noqa: E124
def test_commands(command, args, response, length, mocker):
    """
    Given
    - get_indicators_command func
    - command args
    - command raw response
    When
    - mock the Client's get_stix_objects.
    Then
    - convert the result to human readable table
    - create the context
    validate the raw_response
    """
    client = Client(api_key='1234', verify=False)
    # Bypass the real API: every fetch returns the canned STIX response.
    mocker.patch.object(client, 'fetch_stix_objects_from_api', return_value=response)
    command_results = command(client, args)
    indicators = command_results.raw_response
    # The `limit` arg must cap how many indicators come back.
    assert len(indicators) == length
# Maps a STIX object type to the canned API response served by the mocked
# `fetch_stix_objects_from_api` in the fetch tests below.
TYPE_TO_RESPONSE = {
    'indicator': INDICATORS_DATA,
    'report': REPORTS_DATA,
    'attack-pattern': ATTACK_PATTERN_DATA,
    'malware': MALWARE_DATA,
    'campaign': CAMPAIGN_RESPONSE,
    'relationship': RELATIONSHIP_DATA,
    'course-of-action': COURSE_OF_ACTION_DATA,
    'intrusion-set': INTRUSION_SET_DATA
}

# Same mapping, but with a malformed attack-pattern payload, used to
# exercise the parsing-failure path.
# NOTE(review): "WIITH" is a typo; the name is referenced by a test below,
# so it is kept unchanged here.
TYPE_TO_RESPONSE_WIITH_INVALID_ATTACK_PATTERN_DATA = {
    'indicator': INDICATORS_DATA,
    'report': REPORTS_DATA,
    'attack-pattern': INVALID_ATTACK_PATTERN_STRUCTURE,
    'malware': MALWARE_DATA,
    'campaign': CAMPAIGN_RESPONSE,
    'relationship': RELATIONSHIP_DATA,
    'course-of-action': COURSE_OF_ACTION_DATA,
    'intrusion-set': INTRUSION_SET_DATA
}
def test_fetch_indicators_command(mocker):
    """
    Given
    - fetch incidents command
    - command args
    - command raw response
    When
    - mock the Client's get_stix_objects.
    Then
    - run the fetch incidents command using the Client
    Validate the amount of indicators fetched
    Validate that the dummy indicator with the relationships list fetched
    """
    def mock_get_stix_objects(test, **kwargs):
        # Mimics the real fetcher: store the canned response on the client,
        # keyed by the requested STIX object type.
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE[type_]

    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
    indicators = fetch_indicators(client, create_relationships=True)
    # 17 = the total number of indicators produced from all mocked types.
    assert len(indicators) == 17
    assert DUMMY_INDICATOR_WITH_RELATIONSHIP_LIST in indicators
def test_fetch_indicators_fails_on_invalid_attack_pattern_structure(mocker):
    """
    Given
    - Invalid attack pattern indicator structure
    When
    - fetching indicators
    Then
    - DemistoException is raised.
    """
    def mock_get_stix_objects(test, **kwargs):
        # Same shape as the happy-path mock, but serves the mapping whose
        # attack-pattern entry is structurally invalid.
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE_WIITH_INVALID_ATTACK_PATTERN_DATA[type_]

    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
    with pytest.raises(DemistoException, match=r"Failed parsing attack indicator"):
        fetch_indicators(client, create_relationships=True)
def test_get_attack_id_and_value_from_name_on_invalid_indicator():
    """
    Given
    - Invalid attack indicator structure
    When
    - parsing the indicator name.
    Then
    - DemistoException is raised.
    """
    malformed_indicator = {"name": "test"}  # lacks the "T<id>: <value>" form
    with pytest.raises(DemistoException, match=r"Failed parsing attack indicator"):
        get_attack_id_and_value_from_name(malformed_indicator)
def test_feed_tags_param(mocker):
    """
    Given
    - fetch incidents command
    - command args
    - command raw response
    When
    - mock the feed tags param.
    - mock the Client's get_stix_objects.
    Then
    - run the fetch incidents command using the Client
    Validate The value of the tags field.
    """
    def mock_get_stix_objects(test, **kwargs):
        # Serve the canned response for whichever STIX type is requested.
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE[type_]

    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
    indicators = fetch_indicators(client, ['test_tag'])
    # Configured feed tags must be merged with the indicator's own labels.
    assert set(indicators[0].get('fields').get('tags')) == {'malicious-activity', 'test_tag'}
@pytest.mark.parametrize('field_name, field_value, expected_result', [
    ('created', '2017-05-31T21:31:43.540Z', '2017-05-31T21:31:43.540Z'),
    ('created', '2019-04-25T20:53:07.719Z\n2019-04-25T20:53:07.814Z', '2019-04-25T20:53:07.719Z'),
    ('modified', '2017-05-31T21:31:43.540Z', '2017-05-31T21:31:43.540Z'),
    ('modified', '2020-03-16T15:38:37.650Z\n2020-01-17T16:45:24.252Z', '2020-03-16T15:38:37.650Z'),
])
def test_handle_multiple_dates_in_one_field(field_name, field_value, expected_result):
    """A created/modified field holding two newline-separated timestamps is
    normalized to a single date (earliest for created, latest for modified;
    a single timestamp passes through unchanged)."""
    normalized = handle_multiple_dates_in_one_field(field_name, field_value)
    assert normalized == expected_result
def test_get_indicator_publication():
    """
    Given
    - Indicator with external_reference field
    When
    - we extract this field to publications grid field
    Then
    - run the get_indicator_publication
    Validate The grid field extracted successfully.
    """
    publications = get_indicator_publication(ATTACK_PATTERN_DATA[0])
    assert publications == PUBLICATIONS
@pytest.mark.parametrize('indicator_name, expected_result', [
    ({"name": "T1564.004: NTFS File Attributes",
      "x_mitre_is_subtechnique": True,
      "x_panw_parent_technique_subtechnique": "Hide Artifacts: NTFS File Attributes"},
     ("T1564.004", "Hide Artifacts: NTFS File Attributes")),
    ({"name": "T1078: Valid Accounts"}, ("T1078", "Valid Accounts"))
])
def test_get_attack_id_and_value_from_name(indicator_name, expected_result):
    """The MITRE technique ID and display value are split out of the
    indicator's name field (sub-techniques use the parent-qualified value)."""
    attack_id_and_value = get_attack_id_and_value_from_name(indicator_name)
    assert attack_id_and_value == expected_result
def test_parse_indicators():
    """
    Given
    - list of IOCs in STIX format.
    When
    - we extract this IOCs list to Demisto format
    Then
    - run the parse_indicators
    - Validate The IOCs list extracted successfully.
    """
    parsed = parse_indicators(INDICATORS_DATA, [], '')
    assert parsed[0] == INDICATORS_RESULT
def test_parse_reports():
    """
    Given
    - list of reports in STIX format.
    When
    - we extract this reports list to Demisto format
    Then
    - run the parse_reports
    Validate The reports list extracted successfully.
    """
    parsed_reports = parse_reports_and_report_relationships(REPORTS_DATA, [], '')
    assert parsed_reports == REPORTS_INDICATORS
def test_parse_campaigns():
    """
    Given
    - list of campaigns in STIX format.
    When
    - we extract this campaigns list to Demisto format
    Then
    - run the parse_campaigns
    Validate The campaigns list extracted successfully.
    """
    parsed_campaigns = parse_campaigns(CAMPAIGN_RESPONSE, [], '')
    assert parsed_campaigns == CAMPAIGN_INDICATOR
def test_create_attack_pattern_indicator():
    """
    Given
    - list of IOCs in STIX format.
    When
    - we extract this attack pattern list to Demisto format
    Then
    - run the attack_pattern_indicator
    Validate The attack pattern list extracted successfully.
    """
    # is_up_to_6_2=True -> native Attack Pattern; False -> STIX flavor.
    native = create_attack_pattern_indicator(ATTACK_PATTERN_DATA, [], '', True)
    stix = create_attack_pattern_indicator(ATTACK_PATTERN_DATA, [], '', False)
    sub_technique = create_attack_pattern_indicator(SUB_TECHNIQUE_DATA, [], '', True)
    assert native == ATTACK_PATTERN_INDICATOR
    assert stix == STIX_ATTACK_PATTERN_INDICATOR
    assert sub_technique == SUB_TECHNIQUE_INDICATOR
def test_create_course_of_action_indicators():
    """
    Given
    - list of course of action in STIX format.
    When
    - we extract this course of action list to Demisto format
    Then
    - run the create_course_of_action_indicators
    Validate The course of action list extracted successfully.
    """
    result = create_course_of_action_indicators(COURSE_OF_ACTION_DATA, [], '')
    assert result == COURSE_OF_ACTION_INDICATORS
def test_get_ioc_type():
    """
    Given
    - IOC ID to get its type.
    When
    - we extract its type from the pattern field
    Then
    - run the get_ioc_type
    Validate The IOC type extracted successfully.
    """
    expected_types = {
        'indicator--01a5a209-b94c-450b-b7f9-946497d91055': 'IP',
        'indicator--fd0da09e-a0b2-4018-9476-1a7edd809b59': 'URL',
    }
    for ioc_id, ioc_type in expected_types.items():
        assert get_ioc_type(ioc_id, ID_TO_OBJECT) == ioc_type
def test_get_ioc_value():
    """
    Given
    - IOC ID to get its value.
    When
    - we extract its value from the name field
    Then
    - run the get_ioc_value
    Validate The IOC value extracted successfully.
    """
    expected_values = {
        'indicator--01a5a209-b94c-450b-b7f9-946497d91055': 'T111: Software Discovery',
        'indicator--fd0da09e-a0b2-4018-9476-1a7edd809b59': 'Deploy XSOAR Playbook',
        'report--0f86dccd-29bd-46c6-83fd-e79ba040bf0': '[Unit42 ATOM] Maze Ransomware',
        'attack-pattern--4bed873f-0b7d-41d4-b93a-b6905d1f90b0':
            "Virtualization/Sandbox Evasion: Time Based Evasion",
    }
    for ioc_id, value in expected_values.items():
        assert get_ioc_value(ioc_id, ID_TO_OBJECT) == value
def test_create_list_relationships():
    """
    Given
    - list of relationships in STIX format.
    When
    - we extract this relationships list to Demisto format
    Then
    - run the create_list_relationships
    Validate The relationships list extracted successfully.
    """
    relationships = create_list_relationships(RELATIONSHIP_DATA, ID_TO_OBJECT)
    assert relationships == RELATIONSHIP_OBJECTS
def test_get_ioc_value_from_ioc_name():
    """
    Given
    - IOC obj to get its value.
    When
    - we extract its value from the name field
    Then
    - run the get_ioc_value
    Validate The IOC value extracted successfully.
    """
    # STIX pattern containing both file names and a SHA-256 hash: the hash
    # is the value that should be extracted.
    ioc_obj = {'name': "([file:name = 'blabla' OR file:name = 'blabla'] AND "
                       "[file:hashes.'SHA-256' = '4f75622c2dd839f'])"}
    assert get_ioc_value_from_ioc_name(ioc_obj) == "4f75622c2dd839f"
def test_change_attack_pattern_to_stix_attack_pattern():
    """An attack-pattern indicator is converted to its STIX form: the type
    gains a 'STIX ' prefix and the kill-chain/description fields gain a
    'stix' prefix."""
    original = {"type": "ind",
                "fields": {"killchainphases": "kill chain", "description": "des"}}
    expected = {"type": "STIX ind",
                "fields": {"stixkillchainphases": "kill chain", "stixdescription": "des"}}
    assert change_attack_pattern_to_stix_attack_pattern(original) == expected
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access storage provider.
For 1), we aim to simulate one typical use case of common user using storage
provider: upload and downloads a set of files with different sizes from/to a
local directory.
For 2), we aim to measure more directly the performance of a storage provider
by accessing them via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
Documentation: https://goto.google.com/perfkitbenchmarker-storage
"""
import json
import logging
import os
import re
import time
from perfkitbenchmarker import benchmark_spec as benchmark_spec_class
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.sample import PercentileCalculator # noqa
# Command-line flags selecting the provider and which sub-benchmarks to run.
flags.DEFINE_enum('storage', benchmark_spec_class.GCP,
                  [benchmark_spec_class.GCP, benchmark_spec_class.AWS,
                   benchmark_spec_class.AZURE],
                  'storage provider (GCP/AZURE/AWS) to use.')
flags.DEFINE_enum('object_storage_scenario', 'all',
                  ['all', 'cli', 'api_data', 'api_namespace'],
                  'select all, or one particular scenario to run: \n'
                  'ALL: runs all scenarios. This is the default. \n'
                  'cli: runs the command line only scenario. \n'
                  'api_data: runs API based benchmarking for data paths. \n'
                  'api_namespace: runs API based benchmarking for namespace '
                  'operations.')
flags.DEFINE_string('object_storage_credential_file', None,
                    'Directory of credential file.')
flags.DEFINE_string('boto_file_location', None,
                    'The location of the boto file.')
FLAGS = flags.FLAGS
# Use a scratch disk here to simulate what most users would do when they
# use CLI tools to interact with the storage provider.
BENCHMARK_INFO = {'name': 'object_storage_service',
                  'description':
                  'Object/blob storage service benchmarks. Specify '
                  '--object_storage_scenario '
                  'to select a set of sub-benchmarks to run. default is all.',
                  'scratch_disk': True,
                  'num_machines': 1}
# Per-provider credential locations (relative to $HOME) on the benchmark VM.
AWS_CREDENTIAL_LOCATION = '.aws'
GCE_CREDENTIAL_LOCATION = '.config/gcloud'
AZURE_CREDENTIAL_LOCATION = '.azure'
DEFAULT_BOTO_LOCATION = '~/.boto'
OBJECT_STORAGE_CREDENTIAL_DEFAULT_LOCATION = {
    benchmark_spec_class.GCP: '~/' + GCE_CREDENTIAL_LOCATION,
    benchmark_spec_class.AWS: '~/' + AWS_CREDENTIAL_LOCATION,
    benchmark_spec_class.AZURE: '~/' + AZURE_CREDENTIAL_LOCATION}
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
# The default number of iterations to run for the list consistency benchmark.
LIST_CONSISTENCY_ITERATIONS = 200
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p1', 'p5', 'p50', 'p90', 'p99', 'p99.9', 'average',
                    'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 100
# Fraction of CLI iterations that may fail before the run is considered bad.
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. We have to
# do the uploads or downloads of 100 test files sequentially, it takes
# a very long time for each iteration, so we are doing only 3 iterations.
CLI_TEST_ITERATION_COUNT_AZURE = 3
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Some times even when a bucket is completely empty, the service provider would
# refuse to remove the bucket with "BucketNotEmpty" error until up to 1 hour
# later. We keep trying until we reach the one-hour limit. And this wait is
# necessary for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
DEFAULT_GCS_REGION = 'US-CENTRAL1'
def GetInfo():
  """Return the static benchmark description dictionary."""
  return BENCHMARK_INFO
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
  """Bucket (or its content) could not be removed after repeated retries."""
  pass
class NotEnoughResultsError(Exception):
  """Too many CLI iterations failed to produce the required result count."""
  pass
def _JsonStringToPercentileResults(results, json_input, metric_name,
                                   metric_unit, metadata):
  """Parse a JSON percentile string and append one sample per percentile.

  Args:
    results: The final result set to put result in.
    json_input: The input in Json format about percentiles.
    metric_name: Name of the metric.
    metric_unit: Unit of the metric.
    metadata: The metadata to be included.
  """
  parsed = json.loads(json_input)
  for pct in PERCENTILES_LIST:
    results.append(sample.Sample('%s %s' % (metric_name, pct),
                                 float(parsed[pct]),
                                 metric_unit,
                                 metadata))
def _MakeAzureCommandSuffix(account_name, account_key, for_cli):
""" This function returns a suffix for Azure command.
Args:
account_name: The name of the Azure storage account.
account_key: The key to access the account.
for_cli: If true, the suffix can be passed to the Azure cli tool; if false,
the suffix created will be used to call our own test script for api-based
tests.
returns:
A string represents a command suffix.
"""
if for_cli:
return (' -a %s -k %s') % (account_name, account_key)
else:
return (' --azure_account=%s --azure_key=%s') % (account_name, account_key)
def ApiBasedBenchmarks(results, metadata, vm, storage, test_script_path,
                       bucket_name, regional_bucket_name=None,
                       azure_command_suffix=None):
  """This function contains all api-based benchmarks.

  It uses the value of the global flag "object_storage_scenario" to
  decide which scenario to run inside this function. The caller simply
  invokes this function without having to worry about which scenario to
  select.

  Args:
    vm: The vm being used to run the benchmark.
    results: The results array to append to.
    storage: The storage provider to run: S3 or GCS or Azure.
    test_script_path: The complete path to the test script on the target VM.
    bucket_name: The name of the bucket caller has created for this test.
    regional_bucket_name: The name of the "regional" bucket, if applicable.
    azure_command_suffix: A suffix for all Azure related test commands.

  Raises:
    ValueError: unexpected test outcome is found from the API test script.
  """
  if FLAGS.object_storage_scenario == 'cli':
    # User only wants to run the CLI based tests, do nothing here:
    return
  if (FLAGS.object_storage_scenario == 'all' or
      FLAGS.object_storage_scenario == 'api_data'):
    # One byte RW latency. Run against the main bucket and, when supplied,
    # the regional bucket as well (samples get a 'regional ' prefix).
    buckets = [bucket_name]
    if regional_bucket_name is not None:
      buckets.append(regional_bucket_name)
    for bucket in buckets:
      one_byte_rw_cmd = ('%s --bucket=%s --storage_provider=%s '
                         '--scenario=OneByteRW') % (
                             test_script_path, bucket, storage)
      if azure_command_suffix is not None:
        one_byte_rw_cmd = ('%s %s') % (one_byte_rw_cmd, azure_command_suffix)
      _, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
      logging.info('OneByteRW raw result is %s' % raw_result)
      # The test script prints one JSON percentile dict per direction;
      # scrape each out of the raw output.
      for up_and_down in ['upload', 'download']:
        search_string = 'One byte %s - (.*)' % up_and_down
        result_string = re.findall(search_string, raw_result)
        sample_name = ONE_BYTE_LATENCY % up_and_down
        if bucket == regional_bucket_name:
          sample_name = 'regional %s' % sample_name
        if len(result_string) > 0:
          _JsonStringToPercentileResults(results,
                                         result_string[0],
                                         sample_name,
                                         LATENCY_UNIT,
                                         metadata)
        else:
          raise ValueError('Unexpected test outcome from OneByteRW api test: '
                           '%s.' % raw_result)
    # Single stream large object throughput metrics (main bucket only).
    single_stream_throughput_cmd = ('%s --bucket=%s --storage_provider=%s '
                                    '--scenario=SingleStreamThroughput') % (
                                        test_script_path,
                                        bucket_name,
                                        storage)
    if azure_command_suffix is not None:
      single_stream_throughput_cmd = ('%s %s') % (
          single_stream_throughput_cmd, azure_command_suffix)
    _, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
    logging.info('SingleStreamThroughput raw result is %s' % raw_result)
    for up_and_down in ['upload', 'download']:
      search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
      result_string = re.findall(search_string, raw_result)
      sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
      if len(result_string) > 0:
        # Convert Bytes per second to Mega bits per second
        # We use MB (10^6) to be consistent with network
        # bandwidth convention.
        result = json.loads(result_string[0])
        for percentile in PERCENTILES_LIST:
          results.append(sample.Sample(
              ('%s %s') % (sample_name, percentile),
              8 * float(result[percentile]) / 1000 / 1000,
              THROUGHPUT_UNIT,
              metadata))
      else:
        raise ValueError('Unexpected test outcome from '
                         'SingleStreamThroughput api test: %s.' % raw_result)
  if (FLAGS.object_storage_scenario == 'all' or
      FLAGS.object_storage_scenario == 'api_namespace'):
    # list-after-write consistency metrics
    list_consistency_cmd = ('%s --bucket=%s --storage_provider=%s '
                            '--iterations=%d --scenario=ListConsistency') % (
                                test_script_path,
                                bucket_name,
                                storage,
                                LIST_CONSISTENCY_ITERATIONS)
    if azure_command_suffix is not None:
      list_consistency_cmd = ('%s %s') % (list_consistency_cmd,
                                          azure_command_suffix)
    _, raw_result = vm.RemoteCommand(list_consistency_cmd)
    logging.info('ListConsistency raw result is %s' % raw_result)
    for scenario in LIST_CONSISTENCY_SCENARIOS:
      # The consistency percentage is mandatory in the script output.
      metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
      search_string = '%s: (.*)' % metric_name
      result_string = re.findall(search_string, raw_result)
      if len(result_string) > 0:
        results.append(sample.Sample(metric_name,
                                     (float)(result_string[0]),
                                     NA_UNIT,
                                     metadata))
      else:
        raise ValueError(
            'Cannot get percentage from ListConsistency test.')
      # Parse the list inconsistency window if there is any.
      # (Optional: only present when some lists were inconsistent.)
      metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
      search_string = '%s: (.*)' % metric_name
      result_string = re.findall(search_string, raw_result)
      if len(result_string) > 0:
        _JsonStringToPercentileResults(results,
                                       result_string[0],
                                       metric_name,
                                       LATENCY_UNIT,
                                       metadata)
      # Also report the list latency. These latencies are from the lists
      # that were consistent.
      metric_name = '%s %s' % (scenario, LIST_LATENCY)
      search_string = '%s: (.*)' % metric_name
      result_string = re.findall(search_string, raw_result)
      if len(result_string) > 0:
        _JsonStringToPercentileResults(results,
                                       result_string[0],
                                       metric_name,
                                       LATENCY_UNIT,
                                       metadata)
def DeleteBucketWithRetry(vm, remove_content_cmd, remove_bucket_cmd):
  """Delete a bucket and all its contents robustly.

  First we try to recursively delete its content with retries; if that
  ultimately fails we still attempt to remove the bucket. Due to eventual
  consistency issues, some providers may keep reporting the bucket as
  non-empty for a while, so bucket removal gets many more retries than
  content removal.

  Args:
    vm: the vm to run the command.
    remove_content_cmd: the command line to run to remove objects in the
        bucket.
    remove_bucket_cmd: the command line to run to remove the empty bucket.

  Raises:
    BucketRemovalError: when we failed multiple times to remove the content
        or the bucket itself.
  """
  for cmd, retry_limit in ((remove_content_cmd, CONTENT_REMOVAL_RETRY_LIMIT),
                           (remove_bucket_cmd, BUCKET_REMOVAL_RETRY_LIMIT)):
    succeeded = False
    logging.info('Performing removal action, cmd is %s', cmd)
    for attempt in range(retry_limit):
      try:
        vm.RemoteCommand(cmd)
        succeeded = True
        logging.info('Successfully performed the removal operation.')
        break
      except Exception as e:
        logging.error('Failed to perform the removal op. Number '
                      'of attempts: %d. Error is %s', attempt + 1, e)
        # Give the provider time to converge before the next attempt.
        time.sleep(RETRY_WAIT_INTERVAL_SECONDS)
    if not succeeded:
      if cmd is remove_content_cmd:
        logging.error('Exceeded max retry limit for removing the content of '
                      'bucket. But we will try to delete the bucket anyway.')
      else:
        logging.error('Exceeded max retry limit for removing the empty bucket')
        raise BucketRemovalError('Failed to remove the bucket')
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # Resolving the workload script's path raises if it cannot be found.
  data.ResourcePath(DATA_FILE)
def _AppendPercentilesToResults(output_results, input_results, metric_name,
                                metric_unit, metadata):
  """Summarize raw values into percentile samples and append them.

  Args:
    output_results: The collection that receives the new samples.
    input_results: The raw values to summarize.
    metric_name: Base name for each emitted percentile metric.
    metric_unit: Unit attached to every emitted sample.
    metadata: Metadata attached to every emitted sample.
  """
  percentiles = PercentileCalculator(input_results)
  output_results.extend(
      sample.Sample('%s %s' % (metric_name, pct), percentiles[pct],
                    metric_unit, metadata)
      for pct in PERCENTILES_LIST)
def _CliBasedTests(output_results, metadata, vm, iteration_count,
                   clean_up_bucket_cmd, upload_cmd,
                   cleanup_local_temp_cmd, download_cmd):
  """Performs tests via cli tools.

  We will upload and download a set of files from/to a local directory via
  cli tools and observe the throughput.

  Args:
    output_results: The collection to put results in.
    metadata: The metadata to be included in the result.
    vm: The vm to run the tests.
    iteration_count: The number of iterations to run for this test.
    clean_up_bucket_cmd: The command to run to cleanup the bucket.
    upload_cmd: The command to run to upload the objects.
    cleanup_local_temp_cmd: The command to run to cleanup the local temp dir.
    download_cmd: The command to run to download the content.

  Raises:
    NotEnoughResultsError: if we failed too many times to upload or download.
  """
  if (FLAGS.object_storage_scenario != 'all' and
      FLAGS.object_storage_scenario != 'cli'):
    # User does not want to run this scenario, do nothing.
    return
  # CLI tool based tests.
  cli_upload_results = []
  cli_download_results = []
  for _ in range(iteration_count):
    vm.RemoteCommand(clean_up_bucket_cmd, ignore_failure=True)
    upload_successful = False
    try:
      _, res = vm.RemoteCommand(upload_cmd)
      upload_successful = True
    except Exception:
      # A bare "except:" here would also swallow KeyboardInterrupt and
      # SystemExit; catching Exception keeps the best-effort behavior
      # without masking interpreter-level exits.
      logging.info('failed to upload, skip this iteration.')
    if upload_successful:
      logging.debug(res)
      throughput = DATA_SIZE_IN_MBITS / vm_util.ParseTimeCommandResult(res)
      # Output some log traces to show we are making progress
      logging.info('cli upload throughput %f', throughput)
      cli_upload_results.append(throughput)
      # Only attempt a download when the upload succeeded, so the bucket
      # actually contains the test files.
      download_successful = False
      vm.RemoteCommand(cleanup_local_temp_cmd, ignore_failure=True)
      try:
        _, res = vm.RemoteCommand(download_cmd)
        download_successful = True
      except Exception:
        logging.info('failed to download, skip this iteration.')
      if download_successful:
        logging.debug(res)
        throughput = DATA_SIZE_IN_MBITS / vm_util.ParseTimeCommandResult(res)
        logging.info('cli download throughput %f', throughput)
        cli_download_results.append(throughput)
  # Abort if more than the tolerated fraction of iterations failed.
  expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
  if (len(cli_download_results) < expected_successes or
      len(cli_upload_results) < expected_successes):
    raise NotEnoughResultsError('Failed to complete the required number of '
                                'iterations.')
  # Report various percentiles.
  _AppendPercentilesToResults(output_results,
                              cli_upload_results,
                              UPLOAD_THROUGHPUT_VIA_CLI,
                              THROUGHPUT_UNIT,
                              metadata)
  _AppendPercentilesToResults(output_results,
                              cli_download_results,
                              DOWNLOAD_THROUGHPUT_VIA_CLI,
                              THROUGHPUT_UNIT,
                              metadata)
class S3StorageBenchmark(object):
  """S3 version of storage benchmark."""

  def Prepare(self, vm):
    """Prepare vm with AWS s3 tool and create a bucket using vm.

    Documentation: http://aws.amazon.com/cli/

    Args:
      vm: The vm being used to run the benchmark.
    """
    vm.RemoteCommand('sudo pip install awscli')
    # Push the credentials and boto config needed by both the CLI tool and
    # the api test script.
    vm.PushFile(FLAGS.object_storage_credential_file, AWS_CREDENTIAL_LOCATION)
    vm.PushFile(FLAGS.boto_file_location, DEFAULT_BOTO_LOCATION)
    self.bucket_name = 'pkb%s' % FLAGS.run_uri
    vm.RemoteCommand(
        'aws s3 mb s3://%s --region=us-east-1' % self.bucket_name)

  def Run(self, vm, metadata):
    """Run upload/download on vm with s3 tool.

    Args:
      vm: The vm being used to run the benchmark.
      metadata: the metadata to be stored with the results.

    Returns:
      A list of result lists. Each scenario contributes entries of the form
      (scenario name, value, unit, metadata), e.g.
      'one byte object upload latency p50', 0.800, 'seconds', 'storage=gcs'.
    """
    results = []
    scratch_dir = vm.GetScratchDir()
    bucket = self.bucket_name
    # CLI tool based tests.
    clean_up_bucket_cmd = 'aws s3 rm s3://%s --recursive' % bucket
    upload_cmd = 'time aws s3 sync %s/run/data/ s3://%s/' % (scratch_dir,
                                                             bucket)
    cleanup_local_temp_cmd = 'rm %s/run/temp/*' % scratch_dir
    download_cmd = 'time aws s3 sync s3://%s/ %s/run/temp/' % (bucket,
                                                               scratch_dir)
    _CliBasedTests(results, metadata, vm, CLI_TEST_ITERATION_COUNT,
                   clean_up_bucket_cmd, upload_cmd, cleanup_local_temp_cmd,
                   download_cmd)
    # Now tests the storage provider via APIs
    test_script_path = '%s/run/%s' % (scratch_dir, API_TEST_SCRIPT)
    ApiBasedBenchmarks(results, metadata, vm, 'S3', test_script_path, bucket)
    return results

  def Cleanup(self, vm):
    """Clean up S3 bucket and uninstall packages on vm.

    Args:
      vm: The vm needs cleanup.
    """
    bucket = self.bucket_name
    DeleteBucketWithRetry(vm,
                          'aws s3 rm s3://%s --recursive' % bucket,
                          'aws s3 rb s3://%s' % bucket)
    for package in ('awscli', 'python-gflags'):
      vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall %s' % package)
class AzureBlobStorageBenchmark(object):
  """Azure Blob version of storage benchmark."""

  def Prepare(self, vm):
    """Prepare vm with Azure CLI tool and create a storage container using vm.

    Documentation: http://azure.microsoft.com/en-us/documentation/articles/
    xplat-cli/

    Args:
      vm: The vm being used to run the benchmark.
    """
    # The xplat Azure CLI is a node.js package.
    vm.Install('node_js')
    vm.RemoteCommand('sudo npm install azure-cli -g')
    vm.PushFile(FLAGS.object_storage_credential_file, AZURE_CREDENTIAL_LOCATION)
    # Create a ZRS storage account named after the run uri.
    vm.RemoteCommand(
        'azure storage account create --type ZRS -l \'East US\' ''"pkb%s"' %
        (FLAGS.run_uri), ignore_failure=False)
    vm.azure_account = ('pkb%s' % FLAGS.run_uri)
    # Scrape the primary access key out of the CLI's key listing output.
    output, _ = (
        vm.RemoteCommand(
            'azure storage account keys list %s' %
            vm.azure_account))
    key = re.findall(r'Primary:* (.+)', output)
    vm.azure_key = key[0]
    azure_command_suffix = _MakeAzureCommandSuffix(vm.azure_account,
                                                   vm.azure_key,
                                                   True)
    vm.RemoteCommand('azure storage container create pkb%s %s' % (
        FLAGS.run_uri, azure_command_suffix))
    self.bucket_name = 'pkb%s' % FLAGS.run_uri
    # Sanity check: listing the freshly created (empty) container.
    vm.RemoteCommand('azure storage blob list %s %s' % (
        self.bucket_name, azure_command_suffix))

  def Run(self, vm, metadata):
    """Run upload/download on vm with Azure CLI tool.

    Args:
      vm: The vm being used to run the benchmark.
      metadata: the metadata to be stored with the results.

    Returns:
      A list of lists containing results of the tests. Each scenario outputs
      results to a list of the following format:
        name of the scenario, value, unit of the value, metadata
        e.g.,
        'one byte object upload latency p50', 0.800, 'seconds', 'storage=gcs'
      Then the final return value is the list of the above list that reflect
      the results of all scenarios run here.
    """
    results = []
    # CLI tool based tests.
    scratch_dir = vm.GetScratchDir()
    test_script_path = '%s/run/%s' % (scratch_dir, API_TEST_SCRIPT)
    # Empty the container via the api test script's CleanupBucket scenario.
    cleanup_bucket_cmd = ('%s --bucket=%s --storage_provider=AZURE '
                          ' --scenario=CleanupBucket %s' %
                          (test_script_path,
                           self.bucket_name,
                           _MakeAzureCommandSuffix(vm.azure_account,
                                                   vm.azure_key,
                                                   False)))
    # The Azure CLI transfers one blob per invocation, hence the shell loop
    # over the 100 test files (see CLI_TEST_ITERATION_COUNT_AZURE comment).
    upload_cmd = ('time for i in {0..99}; do azure storage blob upload '
                  '%s/run/data/file-$i.dat %s %s; done' %
                  (scratch_dir,
                   self.bucket_name,
                   _MakeAzureCommandSuffix(vm.azure_account,
                                           vm.azure_key,
                                           True)))
    cleanup_local_temp_cmd = 'rm %s/run/temp/*' % scratch_dir
    download_cmd = ('time for i in {0..99}; do azure storage blob download '
                    '%s file-$i.dat %s/run/temp/file-$i.dat %s; done' % (
                        self.bucket_name,
                        scratch_dir,
                        _MakeAzureCommandSuffix(vm.azure_account,
                                                vm.azure_key,
                                                True)))
    _CliBasedTests(results, metadata, vm, CLI_TEST_ITERATION_COUNT_AZURE,
                   cleanup_bucket_cmd, upload_cmd, cleanup_local_temp_cmd,
                   download_cmd)
    ApiBasedBenchmarks(results, metadata, vm, 'AZURE', test_script_path,
                       self.bucket_name, regional_bucket_name=None,
                       azure_command_suffix=_MakeAzureCommandSuffix(
                           vm.azure_account, vm.azure_key, False))
    return results

  def Cleanup(self, vm):
    """Clean up Azure storage container and uninstall packages on vm.

    Args:
      vm: The vm needs cleanup.
    """
    test_script_path = '%s/run/%s' % (vm.GetScratchDir(), API_TEST_SCRIPT)
    remove_content_cmd = ('%s --bucket=%s --storage_provider=AZURE '
                          ' --scenario=CleanupBucket %s' %
                          (test_script_path, self.bucket_name,
                           _MakeAzureCommandSuffix(vm.azure_account,
                                                   vm.azure_key,
                                                   False)))
    remove_bucket_cmd = ('azure storage container delete -q %s %s' % (
        self.bucket_name,
        _MakeAzureCommandSuffix(vm.azure_account,
                                vm.azure_key,
                                True)))
    DeleteBucketWithRetry(vm, remove_content_cmd, remove_bucket_cmd)
    # Finally drop the storage account created in Prepare.
    vm.RemoteCommand('azure storage account delete -q pkb%s' %
                     FLAGS.run_uri)
class GoogleCloudStorageBenchmark(object):
  """Google Cloud Storage version of storage benchmark."""

  def Prepare(self, vm):
    """Prepare vm with gsutil tool and create a bucket using vm.

    Args:
      vm: The vm being used to run the benchmark.
    """
    # Install the Google Cloud SDK from the release tarball.
    vm.Install('wget')
    vm.RemoteCommand(
        'wget '
        'https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz')
    vm.RemoteCommand('tar xvf google-cloud-sdk.tar.gz')
    vm.RemoteCommand('bash ./google-cloud-sdk/install.sh '
                     '--disable-installation-options '
                     '--usage-report=false '
                     '--rc-path=.bash_profile '
                     '--path-update=true '
                     '--bash-completion=true')
    try:
      vm.RemoteCommand('mkdir .config')
    except errors.VirtualMachine.RemoteCommandError:
      # If ran on existing machines, .config folder may already exists.
      pass
    vm.PushFile(FLAGS.object_storage_credential_file, '.config/')
    vm.PushFile(FLAGS.boto_file_location, DEFAULT_BOTO_LOCATION)
    # Resolve gsutil's full path via a login shell so PATH updates from
    # install.sh are picked up; keep only the first token of the output.
    vm.gsutil_path, _ = vm.RemoteCommand('which gsutil', login_shell=True)
    vm.gsutil_path = vm.gsutil_path.split()[0]
    self.bucket_name = 'pkb%s' % FLAGS.run_uri
    vm.RemoteCommand('%s mb gs://%s' % (vm.gsutil_path, self.bucket_name))
    # A second, explicitly regional DRA bucket for the regional samples.
    self.regional_bucket_name = '%s-%s' % (self.bucket_name,
                                           DEFAULT_GCS_REGION.lower())
    vm.RemoteCommand('%s mb -c DRA -l %s gs://%s' % (vm.gsutil_path,
                                                     DEFAULT_GCS_REGION,
                                                     self.regional_bucket_name))

  def Run(self, vm, metadata):
    """Run upload/download on vm with gsutil tool.

    Args:
      vm: The vm being used to run the benchmark.
      metadata: the metadata to be stored with the results.

    Returns:
      A list of lists containing results of the tests. Each scenario outputs
      results to a list of the following format:
        name of the scenario, value, unit of the value, metadata
        e.g.,
        'one byte object upload latency p50', 0.800, 'seconds', 'storage=gcs'
      Then the final return value is the list of the above list that reflect
      the results of all scenarios run here.
    """
    results = []
    # CLI tool based tests.
    scratch_dir = vm.GetScratchDir()
    clean_up_bucket_cmd = '%s rm gs://%s/*' % (vm.gsutil_path, self.bucket_name)
    upload_cmd = 'time %s -m cp %s/run/data/* gs://%s/' % (vm.gsutil_path,
                                                           scratch_dir,
                                                           self.bucket_name)
    cleanup_local_temp_cmd = 'rm %s/run/temp/*' % scratch_dir
    download_cmd = 'time %s -m cp gs://%s/* %s/run/temp/' % (vm.gsutil_path,
                                                             self.bucket_name,
                                                             scratch_dir)
    _CliBasedTests(results, metadata, vm, CLI_TEST_ITERATION_COUNT,
                   clean_up_bucket_cmd, upload_cmd, cleanup_local_temp_cmd,
                   download_cmd)
    # API-based benchmarking of GCS
    test_script_path = '%s/run/%s' % (scratch_dir, API_TEST_SCRIPT)
    ApiBasedBenchmarks(results, metadata, vm, 'GCS', test_script_path,
                       self.bucket_name, self.regional_bucket_name)
    return results

  def Cleanup(self, vm):
    """Clean up Google Cloud Storage bucket and uninstall packages on vm.

    Args:
      vm: The vm needs cleanup.
    """
    # Both the default and the regional bucket were created in Prepare.
    for bucket in [self.bucket_name, self.regional_bucket_name]:
      remove_content_cmd = '%s -m rm -r gs://%s/*' % (vm.gsutil_path,
                                                      bucket)
      remove_bucket_cmd = '%s rb gs://%s' % (vm.gsutil_path, bucket)
      DeleteBucketWithRetry(vm, remove_content_cmd, remove_bucket_cmd)
    vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall python-gflags')
# Maps each supported provider constant to its benchmark implementation.
OBJECT_STORAGE_BENCHMARK_DICTIONARY = {
    benchmark_spec_class.GCP: GoogleCloudStorageBenchmark(),
    benchmark_spec_class.AWS: S3StorageBenchmark(),
    benchmark_spec_class.AZURE: AzureBlobStorageBenchmark()}
def Prepare(benchmark_spec):
  """Prepare vm with cloud provider tool and prepare vm with data file.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vms = benchmark_spec.vms
  # Resolve the credential file: fall back to the provider default, then
  # expand '~' before validating that it exists.
  if not FLAGS.object_storage_credential_file:
    FLAGS.object_storage_credential_file = (
        OBJECT_STORAGE_CREDENTIAL_DEFAULT_LOCATION[
            FLAGS.storage])
  FLAGS.object_storage_credential_file = os.path.expanduser(
      FLAGS.object_storage_credential_file)
  if not (
      os.path.isfile(FLAGS.object_storage_credential_file) or os.path.isdir(
          FLAGS.object_storage_credential_file)):
    raise errors.Benchmarks.MissingObjectCredentialException(
        'Credential cannot be found in %s',
        FLAGS.object_storage_credential_file)
  if not FLAGS.boto_file_location:
    FLAGS.boto_file_location = DEFAULT_BOTO_LOCATION
  FLAGS.boto_file_location = os.path.expanduser(FLAGS.boto_file_location)
  if not os.path.isfile(FLAGS.boto_file_location):
    # NOTE(review): identity comparison ("is not") on flag value vs. the
    # AZURE constant — presumably both are the same interned string; confirm.
    if FLAGS.storage is not benchmark_spec_class.AZURE:
      raise errors.Benchmarks.MissingObjectCredentialException(
          'Boto file cannot be found in %s but it is required for gcs or s3.',
          FLAGS.boto_file_location)
  vms[0].Install('pip')
  vms[0].RemoteCommand('sudo pip install python-gflags==2.0')
  vms[0].RemoteCommand('sudo pip install azure')
  vms[0].Install('gcs_boto_plugin')
  # Provider-specific setup (CLI tool install, bucket/container creation).
  OBJECT_STORAGE_BENCHMARK_DICTIONARY[FLAGS.storage].Prepare(vms[0])
  # We would like to always cleanup server side states when exception happens.
  benchmark_spec.always_call_cleanup = True
  # Prepare data on vm, create a run directory on scratch drive, and add
  # permission.
  scratch_dir = vms[0].GetScratchDir()
  vms[0].RemoteCommand('sudo mkdir %s/run/' % scratch_dir)
  vms[0].RemoteCommand('sudo chmod 777 %s/run/' % scratch_dir)
  vms[0].RemoteCommand('sudo mkdir %s/run/temp/' % scratch_dir)
  vms[0].RemoteCommand('sudo chmod 777 %s/run/temp/' % scratch_dir)
  file_path = data.ResourcePath(DATA_FILE)
  vms[0].PushFile(file_path, '%s/run/' % scratch_dir)
  api_test_script_path = data.ResourcePath(API_TEST_SCRIPT)
  vms[0].PushFile(api_test_script_path, '%s/run/' % scratch_dir)
def Run(benchmark_spec):
  """Run storage benchmark and publish results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    Total throughput in the form of tuple. The tuple contains
        the sample metric (string), value (float), unit (string).
  """
  logging.info('Start benchmarking object storage service, '
               'scenario is %s, storage provider is %s.',
               FLAGS.object_storage_scenario, FLAGS.storage)
  metadata = {'storage provider': FLAGS.storage}
  vms = benchmark_spec.vms
  # The client tool based tests requires some provisioning on the VMs first.
  vms[0].RemoteCommand(
      'cd %s/run/; bash cloud-storage-workload.sh' % vms[0].GetScratchDir())
  results = OBJECT_STORAGE_BENCHMARK_DICTIONARY[FLAGS.storage].Run(vms[0],
                                                                   metadata)
  # Log results through the configured handlers instead of the leftover
  # bare "print results" (which also only parsed on Python 2).
  logging.info('Results: %s', results)
  return results
def Cleanup(benchmark_spec):
  """Clean up storage bucket/container and clean up vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm = benchmark_spec.vms[0]
  # Provider-specific removal of buckets/containers, then drop the run dir.
  OBJECT_STORAGE_BENCHMARK_DICTIONARY[FLAGS.storage].Cleanup(vm)
  vm.RemoteCommand('rm -rf %s/run/' % vm.GetScratchDir())
| |
from __future__ import print_function
import sys; sys.path.append('contrib')
import traceback as tb
import inspect
import marshal
import pickle
import os
from random import randint
"""
Distant - RPC over SSH with Public Key Authentication
version: 2019-01-04
"""
# TODO rename contrib ??? -> site_pkg -> site_repo -> pkg_repo -> repo
# ---[ API LAYER ]--------------------------------------------------------------
def setup(host, cfg=None, verbose=True):
    """Initialize a remote host (create remote dirs, sync contrib) without a job.

    Note: the signature previously used a mutable default argument (cfg={});
    None is the safe equivalent and is treated identically by call().
    """
    return call(host, None, None, cfg=cfg if cfg is not None else {},
                verbose=verbose, setup=True)
def call(host, fun, data, cfg=None, verbose=False, setup=False):
    """Call fun(data) on remote host and return the results or raise the exception"""
    if cfg is None:
        # Avoid the shared-mutable-default pitfall of "cfg={}".
        cfg = {}
    ssh = cfg_get(cfg, 'ssh', 'ssh', host=host)
    rsync = cfg_get(cfg, 'rsync', 'rsync -vr', host=host)
    full_host = cfg_get_host(cfg, host)
    remote_contrib = cfg_get(cfg, 'remote_contrib')
    local_contrib = cfg.get('local_contrib')
    work = cfg.get('work')
    if fun:
        # Lambdas cannot be reconstructed from source on the remote side.
        assert fun.__name__ != '<lambda>'
        name = str(time()).replace('.','_')
        path = '{}/{}_{}_{}.job'.format(work,host,name,randint(100000,999999))
        save_job(path, fun, data)
    else:
        path = None
    return remote_run(path, full_host,
                      ssh=ssh, rsync=rsync,
                      remote_contrib=remote_contrib,
                      local_contrib=local_contrib,
                      verbose=verbose, setup=setup)
# ---[ INTERNAL LAYER ]---------------------------------------------------------
def remote_run(path, host,
               ssh, rsync,
               remote_contrib=None,
               local_contrib=None,
               verbose=False,
               setup=False,
               remove_local_files=True):
    """Ship a saved job file to *host* over ssh, run it there, load the result.

    When *setup* is true the remote contrib directory is created and synced
    from *local_contrib* first. When *path* is None only that initialization
    is performed. On success returns the remote function's result; if the
    remote side reported an exception, prints it and raises Exception.
    """
    assert remote_contrib
    assert ssh
    assert host
    # init host
    if setup:
        assert local_contrib
        assert rsync
        cmd = ssh+' '+host+' mkdir -p '+remote_contrib
        run_cmd(cmd, verbose=verbose)
        # Sync into the parent so rsync recreates the contrib dir itself
        # (strip one extra path component if the path ends with '/').
        remote_contrib_parent = remote_contrib.rsplit('/',2 if remote_contrib.endswith('/') else 1)[0]
        cmd = rsync+' '+local_contrib+' '+host+':'+remote_contrib_parent
        run_cmd(cmd, verbose=verbose)
    # empty job - just for init host
    if not path: return None
    # run job: feed the job file to "python -m distant run2" on the remote
    # host via stdin, capture its stdout into the local .out file.
    out_path = path[:-3]+'out'
    with open(path,'r') as fi:
        with open(out_path,'w') as fo:
            cmd = ssh+' '+host+" PYTHONPATH={} python -m distant run2".format(remote_contrib)
            run_cmd(cmd, stdin=fi, stdout=fo, verbose=False)
    # load output
    # NOTE(review): files are opened in text mode here; marshal requires
    # binary-mode files on Python 3 — presumably this module targets
    # Python 2 for the transport path. Confirm before porting.
    with open(out_path,'r') as f:
        result = marshal.load(f)
        exc = marshal.load(f)
    # remove local files
    if remove_local_files:
        os.remove(path)
        os.remove(out_path)
    if exc:
        print(exc)
        raise Exception('RemoteError')
    else:
        return result
# ---[ CORE LAYER ]-------------------------------------------------------------
m_ver = 0  # marshal serialization version (data / results)
p_ver = 0  # pickle protocol version (metadata / function payload)


def save_job(path, fun, data, meta=None):
    """Store job (function + data + metadata) in a file.

    The function is stored as (name, source, filename) rather than being
    pickled directly, which avoids pickle's problems with serializing
    functions. Blank lines are prepended to the source so remote tracebacks
    keep the original line numbers.
    """
    if not meta:
        # None (or any falsy value) means "no metadata"; avoids the
        # mutable-default pitfall of the old "meta={}" signature.
        meta = {}
    with open(path, 'wb') as f:
        # meta
        pickle.dump(meta, f, p_ver)
        # function (as reconstructable source, not a pickled object)
        src = '\n' * (fun.__code__.co_firstlineno - 1)  # preserve line numbers
        src += inspect.getsource(fun)
        pickle.dump((fun.__name__, src, fun.__code__.co_filename), f, p_ver)
        # data
        marshal.dump(data, f, m_ver)


def run(path):
    """Run job from input file and store results in output file"""
    out_path = path[:-3] + 'out'
    with open(path, 'rb') as fi:
        with open(out_path, 'wb') as fo:
            out = _run(fi, fo)
    return out


def run2():
    """Run job from stdin and store results in stdout"""
    return _run(sys.stdin, sys.stdout)


def _run(fi, fo):
    """Load a job from *fi*, execute it, and marshal (result, exc) to *fo*.

    Returns:
        (result, exc): the job function's return value (None on failure)
        and the formatted traceback string ('' on success).
    """
    # load
    meta = pickle.load(fi)  # TODO globals ???
    name, src, co_filename = pickle.load(fi)
    code = compile(src, co_filename, 'exec')
    # Execute into an explicit namespace. The previous "exec(code)" +
    # "locals()[name]" only worked on Python 2: on Python 3, exec cannot
    # mutate function locals, so the lookup raised KeyError.
    scope = {}
    exec(code, globals(), scope)
    fun = scope[name]
    data = marshal.load(fi)
    # call
    try:
        result = fun(data)
        exc = ''
    except Exception:
        result = None
        exc = tb.format_exc()
    # output
    marshal.dump(result, fo, m_ver)
    marshal.dump(exc, fo, m_ver)
    return result, exc
# ---[ UTILS ]------------------------------------------------------------------
import subprocess as sp
from time import time
# TODO cleanup the code
def run_cmd(cmd,stdin=None,stdout=None,stderr=None,verbose=True, shell=True):
    """Run *cmd* through the shell and raise Exception('RC != 0') on failure.

    Three modes:
      - verbose: stream child output to the terminal, print timing info.
      - quiet with no stdout given: capture both streams via pipes.
      - quiet with an explicit stdout: write child stdout there (used by
        remote_run to capture the remote job's output file).

    NOTE(review): the *stderr* argument is only honored implicitly — the
    verbose branch ignores it and the other branches always pipe stderr;
    confirm whether callers rely on passing stderr explicitly.
    """
    if verbose:
        t0 = time()
        print('CMD:',cmd)
        sys.stdout.flush()
        p = sp.Popen(cmd,stdin=stdin,shell=shell)
        print('PID:',p.pid,'\n')
        sys.stdout.flush()
        rc = p.wait()
        if rc:
            sys.stdout.flush()
            sys.stderr.flush()
            raise Exception('RC != 0')
        print('TIME: {:.2f}s'.format(time()-t0))
        print()
    elif stdout is None:
        # Quiet mode: capture output; only surface stderr when the command fails.
        p = sp.Popen(cmd, stdin=stdin, stdout=sp.PIPE, stderr=sp.PIPE, shell=shell)
        out,err = p.communicate()
        rc = p.returncode
        if rc:
            print(err)
            raise Exception('RC != 0')
    else:
        # Quiet mode with redirected stdout (e.g. into a job output file).
        print('CMD:',cmd)
        p = sp.Popen(cmd, stdin=stdin, stdout=stdout, stderr=sp.PIPE, shell=shell)
        out,err = p.communicate()
        rc = p.returncode
        if rc:
            print(err)
            raise Exception('RC != 0')
def cfg_get(cfg, key, default=None, host=None):
    """Look up *key* in cfg, preferring a per-host override in cfg[key+'_host']."""
    host_key = key + '_host'
    if host_key in cfg and host in cfg[host_key]:
        return cfg[host_key][host]
    return cfg.get(key, default)
def cfg_get_host(cfg, host):
    """Resolve *host* through the optional cfg['host'] alias table."""
    if 'host' not in cfg:
        return host
    return cfg['host'].get(host, host)
# ------------------------------------------------------------------------------
if __name__=="__main__":
    # CLI entry point: "python -m distant run <job-file>" executes a saved job
    # file; any other mode reads the job from stdin and writes results to
    # stdout (this is what remote_run invokes over ssh as "run2").
    mode = sys.argv[1]
    if mode == 'run':
        path = sys.argv[2]
        out = run(path)
        print(out)
    else:
        run2()
| |
import click
import string
import random
import logging
import json
import requests
from hashlib import md5
from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qs
from bs4 import BeautifulSoup
from mechanize import Browser
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from ips_vagrant.common import cookiejar, byteify
from ips_vagrant.common.progress import ProgressBar, Echo
from ips_vagrant.installer.dev_tools.latest import DevToolsInstaller
# NOTE(review): module-level version marker; appears unset/unused in this
# chunk -- presumably assigned elsewhere before use. Confirm against callers.
version = None
# noinspection PyMethodMayBeStatic
class Installer(object):
    """
    Drives the IPS web installation wizard from start to finish.

    Walks the installer pages with mechanize (system check, license,
    applications, server details, admin account), creates the MySQL
    database, then polls the MultipleRedirect endpoint via Ajax until
    the installation completes.
    """
    # License fields
    FIELD_LICENSE_KEY = 'lkey'
    # Server Detail fields
    FIELD_SERVER_SQL_HOST = 'sql_host'
    FIELD_SERVER_SQL_USER = 'sql_user'
    FIELD_SERVER_SQL_PASS = 'sql_pass'
    FIELD_SERVER_SQL_DATABASE = 'sql_database'
    # Admin fields
    FIELD_ADMIN_USER = 'admin_user'
    FIELD_ADMIN_PASS = 'admin_pass1'
    FIELD_ADMIN_PASS_CONFIRM = 'admin_pass2'
    FIELD_ADMIN_EMAIL = 'admin_email'
    def __init__(self, ctx, site, force=False):
        """
        Initialize a new Installer instance
        @type ctx: ips_vagrant.cli.Context
        @param site: The IPS Site we are installing
        @type site: ips_vagrant.models.sites.Site
        @param force: Overwrite existing files / databases
        @type force: bool
        """
        self.log = logging.getLogger('ipsv.installer')
        self.ctx = ctx
        self.force = force
        self._previous_title = None
        self.url = '{scheme}://{host}/admin/install'.format(
            scheme='https' if site.ssl else 'http', host=site.domain.name
        )
        self.site = site
        # NOTE(review): hardcoded root credentials for the local Vagrant
        # MySQL server -- acceptable for a throwaway dev VM only.
        self.mysql = create_engine('mysql://root:secret@localhost')
        self._sessions = {}
        self.cookiejar = cookiejar()
        self.cookies = {cookie.name: cookie.value for cookie in self.cookiejar}
        self.browser = Browser()
        self.browser.set_cookiejar(self.cookiejar)
    def _check_title(self, title):
        """
        If we're on the same page, we got an error and need to raise an exception
        @type title: str
        @raise InstallationError: Title matches the previous page requests title (We're on the same page)
        """
        self.log.info('Installation page loaded: %s', title)
        if self._previous_title and title == self._previous_title:
            raise InstallationError('Unexpected page error')
        self._previous_title = title
    def start(self):
        """
        Start the installation wizard
        """
        self.log.debug('Starting the installation process')
        self.browser.open(self.url)
        continue_link = next(self.browser.links(text_regex='Start Installation'))
        self.browser.follow_link(continue_link)
        self.system_check()
    def system_check(self):
        """
        System requirements check
        """
        self._check_title(self.browser.title())
        p = Echo('Running system check...')
        rsoup = BeautifulSoup(self.browser.response().read(), "html.parser")
        # Check for any errors (each failed requirement is an <li class="fail">)
        errors = []
        for ul in rsoup.find_all('ul', {'class': 'ipsList_checks'}):
            for li in ul.find_all('li', {'class': 'fail'}):
                errors.append(li.text)
        if errors:
            raise InstallationError(errors)
        # Continue
        continue_link = next(self.browser.links(text_regex='Continue'))
        p.done()
        self.browser.follow_link(continue_link)
        self.license()
    def license(self):
        """
        Submit our license to IPS' servers
        """
        p = Echo('Submitting license key...')
        self._check_title(self.browser.title())
        self.browser.select_form(nr=0)
        # Set the fields
        self.browser.form[self.FIELD_LICENSE_KEY] = '{license}-TESTINSTALL'.format(license=self.site.license_key)
        self.browser.find_control('eula_checkbox').items[0].selected = True # TODO: User prompt?
        # Submit the request
        self.log.debug('Submitting our license')
        self.browser.submit()
        self.log.debug('Response code: %s', self.browser.response().code)
        p.done()
        self.applications()
    def applications(self):
        """
        Select the applications to install (currently hardcoded to install all applications)
        """
        # Check for license submission errors
        try:
            self._check_title(self.browser.title())
        except InstallationError:
            # Surface the specific license error shown next to the lkey field
            rsoup = BeautifulSoup(self.browser.response().read(), "html.parser")
            error = rsoup.find('li', id='license_lkey').find('span', {'class': 'ipsType_warning'}).text
            raise InstallationError(error)
        # TODO: Make this configurable
        p = Echo('Setting applications to install...')
        self.browser.select_form(nr=0)
        self.browser.submit()
        p.done()
        self.server_details()
    def server_details(self):
        """
        Input server details (database information, etc.)
        """
        self._check_title(self.browser.title())
        p = Echo('Creating MySQL database...')
        # Create the database; names are derived from the site so re-runs
        # of the same site map to the same database
        md5hex = md5(self.site.domain.name + self.site.slug).hexdigest()
        db_name = 'ipsv_{md5}'.format(md5=md5hex)
        # MySQL usernames are limited to 16 characters max
        db_user = 'ipsv_{md5}'.format(md5=md5hex[:11])
        rand_pass = ''.join(random.SystemRandom()
                            .choice(string.ascii_letters + string.digits) for _ in range(random.randint(16, 24)))
        db_pass = rand_pass
        try:
            self.mysql.execute('CREATE DATABASE `{db}`'.format(db=db_name))
        except SQLAlchemyError:
            # Database already exists: drop and recreate, prompting first
            # unless --force was given
            if not self.force:
                click.confirm('A previous database for this installation already exists.\n'
                              'Would you like to drop it now? The installation will be aborted if you do not',
                              abort=True)
            self.log.info('Dropping existing database: {db}'.format(db=db_name))
            self.mysql.execute('DROP DATABASE IF EXISTS `{db}`'.format(db=db_name))
            self.mysql.execute('DROP USER `{u}`'.format(u=db_user))
            self.mysql.execute('CREATE DATABASE `{db}`'.format(db=db_name))
        self.mysql.execute("GRANT ALL ON {db}.* TO '{u}'@'localhost' IDENTIFIED BY '{p}'"
                           .format(db=db_name, u=db_user, p=db_pass))
        # Save the database connection information
        self.site.db_host = 'localhost'
        self.site.db_name = db_name
        self.site.db_user = db_user
        self.site.db_pass = db_pass
        self.ctx.db.commit()
        self.log.debug('MySQL Database Name: %s', db_name)
        self.log.debug('MySQL Database User: %s', db_user)
        self.log.debug('MySQL Database Password: %s', db_pass)
        # Set form fields and submit
        self.browser.select_form(nr=0)
        self.browser.form[self.FIELD_SERVER_SQL_HOST] = 'localhost'
        self.browser.form[self.FIELD_SERVER_SQL_USER] = db_user
        self.browser.form[self.FIELD_SERVER_SQL_PASS] = db_pass
        self.browser.form[self.FIELD_SERVER_SQL_DATABASE] = db_name
        self.browser.submit()
        p.done()
        self.admin()
    def admin(self):
        """
        Provide admin login credentials
        """
        self._check_title(self.browser.title())
        self.browser.select_form(nr=0)
        # Get the admin credentials, prompting for any not found in config
        prompted = []
        user = self.ctx.config.get('User', 'AdminUser')
        if not user:
            user = click.prompt('Admin display name')
            prompted.append('user')
        password = self.ctx.config.get('User', 'AdminPass')
        if not password:
            password = click.prompt('Admin password', hide_input=True, confirmation_prompt='Confirm admin password')
            prompted.append('password')
        email = self.ctx.config.get('User', 'AdminEmail')
        if not email:
            email = click.prompt('Admin email')
            prompted.append('email')
        self.browser.form[self.FIELD_ADMIN_USER] = user
        self.browser.form[self.FIELD_ADMIN_PASS] = password
        self.browser.form[self.FIELD_ADMIN_PASS_CONFIRM] = password
        self.browser.form[self.FIELD_ADMIN_EMAIL] = email
        Echo('Submitting admin information...').done()
        # Offer to persist credentials only when all three were freshly prompted
        if len(prompted) >= 3:
            save = click.confirm('Would you like to save and use these admin credentials for future installations?')
            if save:
                self.log.info('Saving admin login credentials')
                self.ctx.config.set('User', 'AdminUser', user)
                self.ctx.config.set('User', 'AdminPass', password)
                self.ctx.config.set('User', 'AdminEmail', email)
                with open(self.ctx.config_path, 'wb') as cf:
                    self.ctx.config.write(cf)
        self.install()
    def _start_install(self):
        """
        Start the installation
        """
        self.browser.submit()
        self._check_title(self.browser.title())
    def _get_mr_link(self):
        """
        Get the MultipleRedirect URL
        @rtype: str
        """
        rsoup = BeautifulSoup(self.browser.response().read(), "html.parser")
        mr_link = rsoup.find('a', {'class': 'button'}).get('href')
        self.log.debug('MultipleRedirect link: %s', mr_link)
        return mr_link
    def _ajax(self, url, method='get', params=None, load_json=True, raise_request=True):
        """
        Perform an Ajax request
        @type url: str
        @type method: str
        @type params: dict or None
        @type load_json: bool
        @param raise_request: Raise exceptions for HTTP status errors
        @type raise_request: bool
        @return: Tuple with the decoded JSON response and actual response, or just the response if load_json is False
        @rtype: requests.Response or tuple of (dict or list, requests.Response)
        """
        # Lazily create, then reuse, a single Ajax session
        if 'ajax' in self._sessions:
            ajax = self._sessions['ajax']
        else:
            self.log.debug('Instantiating a new Ajax session')
            ajax = requests.Session()
            ajax.headers.update({'X-Requested-With': 'XMLHttpRequest'})
            ajax.cookies.update(cookiejar())
            ajax.verify = False
            self._sessions['ajax'] = ajax
        response = ajax.request(method, url, params)
        self.log.debug('Ajax response: %s', response.text)
        if raise_request:
            response.raise_for_status()
        if load_json:
            return byteify(json.loads(response.text)), response
        return response
    def _request(self, url, method='get', params=None, raise_request=True):
        """
        Perform a standard HTTP request
        @type url: str
        @type method: str
        @type params: dict or None
        @param raise_request: Raise exceptions for HTTP status errors
        @type raise_request: bool
        @rtype: requests.Response
        """
        # Lazily create, then reuse, a single plain HTTP session
        if 'http' in self._sessions:
            http = self._sessions['http']
        else:
            self.log.debug('Instantiating a new HTTP session')
            http = requests.Session()
            http.cookies.update(cookiejar())
            http.verify = False
            self._sessions['http'] = http
        response = http.request(method, url, params)
        self.log.debug('HTTP response code: %s', response.status_code)
        if raise_request:
            response.raise_for_status()
        return response
    def _parse_response(self, url, json_response):
        """
        Parse response data and return the next request URL
        @type url: str
        @type json_response: list or dict
        @rtype: str
        """
        parts = list(urlparse(url))
        query = parse_qs(parts[4])
        # The installer expects the previous JSON state in the 'mr' parameter
        query['mr'] = str(json_response[0]).replace('\'', '"')
        parts[4] = urlencode(query, True)
        return urlunparse(parts)
    def _get_stage(self, json_response):
        """
        Get the current installation stage
        @type json_response: list or dict
        @rtype: str
        """
        try:
            return json_response[1]
        except IndexError:
            # No stage entry means the redirect loop has finished
            return 'Installation complete!'
    def _get_progress(self, json_response):
        """
        Get the current installation progress
        @type json_response: list or dict
        @rtype: str
        """
        try:
            return round(float(json_response[2]))
        except IndexError:
            return 0
    def _check_if_complete(self, url, json_response):
        """
        Check if a request has been completed and return the redirect URL if it has
        @type url: str
        @type json_response: list or dict
        @rtype: str or bool
        """
        # NOTE(review): the membership test runs before the isinstance()
        # check; 'redirect' in a *list* response is a harmless membership
        # test, but the ordering looks accidental.
        if 'redirect' in json_response and isinstance(json_response, dict):
            self.log.info('Installation complete')
            return json_response['redirect']
        return False
    def _finalize(self, response):
        """
        Finalize the installation and display a link to the suite
        """
        rsoup = BeautifulSoup(response.text, "html.parser")
        click.echo('------')
        click.secho(rsoup.find('h1', id='elInstaller_welcome').text.strip(), fg='yellow', bold=True)
        click.secho(rsoup.find('p', {'class': 'ipsType_light'}).text.strip(), fg='yellow', dim=True)
        link = rsoup.find('a', {'class': 'ipsButton_primary'}).get('href')
        click.echo(click.style('Go to the suite: ', bold=True) + link + '\n')
    # noinspection PyUnboundLocalVariable
    def install(self):
        """
        Run the actual installation
        """
        self._start_install()
        mr_link = self._get_mr_link()
        # Set up the progress bar
        pbar = ProgressBar(100, 'Running installation...')
        pbar.start()
        mr_j, mr_r = self._ajax(mr_link)
        # Loop until we get a redirect json response
        while True:
            mr_link = self._parse_response(mr_link, mr_j)
            stage = self._get_stage(mr_j)
            progress = self._get_progress(mr_j)
            mr_j, mr_r = self._ajax(mr_link)
            pbar.update(min([progress, 100]), stage) # NOTE: Response may return progress values above 100
            # If we're done, finalize the installation and break
            redirect = self._check_if_complete(mr_link, mr_j)
            if redirect:
                pbar.finish()
                break
        p = Echo('Finalizing...')
        mr_r = self._request(redirect, raise_request=False)
        p.done()
        # Install developer tools
        if self.site.in_dev:
            DevToolsInstaller(self.ctx, self.site).install()
        # Get the link to our community homepage
        self._finalize(mr_r)
class InstallationError(Exception):
    """Raised when any step of the IPS installation wizard fails."""
| |
# -*- coding: utf-8 -*-
"""
eve.methods.post
~~~~~~~~~~~~~~~~
    This module implements the POST method, supported by the resources
    endpoints.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from flask import current_app as app, abort
from eve.utils import config, parse_request, debug_error_message
from eve.auth import requires_auth
from eve.defaults import resolve_default_values
from eve.validation import ValidationError
from eve.methods.common import parse, payload, ratelimit, \
pre_event, store_media_files, resolve_user_restricted_access, \
resolve_embedded_fields, build_response_document, marshal_write_response, \
resolve_sub_resource_path, resolve_document_etag, oplog_push
from eve.versioning import resolve_document_version, \
insert_versioning_documents
@ratelimit()
@requires_auth('resource')
@pre_event
def post(resource, payl=None):
    """
    Public POST handler. The decorators take care of rate limiting,
    authentication and pre-request event dispatch; the actual work is
    delegated to :func:`post_internal`.
    .. versionchanged:: 0.5
       Split original post() into post/post_internal combo.
    """
    response = post_internal(resource, payl, skip_validation=False)
    return response
def post_internal(resource, payl=None, skip_validation=False):
    """
    Intended for internal post calls, this method is not rate limited,
    authentication is not checked and pre-request events are not raised.
    Adds one or more documents to a resource. Each document is validated
    against the domain schema. If validation passes the document is inserted
    and ID_FIELD, LAST_UPDATED and DATE_CREATED along with a link to the
    document are returned. If validation fails, a list of validation issues
    is returned.
    :param resource: name of the resource involved.
    :param payl: alternative payload. When calling post() from your own code
                 you can provide an alternative payload. This can be useful,
                 for example, when you have a callback function hooked to a
                 certain endpoint, and want to perform additional post() calls
                 from there.
                 Please be advised that in order to successfully use this
                 option, a request context must be available.
                 See https://github.com/nicolaiarocci/eve/issues/74 for a
                 discussion, and a typical use case.
    :param skip_validation: skip payload validation before write (bool)
    .. versionchanged:: 0.6
       Initialize DELETED field when soft_delete is enabled.
    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependency even when some have default values. See
       #353.
       Push updates to the OpLog.
       Original post() has been split into post() and post_internal().
       ETAGS are now stored with documents (#369).
    .. versionchanged:: 0.4
       Resolve default values before validation is performed. See #353.
       Support for document versioning.
    .. versionchanged:: 0.3
       Return 201 if at least one document has been successfully inserted.
       Fix #231 auth field not set if resource level authentication is set.
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.
    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.
       Added ``on_inserted*`` events after the database insert
    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
    .. versionchanged:: 0.1.0
       More robust handling of auth_field.
       Support for optional HATEOAS.
    .. versionchanged: 0.0.9
       Event hooks renamed to be more robust and consistent: 'on_posting'
       renamed to 'on_insert'.
       You can now pass a pre-defined custom payload to the function.
    .. versionchanged:: 0.0.9
       Storing self.app.auth.userid in auth_field when 'user-restricted
       resource access' is enabled.
    .. versionchanged: 0.0.7
       Support for Rate-Limiting.
       Support for 'extra_response_fields'.
       'on_posting' and 'on_posting_<resource>' events are raised before the
       documents are inserted into the database. This allows callback functions
       to arbitrarily edit/update the documents being stored.
    .. versionchanged:: 0.0.6
       Support for bulk inserts.
       Please note: validation constraints are checked against the database,
       and not between the payload documents themselves. This causes an
       interesting corner case: in the event of a multiple documents payload
       where two or more documents carry the same value for a field where the
       'unique' constraint is set, the payload will validate successfully, as
       there are no duplicates in the database (yet). If this is an issue, the
       client can always send the documents once at a time for insertion, or
       validate locally before submitting the payload to the API.
    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type .
       Support for 'user-restricted resource access'.
    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.
    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """
    # one shared timestamp so LAST_UPDATED == DATE_CREATED for every document
    date_utc = datetime.utcnow().replace(microsecond=0)
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    if not skip_validation:
        validator = app.validator(schema, resource)
    documents = []
    results = []
    failures = 0
    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)
    # validation, and additional fields
    if payl is None:
        payl = payload()
    # normalize a single document to a one-element bulk payload
    if isinstance(payl, dict):
        payl = [payl]
    if not payl:
        # empty bulk insert
        abort(400, description=debug_error_message(
            'Empty bulk insert'
        ))
    for value in payl:
        document = []
        doc_issues = {}
        try:
            document = parse(value, resource)
            resolve_sub_resource_path(document, resource)
            if skip_validation:
                validation = True
            else:
                validation = validator.validate(document)
            if validation: # validation is successful
                # Populate meta and default fields
                document[config.LAST_UPDATED] = \
                    document[config.DATE_CREATED] = date_utc
                if config.DOMAIN[resource]['soft_delete'] is True:
                    document[config.DELETED] = False
                resolve_user_restricted_access(document, resource)
                resolve_default_values(document, resource_def['defaults'])
                store_media_files(document, resource)
                resolve_document_version(document, resource, 'POST')
            else:
                # validation errors added to list of document issues
                doc_issues = validator.errors
        except ValidationError as e:
            doc_issues['validation exception'] = str(e)
        except Exception as e:
            # most likely a problem with the incoming payload, report back to
            # the client as if it was a validation issue
            app.logger.exception(e)
            doc_issues['exception'] = str(e)
        if len(doc_issues):
            # replace the failed document with its error report
            document = {
                config.STATUS: config.STATUS_ERR,
                config.ISSUES: doc_issues,
            }
            failures += 1
        documents.append(document)
    if failures:
        # If at least one document got issues, the whole request fails and a
        # ``422 Bad Request`` status is returned.
        for document in documents:
            if config.STATUS in document \
               and document[config.STATUS] == config.STATUS_ERR:
                results.append(document)
            else:
                results.append({config.STATUS: config.STATUS_OK})
        return_code = config.VALIDATION_ERROR_STATUS
    else:
        # notify callbacks
        getattr(app, "on_insert")(resource, documents)
        getattr(app, "on_insert_%s" % resource)(documents)
        # compute etags here as documents might have been updated by callbacks.
        resolve_document_etag(documents, resource)
        # bulk insert
        ids = app.data.insert(resource, documents)
        # update oplog if needed
        oplog_push(resource, documents, 'POST')
        # assign document ids
        for document in documents:
            # either return the custom ID_FIELD or the id returned by
            # data.insert().
            document[resource_def['id_field']] = \
                document.get(resource_def['id_field'], ids.pop(0))
            # build the full response document
            result = document
            build_response_document(
                result, resource, embedded_fields, document)
            # add extra write meta data
            result[config.STATUS] = config.STATUS_OK
            # limit what actually gets sent to minimize bandwidth usage
            result = marshal_write_response(result, resource)
            results.append(result)
        # insert versioning docs
        insert_versioning_documents(resource, documents)
        # notify callbacks
        getattr(app, "on_inserted")(resource, documents)
        getattr(app, "on_inserted_%s" % resource)(documents)
        # request was received and accepted; at least one document passed
        # validation and was accepted for insertion.
        return_code = 201
    if len(results) == 1:
        response = results.pop(0)
    else:
        response = {
            config.STATUS: config.STATUS_ERR if failures else config.STATUS_OK,
            config.ITEMS: results,
        }
    if failures:
        response[config.ERROR] = {
            "code": return_code,
            "message": "Insertion failure: %d document(s) contain(s) error(s)"
                       % failures,
        }
    return response, None, None, return_code
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness.
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
library to work with nova.
"""
import re
import shelve
import ldap
from keystone.common import logging
from keystone.common import utils
# Human-readable names for the ldap search scopes, used in debug logging.
SCOPE_NAMES = {
    ldap.SCOPE_BASE: 'SCOPE_BASE',
    ldap.SCOPE_ONELEVEL: 'SCOPE_ONELEVEL',
    ldap.SCOPE_SUBTREE: 'SCOPE_SUBTREE',
}
LOG = logging.getLogger(__name__)
# Only enable a lower level than WARN if you are actively debugging
LOG.level = logging.WARN
def _match_query(query, attrs):
    """Match an ldap query to an attribute dictionary.

    The characters &, |, and ! are supported in the query. No syntax checking
    is performed, so malformed queries will not work correctly.
    """
    # cut off the parentheses
    inner = query[1:-1]
    if inner.startswith(('&', '|')):
        # '&' is a conjunction (all sub-filters must match); '|' is a
        # disjunction (any sub-filter may match).  The previous code used
        # all() for both, silently turning OR filters into AND filters.
        matchfn = all if inner[0] == '&' else any
        # cut off the & or |
        groups = _paren_groups(inner[1:])
        return matchfn(_match_query(group, attrs) for group in groups)
    if inner.startswith('!'):
        # cut off the ! and the nested parentheses
        return not _match_query(query[2:-1], attrs)
    (k, _sep, v) = inner.partition('=')
    return _match(k, v, attrs)
def _paren_groups(source):
"""Split a string into parenthesized groups."""
count = 0
start = 0
result = []
for pos in xrange(len(source)):
if source[pos] == '(':
if count == 0:
start = pos
count += 1
if source[pos] == ')':
count -= 1
if count == 0:
result.append(source[start:pos + 1])
return result
def _match(key, value, attrs):
"""Match a given key and value against an attribute list."""
if key not in attrs:
return False
# This is a wild card search. Implemented as all or nothing for now.
if value == '*':
return True
if key == 'serviceId':
# for serviceId, the backend is returning a list of numbers
# make sure we convert them to strings first before comparing
# them
str_sids = [str(x) for x in attrs[key]]
return str(value) in str_sids
if key != 'objectclass':
return value in attrs[key]
# it is an objectclass check, so check subclasses
values = _subs(value)
for v in values:
if v in attrs[key]:
return True
return False
def _subs(value):
"""Returns a list of subclass strings.
The strings represent the ldap objectclass plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.
"""
subs = {'groupOfNames': ['keystoneTenant',
'keystoneRole',
'keystoneTenantRole']}
if value in subs:
return [value] + subs[value]
return [value]
# Module-level toggle: when True every FakeLdap call raises
# ldap.SERVER_DOWN, letting tests simulate an unreachable server.
server_fail = False
class FakeShelve(dict):
    """Dict-backed stand-in for a ``shelve`` store.

    ``get_instance`` hands out one shared instance so that every
    'fake://memory' connection sees the same data; ``sync`` is a no-op.
    """
    @classmethod
    def get_instance(cls):
        """Return the shared singleton, creating it on first access."""
        try:
            instance = cls.__instance
        except AttributeError:
            instance = cls.__instance = cls()
        return instance
    def sync(self):
        """No-op; a real shelve would flush to disk here."""
        pass
class FakeLdap(object):
    """Fake LDAP connection.

    Entries live in a dict-like store (the in-memory FakeShelve singleton
    for 'fake://memory', otherwise an on-disk shelve), keyed by
    'ldap:<dn>'.  Set the module-level ``server_fail`` flag to make every
    call raise ldap.SERVER_DOWN.
    """
    __prefix = 'ldap:'
    def __init__(self, url):
        LOG.debug(_('FakeLdap initialize url=%s'), url)
        if url == 'fake://memory':
            self.db = FakeShelve.get_instance()
        else:
            # strip the 7-character scheme prefix and open an on-disk shelve
            self.db = shelve.open(url[7:])
    def simple_bind_s(self, dn, password):
        """Validate *password* against the stored userPassword of *dn*.

        'cn=Admin' with password 'password' is a hardcoded backdoor for the
        test harness.  Raises the matching ldap error on failure.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        LOG.debug(_('FakeLdap bind dn=%s'), dn)
        if dn == 'cn=Admin' and password == 'password':
            return
        try:
            attrs = self.db['%s%s' % (self.__prefix, dn)]
        except KeyError:
            LOG.debug(_('FakeLdap bind fail: dn=%s not found'), dn)
            raise ldap.NO_SUCH_OBJECT
        db_password = None
        try:
            db_password = attrs['userPassword'][0]
        except (KeyError, IndexError):
            LOG.debug(_('FakeLdap bind fail: password for dn=%s not found'),
                      dn)
            raise ldap.INAPPROPRIATE_AUTH
        if not utils.ldap_check_password(password, db_password):
            LOG.debug(_('FakeLdap bind fail: password for dn=%s does'
                        ' not match') % dn)
            raise ldap.INVALID_CREDENTIALS
    def unbind_s(self):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise ldap.SERVER_DOWN
    def add_s(self, dn, attrs):
        """Add an object with the specified attributes at dn."""
        if server_fail:
            raise ldap.SERVER_DOWN
        key = '%s%s' % (self.__prefix, dn)
        LOG.debug(_('FakeLdap add item: dn=%s, attrs=%s'), dn, attrs)
        if key in self.db:
            LOG.debug(_('FakeLdap add item failed: dn=%s is'
                        ' already in store.'), dn)
            raise ldap.ALREADY_EXISTS(dn)
        # normalize every attribute value to a list, as real ldap does
        self.db[key] = dict([(k, v if isinstance(v, list) else [v])
                             for k, v in attrs])
        self.db.sync()
    def delete_s(self, dn):
        """Remove the ldap object at specified dn."""
        if server_fail:
            raise ldap.SERVER_DOWN
        key = '%s%s' % (self.__prefix, dn)
        LOG.debug(_('FakeLdap delete item: dn=%s'), dn)
        try:
            del self.db[key]
        except KeyError:
            LOG.debug(_('FakeLdap delete item failed: dn=%s not found.'), dn)
            raise ldap.NO_SUCH_OBJECT
        self.db.sync()
    def delete_ext_s(self, dn, serverctrls):
        """Remove the ldap object at specified dn.

        The *serverctrls* argument is accepted for API compatibility and
        ignored.
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        key = '%s%s' % (self.__prefix, dn)
        LOG.debug(_('FakeLdap delete item: dn=%s'), dn)
        try:
            del self.db[key]
        except KeyError:
            LOG.debug(_('FakeLdap delete item failed: dn=%s not found.'), dn)
            raise ldap.NO_SUCH_OBJECT
        self.db.sync()
    def modify_s(self, dn, attrs):
        """Modify the object at dn using the attribute list.
        :param dn: an LDAP DN
        :param attrs: a list of tuples in the following form:
            ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        key = '%s%s' % (self.__prefix, dn)
        LOG.debug(_('FakeLdap modify item: dn=%s attrs=%s'), dn, attrs)
        try:
            entry = self.db[key]
        except KeyError:
            LOG.debug(_('FakeLdap modify item failed: dn=%s not found.'), dn)
            raise ldap.NO_SUCH_OBJECT
        for cmd, k, v in attrs:
            values = entry.setdefault(k, [])
            if cmd == ldap.MOD_ADD:
                if v in values:
                    raise ldap.TYPE_OR_VALUE_EXISTS
                if isinstance(v, list):
                    values += v
                else:
                    values.append(v)
            elif cmd == ldap.MOD_REPLACE:
                values[:] = v if isinstance(v, list) else [v]
            elif cmd == ldap.MOD_DELETE:
                if v is None:
                    # delete the whole attribute
                    if len(values) == 0:
                        LOG.debug(_('FakeLdap modify item failed: '
                                    'item has no attribute "%s" to delete'), k)
                        raise ldap.NO_SUCH_ATTRIBUTE
                    values[:] = []
                else:
                    # delete only the listed value(s)
                    if not isinstance(v, list):
                        v = [v]
                    for val in v:
                        try:
                            values.remove(val)
                        except ValueError:
                            LOG.debug(_('FakeLdap modify item failed:'
                                        ' item has no attribute "%s" with'
                                        ' value "%s" to delete'), k, val)
                            raise ldap.NO_SUCH_ATTRIBUTE
            else:
                LOG.debug(_('FakeLdap modify item failed: unknown'
                            ' command %s'), cmd)
                raise NotImplementedError(_('modify_s action %s not'
                                            ' implemented') % cmd)
        self.db[key] = entry
        self.db.sync()
    def search_s(self, dn, scope, query=None, fields=None):
        """Search for all matching objects under dn using the query.
        Args:
        dn -- dn to search under
        scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
        query -- query to filter objects by
        fields -- fields to return. Returns all fields if not specified
        """
        if server_fail:
            raise ldap.SERVER_DOWN
        LOG.debug(_('FakeLdap search at dn=%s scope=%s query=%s'),
                  dn, SCOPE_NAMES.get(scope, scope), query)
        if scope == ldap.SCOPE_BASE:
            try:
                item_dict = self.db['%s%s' % (self.__prefix, dn)]
            except KeyError:
                LOG.debug(_('FakeLdap search fail: dn not found for'
                            ' SCOPE_BASE'))
                raise ldap.NO_SUCH_OBJECT
            results = [(dn, item_dict)]
        elif scope == ldap.SCOPE_SUBTREE:
            # every entry whose dn ends with ',<dn>'
            results = [(k[len(self.__prefix):], v)
                       for k, v in self.db.iteritems()
                       if re.match('%s.*,%s' % (self.__prefix, dn), k)]
        elif scope == ldap.SCOPE_ONELEVEL:
            # direct children only: exactly one rdn between the prefix and dn
            results = [(k[len(self.__prefix):], v)
                       for k, v in self.db.iteritems()
                       if re.match('%s\w+=[^,]+,%s' % (self.__prefix, dn), k)]
        else:
            LOG.debug('FakeLdap search fail: unknown scope %s', scope)
            raise NotImplementedError(_('Search scope %s not implemented.')
                                      % scope)
        objects = []
        for dn, attrs in results:
            # filter the objects by query; the rdn value is matchable too
            id_attr, id_val = dn.partition(',')[0].split('=', 1)
            match_attrs = attrs.copy()
            match_attrs[id_attr] = [id_val]
            if not query or _match_query(query, match_attrs):
                # filter the attributes by fields
                attrs = dict([(k, v) for k, v in attrs.iteritems()
                              if not fields or k in fields])
                objects.append((dn, attrs))
        LOG.debug('FakeLdap search result: %s', objects)
        return objects
| |
import re

# Map AMBER energy-component labels to the canonical term name(s) used by
# the EEX data layer; 'UB' contributes to two canonical terms.
to_canonical = {
    'BOND': ['bond'],
    'ANGLE': ['angle'],
    'UB': ['angle','urey_bradley'],
    'DIHED': ['proper'],
    'IMP': ['improper'],
    'CMAP': ['cmap'],
    'HBOND': ['h-bond'],
    'VDWAALS': ['vdw_total'],
    '1-4 VDW': ['vdw_14'],
    'EEL': ['coulomb_total'],
    '1-4 EEL': ['coulomb-14'],
    'ENERGY': ['potential']
}

# Possible size keys to look for in the header
# (the 31 counters of the POINTERS section, in file order — matching the
# count of 31 declared for "POINTERS" in data_labels below).
size_keys = [
    "NATOM", "NTYPES", "NBONH", "MBONA", "NTHETH", "MTHETA", "NPHIH", "MPHIA", "NHPARM", "NPARM", "NNB", "NRES",
    "NBONA", "NTHETA", "NPHIA", "NUMBND", "NUMANG", "NPTRA", "NATYP", "NPHB", "IFPERT", "NBPER", "NGPER", "NDPER",
    "MBPER", "MGPER", "MDPER", "IFBOX", "NMXRS", "IFCAP", "NUMEXTRA"
]

# Known %FLAG sections of a prmtop file: name -> [entry count, %FORMAT line].
# The count is either a literal int, the name of a size key, or an expression
# over size keys; presumably the reader evaluates such expressions after the
# POINTERS section has been parsed — confirm against the parser (not shown
# here).
data_labels = {
    "POINTERS" : [31, "%FORMAT(10I8)"],
    "ATOM_NAME": ["NATOM", "%FORMAT(20a4)"],
    "CHARGE": ["NATOM", "%FORMAT(5E16.8)"],
    "ATOMIC_NUMBER": ["NATOM", "%FORMAT(10I8)" ],
    "MASS": ["NATOM", "%FORMAT(5E16.8)"],
    "ATOM_TYPE_INDEX": ["NATOM", "%FORMAT(10I8)"],
    "NUMBER_EXCLUDED_ATOMS": ["NATOM", "%FORMAT(10I8)"],
    "NONBONDED_PARM_INDEX": ["NTYPES ** 2", "%FORMAT(10I8)"],
    "RESIDUE_LABEL": ["NRES", "%FORMAT(20a4)"],
    "RESIDUE_POINTER": ["NRES", "%FORMAT(10I8)"],
    "BOND_FORCE_CONSTANT": ["NUMBND", "%FORMAT(5E16.8)"],
    "BOND_EQUIL_VALUE": ["NUMBND", "%FORMAT(5E16.8)"],
    "ANGLE_FORCE_CONSTANT": ["NUMANG", "%FORMAT(5E16.8)"],
    "ANGLE_EQUIL_VALUE": ["NUMANG", "%FORMAT(5E16.8)"],
    "DIHEDRAL_FORCE_CONSTANT": ["NPTRA", "%FORMAT(5E16.8)"],
    "DIHEDRAL_PERIODICITY": ["NPTRA", "%FORMAT(5E16.8)"],
    "DIHEDRAL_PHASE": ["NPTRA", "%FORMAT(5E16.8)"],
    "SCEE_SCALE_FACTOR": ["NPTRA", "%FORMAT(5E16.8)"],
    "SCNB_SCALE_FACTOR": ["NPTRA", "%FORMAT(5E16.8)"],
    "SOLTY": ["NATYP", "%FORMAT(5E16.8)"],
    "LENNARD_JONES_ACOEF": ["(NTYPES * (NTYPES + 1)) / 2", "%FORMAT(5E16.8)"],
    "LENNARD_JONES_BCOEF": ["(NTYPES * (NTYPES + 1)) / 2", "%FORMAT(5E16.8)"],
    "BONDS_INC_HYDROGEN": ["3 * NBONH", "%FORMAT(10I8)"],
    "BONDS_WITHOUT_HYDROGEN": ["3 * NBONA", "%FORMAT(10I8)"],
    "ANGLES_INC_HYDROGEN": ["4 * NTHETH", "%FORMAT(10I8)"],
    "ANGLES_WITHOUT_HYDROGEN": ["4 * NTHETA", "%FORMAT(10I8)"],
    "DIHEDRALS_INC_HYDROGEN": ["5 * NPHIH", "%FORMAT(10I8)"],
    "DIHEDRALS_WITHOUT_HYDROGEN": ["5 * NPHIA", "%FORMAT(10I8)"],
    "EXCLUDED_ATOMS_LIST": ["NNB", "%FORMAT(10I8)"],
    "HBOND_ACOEF": ["NPHB", "%FORMAT(5E16.8)"],
    "HBOND_BCOEF": ["NPHB", "%FORMAT(5E16.8)"],
    "HBCUT": ["NPHB", "%FORMAT(5E16.8)"],
    "AMBER_ATOM_TYPE": ["NATOM", "%FORMAT(20a4)"],
    "TREE_CHAIN_CLASSIFICATION": ["NATOM", "%FORMAT(20a4)"],
    "JOIN_ARRAY": ["NATOM", "%FORMAT(10I8)"], # This section is filled with zeros, but is unused. We should not store it.
    "IROTAT": ["NATOM", "%FORMAT(10I8)"],
    "SOLVENT_POINTERS": ["3 if IFBOX else 0", "%FORMAT(3I8)"],
    "ATOMS_PER_MOLECULE": ["NATOM if IFBOX else 0", "%FORMAT(10I8)"],
    # "ATOMS_PER_MOLECULE": ["SOLVENT_POINTERS[1] if IFBOX else 0"], # SOLVENT_POINTERS[1] == NPSM
    "BOX_DIMENSIONS": [4, "%FORMAT(5E16.8)"],
    "CAP_INFO": ["1 if IFCAP else 0", "%FORMAT(10I8)"],
    "CAP_INFO2": ["4 if IFCAP else 0", "%FORMAT(5E16.8)"],
    "RADIUS_SET": [1, "%FORMAT(1a80)"],
    "RADII": ["NATOM", "%FORMAT(5E16.8)"],
    "SCREEN": ["NATOM", "%FORMAT(5E16.8)"],
    "IPOL": [1, "%FORMAT(1I8)"],
    # "POLARIZABILITY": [0, "%FORMAT(5E16.8)"]
    # "POLARIZABILITY": ["NATOM if IPOL else 0"]
}
# Units of the per-atom quantities as stored in the file.  Charges are in
# AMBER internal units (electron charge scaled by 18.2223).
atom_data_units = {
    "charge": "e / 18.2223", # Internal units
    "mass": "g * mol ** -1",
    "length" : "angstrom",
}

# Units/conventions for the periodic box; the box center is expressed as
# half of each box vector length.
box_units = {
    "length" : "angstrom",
    "angle": "degree",
    "center": [ "a/2", "b/2", "c/2" ] ,
}

# Box boundaries are always periodic if there is a box (i.e. IFBOX > 0).
# Else, this is a vacuum simulation where there are no boundaries. This
# is most similar to the 'shrink-wrapped' option in lammps and the EEX
# data layer
box_boundaries = {
    "x" : ["'periodic' if IFBOX>0, else 'shrink-wrapped'"],
    "y" : ["'periodic' if IFBOX>0, else 'shrink-wrapped'"],
    "z" : ["'periodic' if IFBOX>0, else 'shrink-wrapped'"],
}

# prmtop section name -> internal per-atom property name.
atom_property_names = {
    "ATOM_NAME": "atom_name",
    "CHARGE": "charge",
    "MASS": "mass",
    "ATOM_TYPE_INDEX": "atom_type",
    "ATOMIC_NUMBER": "atomic_number",
    # "AMBER_ATOM_TYPE": "atom_type_name",
    # "RADII" : "implicit_solvent_radius",
}

# Sections describing residues, molecules and the bonded topology terms.
residue_store_names = ["RESIDUE_LABEL", "RESIDUE_POINTER"]
molecule_store_names = ["ATOMS_PER_MOLECULE"]
topology_store_names = [
    "BONDS_INC_HYDROGEN", "BONDS_WITHOUT_HYDROGEN", "ANGLES_INC_HYDROGEN", "ANGLES_WITHOUT_HYDROGEN",
    "DIHEDRALS_INC_HYDROGEN", "DIHEDRALS_WITHOUT_HYDROGEN"
]
# Force-field parameter tables.  For each interaction class: the number of
# atoms involved ("order"), the functional form, the units of each constant,
# and which prmtop section supplies which parameter column.
forcefield_parameters = {
    "bond": {
        "order": 2,
        "form": "harmonic",
        "units": {
            "K": "kcal * mol ** -1 angstrom ** -2",
            "R0": "angstrom"
        },
        "column_names": {
            "BOND_FORCE_CONSTANT": "K",
            "BOND_EQUIL_VALUE": "R0"
        }
    },
    "angle": {
        "order": 3,
        "form": "harmonic",
        "units": {
            "K": "kcal * mol ** -1 radian ** -2",
            "theta0": "radian"
        },
        "column_names": {
            "ANGLE_FORCE_CONSTANT": "K",
            "ANGLE_EQUIL_VALUE": "theta0"
        }
    },
    "dihedral": {
        "order": 4,
        "form": "charmmfsw",
        # NOTE(review): the units below look swapped — periodicity ("n") is
        # dimensionless and the phase ("d") is an angle; confirm intent.
        "units": {
            "K": "kcal * mol ** -1",
            "n": "phase",
            "d": "radians",
        },
        "column_names": {
            "DIHEDRAL_FORCE_CONSTANT": "K",
            "DIHEDRAL_PERIODICITY": "n",
            "DIHEDRAL_PHASE": "d",
        }
    },
    # Nonbonded LJ terms are tabulated per type pair in A/B coefficient form;
    # NONBONDED_PARM_INDEX maps a type pair to its row (no parameter column).
    "nonbond" : {
        "order" : None,
        "form" : {"name": "LJ", "form": "AB"},
        "units" : {
            "A" : "kcal * mol ** -1 * angstrom ** 12",
            "B" : "kcal * mol ** -1 * angstrom ** 6",
        },
        "column_names" : {
            "LENNARD_JONES_ACOEF": "A",
            "LENNARD_JONES_BCOEF": "B",
            "NONBONDED_PARM_INDEX": ""
        },
    },
}

## Exclusions and scalings
# NUMBER_EXCLUDED_ATOMS has NATOMS elements and contains the number of atoms that need to be excluded from the non-bonded
# calculation loop for atom i because i is involved in a bond, angle, or torsion with those atoms.
# Points to EXCLUDED_ATOMS_LIST, which contains the atom indices for atoms which should be excluded from ith atom
# calcuation.
exclusion_sections = ["NUMBER_EXCLUDED_ATOMS", "EXCLUDED_ATOMS_LIST"]
# "canonical" amber simulations will have values of 2 (SCNB) or 1.2 (SCEE), which should allow us to use the
# dl.set_nb_scaling_factor function. If not,
scaling_sections = ["SCEE_SCALE_FACTOR", "SCNB_SCALE_FACTOR"]
#----------------------------------------------------------------------------
# Sections that are read and stashed verbatim for later processing: every
# force-field parameter column plus residue, molecule and exclusion data.
store_other = [
    column
    for parameters in forcefield_parameters.values()
    for column in parameters["column_names"]
]
store_other += residue_store_names
store_other += molecule_store_names
store_other += exclusion_sections
def parse_format(string):
    """Parse an AMBER style format string.

    Args:
        string: a specifier such as ``"%FORMAT(10I8)"`` or
            ``"%FORMAT(5E16.8)"``.

    Returns:
        ``[count, type, width]`` for integer (``I``) and string (``a``)
        formats, or ``[count, type, width, '.', precision]`` for scientific
        (``E``) formats, where ``type`` is ``int``, ``str`` or ``float``.
        For example ``"%FORMAT(10I8)"`` parses to ``[10, int, 8]``.
        (The precision is left as a string; it is only needed textually
        when the format is rebuilt.)

    Raises:
        ValueError: if the line is not a recognized AMBER format string.
    """
    if "FORMAT" not in string:
        raise ValueError("AMBER: Did not understand format line '%s'." % string)

    pstring = string.replace("%FORMAT(", "").replace(")", "").strip()
    # Split on runs of digits, e.g. "5E16.8" -> ['5', 'E', '16', '.', '8'].
    ret = [x for x in re.split(r'(\d+)', pstring) if x]

    # Type symbol -> (python type, expected token count).
    # "10I8" and "20a4" yield 3 tokens; "5E16.8" yields 5.
    type_map = {"I": (int, 3), "E": (float, 5), "a": (str, 3)}
    if ret[1] not in type_map:
        raise ValueError("AMBER: Type symbol '%s' not understood from line '%s'." %
                         (ret[1], string))
    pytype, ntokens = type_map[ret[1]]
    if len(ret) != ntokens:
        raise ValueError("AMBER: Did not understand format line '%s'." % string)

    ret[0] = int(ret[0])
    ret[1] = pytype
    ret[2] = int(ret[2])
    return ret
def build_format(fmt):
    """Build a printf-style conversion from a parsed AMBER format.

    Args:
        fmt: output of :func:`parse_format` -- ``[count, type, width]`` or
            ``[count, type, width, '.', precision]``.

    Returns:
        A ``%``-style conversion specifier producing fields of the declared
        width, e.g. ``[10, int, 8]`` -> ``" % 7d"`` and
        ``[20, str, 4]`` -> ``"%-4s"``.

    Raises:
        TypeError: if the parsed type is not ``str``, ``float`` or ``int``.
    """
    if fmt[1] == str:
        return "%-" + str(fmt[2]) + "s"
    if fmt[1] == float:
        # One column of the declared width is used by the leading space flag.
        return " % " + str(fmt[2] - 1) + "." + str(fmt[4]) + "E"
    if fmt[1] == int:
        return " % " + str(fmt[2] - 1) + "d"
    # Report the offending type itself; the previous ``type(fmt[1])`` always
    # printed ``<class 'type'>`` and hid the real culprit.
    raise TypeError("Type (%s) not recognized" % fmt[1])
| |
from __future__ import annotations
import dataclasses
import datetime
import hashlib
import inspect
import os
import pickle
import threading
import uuid
import warnings
from collections import OrderedDict
from collections.abc import Callable, Iterator, Mapping
from concurrent.futures import Executor
from contextlib import contextmanager
from functools import partial
from numbers import Integral, Number
from operator import getitem
from packaging.version import parse as parse_version
from tlz import curry, groupby, identity, merge
from tlz.functoolz import Compose
from . import config, local, threaded
from .compatibility import _PY_VERSION
from .context import thread_state
from .core import flatten
from .core import get as simple_get
from .core import literal, quote
from .hashing import hash_buffer_hex
from .system import CPU_COUNT
from .utils import Dispatch, apply, ensure_dict, key_split
# Public API of this module; these names are re-exported at the top-level
# ``dask`` namespace.
__all__ = (
    "DaskMethodsMixin",
    "annotate",
    "is_dask_collection",
    "compute",
    "persist",
    "optimize",
    "visualize",
    "tokenize",
    "normalize_token",
    "get_collection_names",
    "get_name_from_key",
    "replace_name_in_key",
    "clone_key",
)
@contextmanager
def annotate(**annotations):
    """Context Manager for setting HighLevelGraph Layer annotations.

    Annotations are metadata or soft constraints associated with tasks that
    dask schedulers may choose to respect: they signal intent without
    enforcing hard constraints, and are primarily designed for use with the
    distributed scheduler.

    Almost any object can serve as an annotation, but small Python objects
    are preferred, while large objects such as NumPy arrays are discouraged.
    A callable annotation is invoked with each task key in the annotated
    collection and must produce the annotation for that key.

    Parameters
    ----------
    **annotations : key-value pairs

    Examples
    --------
    All tasks within array A should have priority 100 and be retried 3 times
    on failure.

    >>> import dask
    >>> import dask.array as da
    >>> with dask.annotate(priority=100, retries=3):
    ...     A = da.ones((10000, 10000))

    Prioritise tasks within Array A on flattened block ID.

    >>> nblocks = (10, 10)
    >>> with dask.annotate(priority=lambda k: k[1]*nblocks[1] + k[2]):
    ...     A = da.ones((1000, 1000), chunks=(100, 100))

    Annotations may be nested.

    >>> with dask.annotate(priority=1):
    ...     with dask.annotate(retries=3):
    ...         A = da.ones((1000, 1000))
    ...         B = A + 1
    """
    # Sanity check annotations used in place of legacy distributed
    # Client.{submit, persist, compute} keywords.
    if "workers" in annotations:
        workers = annotations["workers"]
        if isinstance(workers, (list, set, tuple)):
            annotations["workers"] = list(workers)
        elif isinstance(workers, str):
            annotations["workers"] = [workers]
        elif callable(workers):
            pass
        else:
            raise TypeError(
                "'workers' annotation must be a sequence of str, a str or a callable, but got %s."
                % annotations["workers"]
            )

    # Remaining annotations accept either an instance of the listed type(s)
    # or a callable; anything else is a TypeError with the matching message.
    _validations = (
        (
            "priority",
            (Number,),
            "'priority' annotation must be a Number or a callable, but got %s",
        ),
        (
            "retries",
            (Number,),
            "'retries' annotation must be a Number or a callable, but got %s",
        ),
        (
            "resources",
            (dict,),
            "'resources' annotation must be a dict, but got %s",
        ),
        (
            "allow_other_workers",
            (bool,),
            "'allow_other_workers' annotations must be a bool or a callable, but got %s",
        ),
    )
    for name, accepted, message in _validations:
        if name in annotations:
            value = annotations[name]
            if not isinstance(value, accepted) and not callable(value):
                raise TypeError(message % value)

    prev_annotations = config.get("annotations", {})
    updates = {f"annotations.{k}": v for k, v in annotations.items()}
    with config.set({**prev_annotations, **updates}):
        yield
def is_dask_collection(x):
    """Return ``True`` when ``x`` implements the dask collection protocol.

    An object counts as a dask collection when calling its
    ``__dask_graph__`` method succeeds and yields something other than
    ``None``.
    """
    try:
        graph = x.__dask_graph__()
    except (AttributeError, TypeError):
        return False
    return graph is not None
class DaskMethodsMixin:
    """A mixin adding standard dask collection methods"""

    __slots__ = ()

    def visualize(self, filename="mydask", format=None, optimize_graph=False, **kwargs):
        """Render the computation of this object's task graph using graphviz.

        Requires ``graphviz`` to be installed.

        Parameters
        ----------
        filename : str or None, optional
            The name of the file to write to disk. If the provided `filename`
            doesn't include an extension, '.png' will be used by default.
            If `filename` is None, no file will be written, and we communicate
            with dot using only pipes.
        format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
            Format in which to write output file. Default is 'png'.
        optimize_graph : bool, optional
            If True, the graph is optimized before rendering. Otherwise,
            the graph is displayed as is. Default is False.
        color: {None, 'order'}, optional
            Options to color nodes. Provide ``cmap=`` keyword for additional
            colormap
        **kwargs
            Additional keyword arguments to forward to ``to_graphviz``.

        Returns
        -------
        result : IPython.diplay.Image, IPython.display.SVG, or None
            See dask.dot.dot_graph for more information.

        See Also
        --------
        dask.base.visualize
        dask.dot.dot_graph

        Notes
        -----
        For more information on optimization see
        https://docs.dask.org/en/latest/optimize.html

        Examples
        --------
        >>> x.visualize(filename='dask.pdf')  # doctest: +SKIP
        >>> x.visualize(filename='dask.pdf', color='order')  # doctest: +SKIP
        """
        # Delegate to the module-level function with this collection as the
        # only argument.
        return visualize(
            self,
            filename=filename,
            format=format,
            optimize_graph=optimize_graph,
            **kwargs,
        )

    def persist(self, **kwargs):
        """Persist this dask collection into memory

        Returns a collection with the same metadata whose results are fully
        computed or actively computing in the background.

        The behaviour depends on the active task scheduler.  With an
        asynchronous scheduler such as dask.distributed, persist returns
        *immediately* and the result's task graph contains Dask Future
        objects; with a blocking scheduler the call *blocks* and the
        result's graph holds concrete Python values.  On distributed
        systems this keeps results in distributed memory rather than
        pulling them back to the local process as ``compute`` does.

        Parameters
        ----------
        scheduler : string, optional
            Which scheduler to use like "threads", "synchronous" or
            "processes".  If not provided, the default is to check the
            global settings first, and then fall back to the collection
            defaults.
        optimize_graph : bool, optional
            If True [default], the graph is optimized before computation.
            Otherwise the graph is run as is. This can be useful for
            debugging.
        **kwargs
            Extra keywords to forward to the scheduler function.

        Returns
        -------
        New dask collections backed by in-memory data

        See Also
        --------
        dask.base.persist
        """
        (persisted,) = persist(self, traverse=False, **kwargs)
        return persisted

    def compute(self, **kwargs):
        """Compute this dask collection

        Turns a lazy Dask collection into its in-memory equivalent: a Dask
        array becomes a NumPy array, a Dask dataframe becomes a Pandas
        dataframe, and so on.  The entire dataset must fit into memory
        before calling this operation.

        Parameters
        ----------
        scheduler : string, optional
            Which scheduler to use like "threads", "synchronous" or
            "processes".  If not provided, the default is to check the
            global settings first, and then fall back to the collection
            defaults.
        optimize_graph : bool, optional
            If True [default], the graph is optimized before computation.
            Otherwise the graph is run as is. This can be useful for
            debugging.
        kwargs
            Extra keywords to forward to the scheduler function.

        See Also
        --------
        dask.base.compute
        """
        (computed,) = compute(self, traverse=False, **kwargs)
        return computed

    def __await__(self):
        # ``await collection`` only makes sense with the distributed
        # scheduler, which provides futures to wait on.
        try:
            from distributed import futures_of, wait
        except ImportError as e:
            raise ImportError(
                "Using async/await with dask requires the `distributed` package"
            ) from e
        from tornado import gen

        @gen.coroutine
        def _wait_then_return():
            # Wait for any embedded futures to finish, then hand back self.
            if futures_of(self):
                yield wait(self)
            raise gen.Return(self)

        return _wait_then_return().__await__()
def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs):
    """Compute a graph as if it were of type cls.

    Allows for applying the same optimizations and default scheduler.
    """
    schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
    optimized_dsk = optimization_function(cls)(dsk, keys, **kwargs)
    return schedule(optimized_dsk, keys, **kwargs)
def dont_optimize(dsk, keys, **kwargs):
    """No-op optimization pass: return the graph unchanged."""
    return dsk


def optimization_function(x):
    """Return ``x``'s optimization pass (its ``__dask_optimize__``), or
    :func:`dont_optimize` when it defines none."""
    return getattr(x, "__dask_optimize__", dont_optimize)
def collections_to_dsk(collections, optimize_graph=True, optimizations=(), **kwargs):
    """
    Convert many collections into a single dask graph, after optimization
    """
    from .highlevelgraph import HighLevelGraph

    optimizations = tuple(optimizations) + tuple(config.get("optimizations", ()))

    if not optimize_graph:
        dsk, _ = _extract_graph_and_keys(collections)
        return dsk

    # Group collections by their optimization pass so each pass runs once
    # over the merged graph of the collections that share it.
    graphs = []
    for opt, group in groupby(optimization_function, collections).items():
        dsk, keys = _extract_graph_and_keys(group)
        # Apply the collection's own pass first, then any extra passes.
        for optimization in (opt,) + optimizations:
            dsk = optimization(dsk, keys, **kwargs)
        graphs.append(dsk)

    # Merge all graphs
    if any(isinstance(graph, HighLevelGraph) for graph in graphs):
        return HighLevelGraph.merge(*graphs)
    return merge(*map(ensure_dict, graphs))
def _extract_graph_and_keys(vals):
    """Given a list of dask vals, return a single graph and a list of keys such
    that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
    from .highlevelgraph import HighLevelGraph

    graphs = [v.__dask_graph__() for v in vals]
    keys = [v.__dask_keys__() for v in vals]

    # HighLevelGraphs merge layer-wise; plain dicts fall back to a flat merge.
    if any(isinstance(graph, HighLevelGraph) for graph in graphs):
        merged = HighLevelGraph.merge(*graphs)
    else:
        merged = merge(*map(ensure_dict, graphs))
    return merged, keys
def unpack_collections(*args, traverse=True):
    """Extract collections in preparation for compute/persist/etc...

    Intended use is to find all collections in a set of (possibly nested)
    python objects, do something to them (compute, etc...), then repackage them
    in equivalent python objects.

    Parameters
    ----------
    *args
        Any number of objects. If it is a dask collection, it's extracted and
        added to the list of collections returned. By default, python builtin
        collections are also traversed to look for dask collections (for more
        information see the ``traverse`` keyword).
    traverse : bool, optional
        If True (default), builtin python collections are traversed looking for
        any dask collections they might contain.

    Returns
    -------
    collections : list
        A list of all dask collections contained in ``args``
    repack : callable
        A function to call on the transformed collections to repackage them as
        they were in the original ``args``.
    """
    collections = []
    # Mini task graph that, when executed by ``repack``, rebuilds the
    # original argument structure around the transformed collections.
    repack_dsk = {}

    # Placeholder key that ``repack`` later substitutes with the results.
    collections_token = uuid.uuid4().hex

    def _unpack(expr):
        if is_dask_collection(expr):
            # Collections are deduplicated by token and replaced by a
            # ``getitem`` task indexing into the (future) results tuple.
            tok = tokenize(expr)
            if tok not in repack_dsk:
                repack_dsk[tok] = (getitem, collections_token, len(collections))
                collections.append(expr)
            return tok

        tok = uuid.uuid4().hex
        if not traverse:
            tsk = quote(expr)
        else:
            # Treat iterators like lists
            typ = list if isinstance(expr, Iterator) else type(expr)
            if typ in (list, tuple, set):
                tsk = (typ, [_unpack(i) for i in expr])
            elif typ in (dict, OrderedDict):
                tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
            elif dataclasses.is_dataclass(expr) and not isinstance(expr, type):
                # Rebuild the dataclass from its (recursively unpacked) fields.
                tsk = (
                    apply,
                    typ,
                    (),
                    (
                        dict,
                        [
                            [f.name, _unpack(getattr(expr, f.name))]
                            for f in dataclasses.fields(expr)
                        ],
                    ),
                )
            else:
                # Not a collection and not traversable: pass through as-is.
                return expr

        repack_dsk[tok] = tsk
        return tok

    out = uuid.uuid4().hex
    repack_dsk[out] = (tuple, [_unpack(i) for i in args])

    def repack(results):
        # Splice ``results`` in for the placeholder and run the mini graph,
        # recreating the original (possibly nested) argument structure.
        dsk = repack_dsk.copy()
        dsk[collections_token] = quote(results)
        return simple_get(dsk, out)

    return collections, repack
def optimize(*args, traverse=True, **kwargs):
    """Optimize several dask collections at once.

    Returns equivalent dask collections that all share the same merged and
    optimized underlying graph. This can be useful if converting multiple
    collections to delayed objects, or to manually apply the optimizations at
    strategic points.

    Note that in most cases you shouldn't need to call this method directly.

    Parameters
    ----------
    *args : objects
        Any number of objects. If a dask object, its graph is optimized and
        merged with all those of all other dask objects before returning an
        equivalent dask collection. Non-dask arguments are passed through
        unchanged.
    traverse : bool, optional
        By default dask traverses builtin python collections looking for dask
        objects passed to ``optimize``. For large collections this can be
        expensive. If none of the arguments contain any dask objects, set
        ``traverse=False`` to avoid doing this traversal.
    optimizations : list of callables, optional
        Additional optimization passes to perform.
    **kwargs
        Extra keyword arguments to forward to the optimization passes.

    Examples
    --------
    >>> import dask as d
    >>> import dask.array as da
    >>> a = da.arange(10, chunks=2).sum()
    >>> b = da.arange(10, chunks=2).mean()
    >>> a2, b2 = d.optimize(a, b)

    >>> a2.compute() == a.compute()
    True
    >>> b2.compute() == b.compute()
    True
    """
    collections, repack = unpack_collections(*args, traverse=traverse)
    if not collections:
        return args

    dsk = collections_to_dsk(collections, **kwargs)

    # Rebuild every collection on top of the single shared, optimized graph.
    rebuilt = []
    for collection in collections:
        rebuild, state = collection.__dask_postpersist__()
        rebuilt.append(rebuild(dsk, *state))

    return repack(rebuilt)
def compute(
    *args, traverse=True, optimize_graph=True, scheduler=None, get=None, **kwargs
):
    """Compute several dask collections at once.

    Parameters
    ----------
    args : object
        Any number of objects. If it is a dask object, it's computed and the
        result is returned. By default, python builtin collections are also
        traversed to look for dask objects (for more information see the
        ``traverse`` keyword). Non-dask arguments are passed through unchanged.
    traverse : bool, optional
        By default dask traverses builtin python collections looking for dask
        objects passed to ``compute``. For large collections this can be
        expensive. If none of the arguments contain any dask objects, set
        ``traverse=False`` to avoid doing this traversal.
    scheduler : string, optional
        Which scheduler to use like "threads", "synchronous" or "processes".
        If not provided, the default is to check the global settings first,
        and then fall back to the collection defaults.
    optimize_graph : bool, optional
        If True [default], the optimizations for each collection are applied
        before computation. Otherwise the graph is run as is. This can be
        useful for debugging.
    get : ``None``
        Should be left to ``None`` The get= keyword has been removed.
    kwargs
        Extra keywords to forward to the scheduler function.

    Examples
    --------
    >>> import dask as d
    >>> import dask.array as da
    >>> a = da.arange(10, chunks=2).sum()
    >>> b = da.arange(10, chunks=2).mean()
    >>> d.compute(a, b)
    (45, 4.5)

    By default, dask objects inside python collections will also be computed:

    >>> d.compute({'a': a, 'b': b, 'c': 1})
    ({'a': 45, 'b': 4.5, 'c': 1},)
    """
    collections, repack = unpack_collections(*args, traverse=traverse)
    if not collections:
        return args

    schedule = get_scheduler(scheduler=scheduler, collections=collections, get=get)
    dsk = collections_to_dsk(collections, optimize_graph, **kwargs)

    keys = [collection.__dask_keys__() for collection in collections]
    postcomputes = [collection.__dask_postcompute__() for collection in collections]

    results = schedule(dsk, keys, **kwargs)
    # Finalize each collection's raw results, then restore the original
    # argument structure.
    finalized = [
        finalize(result, *extra)
        for result, (finalize, extra) in zip(results, postcomputes)
    ]
    return repack(finalized)
def visualize(
    *args, filename="mydask", traverse=True, optimize_graph=False, maxval=None, **kwargs
):
    """
    Visualize several dask graphs simultaneously.

    Requires ``graphviz`` to be installed. All options that are not the dask
    graph(s) should be passed as keyword arguments.

    Parameters
    ----------
    args : object
        Any number of objects. If it is a dask collection (for example, a
        dask DataFrame, Array, Bag, or Delayed), its associated graph
        will be included in the output of visualize. By default, python builtin
        collections are also traversed to look for dask objects (for more
        information see the ``traverse`` keyword). Arguments lacking an
        associated graph will be ignored.
    filename : str or None, optional
        The name of the file to write to disk. If the provided `filename`
        doesn't include an extension, '.png' will be used by default.
        If `filename` is None, no file will be written, and we communicate
        with dot using only pipes.
    format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
        Format in which to write output file. Default is 'png'.
    traverse : bool, optional
        By default, dask traverses builtin python collections looking for dask
        objects passed to ``visualize``. For large collections this can be
        expensive. If none of the arguments contain any dask objects, set
        ``traverse=False`` to avoid doing this traversal.
    optimize_graph : bool, optional
        If True, the graph is optimized before rendering. Otherwise,
        the graph is displayed as is. Default is False.
    color : {None, 'order', 'ages', 'freed', 'memoryincreases', 'memorydecreases', 'memorypressure'}, optional
        Options to color nodes. colormap:

        - None, the default, no colors.
        - 'order', colors the nodes' border based on the order they appear in the graph.
        - 'ages', how long the data of a node is held.
        - 'freed', the number of dependencies released after running a node.
        - 'memoryincreases', how many more outputs are held after the lifetime of a node.
          Large values may indicate nodes that should have run later.
        - 'memorydecreases', how many fewer outputs are held after the lifetime of a node.
          Large values may indicate nodes that should have run sooner.
        - 'memorypressure', the number of data held when the node is run (circle), or
          the data is released (rectangle).
    maxval : {int, float}, optional
        Maximum value for colormap to normalize form 0 to 1.0. Default is ``None``
        will make it the max number of values
    collapse_outputs : bool, optional
        Whether to collapse output boxes, which often have empty labels.
        Default is False.
    verbose : bool, optional
        Whether to label output and input boxes even if the data aren't chunked.
        Beware: these labels can get very long. Default is False.
    **kwargs
        Additional keyword arguments to forward to ``to_graphviz``.

    Examples
    --------
    >>> x.visualize(filename='dask.pdf')  # doctest: +SKIP
    >>> x.visualize(filename='dask.pdf', color='order')  # doctest: +SKIP

    Returns
    -------
    result : IPython.diplay.Image, IPython.display.SVG, or None
        See dask.dot.dot_graph for more information.

    See Also
    --------
    dask.dot.dot_graph

    Notes
    -----
    For more information on optimization see here:
    https://docs.dask.org/en/latest/optimize.html
    """
    from dask.dot import dot_graph

    args, _ = unpack_collections(*args, traverse=traverse)

    dsk = dict(collections_to_dsk(args, optimize_graph=optimize_graph))

    color = kwargs.get("color")

    if color in {
        "order",
        "order-age",
        "order-freed",
        "order-memoryincreases",
        "order-memorydecreases",
        "order-memorypressure",
        "age",
        "freed",
        "memoryincreases",
        "memorydecreases",
        "memorypressure",
    }:
        import matplotlib.pyplot as plt

        from .order import diagnostics, order

        o = order(dsk)

        # Default to the RdBu colormap; a string names a matplotlib colormap.
        # (Previously this re-imported matplotlib.pyplot inside the str
        # branch even though it is already imported above.)
        cmap = kwargs.pop("cmap", plt.cm.RdBu)
        if isinstance(cmap, str):
            cmap = getattr(plt.cm, cmap)

        def label(x):
            return str(values[x])

        data_values = None
        if color != "order":
            info = diagnostics(dsk, o)[0]
            if color.endswith("age"):
                values = {key: val.age for key, val in info.items()}
            elif color.endswith("freed"):
                values = {key: val.num_dependencies_freed for key, val in info.items()}
            elif color.endswith("memorypressure"):
                values = {key: val.num_data_when_run for key, val in info.items()}
                data_values = {
                    key: val.num_data_when_released for key, val in info.items()
                }
            elif color.endswith("memoryincreases"):
                values = {
                    key: max(0, val.num_data_when_released - val.num_data_when_run)
                    for key, val in info.items()
                }
            else:  # memorydecreases
                values = {
                    key: max(0, val.num_data_when_run - val.num_data_when_released)
                    for key, val in info.items()
                }

            if color.startswith("order-"):
                # Prefix each label with the node's execution order.
                def label(x):
                    return str(o[x]) + "-" + str(values[x])

        else:
            values = o

        if maxval is None:
            maxval = max(1, max(values.values()))

        colors = {k: _colorize(cmap(v / maxval, bytes=True)) for k, v in values.items()}
        if data_values is None:
            # Only 'memorypressure' distinguishes data nodes from task nodes.
            data_values = values
            data_colors = colors
        else:
            data_colors = {
                k: _colorize(cmap(v / maxval, bytes=True))
                for k, v in data_values.items()
            }

        kwargs["function_attributes"] = {
            k: {"color": v, "label": label(k)} for k, v in colors.items()
        }
        kwargs["data_attributes"] = {k: {"color": v} for k, v in data_colors.items()}
    elif color:
        raise NotImplementedError("Unknown value color=%s" % color)

    return dot_graph(dsk, filename=filename, **kwargs)
def persist(*args, traverse=True, optimize_graph=True, scheduler=None, **kwargs):
    """Persist multiple Dask collections into memory

    This turns lazy Dask collections into Dask collections with the same
    metadata, but now with their results fully computed or actively computing
    in the background.  A lazy dask.array built from many lazy calls becomes a
    dask.array of the same shape, dtype and chunks, backed either by many
    small in-memory :class:`numpy.array` objects (single-machine case) or by
    computations running asynchronously on a cluster (distributed case).

    When a ``dask.distributed.Client`` is connected to a distributed
    scheduler this function returns as soon as the task graph has been
    submitted, before computations complete; with the single machine
    scheduler it blocks until the computations have finished.  On a single
    machine make sure the dataset fits entirely within memory.

    Examples
    --------
    >>> df = dd.read_csv('/path/to/*.csv')  # doctest: +SKIP
    >>> df = df[df.name == 'Alice']  # doctest: +SKIP
    >>> df['in-debt'] = df.balance < 0  # doctest: +SKIP
    >>> df = df.persist()  # triggers computation  # doctest: +SKIP

    >>> df.value().min()  # future computations are now fast  # doctest: +SKIP
    -10
    >>> df.value().max()  # doctest: +SKIP
    100

    >>> from dask import persist  # use persist function on multiple collections
    >>> a, b = persist(a, b)  # doctest: +SKIP

    Parameters
    ----------
    *args: Dask collections
    scheduler : string, optional
        Which scheduler to use like "threads", "synchronous" or "processes".
        If not provided, the default is to check the global settings first,
        and then fall back to the collection defaults.
    traverse : bool, optional
        By default dask traverses builtin python collections looking for dask
        objects passed to ``persist``. For large collections this can be
        expensive. If none of the arguments contain any dask objects, set
        ``traverse=False`` to avoid doing this traversal.
    optimize_graph : bool, optional
        If True [default], the graph is optimized before computation.
        Otherwise the graph is run as is. This can be useful for debugging.
    **kwargs
        Extra keywords to forward to the scheduler function.

    Returns
    -------
    New dask collections backed by in-memory data
    """
    collections, repack = unpack_collections(*args, traverse=traverse)
    if not collections:
        return args

    schedule = get_scheduler(scheduler=scheduler, collections=collections)

    # Fast path: when the active scheduler is a distributed Client's bound
    # ``get`` method, delegate to ``Client.persist`` so the call returns
    # immediately with future-backed collections.
    if inspect.ismethod(schedule):
        try:
            from distributed.client import default_client
        except ImportError:
            pass
        else:
            try:
                client = default_client()
            except ValueError:
                pass
            else:
                if client.get == schedule:
                    results = client.persist(
                        collections, optimize_graph=optimize_graph, **kwargs
                    )
                    return repack(results)

    dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
    keys, postpersists = [], []
    for collection in collections:
        flat_keys = list(flatten(collection.__dask_keys__()))
        rebuild, state = collection.__dask_postpersist__()
        keys.extend(flat_keys)
        postpersists.append((rebuild, flat_keys, state))

    results = schedule(dsk, keys, **kwargs)
    key_to_result = dict(zip(keys, results))
    # Rebuild each collection from just its own computed keys.
    rebuilt = [
        rebuild({key: key_to_result[key] for key in flat_keys}, *state)
        for rebuild, flat_keys, state in postpersists
    ]
    return repack(rebuilt)
############
# Tokenize #
############

# FIPS builds of Python reject MD5 for security purposes; from Python 3.9 we
# can declare the non-security intent via ``usedforsecurity=False``.
if _PY_VERSION >= parse_version("3.9"):
    _md5 = partial(hashlib.md5, usedforsecurity=False)
else:
    _md5 = hashlib.md5
def tokenize(*args, **kwargs):
    """Deterministic token

    >>> tokenize([1, 2, '3'])
    '7d6a880cd9ec03506eee6973ff551339'

    >>> tokenize('Hello') == tokenize('Hello')
    True
    """
    # Normalize positional arguments first, then fold keyword arguments
    # (when present) into the same hash.
    normalized = tuple(map(normalize_token, args))
    hasher = _md5(str(normalized).encode())
    if kwargs:
        hasher.update(str(normalize_token(kwargs)).encode())
    return hasher.hexdigest()
# Single-dispatch registry mapping a value's type to its tokenization function.
normalize_token = Dispatch()

# Primitive immutable types are their own deterministic token.
normalize_token.register(
    (
        int,
        float,
        str,
        bytes,
        type(None),
        type,
        slice,
        complex,
        type(Ellipsis),
        datetime.date,
    ),
    identity,
)
@normalize_token.register(dict)
def normalize_dict(d):
    """Tokenize a dict by its items, ordered by each item's string form."""
    ordered_items = sorted(d.items(), key=str)
    return normalize_token(ordered_items)
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(od):
    """Tokenize an OrderedDict by class name plus items in insertion order."""
    pairs = list(od.items())
    return type(od).__name__, normalize_token(pairs)
@normalize_token.register(set)
def normalize_set(s):
    """Tokenize a set by its elements, ordered by their string form."""
    ordered = sorted(s, key=str)
    return normalize_token(ordered)
def _normalize_seq_func(seq):
    # Defined outside normalize_seq to avoid unneccessary redefinitions and
    # therefore improving computation times.
    try:
        return list(map(normalize_token, seq))
    except RecursionError:
        # A self-referential or extremely deep sequence cannot be walked.
        # Fall back to a random token unless determinism is required.
        if not config.get("tokenize.ensure-deterministic"):
            return uuid.uuid4().hex

        raise RuntimeError(
            f"Sequence {str(seq)} cannot be deterministically hashed. Please, see "
            "https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing "
            "for more information"
        )
@normalize_token.register((tuple, list))
def normalize_seq(seq):
    """Tokenize a tuple or list by its class name plus element tokens."""
    type_name = type(seq).__name__
    return type_name, _normalize_seq_func(seq)
@normalize_token.register(literal)
def normalize_literal(lit):
    """Tokenize a dask ``literal`` wrapper by the value it wraps."""
    wrapped = lit()
    return "literal", normalize_token(wrapped)
@normalize_token.register(range)
def normalize_range(r):
    """Tokenize a range by its (start, stop, step) triple."""
    return [normalize_token(part) for part in (r.start, r.stop, r.step)]
@normalize_token.register(object)
def normalize_object(o):
    """Fallback tokenizer for arbitrary objects.

    Tries, in order: the object's own ``__dask_tokenize__`` hook, callable
    normalization, dataclass normalization, then a random (non-deterministic)
    token unless determinism is explicitly required by config.
    """
    method = getattr(o, "__dask_tokenize__", None)
    if method is not None:
        return method()

    if callable(o):
        return normalize_function(o)

    if dataclasses.is_dataclass(o):
        return normalize_dataclass(o)

    if not config.get("tokenize.ensure-deterministic"):
        return uuid.uuid4().hex

    raise RuntimeError(
        f"Object {str(o)} cannot be deterministically hashed. Please, see "
        "https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing "
        "for more information"
    )
# Cache of normalized function tokens, keyed by the callable itself.
function_cache: dict[Callable, Callable] = {}
# Guards only the cache-clearing path inside ``normalize_function``.
function_cache_lock = threading.Lock()
def normalize_function(func: Callable) -> Callable:
    """Memoizing wrapper around ``_normalize_function``.

    Tokens are cached by the function object; unhashable callables fall back
    to recomputing the token on every call.
    """
    try:
        return function_cache[func]
    except KeyError:
        result = _normalize_function(func)
        if len(function_cache) >= 500:  # clear half of cache if full
            with function_cache_lock:
                # Re-check under the lock: another thread may already have
                # trimmed the cache.
                if len(function_cache) >= 500:
                    for k in list(function_cache)[::2]:
                        del function_cache[k]
        function_cache[func] = result
        return result
    except TypeError:  # not hashable
        return _normalize_function(func)
def _normalize_function(func: Callable) -> Callable:
    """Compute (uncached) the deterministic token for a callable."""
    if isinstance(func, Compose):
        # A toolz Compose tokenizes as the tuple of its component functions.
        first = getattr(func, "first", None)
        funcs = reversed((first,) + func.funcs) if first else func.funcs
        return tuple(normalize_function(f) for f in funcs)
    elif isinstance(func, (partial, curry)):
        # Partially applied callables: (inner function, args, sorted kwargs).
        args = tuple(normalize_token(i) for i in func.args)
        if func.keywords:
            kws = tuple(
                (k, normalize_token(v)) for k, v in sorted(func.keywords.items())
            )
        else:
            kws = None
        return (normalize_function(func.func), args, kws)
    else:
        try:
            result = pickle.dumps(func, protocol=4)
            if b"__main__" not in result:  # abort on dynamic functions
                return result
        except Exception:
            pass
        try:
            # cloudpickle handles lambdas and interactively defined functions.
            import cloudpickle

            return cloudpickle.dumps(func, protocol=4)
        except Exception:
            return str(func)
def normalize_dataclass(obj):
    """Tokenize a dataclass instance by its type plus (name, value) fields."""
    pairs = [(f.name, getattr(obj, f.name)) for f in dataclasses.fields(obj)]
    return normalize_function(type(obj)), _normalize_seq_func(pairs)
@normalize_token.register_lazy("pandas")
def register_pandas():
    """Register deterministic tokenizers for pandas objects.

    Runs lazily the first time a pandas object is tokenized, so importing
    dask does not import pandas.
    """
    import pandas as pd

    PANDAS_GT_130 = parse_version(pd.__version__) >= parse_version("1.3.0")

    @normalize_token.register(pd.Index)
    def normalize_index(ind):
        values = ind.array
        return [ind.name, normalize_token(values)]

    @normalize_token.register(pd.MultiIndex)
    def normalize_multiindex(ind):
        # Renamed from ``normalize_index``: the old name shadowed the
        # pd.Index handler above.  Both registrations still happened (the
        # decorator ran before the rebinding), but the duplicate name was
        # confusing and error-prone.
        codes = ind.codes
        return (
            [ind.name]
            + [normalize_token(x) for x in ind.levels]
            + [normalize_token(x) for x in codes]
        )

    @normalize_token.register(pd.Categorical)
    def normalize_categorical(cat):
        return [normalize_token(cat.codes), normalize_token(cat.dtype)]

    @normalize_token.register(pd.arrays.PeriodArray)
    @normalize_token.register(pd.arrays.DatetimeArray)
    @normalize_token.register(pd.arrays.TimedeltaArray)
    def normalize_period_array(arr):
        # asi8 exposes the underlying int64 representation for all three.
        return [normalize_token(arr.asi8), normalize_token(arr.dtype)]

    @normalize_token.register(pd.arrays.IntervalArray)
    def normalize_interval_array(arr):
        return [
            normalize_token(arr.left),
            normalize_token(arr.right),
            normalize_token(arr.closed),
        ]

    @normalize_token.register(pd.Series)
    def normalize_series(s):
        return [
            s.name,
            s.dtype,
            normalize_token(s._values),
            normalize_token(s.index),
        ]

    @normalize_token.register(pd.DataFrame)
    def normalize_dataframe(df):
        # NOTE(review): ``_data`` is a deprecated alias of ``_mgr`` in newer
        # pandas -- confirm the minimum supported version before switching.
        mgr = df._data
        if PANDAS_GT_130:
            # for compat with ArrayManager, pandas 1.3.0 introduced a `.arrays`
            # attribute that returns the column arrays/block arrays for both
            # BlockManager and ArrayManager
            data = list(mgr.arrays)
        else:
            data = [block.values for block in mgr.blocks]
        data.extend([df.columns, df.index])
        return list(map(normalize_token, data))

    @normalize_token.register(pd.api.extensions.ExtensionArray)
    def normalize_extension_array(arr):
        import numpy as np

        return normalize_token(np.asarray(arr))

    # Dtypes
    @normalize_token.register(pd.api.types.CategoricalDtype)
    def normalize_categorical_dtype(dtype):
        return [normalize_token(dtype.categories), normalize_token(dtype.ordered)]

    @normalize_token.register(pd.api.extensions.ExtensionDtype)
    def normalize_period_dtype(dtype):
        return normalize_token(dtype.name)
@normalize_token.register_lazy("numpy")
def register_numpy():
    """Register deterministic tokenizers for numpy objects.

    Runs lazily the first time a numpy object is tokenized, so importing
    dask does not import numpy.
    """
    import numpy as np

    @normalize_token.register(np.ndarray)
    def normalize_array(x):
        if not x.shape:
            # 0-d array: the scalar value plus dtype identifies it.
            return (x.item(), x.dtype)

        if hasattr(x, "mode") and getattr(x, "filename", None):
            # Memory-mapped array: token from file identity, not the data.
            if hasattr(x.base, "ctypes"):
                offset = (
                    x.ctypes._as_parameter_.value - x.base.ctypes._as_parameter_.value
                )
            else:
                offset = 0  # root memmap's have mmap object as base
            if hasattr(
                x, "offset"
            ):  # offset numpy used while opening, and not the offset to the beginning of the file
                offset += getattr(x, "offset")
            return (
                x.filename,
                os.path.getmtime(x.filename),
                x.dtype,
                x.shape,
                x.strides,
                offset,
            )

        if x.dtype.hasobject:
            try:
                try:
                    # string fast-path
                    data = hash_buffer_hex(
                        "-".join(x.flat).encode(
                            encoding="utf-8", errors="surrogatepass"
                        )
                    )
                except UnicodeDecodeError:
                    # bytes fast-path
                    data = hash_buffer_hex(b"-".join(x.flat))
            except (TypeError, UnicodeDecodeError):
                try:
                    data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
                except Exception:
                    # pickling not supported, use UUID4-based fallback
                    if not config.get("tokenize.ensure-deterministic"):
                        data = uuid.uuid4().hex
                    else:
                        raise RuntimeError(
                            f"``np.ndarray`` with object ``dtype`` {str(x)} cannot "
                            "be deterministically hashed. Please, see "
                            "https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing "  # noqa: E501
                            "for more information"
                        )
        else:
            try:
                # Hash the raw bytes; order='K' avoids a copy for any layout.
                data = hash_buffer_hex(x.ravel(order="K").view("i1"))
            except (BufferError, AttributeError, ValueError):
                data = hash_buffer_hex(x.copy().ravel(order="K").view("i1"))
        return (data, x.dtype, x.shape, x.strides)

    @normalize_token.register(np.matrix)
    def normalize_matrix(x):
        return type(x).__name__, normalize_array(x.view(type=np.ndarray))

    normalize_token.register(np.dtype, repr)
    normalize_token.register(np.generic, repr)

    @normalize_token.register(np.ufunc)
    def normalize_ufunc(x):
        try:
            name = x.__name__
            if getattr(np, name) is x:
                return "np." + name
        except AttributeError:
            pass
        # Bug fix: the original implicitly returned None for ufuncs that are
        # not top-level numpy attributes, giving them all the same token.
        # Tokenize them as ordinary callables instead.
        return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
    """Register deterministic tokenizers for scipy sparse matrices (lazy)."""
    import scipy.sparse as sp

    def normalize_sparse_matrix(x, attrs):
        # Token is the class name plus the tokens of the defining attributes.
        attr_tokens = normalize_seq(normalize_token(getattr(x, a)) for a in attrs)
        return type(x).__name__, attr_tokens

    sparse_attrs = {
        sp.dia_matrix: ("data", "offsets", "shape"),
        sp.bsr_matrix: ("data", "indices", "indptr", "blocksize", "shape"),
        sp.coo_matrix: ("data", "row", "col", "shape"),
        sp.csr_matrix: ("data", "indices", "indptr", "shape"),
        sp.csc_matrix: ("data", "indices", "indptr", "shape"),
        sp.lil_matrix: ("data", "rows", "shape"),
    }
    for cls, attrs in sparse_attrs.items():
        normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs))

    @normalize_token.register(sp.dok_matrix)
    def normalize_dok_matrix(x):
        # dok stores a dict of (row, col) -> value; sort for determinism.
        return type(x).__name__, normalize_token(sorted(x.items()))
def _colorize(t):
"""Convert (r, g, b) triple to "#RRGGBB" string
For use with ``visualize(color=...)``
Examples
--------
>>> _colorize((255, 255, 255))
'#FFFFFF'
>>> _colorize((0, 32, 128))
'#002080'
"""
t = t[:3]
i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))
h = hex(int(i))[2:].upper()
h = "0" * (6 - len(h)) + h
return "#" + h
# Mapping of scheduler aliases accepted by ``scheduler=`` to the
# corresponding single-machine get functions.
named_schedulers = {
    "sync": local.get_sync,
    "synchronous": local.get_sync,
    "single-threaded": local.get_sync,
    "threads": threaded.get,
    "threading": threaded.get,
}
# The multiprocessing scheduler is optional; register its aliases only when
# the module imports successfully.
try:
    from dask import multiprocessing as dask_multiprocessing
except ImportError:
    pass
else:
    named_schedulers.update(
        {
            "processes": dask_multiprocessing.get,
            "multiprocessing": dask_multiprocessing.get,
        }
    )
get_err_msg = """
The get= keyword has been removed.
Please use the scheduler= keyword instead with the name of
the desired scheduler like 'threads' or 'processes'
x.compute(scheduler='single-threaded')
x.compute(scheduler='threads')
x.compute(scheduler='processes')
or with a function that takes the graph and keys
x.compute(scheduler=my_scheduler_function)
or with a Dask client
x.compute(scheduler=client)
""".strip()
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
    """Get scheduler function

    There are various ways to specify the scheduler to use:

    1.  Passing in scheduler= parameters
    2.  Passing these into global configuration
    3.  Using defaults of a dask collection

    This function centralizes the logic to determine the right scheduler to use
    from those many options
    """
    if get:
        # The get= keyword was removed; direct users to scheduler=.
        raise TypeError(get_err_msg)

    if scheduler is not None:
        if callable(scheduler):
            return scheduler
        elif "Client" in type(scheduler).__name__ and hasattr(scheduler, "get"):
            # Duck-typed check for a distributed Client without importing it.
            return scheduler.get
        elif isinstance(scheduler, str):
            scheduler = scheduler.lower()

            if scheduler in named_schedulers:
                if config.get("scheduler", None) in ("dask.distributed", "distributed"):
                    warnings.warn(
                        "Running on a single-machine scheduler when a distributed client "
                        "is active might lead to unexpected results."
                    )
                return named_schedulers[scheduler]
            elif scheduler in ("dask.distributed", "distributed"):
                from distributed.worker import get_client

                return get_client().get
            else:
                raise ValueError(
                    "Expected one of [distributed, %s]"
                    % ", ".join(sorted(named_schedulers))
                )
        elif isinstance(scheduler, Executor):
            # Get `num_workers` from `Executor`'s `_max_workers` attribute.
            # If undefined, fallback to `config` or worst case CPU_COUNT.
            num_workers = getattr(scheduler, "_max_workers", None)
            if num_workers is None:
                num_workers = config.get("num_workers", CPU_COUNT)
            assert isinstance(num_workers, Integral) and num_workers > 0
            return partial(local.get_async, scheduler.submit, num_workers)
        else:
            raise ValueError("Unexpected scheduler: %s" % repr(scheduler))
        # else:  # try to connect to remote scheduler with this name
        #     return get_client(scheduler).get

    if config.get("scheduler", None):
        # Fall back to the globally configured scheduler.
        return get_scheduler(scheduler=config.get("scheduler", None))

    if config.get("get", None):
        raise ValueError(get_err_msg)

    if getattr(thread_state, "key", False):
        # We are inside a distributed worker task: reuse the worker's client.
        from distributed.worker import get_worker

        return get_worker().client.get

    if cls is not None:
        return cls.__dask_scheduler__

    if collections:
        collections = [c for c in collections if c is not None]
    if collections:
        # All collections must agree on their default scheduler.
        get = collections[0].__dask_scheduler__
        if not all(c.__dask_scheduler__ == get for c in collections):
            raise ValueError(
                "Compute called on multiple collections with "
                "differing default schedulers. Please specify a "
                "scheduler=` parameter explicitly in compute or "
                "globally with `dask.config.set`."
            )
        return get

    return None
def wait(x, timeout=None, return_when="ALL_COMPLETED"):
    """Wait until computation has finished

    This is a compatibility alias for ``dask.distributed.wait``.
    If it is applied onto Dask collections without Dask Futures or if Dask
    distributed is not installed then it is a no-op
    """
    try:
        from distributed import wait as distributed_wait
    except ImportError:
        # distributed is not installed: nothing to wait on.
        return x
    try:
        return distributed_wait(x, timeout=timeout, return_when=return_when)
    except ValueError:
        # Not something distributed can wait on (e.g. no futures inside).
        return x
def get_collection_names(collection) -> set[str]:
    """Infer the collection names from the dask keys, under the assumption that all keys
    are either tuples with matching first element, and that element is a string, or
    there is exactly one key and it is a string.

    Examples
    --------
    >>> a.__dask_keys__()  # doctest: +SKIP
    ["foo", "bar"]
    >>> get_collection_names(a)  # doctest: +SKIP
    {"foo", "bar"}
    >>> b.__dask_keys__()  # doctest: +SKIP
    [[("foo-123", 0, 0), ("foo-123", 0, 1)], [("foo-123", 1, 0), ("foo-123", 1, 1)]]
    >>> get_collection_names(b)  # doctest: +SKIP
    {"foo-123"}
    """
    if not is_dask_collection(collection):
        raise TypeError(f"Expected Dask collection; got {type(collection)}")
    all_keys = flatten(collection.__dask_keys__())
    return {get_name_from_key(key) for key in all_keys}
def get_name_from_key(key) -> str:
    """Given a dask collection's key, extract the collection name.

    Parameters
    ----------
    key: string or tuple
        Dask collection's key, which must be either a single string or a tuple whose
        first element is a string (commonly referred to as a collection's 'name'),

    Examples
    --------
    >>> get_name_from_key("foo")
    'foo'
    >>> get_name_from_key(("foo-123", 1, 2))
    'foo-123'
    """
    # A bare string is already the name; otherwise take the tuple's head.
    if isinstance(key, str):
        return key
    if isinstance(key, tuple) and key and isinstance(key[0], str):
        return key[0]
    raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
def replace_name_in_key(key, rename: Mapping[str, str]):
    """Given a dask collection's key, replace the collection name with a new one.

    Parameters
    ----------
    key: string or tuple
        Dask collection's key, which must be either a single string or a tuple whose
        first element is a string (commonly referred to as a collection's 'name'),
    rename:
        Mapping of zero or more names from : to. Extraneous names will be ignored.
        Names not found in this mapping won't be replaced.

    Examples
    --------
    >>> replace_name_in_key("foo", {})
    'foo'
    >>> replace_name_in_key("foo", {"foo": "bar"})
    'bar'
    >>> replace_name_in_key(("foo-123", 1, 2), {"foo-123": "bar-456"})
    ('bar-456', 1, 2)
    """
    if isinstance(key, str):
        return rename.get(key, key)
    if isinstance(key, tuple) and key and isinstance(key[0], str):
        new_name = rename.get(key[0], key[0])
        return (new_name, *key[1:])
    raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
def clone_key(key, seed):
    """Clone a key from a Dask collection, producing a new key with the same prefix and
    indices and a token which is a deterministic function of the previous key and seed.

    Examples
    --------
    >>> clone_key("x", 123)
    'x-dc2b8d1c184c72c19faa81c797f8c6b0'
    >>> clone_key("inc-cbb1eca3bafafbb3e8b2419c4eebb387", 123)
    'inc-f81b5a88038a2132882aa29a9fcfec06'
    >>> clone_key(("sum-cbb1eca3bafafbb3e8b2419c4eebb387", 4, 3), 123)
    ('sum-fd6be9e9fe07fc232ad576fa997255e8', 4, 3)
    """
    if isinstance(key, str):
        # Keep the human-readable prefix; re-derive the token from key + seed.
        return key_split(key) + "-" + tokenize(key, seed)
    if isinstance(key, tuple) and key and isinstance(key[0], str):
        # Recurse on the name element, preserving the index tail.
        return (clone_key(key[0], seed),) + key[1:]
    raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
| |
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import traceback
import logging
from time import time
import click
import sqlparse
from prompt_toolkit import CommandLineInterface, Application, AbortAction
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.shortcuts import create_default_layout, create_eventloop
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Always, HasFocus, IsDone
from prompt_toolkit.layout.processors import (ConditionalProcessor,
HighlightMatchingBracketProcessor)
from prompt_toolkit.history import FileHistory
from pygments.lexers.sql import PostgresLexer
from pygments.token import Token
from .packages.tabulate import tabulate
from .packages.expanded import expanded_table
from .packages.pgspecial.main import (PGSpecial, NO_QUERY)
import pgcli.packages.pgspecial as special
from .pgcompleter import PGCompleter
from .pgtoolbar import create_toolbar_tokens_func
from .pgstyle import style_factory
from .pgexecute import PGExecute
from .pgbuffer import PGBuffer
from .config import write_default_config, load_config
from .key_bindings import pgcli_bindings
from .encodingutils import utf8tounicode
from .__init__ import __version__
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from getpass import getuser
from psycopg2 import OperationalError
from collections import namedtuple
# Query tuples are used for maintaining history
# (query text, whether it ran successfully, whether any statement mutated data).
Query = namedtuple('Query', ['query', 'successful', 'mutating'])
class PGCli(object):
    """Interactive Postgres command-line client.

    Wires together the prompt_toolkit UI, the PGCompleter auto-completion
    engine and the PGExecute query runner, configured from a pgclirc file.
    """

    def __init__(self, force_passwd_prompt=False, never_passwd_prompt=False,
                 pgexecute=None, pgclirc_file=None):
        # -W flag: always prompt for a password before connecting.
        self.force_passwd_prompt = force_passwd_prompt
        # -w flag: never prompt, even if the server demands a password.
        self.never_passwd_prompt = never_passwd_prompt
        self.pgexecute = pgexecute

        from pgcli import __file__ as package_root
        package_root = os.path.dirname(package_root)

        # Ensure the user has a config file, seeding it from the packaged one.
        default_config = os.path.join(package_root, 'pgclirc')
        write_default_config(default_config, pgclirc_file)

        self.pgspecial = PGSpecial()

        # Load config.
        c = self.config = load_config(pgclirc_file, default_config)
        self.multi_line = c['main'].as_bool('multi_line')
        self.vi_mode = c['main'].as_bool('vi')
        self.pgspecial.timing_enabled = c['main'].as_bool('timing')
        self.table_format = c['main']['table_format']
        self.syntax_style = c['main']['syntax_style']
        self.cli_style = c['colors']
        self.wider_completion_menu = c['main'].as_bool('wider_completion_menu')

        self.logger = logging.getLogger(__name__)
        self.initialize_logging()

        # List of Query namedtuples for this session.
        self.query_history = []

        # Initialize completer
        smart_completion = c['main'].as_bool('smart_completion')
        completer = PGCompleter(smart_completion, pgspecial=self.pgspecial)
        self.completer = completer

        self.register_special_commands()

    def register_special_commands(self):
        """Register pgcli-specific backslash commands with PGSpecial."""
        self.pgspecial.register(self.change_db, '\\c',
                '\\c[onnect] database_name',
                'Change to a new database.',
                aliases=('use', '\\connect', 'USE'))
        self.pgspecial.register(self.refresh_completions, '\\#', '\\#',
                'Refresh auto-completions.', arg_type=NO_QUERY)
        self.pgspecial.register(self.refresh_completions, '\\refresh', '\\refresh',
                'Refresh auto-completions.', arg_type=NO_QUERY)

    def change_db(self, pattern, **_):
        """Switch the connection to another database (or reconnect).

        Yields one result tuple with a status message, matching the
        (title, cur, headers, status) shape used by special commands.
        """
        if pattern:
            # Strip a surrounding pair of double quotes, if present.
            db = pattern[1:-1] if pattern[0] == pattern[-1] == '"' else pattern
            self.pgexecute.connect(database=db)
        else:
            self.pgexecute.connect()

        yield (None, None, None, 'You are now connected to database "%s" as '
                'user "%s"' % (self.pgexecute.dbname, self.pgexecute.user))

    def initialize_logging(self):
        """Configure the 'pgcli' logger hierarchy from config file settings."""
        log_file = self.config['main']['log_file']
        log_level = self.config['main']['log_level']

        level_map = {'CRITICAL': logging.CRITICAL,
                     'ERROR': logging.ERROR,
                     'WARNING': logging.WARNING,
                     'INFO': logging.INFO,
                     'DEBUG': logging.DEBUG
                     }

        handler = logging.FileHandler(os.path.expanduser(log_file))

        formatter = logging.Formatter(
            '%(asctime)s (%(process)d/%(threadName)s) '
            '%(name)s %(levelname)s - %(message)s')

        handler.setFormatter(formatter)

        root_logger = logging.getLogger('pgcli')
        root_logger.addHandler(handler)
        root_logger.setLevel(level_map[log_level.upper()])

        root_logger.debug('Initializing pgcli logging.')
        root_logger.debug('Log file %r.', log_file)

    def connect_uri(self, uri):
        """Connect using a URI of the form postgres://user:pass@host/db."""
        uri = urlparse(uri)
        database = uri.path[1:]  # ignore the leading fwd slash
        self.connect(database, uri.hostname, uri.username,
                uri.port, uri.password)

    def connect(self, database='', host='', user='', port='', passwd=''):
        """Establish the database connection, prompting for a password when
        allowed and required; exits the process on connection failure."""
        # Connect to the database.

        if not user:
            user = getuser()

        if not database:
            database = user

        # If password prompt is not forced but no password is provided, try
        # getting it from environment variable.
        if not self.force_passwd_prompt and not passwd:
            passwd = os.environ.get('PGPASSWORD', '')

        # Prompt for a password immediately if requested via the -W flag. This
        # avoids wasting time trying to connect to the database and catching a
        # no-password exception.
        # If we successfully parsed a password from a URI, there's no need to
        # prompt for it, even with the -W flag
        if self.force_passwd_prompt and not passwd:
            passwd = click.prompt('Password', hide_input=True,
                                  show_default=False, type=str)

        # Prompt for a password after 1st attempt to connect without a password
        # fails. Don't prompt if the -w flag is supplied
        auto_passwd_prompt = not passwd and not self.never_passwd_prompt

        # Attempt to connect to the database.
        # Note that passwd may be empty on the first attempt. If connection
        # fails because of a missing password, but we're allowed to prompt for
        # a password (no -w flag), prompt for a passwd and try again.
        try:
            try:
                pgexecute = PGExecute(database, user, passwd, host, port)
            except OperationalError as e:
                if ('no password supplied' in utf8tounicode(e.args[0]) and
                        auto_passwd_prompt):
                    passwd = click.prompt('Password', hide_input=True,
                                          show_default=False, type=str)
                    pgexecute = PGExecute(database, user, passwd, host, port)
                else:
                    raise e
        except Exception as e:  # Connecting to a database could fail.
            self.logger.debug('Database connection failed: %r.', e)
            self.logger.error("traceback: %r", traceback.format_exc())
            click.secho(str(e), err=True, fg='red')
            exit(1)

        self.pgexecute = pgexecute

    def handle_editor_command(self, cli, document):
        """
        Editor command is any query that is prefixed or suffixed
        by a '\e'. The reason for a while loop is because a user
        might edit a query multiple times.
        For eg:
        "select * from \e"<enter> to edit it in vim, then come
        back to the prompt with the edited query "select * from
        blah where q = 'abc'\e" to edit it again.
        :param cli: CommandLineInterface
        :param document: Document
        :return: Document
        """
        while special.editor_command(document.text):
            filename = special.get_filename(document.text)
            sql, message = special.open_external_editor(filename,
                                                        sql=document.text)
            if message:
                # Something went wrong. Raise an exception and bail.
                raise RuntimeError(message)
            # Put the edited text back into the prompt buffer and re-run it.
            cli.current_buffer.document = Document(sql, cursor_position=len(sql))
            document = cli.run(False)
            continue
        return document

    def run_cli(self):
        """Main REPL loop: read input, execute, format and page results."""
        pgexecute = self.pgexecute
        logger = self.logger
        original_less_opts = self.adjust_less_opts()

        completer = self.completer
        self.refresh_completions()

        def set_vi_mode(value):
            self.vi_mode = value

        key_binding_manager = pgcli_bindings(
            get_vi_mode_enabled=lambda: self.vi_mode,
            set_vi_mode_enabled=set_vi_mode)

        print('Version:', __version__)
        print('Chat: https://gitter.im/dbcli/pgcli')
        print('Mail: https://groups.google.com/forum/#!forum/pgcli')
        print('Home: http://pgcli.com')

        def prompt_tokens(cli):
            # Prompt shows the current database name.
            return [(Token.Prompt, '%s> ' % pgexecute.dbname)]

        get_toolbar_tokens = create_toolbar_tokens_func(lambda: self.vi_mode)
        layout = create_default_layout(lexer=PostgresLexer,
                                       reserve_space_for_menu=True,
                                       get_prompt_tokens=prompt_tokens,
                                       get_bottom_toolbar_tokens=get_toolbar_tokens,
                                       display_completions_in_columns=self.wider_completion_menu,
                                       extra_input_processors=[
                                           # Highlight matching brackets while editing.
                                           ConditionalProcessor(
                                               processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                                               filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()),
                                       ])
        history_file = self.config['main']['history_file']
        buf = PGBuffer(always_multiline=self.multi_line, completer=completer,
                       history=FileHistory(os.path.expanduser(history_file)),
                       complete_while_typing=Always())
        application = Application(style=style_factory(self.syntax_style, self.cli_style),
                                  layout=layout, buffer=buf,
                                  key_bindings_registry=key_binding_manager.registry,
                                  on_exit=AbortAction.RAISE_EXCEPTION)
        cli = CommandLineInterface(application=application,
                                   eventloop=create_eventloop())

        try:
            while True:
                document = cli.run()

                # The reason we check here instead of inside the pgexecute is
                # because we want to raise the Exit exception which will be
                # caught by the try/except block that wraps the pgexecute.run()
                # statement.
                if quit_command(document.text):
                    raise EOFError

                try:
                    document = self.handle_editor_command(cli, document)
                except RuntimeError as e:
                    logger.error("sql: %r, error: %r", document.text, e)
                    logger.error("traceback: %r", traceback.format_exc())
                    click.secho(str(e), err=True, fg='red')
                    continue

                # Keep track of whether or not the query is mutating. In case
                # of a multi-statement query, the overall query is considered
                # mutating if any one of the component statements is mutating
                mutating = False

                try:
                    logger.debug('sql: %r', document.text)
                    successful = False
                    # Initialized to [] because res might never get initialized
                    # if an exception occurs in pgexecute.run(). Which causes
                    # finally clause to fail.
                    res = []
                    start = time()
                    # Run the query.
                    res = pgexecute.run(document.text, self.pgspecial)
                    duration = time() - start
                    successful = True
                    output = []
                    total = 0
                    for title, cur, headers, status in res:
                        logger.debug("headers: %r", headers)
                        logger.debug("rows: %r", cur)
                        logger.debug("status: %r", status)
                        start = time()
                        threshold = 1000
                        # Guard against accidentally paging huge SELECTs.
                        if (is_select(status) and
                                cur and cur.rowcount > threshold):
                            click.secho('The result set has more than %s rows.'
                                    % threshold, fg='red')
                            if not click.confirm('Do you want to continue?'):
                                click.secho("Aborted!", err=True, fg='red')
                                break
                        formatted = format_output(title, cur, headers, status,
                                                  self.table_format,
                                                  self.pgspecial.expanded_output)
                        output.extend(formatted)
                        end = time()
                        total += end - start
                        mutating = mutating or is_mutating(status)
                except KeyboardInterrupt:
                    # Restart connection to the database
                    pgexecute.connect()
                    logger.debug("cancelled query, sql: %r", document.text)
                    click.secho("cancelled query", err=True, fg='red')
                except NotImplementedError:
                    click.secho('Not Yet Implemented.', fg="yellow")
                except OperationalError as e:
                    reconnect = True
                    if ('server closed the connection' in utf8tounicode(e.args[0])):
                        reconnect = click.prompt('Connection reset. Reconnect (Y/n)',
                                show_default=False, type=bool, default=True)
                        if reconnect:
                            try:
                                pgexecute.connect()
                                click.secho('Reconnected!\nTry the command again.', fg='green')
                            except OperationalError as e:
                                click.secho(str(e), err=True, fg='red')
                    else:
                        logger.error("sql: %r, error: %r", document.text, e)
                        logger.error("traceback: %r", traceback.format_exc())
                        click.secho(str(e), err=True, fg='red')
                except Exception as e:
                    logger.error("sql: %r, error: %r", document.text, e)
                    logger.error("traceback: %r", traceback.format_exc())
                    click.secho(str(e), err=True, fg='red')
                else:
                    # Success: page the formatted output to the user.
                    try:
                        click.echo_via_pager('\n'.join(output))
                    except KeyboardInterrupt:
                        pass
                    if self.pgspecial.timing_enabled:
                        print('Command Time: %0.03fs' % duration)
                        print('Format Time: %0.03fs' % total)

                # Refresh the table names and column names if necessary.
                if need_completion_refresh(document.text):
                    self.refresh_completions()

                # Refresh search_path to set default schema.
                if need_search_path_refresh(document.text):
                    logger.debug('Refreshing search path')
                    completer.set_search_path(pgexecute.search_path())
                    logger.debug('Search path: %r', completer.search_path)

                query = Query(document.text, successful, mutating)
                self.query_history.append(query)
        except EOFError:
            print('Goodbye!')
        finally:  # Reset the less opts back to original.
            logger.debug('Restoring env var LESS to %r.', original_less_opts)
            os.environ['LESS'] = original_less_opts

    def adjust_less_opts(self):
        """Set LESS options suited for paging query output; return the old
        value so it can be restored on exit."""
        less_opts = os.environ.get('LESS', '')
        self.logger.debug('Original value for LESS env var: %r', less_opts)
        os.environ['LESS'] = '-RXF'
        return less_opts

    def refresh_completions(self):
        """Reload completion metadata (schemas, tables, columns, functions,
        types, databases) from the current connection."""
        completer = self.completer
        completer.reset_completions()

        pgexecute = self.pgexecute

        # schemata
        completer.set_search_path(pgexecute.search_path())
        completer.extend_schemata(pgexecute.schemata())

        # tables
        completer.extend_relations(pgexecute.tables(), kind='tables')
        completer.extend_columns(pgexecute.table_columns(), kind='tables')

        # views
        completer.extend_relations(pgexecute.views(), kind='views')
        completer.extend_columns(pgexecute.view_columns(), kind='views')

        # functions
        completer.extend_functions(pgexecute.functions())

        # types
        completer.extend_datatypes(pgexecute.datatypes())

        # databases
        completer.extend_database_names(pgexecute.databases())

        return [(None, None, None, 'Auto-completions refreshed.')]

    def get_completions(self, text, cursor_positition):
        """Return completions for *text* at *cursor_positition* (used by tests)."""
        return self.completer.get_completions(
            Document(text=text, cursor_position=cursor_positition), None)
@click.command()
# Default host is '' so psycopg2 can default to either localhost or unix socket
@click.option('-h', '--host', default='', envvar='PGHOST',
        help='Host address of the postgres database.')
@click.option('-p', '--port', default=5432, help='Port number at which the '
        'postgres instance is listening.', envvar='PGPORT')
@click.option('-U', '--user', envvar='PGUSER', help='User name to '
        'connect to the postgres database.')
@click.option('-W', '--password', 'prompt_passwd', is_flag=True, default=False,
        help='Force password prompt.')
@click.option('-w', '--no-password', 'never_prompt', is_flag=True,
        default=False, help='Never prompt for password.')
@click.option('-v', '--version', is_flag=True, help='Version of pgcli.')
@click.option('-d', '--dbname', default='', envvar='PGDATABASE',
        help='database name to connect to.')
@click.option('--pgclirc', default='~/.pgclirc', envvar='PGCLIRC',
        help='Location of .pgclirc file.')
@click.argument('database', default=lambda: None, envvar='PGDATABASE', nargs=1)
@click.argument('username', default=lambda: None, envvar='PGUSER', nargs=1)
def cli(database, user, host, port, prompt_passwd, never_prompt, dbname,
        username, version, pgclirc):
    """Command-line entry point: parse options, connect, start the REPL."""
    if version:
        print('Version:', __version__)
        sys.exit(0)

    pgcli = PGCli(prompt_passwd, never_prompt, pgclirc_file=pgclirc)

    # Choose which ever one has a valid value.
    database = database or dbname
    user = username or user

    if '://' in database:
        # A URI (e.g. postgres://user@host/db) carries all connection info.
        pgcli.connect_uri(database)
    else:
        pgcli.connect(database, host, user, port)

    pgcli.logger.debug('Launch Params: \n'
            '\tdatabase: %r'
            '\tuser: %r'
            '\thost: %r'
            '\tport: %r', database, user, host, port)

    pgcli.run_cli()
def format_output(title, cur, headers, status, table_format, expanded=False):
    """Assemble the printable output lines for one query result.

    Returns a list of strings: optional title, the formatted rows (expanded
    or tabular), and the optional status line.
    """
    lines = []
    if title:  # Only print the title if it's not None.
        lines.append(title)
    if cur:
        headers = [utf8tounicode(h) for h in headers]
        if expanded:
            lines.append(expanded_table(cur, headers))
        else:
            lines.append(tabulate(cur, headers, tablefmt=table_format,
                                  missingval='<null>'))
    if status:  # Only print the status if it's not None.
        lines.append(status)
    return lines
def need_completion_refresh(queries):
    """Determines if the completion needs a refresh by checking if the sql
    statement is an alter, create, drop or change db.

    Bug fix: the original returned from inside the loop, so only the FIRST
    statement of a multi-statement input was examined (and empty input
    returned None).  Now every statement is checked and a bool is always
    returned.
    """
    refresh_triggers = ('alter', 'create', 'use', '\\c', '\\connect', 'drop')
    for query in sqlparse.split(queries):
        try:
            first_token = query.split()[0]
        except IndexError:
            # Blank statement (e.g. stray semicolon); nothing to inspect.
            continue
        if first_token.lower() in refresh_triggers:
            return True
    return False
def need_search_path_refresh(sql):
    """Determines if the search_path should be refreshed by checking if the
    sql has 'set search_path'."""
    lowered = sql.lower()
    return 'set search_path' in lowered
def is_mutating(status):
    """Determines if the statement is mutating based on the status."""
    if not status:
        return False
    # The status line starts with the command tag, e.g. 'INSERT 0 1'.
    first_word = status.split(None, 1)[0].lower()
    return first_word in {'insert', 'update', 'delete', 'alter', 'create', 'drop'}
def is_select(status):
    """Returns true if the first word in status is 'select'."""
    if not status:
        return False
    first_token = status.split(None, 1)[0]
    return first_token.lower() == 'select'
def quit_command(sql):
    """Return True when the input is one of the recognized exit commands."""
    stripped = sql.strip()
    # 'exit'/'quit' are case-insensitive; '\q' and ':q' must match exactly.
    return stripped.lower() in ('exit', 'quit') or stripped in ('\\q', ':q')
# Allow running this module directly as a script.
if __name__ == "__main__":
    cli()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import copy
from collections import OrderedDict
from . import core
from astropy.table import Table
from . import cparser
from astropy.utils.misc import _set_locale
class FastBasic(metaclass=core.MetaBaseReader):
    """
    This class is intended to handle the same format addressed by the
    ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C
    code and is therefore much faster. Unlike the other ASCII readers and
    writers, this class is not very extensible and is restricted
    by optimization requirements.
    """
    _format_name = 'fast_basic'
    _description = 'Basic table with custom delimiter using the fast C engine'
    _fast = True
    # Pad short rows with empty fields instead of raising (FastCsv sets True).
    fill_extra_cols = False
    # Set externally by the format-guessing machinery; enables extra checks
    # in check_header().
    guessing = False
    # When True, column names must pass strict validity checks (check_header).
    strict_names = False

    def __init__(self, default_kwargs={}, **user_kwargs):
        """Initialize from per-format defaults overridden by user kwargs.

        NOTE(review): ``default_kwargs={}`` is a mutable default argument;
        it is only read and deep-copied here, never mutated, so it is safe.
        """
        # Make sure user does not set header_start to None for a reader
        # that expects a non-None value (i.e. a number >= 0). This mimics
        # what happens in the Basic reader.
        if (default_kwargs.get('header_start', 0) is not None
                and user_kwargs.get('header_start', 0) is None):
            raise ValueError('header_start cannot be set to None for this Reader')

        # Set up kwargs and copy any user kwargs.  Use deepcopy user kwargs
        # since they may contain a dict item which would end up as a ref to the
        # original and get munged later (e.g. in cparser.pyx validation of
        # fast_reader dict).
        kwargs = copy.deepcopy(default_kwargs)
        kwargs.update(copy.deepcopy(user_kwargs))

        delimiter = kwargs.pop('delimiter', ' ')
        self.delimiter = str(delimiter) if delimiter is not None else None
        # 'comment' is deliberately read twice: the write-side default keeps
        # a trailing space ('# ') while the read-side default does not ('#').
        self.write_comment = kwargs.get('comment', '# ')
        self.comment = kwargs.pop('comment', '#')
        if self.comment is not None:
            self.comment = str(self.comment)
        self.quotechar = str(kwargs.pop('quotechar', '"'))
        self.header_start = kwargs.pop('header_start', 0)
        # If data_start is not specified, start reading
        # data right after the header line
        data_start_default = user_kwargs.get('data_start', self.header_start
                                             + 1 if self.header_start is not None else 1)
        self.data_start = kwargs.pop('data_start', data_start_default)
        # Remaining kwargs are forwarded verbatim to the C parser in read().
        self.kwargs = kwargs
        self.strip_whitespace_lines = True
        self.strip_whitespace_fields = True

    def _read_header(self):
        # Use the tokenizer by default -- this method
        # can be overridden for specialized headers
        self.engine.read_header()

    def read(self, table):
        """
        Read input data (file-like object, filename, list of strings, or
        single string) into a Table and return the result.
        """
        # Reject every option the C engine cannot honor before doing any work.
        if self.comment is not None and len(self.comment) != 1:
            raise core.ParameterError("The C reader does not support a comment regex")
        elif self.data_start is None:
            raise core.ParameterError("The C reader does not allow data_start to be None")
        elif self.header_start is not None and self.header_start < 0 and \
                not isinstance(self, FastCommentedHeader):
            raise core.ParameterError("The C reader does not allow header_start to be "
                                      "negative except for commented-header files")
        elif self.data_start < 0:
            raise core.ParameterError("The C reader does not allow data_start to be negative")
        elif len(self.delimiter) != 1:
            raise core.ParameterError("The C reader only supports 1-char delimiters")
        elif len(self.quotechar) != 1:
            raise core.ParameterError("The C reader only supports a length-1 quote character")
        elif 'converters' in self.kwargs:
            raise core.ParameterError("The C reader does not support passing "
                                      "specialized converters")
        elif 'encoding' in self.kwargs:
            raise core.ParameterError("The C reader does not use the encoding parameter")
        elif 'Outputter' in self.kwargs:
            raise core.ParameterError("The C reader does not use the Outputter parameter")
        elif 'Inputter' in self.kwargs:
            raise core.ParameterError("The C reader does not use the Inputter parameter")
        elif 'data_Splitter' in self.kwargs or 'header_Splitter' in self.kwargs:
            raise core.ParameterError("The C reader does not use a Splitter class")

        self.strict_names = self.kwargs.pop('strict_names', False)

        # Process fast_reader kwarg, which may or may not exist (though ui.py will always
        # pass this as a dict with at least 'enable' set).
        fast_reader = self.kwargs.get('fast_reader', True)
        if not isinstance(fast_reader, dict):
            fast_reader = {}
        fast_reader.pop('enable', None)
        self.return_header_chars = fast_reader.pop('return_header_chars', False)
        # Put fast_reader dict back into kwargs.
        self.kwargs['fast_reader'] = fast_reader

        self.engine = cparser.CParser(table, self.strip_whitespace_lines,
                                      self.strip_whitespace_fields,
                                      delimiter=self.delimiter,
                                      header_start=self.header_start,
                                      comment=self.comment,
                                      quotechar=self.quotechar,
                                      data_start=self.data_start,
                                      fill_extra_cols=self.fill_extra_cols,
                                      **self.kwargs)
        # Subclasses (e.g. FastRdb) may return per-column conversion hints;
        # the base implementation returns None.
        conversion_info = self._read_header()
        self.check_header()
        if conversion_info is not None:
            try_int, try_float, try_string = conversion_info
        else:
            try_int = {}
            try_float = {}
            try_string = {}

        # Parse under the 'C' locale so the decimal separator is always '.'.
        with _set_locale('C'):
            data, comments = self.engine.read(try_int, try_float, try_string)
        out = self.make_table(data, comments)

        if self.return_header_chars:
            out.meta['__ascii_fast_reader_header_chars__'] = self.engine.header_chars

        return out

    def make_table(self, data, comments):
        """Actually make the output table give the data and comments."""
        meta = OrderedDict()
        if comments:
            meta['comments'] = comments
        # Ensure output column names are unique.
        names = core._deduplicate_names(self.engine.get_names())
        return Table(data, names=names, meta=meta)

    def check_header(self):
        # Validate column names; extra rules apply while format-guessing.
        names = self.engine.get_header_names() or self.engine.get_names()
        if self.strict_names:
            # Impose strict requirements on column names (normally used in guessing)
            bads = [" ", ",", "|", "\t", "'", '"']
            for name in names:
                if (core._is_number(name)
                        or len(name) == 0
                        or name[0] in bads
                        or name[-1] in bads):
                    raise ValueError('Column name {!r} does not meet strict name requirements'
                                     .format(name))
        # When guessing require at least two columns
        if self.guessing and len(names) <= 1:
            raise ValueError('Table format guessing requires at least two columns, got {}'
                             .format(names))

    def write(self, table, output):
        """
        Use a fast Cython method to write table data to output,
        where output is a filename or file-like object.
        """
        self._write(table, output, {})

    def _write(self, table, output, default_kwargs,
               header_output=True, output_types=False):
        # Shared write implementation; header_output/output_types are
        # forwarded to the Cython FastWriter (subclasses tweak them).

        # Fast writer supports only 1-d columns
        write_kwargs = {'delimiter': self.delimiter,
                        'quotechar': self.quotechar,
                        'strip_whitespace': self.strip_whitespace_fields,
                        'comment': self.write_comment
                        }
        write_kwargs.update(default_kwargs)
        # user kwargs take precedence over default kwargs
        write_kwargs.update(self.kwargs)
        writer = cparser.FastWriter(table, **write_kwargs)
        writer.write(output, header_output, output_types)
class FastCsv(FastBasic):
    """
    A faster version of the ordinary :class:`Csv` writer that uses the
    optimized C parsing engine. Note that this reader will append empty
    field values to the end of any row with not enough columns, while
    :class:`FastBasic` simply raises an error.
    """
    _format_name = 'fast_csv'
    _description = 'Comma-separated values table using the fast C engine'
    _fast = True
    # Short rows are padded with empty fields instead of raising.
    fill_extra_cols = True

    def __init__(self, **kwargs):
        # CSV: comma delimiter, no comment lines by default.
        super().__init__({'delimiter': ',', 'comment': None}, **kwargs)

    def write(self, table, output):
        """
        Override the default write method of `FastBasic` to
        output masked values as empty fields.
        """
        self._write(table, output, {'fill_values': [(core.masked, '')]})
class FastTab(FastBasic):
    """
    A faster version of the ordinary :class:`Tab` reader that uses
    the optimized C parsing engine.
    """
    _format_name = 'fast_tab'
    _description = 'Tab-separated values table using the fast C engine'
    _fast = True

    def __init__(self, **kwargs):
        super().__init__({'delimiter': '\t'}, **kwargs)
        # Tabs delimit fields, so surrounding whitespace is significant and
        # must not be stripped.
        self.strip_whitespace_lines = False
        self.strip_whitespace_fields = False
class FastNoHeader(FastBasic):
    """
    This class uses the fast C engine to read tables with no header line. If
    the names parameter is unspecified, the columns will be autonamed with
    "col{}".
    """
    _format_name = 'fast_no_header'
    _description = 'Basic table with no headers using the fast C engine'
    _fast = True

    def __init__(self, **kwargs):
        # No header line: data begins on the very first line.
        super().__init__({'header_start': None, 'data_start': 0}, **kwargs)

    def write(self, table, output):
        """
        Override the default writing behavior in `FastBasic` so
        that columns names are not included in output.
        """
        # header_output=None suppresses the header line entirely.
        self._write(table, output, {}, header_output=None)
class FastCommentedHeader(FastBasic):
    """
    A faster version of the :class:`CommentedHeader` reader, which looks for
    column names in a commented line. ``header_start`` denotes the index of
    the header line among all commented lines and is 0 by default.
    """
    _format_name = 'fast_commented_header'
    _description = 'Columns name in a commented line using the fast C engine'
    _fast = True

    def __init__(self, **kwargs):
        super().__init__({}, **kwargs)
        # Mimic CommentedHeader's behavior in which data_start
        # is relative to header_start if unspecified; see #2692
        if 'data_start' not in kwargs:
            self.data_start = 0

    def make_table(self, data, comments):
        """
        Actually make the output table give the data and comments.  This is
        slightly different from the base FastBasic method in the way comments
        are handled.
        """
        meta = OrderedDict()
        if comments:
            idx = self.header_start
            if idx < 0:
                # A negative header_start counts back from the last comment.
                idx = len(comments) + idx
            # Exclude the header line itself from the stored comments.
            meta['comments'] = comments[:idx] + comments[idx+1:]  # noqa
            if not meta['comments']:
                del meta['comments']
        names = core._deduplicate_names(self.engine.get_names())
        return Table(data, names=names, meta=meta)

    def _read_header(self):
        # Collect commented lines up to (and including) the header_start-th
        # one, then tokenize just that line to extract column names.
        tmp = self.engine.source
        commented_lines = []
        for line in tmp.splitlines():
            line = line.lstrip()
            if line and line[0] == self.comment:  # line begins with a comment
                commented_lines.append(line[1:])
                if len(commented_lines) == self.header_start + 1:
                    break

        if len(commented_lines) <= self.header_start:
            raise cparser.CParserError('not enough commented lines')

        self.engine.setup_tokenizer([commented_lines[self.header_start]])
        self.engine.header_start = 0
        self.engine.read_header()
        # Restore the full source so the data rows can be tokenized next.
        self.engine.setup_tokenizer(tmp)

    def write(self, table, output):
        """
        Override the default writing behavior in `FastBasic` so
        that column names are commented.
        """
        self._write(table, output, {}, header_output='comment')
class FastRdb(FastBasic):
    """
    A faster version of the :class:`Rdb` reader. This format is similar to
    tab-delimited, but it also contains a header line after the column
    name line denoting the type of each column (N for numeric, S for string).
    """
    _format_name = 'fast_rdb'
    _description = 'Tab-separated with a type definition header line'
    _fast = True

    def __init__(self, **kwargs):
        # Tab-delimited with a type line between header and data
        # (hence data_start=2).
        super().__init__({'delimiter': '\t', 'data_start': 2}, **kwargs)
        # Whitespace is significant in tab-delimited files.
        self.strip_whitespace_lines = False
        self.strip_whitespace_fields = False

    def _read_header(self):
        """Parse the column-name line and the type line.

        Returns a ``(try_int, try_float, try_string)`` tuple of per-column
        conversion flags consumed by the C tokenizer in ``FastBasic.read``.
        """
        tmp = self.engine.source
        line1 = ''
        line2 = ''
        # Find the first two non-blank, non-comment lines: column names
        # followed by the column-type definitions.
        for line in tmp.splitlines():
            # valid non-comment line
            if not line1 and line.strip() and line.lstrip()[0] != self.comment:
                line1 = line
            elif not line2 and line.strip() and line.lstrip()[0] != self.comment:
                line2 = line
                break
        else:  # less than 2 lines in table
            raise ValueError('RDB header requires 2 lines')

        # Tokenize the two header lines separately.
        # Each call to self.engine.read_header by default
        # - calls _deduplicate_names to ensure unique header_names
        # - sets self.names from self.header_names if not provided as kwarg
        # - applies self.include_names/exclude_names to self.names.
        # For parsing the types disable 1+3, but self.names needs to be set.
        self.engine.setup_tokenizer([line2])
        self.engine.header_start = 0
        self.engine.read_header(deduplicate=False, filter_names=False)
        types = self.engine.get_header_names()

        # If no kwarg names have been passed, reset to have column names read from header line 1.
        if types == self.engine.get_names():
            self.engine.set_names([])

        self.engine.setup_tokenizer([line1])
        # Get full list of column names prior to applying include/exclude_names,
        # which have to be applied to the unique name set after deduplicate.
        self.engine.read_header(deduplicate=True, filter_names=False)
        col_names = self.engine.get_names()

        self.engine.read_header(deduplicate=False)

        if len(col_names) != len(types):
            raise core.InconsistentTableError('RDB header mismatch between number of '
                                              'column names and column types')

        # If columns have been removed via include/exclude_names, extract matching types.
        if len(self.engine.get_names()) != len(types):
            types = [types[col_names.index(n)] for n in self.engine.get_names()]

        # Each type must look like 'N', 'S', or digits followed by N/S
        # (case-insensitive), e.g. '10N'.
        if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in types):
            raise core.InconsistentTableError('RDB type definitions do not all match '
                                              '[num](N|S): {}'.format(types))

        # Build per-column conversion hints: 'S' columns are string-only,
        # everything else may be tried as int then float.
        try_int = {}
        try_float = {}
        try_string = {}

        for name, col_type in zip(self.engine.get_names(), types):
            if col_type[-1].lower() == 's':
                try_int[name] = 0
                try_float[name] = 0
                try_string[name] = 1
            else:
                try_int[name] = 1
                try_float[name] = 1
                try_string[name] = 0

        # Restore the full source so the data rows can be tokenized.
        self.engine.setup_tokenizer(tmp)
        return (try_int, try_float, try_string)

    def write(self, table, output):
        """
        Override the default writing behavior in `FastBasic` to
        output a line with column types after the column name line.
        """
        self._write(table, output, {}, output_types=True)
| |
# coding: utf-8
# Converted from a Jupyter notebook: fine-tunes VGG16 (ImageNet weights) on
# 8 image classes with augmented batches, plots training curves, and writes
# Kaggle-style submission CSVs from the test-set predictions.
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import LabelEncoder

# Preprocessed image arrays saved by an earlier preprocessing step.
# NOTE(review): 'train_preprocesed.npy' is missing an 's' -- presumably the
# file on disk really is named this way; confirm before "fixing" the typo.
train_features = np.load('train_preprocesed.npy')
valid_features = np.load('valid_preprocessed.npy')

train_dir = "new_train/"
valid_dir = "new_valid/"

# One subdirectory per class; os.listdir order defines the label ordering.
# NOTE(review): this assumes the rows of train_features/valid_features were
# written in the same class-then-file order -- confirm against the
# preprocessing script.
classes = os.listdir(train_dir)

# Build one label per image by repeating each class name once per file.
train_labels = []
for c in classes:
    train_labels.extend([c] * len(os.listdir(train_dir + c + '/')))

valid_labels = []
for c in classes:
    valid_labels.extend([c] * len(os.listdir(valid_dir + c + '/')))

# One-hot encode the string labels.
onehot_train = to_categorical(LabelEncoder().fit_transform(train_labels))
onehot_valid = to_categorical(LabelEncoder().fit_transform(valid_labels))

# VGG16 convolutional base without its classifier head.
vgg16_base = VGG16(include_top=False, weights='imagenet',
                   input_tensor=None, input_shape=(150, 150, 3))

print('Adding new layers...')
output = vgg16_base.get_layer(index=-1).output
output = Flatten()(output)
# Fully-connected head with batch-norm and dropout for regularization.
output = Dense(4096, activation="relu")(output)
output = BatchNormalization()(output)
output = Dropout(0.5)(output)
output = Dense(512, activation="relu")(output)
output = BatchNormalization()(output)
output = Dropout(0.5)(output)
# Final softmax over the 8 classes.
output = Dense(8, activation='softmax')(output)
vgg16_model = Model(vgg16_base.input, output)

# Freeze the convolutional base (first 19 layers); train only the new head.
for layer in vgg16_model.layers[:19]:
    layer.trainable = False

vgg16_model.compile(optimizer="adam", loss="categorical_crossentropy",
                    metrics=["accuracy"])

# Augment training images on the fly; validation images are left untouched.
train_datagen = ImageDataGenerator(
    shear_range=0.1,
    zoom_range=0.1,
    rotation_range=10.,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True)
val_datagen = ImageDataGenerator()

# Stop early when validation loss stalls; checkpoint only the best weights.
early_stopping = EarlyStopping(monitor='val_loss', patience=1, verbose=1,
                               mode='auto')
best_model_file = "./data_augmented_weights.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose=1,
                             save_best_only=True)

history = vgg16_model.fit_generator(
    train_datagen.flow(train_features, onehot_train, batch_size=10),
    nb_epoch=5,
    samples_per_epoch=3019,
    validation_data=val_datagen.flow(valid_features, onehot_valid,
                                     batch_size=10, shuffle=False),
    nb_val_samples=758,
    callbacks=[early_stopping, best_model])

# Plot accuracy and loss curves, train vs. validation side by side.
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()

# Predict on the test set.
test_features = np.load("test_features.npy")
# BUG FIX: the original called `model.predict_proba`, but no `model` variable
# exists in this script -- the trained network is `vgg16_model`.  Use
# predict(): predict_proba is a Sequential-only API, and the softmax output
# already is a probability distribution.
test_preds = vgg16_model.predict(test_features, verbose=1)

# Raw-probability submission.
submission1 = pd.DataFrame(test_preds, columns=os.listdir(train_dir))
test_files = os.listdir("test_stg1/test_stg1/")
submission1.insert(0, 'image', test_files)

# Clipped-probability submission (caps log-loss on confidently wrong rows).
clipped_preds = np.clip(test_preds, (1 - 0.82) / 7, 0.82)
# NOTE(review): column names here come from "train/train/" while submission1
# uses train_dir ("new_train/") -- confirm both directories list the same
# class subdirectories in the same order.
submission2 = pd.DataFrame(clipped_preds, columns=os.listdir("train/train/"))
submission2.insert(0, 'image', test_files)
submission2.to_csv("batch_normalized.csv", index=False)
| |
"""Tests for Vizio config flow."""
from contextlib import asynccontextmanager
from datetime import timedelta
import logging
from typing import Any, Dict, List, Optional
import pytest
from pytest import raises
from pyvizio.api.apps import AppConfig
from pyvizio.const import (
DEVICE_CLASS_SPEAKER as VIZIO_DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV as VIZIO_DEVICE_CLASS_TV,
INPUT_APPS,
MAX_VOLUME,
UNKNOWN_APP,
)
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
DEVICE_CLASS_SPEAKER,
DEVICE_CLASS_TV,
DOMAIN as MP_DOMAIN,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.components.vizio import validate_apps
from homeassistant.components.vizio.const import (
CONF_ADDITIONAL_CONFIGS,
CONF_APPS,
CONF_VOLUME_STEP,
DOMAIN,
VIZIO_SCHEMA,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from .const import (
ADDITIONAL_APP_CONFIG,
APP_LIST,
CURRENT_APP,
CURRENT_APP_CONFIG,
CURRENT_EQ,
CURRENT_INPUT,
CUSTOM_CONFIG,
ENTITY_ID,
EQ_LIST,
INPUT_LIST,
INPUT_LIST_WITH_APPS,
MOCK_SPEAKER_APPS_FAILURE,
MOCK_SPEAKER_CONFIG,
MOCK_TV_APPS_FAILURE,
MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG,
MOCK_TV_WITH_EXCLUDE_CONFIG,
MOCK_TV_WITH_INCLUDE_CONFIG,
MOCK_USER_VALID_TV_CONFIG,
NAME,
UNIQUE_ID,
UNKNOWN_APP_CONFIG,
VOLUME_STEP,
)
from tests.async_mock import call, patch
from tests.common import MockConfigEntry, async_fire_time_changed
# Module-level logger for this test module.
_LOGGER = logging.getLogger(__name__)
async def _add_config_entry_to_hass(
    hass: HomeAssistantType, config_entry: MockConfigEntry
) -> None:
    """Register the config entry with hass, set it up, and wait until done."""
    config_entry.add_to_hass(hass)
    assert await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()
def _get_ha_power_state(vizio_power_state: Optional[bool]) -> str:
    """Return HA power state given Vizio power state."""
    # True -> on, False -> off, None (unknown) -> unavailable.
    state_map = {True: STATE_ON, False: STATE_OFF}
    return state_map.get(vizio_power_state, STATE_UNAVAILABLE)
def _assert_sources_and_volume(attr: Dict[str, Any], vizio_device_class: str) -> None:
    """Assert source list, source, and volume level based on attr dict and device class."""
    assert attr["source_list"] == INPUT_LIST
    assert attr["source"] == CURRENT_INPUT
    # The setup helpers report the device volume as int(MAX_VOLUME / 2), so
    # the HA volume_level attribute should be that value normalized to [0, 1].
    assert (
        attr["volume_level"]
        == float(int(MAX_VOLUME[vizio_device_class] / 2))
        / MAX_VOLUME[vizio_device_class]
    )
def _get_attr_and_assert_base_attr(
    hass: HomeAssistantType, device_class: str, power_state: str
) -> Dict[str, Any]:
    """Return entity attributes after asserting name, device class, and power state."""
    attr = hass.states.get(ENTITY_ID).attributes
    assert attr["friendly_name"] == NAME
    assert attr["device_class"] == device_class
    assert hass.states.get(ENTITY_ID).state == power_state
    return attr
@asynccontextmanager
async def _cm_for_test_setup_without_apps(
    all_settings: Dict[str, Any], vizio_power_state: Optional[bool]
) -> None:
    """Context manager to setup test for Vizio devices without including app specific patches."""
    # Patch the pyvizio API calls used during entity setup/update so no real
    # device is contacted; EQ_LIST serves as the available setting options.
    with patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_all_settings",
        return_value=all_settings,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_setting_options",
        return_value=EQ_LIST,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
        return_value=vizio_power_state,
    ):
        yield
async def _test_setup_tv(
    hass: HomeAssistantType, vizio_power_state: Optional[bool]
) -> None:
    """Test Vizio TV entity setup."""
    ha_power_state = _get_ha_power_state(vizio_power_state)
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_USER_VALID_TV_CONFIG),
        unique_id=UNIQUE_ID,
    )
    # Report a half-volume, unmuted TV in the requested power state.
    async with _cm_for_test_setup_without_apps(
        {"volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2), "mute": "Off"},
        vizio_power_state,
    ):
        await _add_config_entry_to_hass(hass, config_entry)
        attr = _get_attr_and_assert_base_attr(hass, DEVICE_CLASS_TV, ha_power_state)
        # Source/volume attributes are only exposed while the entity is on.
        if ha_power_state == STATE_ON:
            _assert_sources_and_volume(attr, VIZIO_DEVICE_CLASS_TV)
            # TVs in this setup have no EQ, so no sound_mode attribute.
            assert "sound_mode" not in attr
async def _test_setup_speaker(
    hass: HomeAssistantType, vizio_power_state: Optional[bool]
) -> None:
    """Test Vizio Speaker entity setup."""
    ha_power_state = _get_ha_power_state(vizio_power_state)
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_SPEAKER_CONFIG),
        unique_id=UNIQUE_ID,
    )
    # Speakers additionally report an equalizer setting ("eq"), which should
    # surface as the entity's sound_mode attribute.
    async with _cm_for_test_setup_without_apps(
        {
            "volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_SPEAKER] / 2),
            "mute": "Off",
            "eq": CURRENT_EQ,
        },
        vizio_power_state,
    ):
        with patch(
            "homeassistant.components.vizio.media_player.VizioAsync.get_current_app_config",
        ) as service_call:
            await _add_config_entry_to_hass(hass, config_entry)
            attr = _get_attr_and_assert_base_attr(
                hass, DEVICE_CLASS_SPEAKER, ha_power_state
            )
            if ha_power_state == STATE_ON:
                _assert_sources_and_volume(attr, VIZIO_DEVICE_CLASS_SPEAKER)
                # Speakers have no app support, so the current-app config
                # must never be queried.
                assert not service_call.called
                assert "sound_mode" in attr
@asynccontextmanager
async def _cm_for_test_setup_tv_with_apps(
    hass: HomeAssistantType, device_config: Dict[str, Any], app_config: Dict[str, Any]
) -> None:
    """Context manager to setup test for Vizio TV with support for apps."""
    config_entry = MockConfigEntry(
        domain=DOMAIN, data=vol.Schema(VIZIO_SCHEMA)(device_config), unique_id=UNIQUE_ID
    )
    async with _cm_for_test_setup_without_apps(
        {"volume": int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2), "mute": "Off"}, True,
    ):
        # Report the given app as the one currently running on the device.
        with patch(
            "homeassistant.components.vizio.media_player.VizioAsync.get_current_app_config",
            return_value=AppConfig(**app_config),
        ):
            await _add_config_entry_to_hass(hass, config_entry)
            attr = _get_attr_and_assert_base_attr(hass, DEVICE_CLASS_TV, STATE_ON)
            assert (
                attr["volume_level"]
                == float(int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2))
                / MAX_VOLUME[VIZIO_DEVICE_CLASS_TV]
            )
            # Hand control back to the test body with all patches active.
            yield
def _assert_source_list_with_apps(
    list_to_test: List[str], attr: Dict[str, Any]
) -> None:
    """Assert source list matches list_to_test after removing INPUT_APPS from list."""
    for input_app in INPUT_APPS:
        # EAFP: drop the first occurrence if present; mutates the caller's
        # list in place, exactly like the original membership-checked remove.
        try:
            list_to_test.remove(input_app)
        except ValueError:
            pass
    assert attr["source_list"] == list_to_test
async def _test_setup_failure(hass: HomeAssistantType, config: str) -> None:
    """Test generic Vizio entity setup failure."""
    # Simulate an unreachable device: setup should complete without creating
    # any media_player entity.
    with patch(
        "homeassistant.components.vizio.media_player.VizioAsync.can_connect_with_auth_check",
        return_value=False,
    ):
        config_entry = MockConfigEntry(domain=DOMAIN, data=config, unique_id=UNIQUE_ID)
        await _add_config_entry_to_hass(hass, config_entry)
        assert len(hass.states.async_entity_ids(MP_DOMAIN)) == 0
async def _test_service(
    hass: HomeAssistantType,
    vizio_func_name: str,
    ha_service_name: str,
    additional_service_data: Optional[Dict[str, Any]],
    *args,
    **kwargs,
) -> None:
    """Test generic Vizio media player entity service.

    Calls the HA service and asserts the named pyvizio method was invoked;
    when *args/**kwargs are given, also asserts the exact call arguments.
    """
    service_data = {ATTR_ENTITY_ID: ENTITY_ID}
    if additional_service_data:
        service_data.update(additional_service_data)
    with patch(
        f"homeassistant.components.vizio.media_player.VizioAsync.{vizio_func_name}"
    ) as service_call:
        await hass.services.async_call(
            MP_DOMAIN, ha_service_name, service_data=service_data, blocking=True,
        )
        assert service_call.called
        if args or kwargs:
            assert service_call.call_args == call(*args, **kwargs)
# The vizio_connect / vizio_update fixtures (from conftest) patch pyvizio
# connectivity and update calls so no real device is contacted.
async def test_speaker_on(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio Speaker entity setup when on."""
    await _test_setup_speaker(hass, True)


async def test_speaker_off(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio Speaker entity setup when off."""
    await _test_setup_speaker(hass, False)


async def test_speaker_unavailable(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio Speaker entity setup when unavailable."""
    # None power state maps to STATE_UNAVAILABLE.
    await _test_setup_speaker(hass, None)
async def test_init_tv_on(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio TV entity setup when on."""
    await _test_setup_tv(hass, True)


async def test_init_tv_off(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio TV entity setup when off."""
    await _test_setup_tv(hass, False)


async def test_init_tv_unavailable(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio TV entity setup when unavailable."""
    # None power state maps to STATE_UNAVAILABLE.
    await _test_setup_tv(hass, None)
async def test_setup_failure_speaker(
    hass: HomeAssistantType, vizio_connect: pytest.fixture
) -> None:
    """Test speaker entity setup failure."""
    await _test_setup_failure(hass, MOCK_SPEAKER_CONFIG)


async def test_setup_failure_tv(
    hass: HomeAssistantType, vizio_connect: pytest.fixture
) -> None:
    """Test TV entity setup failure."""
    await _test_setup_failure(hass, MOCK_USER_VALID_TV_CONFIG)
async def test_services(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test all Vizio media player entity services."""
    # Set up a powered-on TV, then drive each supported media_player service
    # and assert it maps to the expected pyvizio method (and arguments).
    await _test_setup_tv(hass, True)
    await _test_service(hass, "pow_on", SERVICE_TURN_ON, None)
    await _test_service(hass, "pow_off", SERVICE_TURN_OFF, None)
    await _test_service(
        hass, "mute_on", SERVICE_VOLUME_MUTE, {ATTR_MEDIA_VOLUME_MUTED: True}
    )
    await _test_service(
        hass, "mute_off", SERVICE_VOLUME_MUTE, {ATTR_MEDIA_VOLUME_MUTED: False}
    )
    await _test_service(
        hass, "set_input", SERVICE_SELECT_SOURCE, {ATTR_INPUT_SOURCE: "USB"}, "USB"
    )
    await _test_service(hass, "vol_up", SERVICE_VOLUME_UP, None)
    await _test_service(hass, "vol_down", SERVICE_VOLUME_DOWN, None)
    # Absolute volume set is expected to translate into vol_up/vol_down calls.
    await _test_service(
        hass, "vol_up", SERVICE_VOLUME_SET, {ATTR_MEDIA_VOLUME_LEVEL: 1}
    )
    await _test_service(
        hass, "vol_down", SERVICE_VOLUME_SET, {ATTR_MEDIA_VOLUME_LEVEL: 0}
    )
    await _test_service(hass, "ch_up", SERVICE_MEDIA_NEXT_TRACK, None)
    await _test_service(hass, "ch_down", SERVICE_MEDIA_PREVIOUS_TRACK, None)
    await _test_service(
        hass, "set_setting", SERVICE_SELECT_SOUND_MODE, {ATTR_SOUND_MODE: "Music"}
    )
async def test_options_update(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test when config entry update event fires."""
    await _test_setup_speaker(hass, True)
    config_entry = hass.config_entries.async_entries(DOMAIN)[0]
    assert config_entry.options
    new_options = config_entry.options.copy()
    updated_options = {CONF_VOLUME_STEP: VOLUME_STEP}
    new_options.update(updated_options)
    hass.config_entries.async_update_entry(
        entry=config_entry, options=new_options,
    )
    assert config_entry.options == updated_options
    # The entity should use the new volume step on the next volume service.
    await _test_service(hass, "vol_up", SERVICE_VOLUME_UP, None, num=VOLUME_STEP)
async def _test_update_availability_switch(
    hass: HomeAssistantType,
    initial_power_state: Optional[bool],
    final_power_state: Optional[bool],
    caplog: pytest.fixture,
) -> None:
    """Set up with one power state, flip to another, and assert the transition.

    Verifies the entity's availability follows the device power state and that
    the integration logs the connection change exactly once.
    """
    now = dt_util.utcnow()
    future_interval = timedelta(minutes=1)

    # Setup device as if time is right now
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await _test_setup_speaker(hass, initial_power_state)

    # Clear captured logs so that only availability state changes are captured for
    # future assertion
    caplog.clear()

    # Fast forward time to future twice to trigger update and assert vizio log message
    for i in range(1, 3):
        future = now + (future_interval * i)
        with patch(
            "homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
            return_value=final_power_state,
        ), patch("homeassistant.util.dt.utcnow", return_value=future), patch(
            "homeassistant.util.utcnow", return_value=future
        ):
            async_fire_time_changed(hass, future)
            await hass.async_block_till_done()
            if final_power_state is None:
                assert hass.states.get(ENTITY_ID).state == STATE_UNAVAILABLE
            else:
                assert hass.states.get(ENTITY_ID).state != STATE_UNAVAILABLE

    # Ensure connection status messages from vizio.media_player appear exactly once
    # (on availability state change)
    vizio_log_list = [
        log
        for log in caplog.records
        if log.name == "homeassistant.components.vizio.media_player"
    ]
    assert len(vizio_log_list) == 1
async def test_update_unavailable_to_available(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device becomes available after being unavailable."""
    await _test_update_availability_switch(hass, None, True, caplog)


async def test_update_available_to_unavailable(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device becomes unavailable after being available."""
    await _test_update_availability_switch(hass, True, None, caplog)
async def test_setup_with_apps(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update_with_apps: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device setup with apps."""
    async with _cm_for_test_setup_tv_with_apps(
        hass, MOCK_USER_VALID_TV_CONFIG, CURRENT_APP_CONFIG
    ):
        attr = hass.states.get(ENTITY_ID).attributes
        # With no include/exclude filtering, all known apps are selectable.
        _assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + APP_LIST), attr)
        assert CURRENT_APP in attr["source_list"]
        assert attr["source"] == CURRENT_APP
        assert attr["app_name"] == CURRENT_APP
        # The current app is known, so no raw app_id attribute is exposed.
        assert "app_id" not in attr
        # Selecting an app as a source should launch it on the device.
        await _test_service(
            hass,
            "launch_app",
            SERVICE_SELECT_SOURCE,
            {ATTR_INPUT_SOURCE: CURRENT_APP},
            CURRENT_APP,
        )
async def test_setup_with_apps_include(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update_with_apps: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device setup with apps and apps["include"] in config."""
    async with _cm_for_test_setup_tv_with_apps(
        hass, MOCK_TV_WITH_INCLUDE_CONFIG, CURRENT_APP_CONFIG
    ):
        attr = hass.states.get(ENTITY_ID).attributes
        # Only the included app should appear alongside the physical inputs.
        _assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + [CURRENT_APP]), attr)
        assert CURRENT_APP in attr["source_list"]
        assert attr["source"] == CURRENT_APP
        assert attr["app_name"] == CURRENT_APP
        assert "app_id" not in attr
async def test_setup_with_apps_exclude(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update_with_apps: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device setup with apps and apps["exclude"] in config."""
    async with _cm_for_test_setup_tv_with_apps(
        hass, MOCK_TV_WITH_EXCLUDE_CONFIG, CURRENT_APP_CONFIG
    ):
        attr = hass.states.get(ENTITY_ID).attributes
        # Excluding the other apps leaves only the current app selectable.
        _assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + [CURRENT_APP]), attr)
        assert CURRENT_APP in attr["source_list"]
        assert attr["source"] == CURRENT_APP
        assert attr["app_name"] == CURRENT_APP
        assert "app_id" not in attr
async def test_setup_with_apps_additional_apps_config(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update_with_apps: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device setup with apps and apps["additional_configs"] in config."""
    async with _cm_for_test_setup_tv_with_apps(
        hass, MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG, ADDITIONAL_APP_CONFIG["config"],
    ):
        attr = hass.states.get(ENTITY_ID).attributes
        # Additionally configured apps must not duplicate names already
        # present in the built-in app list.
        assert attr["source_list"].count(CURRENT_APP) == 1
        _assert_source_list_with_apps(
            list(
                INPUT_LIST_WITH_APPS
                + APP_LIST
                + [
                    app["name"]
                    for app in MOCK_TV_WITH_ADDITIONAL_APPS_CONFIG[CONF_APPS][
                        CONF_ADDITIONAL_CONFIGS
                    ]
                    if app["name"] not in APP_LIST
                ]
            ),
            attr,
        )
        assert ADDITIONAL_APP_CONFIG["name"] in attr["source_list"]
        assert attr["source"] == ADDITIONAL_APP_CONFIG["name"]
        assert attr["app_name"] == ADDITIONAL_APP_CONFIG["name"]
        assert "app_id" not in attr
        # A built-in app launches through launch_app ...
        await _test_service(
            hass,
            "launch_app",
            SERVICE_SELECT_SOURCE,
            {ATTR_INPUT_SOURCE: "Netflix"},
            "Netflix",
        )
        # ... while an additionally configured app launches via
        # launch_app_config with its custom config payload.
        await _test_service(
            hass,
            "launch_app_config",
            SERVICE_SELECT_SOURCE,
            {ATTR_INPUT_SOURCE: CURRENT_APP},
            **CUSTOM_CONFIG,
        )
        # Test that invalid app does nothing
        with patch(
            "homeassistant.components.vizio.media_player.VizioAsync.launch_app"
        ) as service_call1, patch(
            "homeassistant.components.vizio.media_player.VizioAsync.launch_app_config"
        ) as service_call2:
            await hass.services.async_call(
                MP_DOMAIN,
                SERVICE_SELECT_SOURCE,
                service_data={ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "_"},
                blocking=True,
            )
            assert not service_call1.called
            assert not service_call2.called
def test_invalid_apps_config(hass: HomeAssistantType):
    """Test that schema validation fails on certain conditions."""
    # Both the TV and the speaker failure fixtures must be rejected.
    for bad_config in (MOCK_TV_APPS_FAILURE, MOCK_SPEAKER_APPS_FAILURE):
        with raises(vol.Invalid):
            vol.Schema(vol.All(VIZIO_SCHEMA, validate_apps))(bad_config)
async def test_setup_with_unknown_app_config(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update_with_apps: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device setup with apps where app config returned is unknown."""
    async with _cm_for_test_setup_tv_with_apps(
        hass, MOCK_USER_VALID_TV_CONFIG, UNKNOWN_APP_CONFIG
    ):
        attrs = hass.states.get(ENTITY_ID).attributes
        _assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + APP_LIST), attrs)
        # An unrecognized app is reported under the generic UNKNOWN_APP name,
        # and its raw config is surfaced through the app_id attribute.
        assert attrs["source"] == UNKNOWN_APP
        assert attrs["app_name"] == UNKNOWN_APP
        assert attrs["app_id"] == UNKNOWN_APP_CONFIG
async def test_setup_with_no_running_app(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update_with_apps: pytest.fixture,
    caplog: pytest.fixture,
) -> None:
    """Test device setup with apps where no app is running."""
    async with _cm_for_test_setup_tv_with_apps(
        hass, MOCK_USER_VALID_TV_CONFIG, vars(AppConfig())
    ):
        attrs = hass.states.get(ENTITY_ID).attributes
        _assert_source_list_with_apps(list(INPUT_LIST_WITH_APPS + APP_LIST), attrs)
        # With no app running the source falls back to the CAST input and
        # no app attributes are exposed.
        assert attrs["source"] == "CAST"
        assert "app_id" not in attrs
        assert "app_name" not in attrs
async def test_setup_tv_without_mute(
    hass: HomeAssistantType,
    vizio_connect: pytest.fixture,
    vizio_update: pytest.fixture,
) -> None:
    """Test Vizio TV entity setup when mute property isn't returned by Vizio API."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data=vol.Schema(VIZIO_SCHEMA)(MOCK_USER_VALID_TV_CONFIG),
        unique_id=UNIQUE_ID,
    )
    half_volume = int(MAX_VOLUME[VIZIO_DEVICE_CLASS_TV] / 2)
    async with _cm_for_test_setup_without_apps({"volume": half_volume}, STATE_ON,):
        await _add_config_entry_to_hass(hass, entry)
        attrs = _get_attr_and_assert_base_attr(hass, DEVICE_CLASS_TV, STATE_ON)
        _assert_sources_and_volume(attrs, VIZIO_DEVICE_CLASS_TV)
        # Without a mute property from the API, neither sound mode nor
        # mute state should appear on the entity.
        assert "sound_mode" not in attrs
        assert "is_volume_muted" not in attrs
| |
#!/usr/bin/env python
# coding : utf-8
"""
GUI interface for VOI analyzer.
"""
import wx
import os
import numpy as np
import pandas as pd
from base import _analysis
import utils as utils
class VOIAnalyzerGUI(wx.Frame):
    """Top-level frame hosting the VOIAnalyzer panel.

    The frame shows itself immediately on construction.
    """
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY,
                          "VOIAnalyzer",
                          size=(400, 500))
        # A single panel fills the whole frame.
        self.panel = VOIAnalyzerGUIPanel(self, wx.ID_ANY,
                                         size=(400, 500))
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.panel, flag=wx.EXPAND)
        self.SetSizer(sizer)
        self.Show()
class VOIAnalyzerGUIPanel(wx.Panel):
    """ Panel for VOIAnalyzerGUI.

    Lets the user queue NIfTI images, choose a VOI map, an output CSV
    path and an optional look-up table, then runs the VOI analysis and
    writes the per-VOI statistics to the output CSV.
    """
    def __init__(self, *args, **kwargs):
        wx.Panel.__init__(self, *args, **kwargs)
        self.parent = self.GetParent()
        # Border size
        self.bdsize = 5
        # List for image to extract
        self.listbox_img = wx.ListBox(self, wx.ID_ANY,
                                      style=wx.LB_HSCROLL|wx.LB_NEEDED_SB|wx.LB_EXTENDED)
        # Buttons to control image list
        self.button_plus_img = wx.BitmapButton(self, wx.ID_ANY,
                                               wx.ArtProvider.GetBitmap(wx.ART_PLUS))
        self.button_minus_img = wx.BitmapButton(self, wx.ID_ANY,
                                                wx.ArtProvider.GetBitmap(wx.ART_MINUS))
        font4imgbutton = wx.Font(18, wx.FONTFAMILY_DEFAULT,
                                 wx.FONTSTYLE_NORMAL,
                                 wx.FONTWEIGHT_NORMAL)
        self.button_plus_img.SetFont(font4imgbutton)
        self.button_minus_img.SetFont(font4imgbutton)
        self.button_plus_img.Bind(wx.EVT_BUTTON, self.OnPushButton_img_plus)
        self.button_minus_img.Bind(wx.EVT_BUTTON, self.OnPushButton_img_minus)
        # Layout for buttons to control image
        layout_imgbutton = wx.BoxSizer(wx.VERTICAL)
        layout_imgbutton.Add(self.button_plus_img,
                             flag=wx.EXPAND)
        layout_imgbutton.Add(self.button_minus_img,
                             flag=wx.EXPAND)
        # Layout for image list
        box_imglist = wx.StaticBox(self, wx.ID_ANY, "Images to analyze")
        layout_imglist = wx.StaticBoxSizer(box_imglist, wx.HORIZONTAL)
        layout_imglist.Add(self.listbox_img, flag=wx.EXPAND,
                           proportion=12)
        layout_imglist.Add(layout_imgbutton, flag=wx.EXPAND,
                           proportion=1)
        # Text control for VOI map file
        style4text = wx.TE_READONLY|wx.HSCROLL|wx.TE_RIGHT
        self.text_voi = wx.TextCtrl(self, wx.ID_ANY,
                                    style=style4text)
        # Button to open VOI map file
        self.button_voi = wx.BitmapButton(self, wx.ID_ANY,
                                          wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN))
        self.button_voi.Bind(wx.EVT_BUTTON, self.OnPushButton_VOI)
        # Layout for VOI map
        box_voi = wx.StaticBox(self, wx.ID_ANY, "VOI map")
        layout_voi = wx.StaticBoxSizer(box_voi, wx.HORIZONTAL)
        layout_voi.Add(self.text_voi, flag=wx.EXPAND,
                       proportion=12)
        layout_voi.Add(self.button_voi,
                       proportion=1)
        # Text control for table
        self.text_tab = wx.TextCtrl(self, wx.ID_ANY,
                                    style=style4text)
        # Button to open output file
        self.button_tab = wx.BitmapButton(self, wx.ID_ANY,
                                          wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN))
        self.button_tab.Bind(wx.EVT_BUTTON, self.OnPushButton_out)
        # Layout for output file
        box_tab = wx.StaticBox(self, wx.ID_ANY,
                               "Output CSV file")
        layout_tab = wx.StaticBoxSizer(box_tab, wx.HORIZONTAL)
        layout_tab.Add(self.text_tab, flag=wx.EXPAND,
                       proportion=12)
        layout_tab.Add(self.button_tab,
                       proportion=1)
        # Text control for VOI look-up table file
        self.text_lut = wx.TextCtrl(self, wx.ID_ANY,
                                    style=style4text)
        # Button to open VOI look-up table file
        self.button_lut = wx.BitmapButton(self, wx.ID_ANY,
                                          wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN))
        self.button_lut.Bind(wx.EVT_BUTTON, self.OnPushButton_LUT)
        # Layout for VOI map
        box_lut = wx.StaticBox(self, wx.ID_ANY,
                               "Look-up table (optional)")
        layout_lut = wx.StaticBoxSizer(box_lut, wx.HORIZONTAL)
        layout_lut.Add(self.text_lut, flag=wx.EXPAND,
                       proportion=12)
        layout_lut.Add(self.button_lut,
                       proportion=1)
        # Button to analyze and close
        self.button_analyze = wx.Button(self, wx.ID_ANY, "Analyze")
        self.button_analyze.Bind(wx.EVT_BUTTON, self.OnPushButton_analyze)
        self.button_analyze.Disable()
        self.button_close = wx.Button(self, wx.ID_ANY, "Close")
        self.button_close.Bind(wx.EVT_BUTTON, self.OnPushButton_close)
        # Layout for buttons to analyze
        layout_ana = wx.BoxSizer(wx.HORIZONTAL)
        layout_ana.Add(self.button_analyze)
        layout_ana.Add(self.button_close)
        # Layout for panel
        layout_main = wx.BoxSizer(wx.VERTICAL)
        layout_main.Add(layout_imglist, flag=wx.EXPAND,
                        proportion=5)
        layout_main.Add(layout_voi, flag=wx.EXPAND,
                        proportion=2)
        layout_main.Add(layout_tab, flag=wx.EXPAND,
                        proportion=2)
        layout_main.Add(layout_lut, flag=wx.EXPAND,
                        proportion=2)
        layout_main.Add(layout_ana, flag=wx.ALIGN_RIGHT,
                        proportion=1)
        self.SetSizer(layout_main)
    def check_enable(self):
        """ Enable the Analyze button only when the inputs are complete.

        Requires at least one image in the list, a VOI map path and an
        output CSV path (the look-up table is optional).
        """
        # BUGFIX: the original tested `self.text_voi and self.text_voi`;
        # widget objects are always truthy and the output path was never
        # checked.  Test the actual text values instead.
        isEnable = (self.listbox_img.GetCount() > 0
                    and bool(self.text_voi.GetValue())
                    and bool(self.text_tab.GetValue()))
        if isEnable:
            self.button_analyze.Enable()
        else:
            self.button_analyze.Disable()
        return None
    def OnPushButton_img_plus(self, evt):
        """ Callback for plus button: append selected NIfTI images.
        """
        # File dialog
        dlg = wx.FileDialog(self, style=wx.FD_OPEN|wx.FD_MULTIPLE|wx.FD_FILE_MUST_EXIST, wildcard="NIfTI image (.nii)|*.nii")
        res = dlg.ShowModal()
        if res == wx.ID_OK:
            # Append files (plain loop; list comprehension for side
            # effects is unidiomatic).
            for f in dlg.GetPaths():
                self.listbox_img.Append(f)
        dlg.Destroy()
        self.check_enable()
        return None
    def OnPushButton_img_minus(self, evt):
        """ Callback for minus button: remove selected images.
        """
        idx_selected = self.listbox_img.GetSelections()
        # Delete from the end so earlier indices stay valid.
        for idx in reversed(idx_selected):
            self.listbox_img.Delete(idx)
        self.check_enable()
        return None
    def open_file(self, style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST, **kwargs):
        """ Pop-up dialog to open a file.

        Returns the selected path, or None if the dialog was cancelled.
        """
        dlg = wx.FileDialog(self, style=style, **kwargs)
        res = dlg.ShowModal()
        if res == wx.ID_OK:
            fpath = os.path.join(dlg.GetDirectory(), dlg.GetFilename())
        else:
            fpath = None
        dlg.Destroy()
        return fpath
    def OnPushButton_VOI(self, evt):
        """ Callback for open button for VOI map file.
        """
        # Open dialog
        fpath = self.open_file(message="Open VOI map file",
                               wildcard="NIfTI image (.nii)|*.nii")
        if fpath is not None:
            self.text_voi.SetValue(fpath)
        self.check_enable()
        return None
    def OnPushButton_out(self, evt):
        """ Callback for open button for the output CSV file.
        """
        # Open dialog
        fpath = self.open_file(message="Select file to output",
                               style=wx.FD_SAVE)
        if fpath is not None:
            self.text_tab.SetValue(fpath)
        self.check_enable()
        return None
    def OnPushButton_LUT(self, evt):
        """ Callback for open button for look-up table file.
        """
        # Open dialog
        fpath = self.open_file(message="Open VOI look-up table file")
        if fpath is not None:
            self.text_lut.SetValue(fpath)
        return None
    def OnPushButton_analyze(self, evt):
        """ Callback for analyze button.

        Extracts per-VOI statistics from every queued image, computes VOI
        volumes, optionally maps VOI numbers to names through the look-up
        table, and writes the result to the output CSV.
        """
        # Image files
        img_list = self.listbox_img.GetItems()
        # VOI map
        voi_file = self.text_voi.GetValue()
        voi_mat, aff = utils.loadImage(voi_file)[:2]
        voi_mat = voi_mat.astype(np.int16)
        vno_list = np.unique(voi_mat)
        nVOI = vno_list.size
        # Volume per voxel (unit: mm3) from the affine diagonal.
        vol_per_vox = np.abs(np.prod(np.diag(aff[:3, :3])))
        # Maximum progress
        progress_max = nVOI * len(img_list) + 1
        cur_progress = 0
        # Output file
        out_file = self.text_tab.GetValue()
        # Show progress dialog
        self.progress = wx.ProgressDialog("Analyzing...",
                                          "Initiate analysis",
                                          maximum=progress_max,
                                          parent=self)
        self.progress.Show()
        # VOI analysis
        tab_list = []
        for img_path in img_list:
            img_mat, img_aff = utils.loadImage(img_path)[:2]
            # All images must share the VOI map's orientation and size.
            if not np.all(img_aff == aff):
                dlg = wx.MessageDialog(self,
                                       "Image orientation and size must be same as VOI map",
                                       "Caution",
                                       wx.OK|wx.ICON_EXCLAMATION)
                dlg.ShowModal()
                self.progress.Destroy()
                return None
            img_file = os.path.basename(img_path)
            for vno in vno_list:
                # Progress
                msg = "Extracting value on VOI #{0:d}, {1:s}".format(vno, img_file)
                self.progress.Update(cur_progress,
                                     newmsg=msg)
                # Extract statistics for one VOI of one image.
                tab0 = _analysis(img_mat, voi_mat, vno)
                tab0.loc[:, "Path"] = [img_file]
                tab_list.append(tab0)
                cur_progress += 1
        out_tab = pd.concat(tab_list)
        # Calculate volumes (unit: cm3)
        self.progress.Update(cur_progress,
                             newmsg="Calculating VOI volumes")
        out_tab.loc[:, "Volume"] = out_tab.loc[:, "No. of voxels"].values * vol_per_vox / 1000.
        cur_progress += 1
        # LUT file
        self.progress.Update(cur_progress,
                             newmsg="Applying look-up table")
        col_list = ["VOI No.", "No. of voxels",
                    "Mean", "SD", "CoV",
                    "Max", "Min", "Volume"]
        # BUGFIX: IsEmpty is a method; the original `not self.text_lut.IsEmpty`
        # tested the bound method object (always truthy), so the look-up
        # table was never applied.
        if not self.text_lut.IsEmpty():
            lut_file = self.text_lut.GetValue()
            lut = utils.loadLUT(lut_file)
            out_tab.loc[:, "VOI"] = out_tab.loc[:, "VOI No."].map(lut)
            col_list.append("VOI")
        col_list.append("Path")
        cur_progress += 1
        # Output
        out_tab = out_tab.loc[:, col_list]
        out_tab.to_csv(out_file, index=False)
        self.progress.Update(progress_max,
                             newmsg="Complete.")
        dlg = wx.MessageDialog(self,
                               "Complete.",
                               "Message",
                               wx.OK|wx.ICON_INFORMATION)
        dlg.ShowModal()
        return None
    def OnPushButton_close(self, evt):
        """ Callback for close button: destroy the frame and the panel.
        """
        self.parent.Destroy()
        self.Destroy()
if __name__ == "__main__":
    # Build the wx application, create the main frame (it shows itself
    # in its constructor) and enter the event loop.
    application = wx.App(False)
    VOIAnalyzerGUI()
    application.MainLoop()
| |
"""Test kytos.core.auth module."""
import asyncio
import base64
import hashlib
from unittest import TestCase
from unittest.mock import Mock, patch
from kytos.core import Controller
from kytos.core.auth import Auth
from kytos.core.config import KytosConfig
# Base URLs of the local kytos API exercised by the tests below.
KYTOS_CORE_API = "http://127.0.0.1:8181/api/kytos/"
API_URI = KYTOS_CORE_API+"core"
STOREHOUSE_API_URI = KYTOS_CORE_API+"storehouse/v1/kytos.core.auth.users"
# pylint: disable=unused-argument
class TestAuth(TestCase):
    """Auth tests.

    Events put on the controller's app buffer are intercepted by
    _patch_event_trigger, which immediately invokes the event callback
    with the box registered for it in self.patched_events.
    """
    def setUp(self):
        """Instantiate a controller and an Auth."""
        self.patched_events = []  # {'event_name': box_object}
        self.server_name_url = 'http://localhost:8181/api/kytos'
        self.controller = self._get_controller_mock()
        self.auth = Auth(self.controller)
        self.username, self.password = self._create_super_user()
        self.token = self._get_token()
        self.user_data = {
            "username": "authtempuser",
            "email": "temp@kytos.io",
            "password": "password",
        }
    def _patch_event_trigger(self, event):
        """Patch event callback trigger.

        Calls the event's callback with the box registered for it; the
        box is None when the name was not registered (error-path tests
        register None explicitly).
        """
        for patched_event in self.patched_events:
            box = patched_event.get(event.content.get('callback').__name__)
            event.content.get('callback')(None, box, None)
    def _get_controller_mock(self):
        """Return a controller mock."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        options = KytosConfig().options['daemon']
        options.jwt_secret = 'jwt_secret'
        controller = Controller(options, loop=loop)
        controller.log = Mock()
        # Patch event callback trigger.
        controller.buffers.app.put = self._patch_event_trigger
        return controller
    @staticmethod
    def get_auth_test_client(auth):
        """Return a flask api test client."""
        return auth.controller.api_server.app.test_client()
    @patch('kytos.core.auth.Auth._create_superuser')
    def _create_super_user(self, mock_username=None):
        """Create a superuser to integration test."""
        username = "test"
        password = "password"
        email = "test@kytos.io"
        mock_username.return_value.get_username.return_value = username
        mock_username.return_value.get_email.return_value = email
        self.auth._create_superuser()  # pylint: disable=protected-access
        return username, password
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def _get_token(self, mock_jwt_secret=None):
        """Make a request to get a token to be used in tests."""
        box = Mock()
        box.data = {
            # "password" digested
            'password': 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e073'
                        '94c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103'
                        'fd07c95385ffab0cacbc86'
        }
        header = {
            "Authorization": "Basic "
            + base64.b64encode(
                bytes(self.username + ":" + self.password, "ascii")
            ).decode("ascii")
        }
        # Patch _find_user_callback event callback.
        self.patched_events.append({'_find_user_callback': box})
        url = "%s/auth/login/" % API_URI
        api = self.get_auth_test_client(self.auth)
        success_response = api.open(url, method='GET', headers=header)
        json_response = success_response.json
        return json_response["token"]
    def _validate_schema(self, my_dict, check_against):
        """Check if a dict respects a given schema.

        Every key of check_against is validated: nested dicts are checked
        recursively, plain values are treated as expected types.
        """
        for key, value in check_against.items():
            if isinstance(value, dict):
                # BUGFIX: the original returned the recursion result
                # immediately, so keys after the first nested dict were
                # never validated.
                if not self._validate_schema(my_dict[key], value):
                    return False
            elif not isinstance(my_dict[key], value):
                return False
        return True
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_01_login_request(self, mock_jwt_secret):
        """Test auth login endpoint."""
        valid_header = {
            "Authorization": "Basic "
            + base64.b64encode(
                bytes(self.username + ":" + self.password, "ascii")
            ).decode("ascii")
        }
        invalid_header = {
            "Authorization": "Basic "
            + base64.b64encode(
                bytes("nonexistent" + ":" + "nonexistent", "ascii")
            ).decode("ascii")
        }
        box = Mock()
        box.data = {
            # "password" digested
            'password': 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e073'
                        '94c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103'
                        'fd07c95385ffab0cacbc86'
        }
        # Patch _find_user_callback event callback.
        self.patched_events.append({'_find_user_callback': box})
        url = "%s/auth/login/" % API_URI
        api = self.get_auth_test_client(self.auth)
        success_response = api.open(url, method='GET', headers=valid_header)
        error_response = api.open(url, method='GET', headers=invalid_header)
        self.assertEqual(success_response.status_code, 200)
        self.assertEqual(error_response.status_code, 401)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_02_list_users_request(self, mock_jwt_secret):
        """Test auth list users endpoint."""
        valid_header = {"Authorization": "Bearer %s" % self.token}
        invalid_header = {"Authorization": "Bearer invalidtoken"}
        schema = {"users": list}
        password = "password".encode()
        # Patch _list_users_callback event callback.
        event_boxes = [self.user_data,
                       {"username": "authtempuser2",
                        "email": "tempuser2@kytos.io",
                        "password": hashlib.sha512(password).hexdigest()}]
        self.patched_events.append({'_list_users_callback': event_boxes})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/" % API_URI
        success_response = api.open(url, method='GET', headers=valid_header)
        error_response = api.open(url, method='GET', headers=invalid_header)
        is_valid = self._validate_schema(success_response.json, schema)
        self.assertEqual(success_response.status_code, 200)
        self.assertEqual(error_response.status_code, 401)
        self.assertTrue(is_valid)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_03_create_user_request(self, mock_jwt_secret):
        """Test auth create user endpoint."""
        header = {"Authorization": "Bearer %s" % self.token}
        # Patch _create_user_callback event callback.
        self.patched_events.append({'_create_user_callback': self.user_data})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/" % API_URI
        success_response = api.open(url, method='POST', json=self.user_data,
                                    headers=header)
        self.assertEqual(success_response.status_code, 200)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_03_create_user_request_error(self, mock_jwt_secret):
        """Test auth create user endpoint."""
        header = {"Authorization": "Bearer %s" % self.token}
        # Patch _create_user_callback event callback (None -> conflict).
        self.patched_events.append({'_create_user_callback': None})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/" % API_URI
        error_response = api.open(url, method='POST', json=self.user_data,
                                  headers=header)
        self.assertEqual(error_response.status_code, 409)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_04_list_user_request(self, mock_jwt_secret):
        """Test auth list user endpoint."""
        valid_header = {"Authorization": "Bearer %s" % self.token}
        schema = {"data": {"email": str, "username": str}}
        box = Mock()
        box.data = self.user_data
        self.patched_events.append({'_find_user_callback': box})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/%s" % (API_URI, self.user_data.get("username"))
        success_response = api.open(url, method='GET', headers=valid_header)
        is_valid = self._validate_schema(success_response.json, schema)
        self.assertEqual(success_response.status_code, 200)
        self.assertTrue(is_valid)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_04_list_user_request_error(self, mock_jwt_secret):
        """Test auth list user endpoint."""
        valid_header = {"Authorization": "Bearer %s" % self.token}
        self.patched_events.append({'_find_user_callback': None})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/%s" % (API_URI, 'user3')
        error_response = api.open(url, method='GET', headers=valid_header)
        self.assertEqual(error_response.status_code, 404)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_05_update_user_request(self, mock_jwt_secret):
        """Test auth update user endpoint."""
        valid_header = {"Authorization": "Bearer %s" % self.token}
        data = {"email": "newemail_tempuser@kytos.io"}
        self.patched_events.append({'_update_user_callback': data})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/%s" % (API_URI, self.user_data.get("username"))
        success_response = api.open(url, method='PATCH', json=data,
                                    headers=valid_header)
        self.assertEqual(success_response.status_code, 200)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_05_update_user_request_error(self, mock_jwt_secret):
        """Test auth update user endpoint."""
        valid_header = {"Authorization": "Bearer %s" % self.token}
        self.patched_events.append({'_update_user_callback': None})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/%s" % (API_URI, 'user5')
        error_response = api.open(url, method='PATCH', json={},
                                  headers=valid_header)
        self.assertEqual(error_response.status_code, 404)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_06_delete_user_request(self, mock_jwt_secret):
        """Test auth delete user endpoint."""
        header = {"Authorization": "Bearer %s" % self.token}
        # Patch _delete_user_callback event callback.
        self.patched_events.append({'_delete_user_callback': self.user_data})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/%s" % (API_URI, self.user_data.get("username"))
        success_response = api.open(url, method='DELETE', headers=header)
        self.assertEqual(success_response.status_code, 200)
    @patch('kytos.core.auth.Auth.get_jwt_secret', return_value="abc")
    def test_06_delete_user_request_error(self, mock_jwt_secret):
        """Test auth delete user endpoint."""
        header = {"Authorization": "Bearer %s" % self.token}
        # Patch _delete_user_callback event callback (None -> not found).
        self.patched_events.append({'_delete_user_callback': None})
        api = self.get_auth_test_client(self.auth)
        url = "%s/auth/users/%s" % (API_URI, "nonexistent")
        # Renamed from `success_response`: this is the error path.
        error_response = api.open(url, method='DELETE', headers=header)
        self.assertEqual(error_response.status_code, 404)
| |
"""
fs.s3fs
=======
**Currently only avaiable on Python2 due to boto not being available for Python3**
FS subclass accessing files in Amazon S3
This module provides the class 'S3FS', which implements the FS filesystem
interface for objects stored in Amazon Simple Storage Service (S3).
"""
import os
import datetime
import tempfile
from fnmatch import fnmatch
import stat as statinfo
import boto.s3.connection
from boto.s3.prefix import Prefix
from boto.exception import S3ResponseError
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.remote import *
from fs.filelike import LimitBytesFile
# Boto is not thread-safe, so we need to use a per-thread S3 connection.
# NOTE(review): `threading` and `time` are not imported explicitly in this
# module; presumably they arrive via the wildcard imports above (fs.base) --
# confirm against fs.base's exports.
if hasattr(threading,"local"):
    thread_local = threading.local
else:
    # Fallback substitute for threading.local on very old Pythons.
    # NOTE(review): `self._map = {}` in __init__ goes through the overridden
    # __setattr__/__getattr__ pair and looks like it would recurse; this
    # branch is dead on any Python with threading.local -- verify if ever
    # exercised.
    class thread_local(object):
        def __init__(self):
            self._map = {}
        def __getattr__(self,attr):
            try:
                # Keys are (thread, attribute-name) pairs, emulating
                # per-thread attribute storage.
                return self._map[(threading.currentThread(),attr)]
            except KeyError:
                raise AttributeError, attr
        def __setattr__(self,attr,value):
            self._map[(threading.currentThread(),attr)] = value
class S3FS(FS):
"""A filesystem stored in Amazon S3.
This class provides the FS interface for files stored in Amazon's Simple
Storage Service (S3). It should be instantiated with the name of the
S3 bucket to use, and optionally a prefix under which the files should
be stored.
Local temporary files are used when opening files from this filesystem,
and any changes are only pushed back into S3 when the files are closed
or flushed.
"""
    # Capability flags advertised through the FS meta interface.
    _meta = { 'thread_safe' : True,
              'virtual': False,
              'read_only' : False,
              'unicode_paths' : True,
              'case_insensitive_paths' : False,
              'network' : True,
              'atomic.move' : True,
              'atomic.copy' : True,
              'atomic.makedir' : True,
              'atomic.rename' : False,
              # NOTE(review): 'setconetns' looks like a typo for
              # 'atomic.setcontents', but the key is looked up by string at
              # runtime, so it is left unchanged here -- confirm before fixing.
              'atomic.setconetns' : True
            }
    # S3 imposes no fixed path/name length limits.
    class meta:
        PATH_MAX = None
        NAME_MAX = None
    def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True, key_sync_timeout=1):
        """Constructor for S3FS objects.

        S3FS objects require the name of the S3 bucket in which to store
        files, and can optionally be given a prefix under which the files
        should be stored. The AWS public and private keys may be specified
        as additional arguments; if they are not specified they will be
        read from the two environment variables AWS_ACCESS_KEY_ID and
        AWS_SECRET_ACCESS_KEY.

        The keyword argument 'key_sync_timeout' specifies the maximum
        time in seconds that the filesystem will spend trying to confirm
        that a newly-uploaded S3 key is available for reading.  For no
        timeout set it to zero.  To disable these checks entirely (and
        thus reduce the filesystem's consistency guarantees to those of
        S3's "eventual consistency" model) set it to None.

        By default the path separator is "/", but this can be overridden
        by specifying the keyword 'separator' in the constructor.
        """
        self._bucket_name = bucket
        self._access_keys = (aws_access_key,aws_secret_key)
        self._separator = separator
        self._key_sync_timeout = key_sync_timeout
        # Normalise prefix to this form: path/to/files/
        prefix = normpath(prefix)
        while prefix.startswith(separator):
            prefix = prefix[1:]
        if not prefix.endswith(separator) and prefix != "":
            prefix = prefix + separator
        # Boto expects byte strings on Python 2.
        if isinstance(prefix,unicode):
            prefix = prefix.encode("utf8")
        # Fail fast if no credentials were given and none are in the env.
        if aws_access_key is None:
            if "AWS_ACCESS_KEY_ID" not in os.environ:
                raise CreateFailedError("AWS_ACCESS_KEY_ID not set")
        if aws_secret_key is None:
            if "AWS_SECRET_ACCESS_KEY" not in os.environ:
                raise CreateFailedError("AWS_SECRET_ACCESS_KEY not set")
        self._prefix = prefix
        # Per-thread storage for the lazily-created boto connection/bucket.
        self._tlocal = thread_local()
        super(S3FS, self).__init__(thread_synchronize=thread_synchronize)
    # Make _s3conn and _s3bukt properties that are created on demand,
    # since they cannot be stored during pickling.
    def _s3conn(self):
        """Per-thread S3 connection, recreated when older than 60 seconds."""
        try:
            (c,ctime) = self._tlocal.s3conn
            # Expire cached connections to avoid stale sockets.
            if time.time() - ctime > 60:
                raise AttributeError
            return c
        except AttributeError:
            c = boto.s3.connection.S3Connection(*self._access_keys)
            self._tlocal.s3conn = (c,time.time())
            return c
    _s3conn = property(_s3conn)
    def _s3bukt(self):
        """Per-thread bucket handle, recreated when older than 60 seconds.

        The bucket is created on demand if it does not exist yet.
        """
        try:
            (b,ctime) = self._tlocal.s3bukt
            # Expire cached bucket handles alongside the connection.
            if time.time() - ctime > 60:
                raise AttributeError
            return b
        except AttributeError:
            try:
                b = self._s3conn.get_bucket(self._bucket_name, validate=True)
            except S3ResponseError, e:
                # Only a 404 means "bucket missing"; re-raise anything else.
                if "404 Not Found" not in str(e):
                    raise
                b = self._s3conn.create_bucket(self._bucket_name)
            self._tlocal.s3bukt = (b,time.time())
            return b
    _s3bukt = property(_s3bukt)
def __getstate__(self):
state = super(S3FS,self).__getstate__()
del state['_tlocal']
return state
    def __setstate__(self,state):
        """Restore pickled state and rebuild the thread-local storage."""
        super(S3FS,self).__setstate__(state)
        # _tlocal was dropped in __getstate__; recreate it here.
        self._tlocal = thread_local()
def __repr__(self):
args = (self.__class__.__name__,self._bucket_name,self._prefix)
return '<%s: %s:%s>' % args
__str__ = __repr__
    def _s3path(self,path):
        """Get the absolute path to a file stored in S3."""
        # Normalise to a relative path and re-join with the S3 separator.
        path = relpath(normpath(path))
        path = self._separator.join(iteratepath(path))
        s3path = self._prefix + path
        # File keys never carry a trailing separator.
        if s3path and s3path[-1] == self._separator:
            s3path = s3path[:-1]
        # Boto expects byte strings on Python 2.
        if isinstance(s3path,unicode):
            s3path = s3path.encode("utf8")
        return s3path
def _uns3path(self,s3path,roots3path=None):
"""Get the local path for a file stored in S3.
This is essentially the opposite of self._s3path().
"""
if roots3path is None:
roots3path = self._s3path("")
i = len(roots3path)
return s3path[i:]
    def _sync_key(self,k):
        """Synchronise on contents of the given key.

        Since S3 only offers "eventual consistency" of data, it is possible
        to create a key but be unable to read it back straight away.  This
        method works around that limitation by polling the key until it reads
        back the value expected by the given key.

        Note that this could easily fail if the key is modified by another
        program, meaning the content will never be as specified in the given
        key.  This is the reason for the timeout argument to the constructor.
        """
        timeout = self._key_sync_timeout
        # A timeout of None disables the consistency check entirely.
        if timeout is None:
            return k
        k2 = self._s3bukt.get_key(k.name)
        t = time.time()
        # Poll until the re-fetched key's etag matches the uploaded one.
        while k2 is None or k2.etag != k.etag:
            # A timeout of zero means "poll forever".
            if timeout > 0:
                if t + timeout < time.time():
                    break
            time.sleep(0.1)
            k2 = self._s3bukt.get_key(k.name)
        return k2
    def _sync_set_contents(self,key,contents):
        """Synchronously set the contents of a key.

        `key` may be a key name or a boto key object; `contents` may be a
        string or a file-like object.  Returns the synchronised key (see
        _sync_key).
        """
        if isinstance(key,basestring):
            key = self._s3bukt.new_key(key)
        if isinstance(contents,basestring):
            key.set_contents_from_string(contents)
        elif hasattr(contents,"md5"):
            # Pass the precomputed md5 so boto can verify the upload.
            hexmd5 = contents.md5
            b64md5 = hexmd5.decode("hex").encode("base64").strip()
            key.set_contents_from_file(contents,md5=(hexmd5,b64md5))
        else:
            try:
                contents.seek(0)
            except (AttributeError,EnvironmentError):
                # Unseekable stream: spool it to a temporary file first,
                # since boto needs a seekable source.
                tf = tempfile.TemporaryFile()
                data = contents.read(524288)
                while data:
                    tf.write(data)
                    data = contents.read(524288)
                tf.seek(0)
                key.set_contents_from_file(tf)
            else:
                key.set_contents_from_file(contents)
        return self._sync_key(key)
def makepublic(self, path):
"""Mark given path as publicly accessible using HTTP(S)"""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
k.make_public()
def getpathurl(self, path, allow_none=False, expires=3600):
"""Returns a url that corresponds to the given path."""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
# Is there AllUsers group with READ permissions?
is_public = True in [grant.permission == 'READ' and \
grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
for grant in k.get_acl().acl.grants ]
url = k.generate_url(expires, force_http=is_public)
if url == None:
if not allow_none:
raise NoPathURLError(path=path)
return None
if is_public:
# Strip time token; it has no sense for public resource
url = url.split('?')[0]
return url
    def setcontents(self, path, data, chunk_size=64*1024):
        """Set the contents of a file from a string or file-like object.

        `chunk_size` is accepted for interface compatibility but is not
        used here; the upload is delegated to _sync_set_contents.
        """
        s3path = self._s3path(path)
        self._sync_set_contents(s3path, data)
    def open(self,path,mode="r"):
        """Open the named file in the given mode.

        This method downloads the file contents into a local temporary file
        so that it can be worked on efficiently.  Any changes made to the
        file are only sent back to S3 when the file is flushed or closed.
        """
        if self.isdir(path):
            raise ResourceInvalidError(path)
        s3path = self._s3path(path)
        # Truncate the file if requested
        if "w" in mode:
            k = self._sync_set_contents(s3path,"")
        else:
            k = self._s3bukt.get_key(s3path)
            if k is None:
                # Create the file if it's missing
                if "w" not in mode and "a" not in mode:
                    raise ResourceNotFoundError(path)
                if not self.isdir(dirname(path)):
                    raise ParentDirectoryMissingError(path)
                k = self._sync_set_contents(s3path,"")
        # Make sure nothing tries to read past end of socket data
        f = LimitBytesFile(k.size,k,"r")
        # For streaming reads, return the key object directly
        if mode == "r-":
            return f
        # For everything else, use a RemoteFileBuffer.
        # This will take care of closing the socket when it's done.
        return RemoteFileBuffer(self,path,mode,f)
def exists(self,path):
"""Check whether a path exists."""
s3path = self._s3path(path)
s3pathD = s3path + self._separator
# The root directory always exists
if self._prefix.startswith(s3path):
return True
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
for k in ks:
# A regular file
if _eq_utf8(k.name,s3path):
return True
# A directory
if _eq_utf8(k.name,s3pathD):
return True
return False
def isdir(self,path):
"""Check whether a path exists and is a directory."""
s3path = self._s3path(path) + self._separator
# Root is always a directory
if s3path == "/" or s3path == self._prefix:
return True
# Use a list request so that we return true if there are any files
# in that directory. This avoids requiring a special file for the
# the directory itself, which other tools may not create.
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
try:
iter(ks).next()
except StopIteration:
return False
else:
return True
def isfile(self,path):
"""Check whether a path exists and is a regular file."""
s3path = self._s3path(path)
# Root is never a file
if self._prefix.startswith(s3path):
return False
k = self._s3bukt.get_key(s3path)
if k is not None:
return True
return False
def listdir(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
"""List contents of a directory."""
return list(self.ilistdir(path,wildcard,full,absolute,
dirs_only,files_only))
def listdirinfo(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
return list(self.ilistdirinfo(path,wildcard,full,absolute,
dirs_only,files_only))
def ilistdir(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
"""List contents of a directory."""
keys = self._iter_keys(path)
entries = self._filter_keys(path,keys,wildcard,full,absolute,
dirs_only,files_only)
return (nm for (nm,k) in entries)
def ilistdirinfo(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
keys = self._iter_keys(path)
entries = self._filter_keys(path,keys,wildcard,full,absolute,
dirs_only,files_only)
return ((nm,self._get_key_info(k,nm)) for (nm,k) in entries)
    def _iter_keys(self,path):
        """Iterator over keys contained in the given directory.

        This generator yields (name,key) pairs for each entry in the given
        directory. If the path is not a directory, it raises the appropriate
        error.
        """
        s3path = self._s3path(path) + self._separator
        if s3path == "/":
            s3path = ""
        # Tracks whether the listing produced anything at all; an entirely
        # empty listing means the directory does not exist.
        isDir = False
        for k in self._s3bukt.list(prefix=s3path,delimiter=self._separator):
            if not isDir:
                isDir = True
            # Skip over the entry for the directory itself, if it exists
            name = self._uns3path(k.name,s3path)
            if name != "":
                if not isinstance(name,unicode):
                    name = name.decode("utf8")
                # Strip the trailing separator from directory entries
                if name.endswith(self._separator):
                    name = name[:-1]
                yield (name,k)
        if not isDir:
            # Nothing listed: distinguish "is a regular file" from "missing"
            if s3path != self._prefix:
                if self.isfile(path):
                    msg = "that's not a directory: %(path)s"
                    raise ResourceInvalidError(path,msg=msg)
                raise ResourceNotFoundError(path)
def _key_is_dir(self, k):
if isinstance(k,Prefix):
return True
if k.name.endswith(self._separator):
return True
return False
def _filter_keys(self,path,keys,wildcard,full,absolute,
dirs_only,files_only):
"""Filter out keys not matching the given criteria.
Given a (name,key) iterator as returned by _iter_keys, this method
applies the given filtering criteria and returns a filtered iterator.
"""
sep = self._separator
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
if dirs_only:
keys = ((nm,k) for (nm,k) in keys if self._key_is_dir(k))
elif files_only:
keys = ((nm,k) for (nm,k) in keys if not self._key_is_dir(k))
if wildcard is not None:
if callable(wildcard):
keys = ((nm,k) for (nm,k) in keys if wildcard(nm))
else:
keys = ((nm,k) for (nm,k) in keys if fnmatch(nm,wildcard))
if full:
return ((relpath(pathjoin(path, nm)),k) for (nm,k) in keys)
elif absolute:
return ((abspath(pathjoin(path, nm)),k) for (nm,k) in keys)
return keys
    def makedir(self,path,recursive=False,allow_recreate=False):
        """Create a directory at the given path.

        The directory is represented by an empty marker key whose name ends
        with the separator character.

        @param recursive: if True, create missing parent directories as well
        @param allow_recreate: if True, silently succeed when the directory
            already exists instead of raising DestinationExistsError
        """
        s3path = self._s3path(path)
        s3pathD = s3path + self._separator
        # Re-creating the root of the filesystem
        if s3pathD == self._prefix:
            if allow_recreate:
                return
            msg = "Can not create a directory that already exists"\
                  " (try allow_recreate=True): %(path)s"
            raise DestinationExistsError(path, msg=msg)
        s3pathP = self._s3path(dirname(path))
        if s3pathP:
            s3pathP = s3pathP + self._separator
        # Check various preconditions using list of parent dir
        ks = self._s3bukt.list(prefix=s3pathP,delimiter=self._separator)
        if s3pathP == self._prefix:
            parentExists = True
        else:
            parentExists = False
        for k in ks:
            # Any entry under the parent prefix proves the parent exists
            if not parentExists:
                parentExists = True
            if _eq_utf8(k.name,s3path):
                # It's already a file
                msg = "Destination exists as a regular file: %(path)s"
                raise ResourceInvalidError(path, msg=msg)
            if _eq_utf8(k.name,s3pathD):
                # It's already a directory
                if allow_recreate:
                    return
                msg = "Can not create a directory that already exists"\
                      " (try allow_recreate=True): %(path)s"
                raise DestinationExistsError(path, msg=msg)
        # Create parent if required
        if not parentExists:
            if recursive:
                self.makedir(dirname(path),recursive,allow_recreate)
            else:
                msg = "Parent directory does not exist: %(path)s"
                raise ParentDirectoryMissingError(path, msg=msg)
        # Create an empty file representing the directory
        self._sync_set_contents(s3pathD,"")
    def remove(self,path):
        """Remove the file at the given path."""
        s3path = self._s3path(path)
        ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
        # for/else: the else clause runs only if no exact match was found
        for k in ks:
            if _eq_utf8(k.name,s3path):
                break
            # A key extending past the name means 'path' is a directory
            if _startswith_utf8(k.name,s3path + "/"):
                msg = "that's not a file: %(path)s"
                raise ResourceInvalidError(path,msg=msg)
        else:
            raise ResourceNotFoundError(path)
        self._s3bukt.delete_key(s3path)
        # Poll until the key lookup no longer returns the deleted key
        # (presumably waiting out S3's eventual consistency — note that
        # this loop has no timeout).
        k = self._s3bukt.get_key(s3path)
        while k:
            k = self._s3bukt.get_key(s3path)
    def removedir(self,path,recursive=False,force=False):
        """Remove the directory at the given path.

        @param recursive: if True, also remove now-empty parent directories
        @param force: if True, delete the directory's contents instead of
            failing when it is not empty
        """
        if normpath(path) in ('', '/'):
            raise RemoveRootError(path)
        s3path = self._s3path(path)
        if s3path != self._prefix:
            s3path = s3path + self._separator
        if force:
            # If we will be forcibly removing any directory contents, we
            # might as well get the un-delimited list straight away.
            ks = self._s3bukt.list(prefix=s3path)
        else:
            ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
        # Fail if the directory is not empty, or remove them if forced
        found = False
        for k in ks:
            found = True
            # Entries other than the directory's own marker key are contents
            if not _eq_utf8(k.name,s3path):
                if not force:
                    raise DirectoryNotEmptyError(path)
                self._s3bukt.delete_key(k.name)
        if not found:
            # Empty listing: either a regular file or a missing path
            if self.isfile(path):
                msg = "removedir() called on a regular file: %(path)s"
                raise ResourceInvalidError(path,msg=msg)
            if path not in ("","/"):
                raise ResourceNotFoundError(path)
        # Remove the directory's own marker key
        self._s3bukt.delete_key(s3path)
        if recursive and path not in ("","/"):
            # Prune now-empty parents, stopping at the first non-empty one
            pdir = dirname(path)
            try:
                self.removedir(pdir,recursive=True,force=False)
            except DirectoryNotEmptyError:
                pass
def rename(self,src,dst):
"""Rename the file at 'src' to 'dst'."""
# Actually, in S3 'rename' is exactly the same as 'move'
if self.isfile(src):
self.move(src,dst)
else:
self.movedir(src,dst)
    def getinfo(self,path):
        """Return an info dict for the entry at the given path."""
        s3path = self._s3path(path)
        if path in ("","/"):
            # The root has no key of its own; fake a Prefix object for it
            k = Prefix(bucket=self._s3bukt,name="/")
        else:
            k = self._s3bukt.get_key(s3path)
            if k is None:
                # No regular file with that name: look for a directory
                # prefix in a delimited listing instead (for/else: raise
                # only if no Prefix entry is found).
                ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
                for k in ks:
                    if isinstance(k,Prefix):
                        break
                else:
                    raise ResourceNotFoundError(path)
        return self._get_key_info(k,path)
def _get_key_info(self,key,name=None):
info = {}
if name is not None:
info["name"] = basename(name)
else:
info["name"] = basename(self._uns3key(k.name))
if self._key_is_dir(key):
info["st_mode"] = 0700 | statinfo.S_IFDIR
else:
info["st_mode"] = 0700 | statinfo.S_IFREG
if hasattr(key,"size"):
info['size'] = int(key.size)
etag = getattr(key,"etag",None)
if etag is not None:
if isinstance(etag,unicode):
etag = etag.encode("utf8")
info['etag'] = etag.strip('"').strip("'")
if hasattr(key,"last_modified"):
# TODO: does S3 use any other formats?
fmt = "%a, %d %b %Y %H:%M:%S %Z"
try:
mtime = datetime.datetime.strptime(key.last_modified,fmt)
info['modified_time'] = mtime
except ValueError:
pass
return info
def desc(self,path):
return "No description available"
    def copy(self,src,dst,overwrite=False,chunk_size=16384):
        """Copy a file from 'src' to 'dst'.

        src -- The source path
        dst -- The destination path
        overwrite -- If True, then the destination may be overwritten
        (if a file exists at that location). If False then an exception will be
        thrown if the destination exists
        chunk_size -- Size of chunks to use in copy (ignored by S3)
        """
        s3path_dst = self._s3path(dst)
        s3path_dstD = s3path_dst + self._separator
        # Check for various preconditions.
        ks = self._s3bukt.list(prefix=s3path_dst,delimiter=self._separator)
        dstOK = False
        for k in ks:
            # It exists as a regular file
            if _eq_utf8(k.name,s3path_dst):
                if not overwrite:
                    raise DestinationExistsError(dst)
                dstOK = True
                break
            # Check if it refers to a directory.  If so, we copy *into* it.
            # Since S3 lists in lexicographic order, subsequent iterations
            # of the loop will check for the existence of the new filename.
            if _eq_utf8(k.name,s3path_dstD):
                nm = basename(src)
                dst = pathjoin(dirname(dst),nm)
                s3path_dst = s3path_dstD + nm
                dstOK = True
        if not dstOK and not self.isdir(dirname(dst)):
            msg = "Destination directory does not exist: %(path)s"
            raise ParentDirectoryMissingError(dst,msg=msg)
        # OK, now we can copy the file.
        s3path_src = self._s3path(src)
        try:
            # Server-side copy; the data never passes through this client
            self._s3bukt.copy_key(s3path_dst,self._bucket_name,s3path_src)
        except S3ResponseError, e:
            if "404 Not Found" in str(e):
                msg = "Source is not a file: %(path)s"
                raise ResourceInvalidError(src, msg=msg)
            raise e
        else:
            # Poll until the new key becomes visible, then synchronise on it
            # (no timeout; see remove() for the same pattern)
            k = self._s3bukt.get_key(s3path_dst)
            while k is None:
                k = self._s3bukt.get_key(s3path_dst)
            self._sync_key(k)
def move(self,src,dst,overwrite=False,chunk_size=16384):
"""Move a file from one location to another."""
self.copy(src,dst,overwrite=overwrite)
self._s3bukt.delete_key(self._s3path(src))
    def walkfiles(self,
                  path="/",
                  wildcard=None,
                  dir_wildcard=None,
                  search="breadth",
                  ignore_errors=False ):
        """Iterate over the paths of all files below the given path.

        A plain breadth-first walk with no directory filtering is served
        from a single flat S3 listing; any other combination of options
        falls back to the generic base-class implementation.
        """
        if search != "breadth" or dir_wildcard is not None:
            # Options the flat listing can't satisfy: delegate upwards
            args = (wildcard,dir_wildcard,search,ignore_errors)
            for item in super(S3FS,self).walkfiles(path,*args):
                yield item
        else:
            prefix = self._s3path(path)
            for k in self._s3bukt.list(prefix=prefix):
                name = relpath(self._uns3path(k.name,prefix))
                if name != "":
                    if not isinstance(name,unicode):
                        name = name.decode("utf8")
                    # Keys ending in the separator are directory markers
                    # and are skipped here
                    if not k.name.endswith(self._separator):
                        if wildcard is not None:
                            if callable(wildcard):
                                if not wildcard(basename(name)):
                                    continue
                            else:
                                if not fnmatch(basename(name),wildcard):
                                    continue
                        yield pathjoin(path,name)
    def walkinfo(self,
                 path="/",
                 wildcard=None,
                 dir_wildcard=None,
                 search="breadth",
                 ignore_errors=False ):
        """Iterate over (path,info) pairs for all entries below the path.

        Note that, unlike walkfiles/walkfilesinfo, directory marker keys
        are not filtered out here, so directory entries are included in
        the output of the flat-listing fast path.
        """
        if search != "breadth" or dir_wildcard is not None:
            # Options the flat listing can't satisfy: delegate upwards
            args = (wildcard,dir_wildcard,search,ignore_errors)
            for item in super(S3FS,self).walkfiles(path,*args):
                yield (item,self.getinfo(item))
        else:
            prefix = self._s3path(path)
            for k in self._s3bukt.list(prefix=prefix):
                name = relpath(self._uns3path(k.name,prefix))
                if name != "":
                    if not isinstance(name,unicode):
                        name = name.decode("utf8")
                    if wildcard is not None:
                        if callable(wildcard):
                            if not wildcard(basename(name)):
                                continue
                        else:
                            if not fnmatch(basename(name),wildcard):
                                continue
                    yield (pathjoin(path,name),self._get_key_info(k,name))
    def walkfilesinfo(self,
                      path="/",
                      wildcard=None,
                      dir_wildcard=None,
                      search="breadth",
                      ignore_errors=False ):
        """Iterate over (path,info) pairs for all files below the path.

        Like walkfiles, but each yielded path is accompanied by its info
        dict; directory marker keys are skipped.
        """
        if search != "breadth" or dir_wildcard is not None:
            # Options the flat listing can't satisfy: delegate upwards
            args = (wildcard,dir_wildcard,search,ignore_errors)
            for item in super(S3FS,self).walkfiles(path,*args):
                yield (item,self.getinfo(item))
        else:
            prefix = self._s3path(path)
            for k in self._s3bukt.list(prefix=prefix):
                name = relpath(self._uns3path(k.name,prefix))
                if name != "":
                    if not isinstance(name,unicode):
                        name = name.decode("utf8")
                    # Keys ending in the separator are directory markers
                    if not k.name.endswith(self._separator):
                        if wildcard is not None:
                            if callable(wildcard):
                                if not wildcard(basename(name)):
                                    continue
                            else:
                                if not fnmatch(basename(name),wildcard):
                                    continue
                        yield (pathjoin(path,name),self._get_key_info(k,name))
def _eq_utf8(name1,name2):
if isinstance(name1,unicode):
name1 = name1.encode("utf8")
if isinstance(name2,unicode):
name2 = name2.encode("utf8")
return name1 == name2
def _startswith_utf8(name1,name2):
if isinstance(name1,unicode):
name1 = name1.encode("utf8")
if isinstance(name2,unicode):
name2 = name2.encode("utf8")
return name1.startswith(name2)
| |
#
#
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for QA tests.
"""
import copy
import datetime
import operator
import os
import random
import re
import socket
import subprocess
import sys
import tempfile
import yaml
try:
import functools
except ImportError, err:
raise ImportError("Python 2.5 or higher is required: %s" % err)
from ganeti import utils
from ganeti import compat
from ganeti import constants
from ganeti import ht
from ganeti import pathutils
from ganeti import vcluster
import colors
import qa_config
import qa_error
from qa_logging import FormatInfo
#: Open SSH multiplexers: node name -> (control socket path, child process)
_MULTIPLEXERS = {}
#: Unique ID per QA run
_RUN_UUID = utils.NewUUID()
#: Path to the QA query output log file
_QA_OUTPUT = pathutils.GetLogFilename("qa-output")
#: Number of attempts for remote commands that appear to have timed out
_RETRIES = 3
#: Expected instance states, used by InstanceCheck/RunInstanceCheck
(INST_DOWN,
 INST_UP) = range(500, 502)
#: How InstanceCheck locates the instance: from the decorated function's
#: first positional argument, or from its return value
(FIRST_ARG,
 RETURN_VALUE) = range(1000, 1002)
def _RaiseWithInfo(msg, error_desc):
  """Raises a QA error with the given content, and adds a message if present.

  """
  if not msg:
    output = error_desc
  else:
    # Prefix the caller-supplied context message
    output = "%s: %s" % (msg, error_desc)
  raise qa_error.Error(output)
def AssertIn(item, sequence, msg=None):
  """Raises an error when item is not in sequence.

  """
  if item in sequence:
    return
  _RaiseWithInfo(msg, "%r not in %r" % (item, sequence))
def AssertNotIn(item, sequence, msg=None):
  """Raises an error when item is in sequence.

  """
  if item not in sequence:
    return
  _RaiseWithInfo(msg, "%r in %r" % (item, sequence))
def AssertEqual(first, second, msg=None):
  """Raises an error when values aren't equal.

  """
  # Written as "not ==" rather than "!=" so only __eq__ is consulted
  if first == second:
    return
  _RaiseWithInfo(msg, "%r == %r" % (first, second))
def AssertMatch(string, pattern, msg=None):
  """Raises an error when string doesn't match regexp pattern.

  """
  # re.match anchors at the start of the string only
  if re.match(pattern, string) is None:
    _RaiseWithInfo(msg, "%r doesn't match /%r/" % (string, pattern))
def _GetName(entity, fn):
  """Tries to get name of an entity.

  @type entity: string or dict
  @param fn: Function retrieving name from entity

  """
  # Strings are already names; anything else goes through the accessor
  if isinstance(entity, basestring):
    name = entity
  else:
    name = fn(entity)
  if not ht.TNonEmptyString(name):
    raise Exception("Invalid name '%s'" % name)
  return name
def _AssertRetCode(rcode, fail, cmdstr, nodename):
  """Check the return value from a command and possibly raise an exception.

  """
  if fail:
    # Expected to fail: success is the error
    if rcode == 0:
      raise qa_error.Error("Command '%s' on node %s was expected to fail but"
                           " didn't" % (cmdstr, nodename))
  elif rcode != 0:
    raise qa_error.Error("Command '%s' on node %s failed, exit code %s" %
                         (cmdstr, nodename, rcode))
def AssertCommand(cmd, fail=False, node=None, log_cmd=True, max_seconds=None):
  """Checks that a remote command succeeds.

  @param cmd: either a string (the command to execute) or a list (to
      be converted using L{utils.ShellQuoteArgs} into a string)
  @type fail: boolean or None
  @param fail: if the command is expected to fail instead of succeeding,
      or None if we don't care
  @param node: if passed, it should be the node on which the command
      should be executed, instead of the master node (can be either a
      dict or a string)
  @param log_cmd: if False, the command won't be logged (simply passed to
      StartSSH)
  @type max_seconds: double
  @param max_seconds: fail if the command takes more than C{max_seconds}
      seconds
  @return: the return code, stdout and stderr of the command
  @raise qa_error.Error: if the command fails when it shouldn't or vice versa

  """
  if node is None:
    node = qa_config.GetMasterNode()
  nodename = _GetName(node, operator.attrgetter("primary"))
  if isinstance(cmd, basestring):
    cmdstr = cmd
  else:
    cmdstr = utils.ShellQuoteArgs(cmd)
  start = datetime.datetime.now()
  popen = StartSSH(nodename, cmdstr, log_cmd=log_cmd)
  # Run the command
  stdout, stderr = popen.communicate()
  rcode = popen.returncode
  duration_seconds = TimedeltaToTotalSeconds(datetime.datetime.now() - start)
  if fail is not None:
    try:
      _AssertRetCode(rcode, fail, cmdstr, nodename)
    except:
      # Show the captured output before re-raising the assertion error
      print "Stdout was:\n%s\nStderr was:\n%s\n" % (stdout, stderr)
      raise
  if max_seconds is not None:
    if duration_seconds > max_seconds:
      raise qa_error.Error(
        "Cmd '%s' took %f seconds, maximum of %f was exceeded" %
        (cmdstr, duration_seconds, max_seconds))
  return rcode, stdout, stderr
def AssertRedirectedCommand(cmd, fail=False, node=None, log_cmd=True):
  """Executes a command with redirected output.

  The log will go to the qa-output log file in the ganeti log
  directory on the node where the command is executed. The fail and
  node parameters are passed unchanged to AssertCommand.

  @param cmd: the command to be executed, as a list; a string is not
      supported

  """
  if not isinstance(cmd, list):
    raise qa_error.Error("Non-list passed to AssertRedirectedCommand")
  ofile = utils.ShellQuote(_QA_OUTPUT)
  cmdstr = utils.ShellQuoteArgs(cmd)
  # First record a timestamped banner in the output log...
  banner = "echo ---- $(date) %s ---- >> %s" % (cmdstr, ofile)
  AssertCommand(banner, fail=False, node=node, log_cmd=False)
  # ...then run the real command with its output appended to the same file
  redirected = "%s >> %s" % (cmdstr, ofile)
  return AssertCommand(redirected, fail=fail, node=node, log_cmd=log_cmd)
def GetSSHCommand(node, cmd, strict=True, opts=None, tty=False,
                  use_multiplexer=True):
  """Builds SSH command to be executed.

  @type node: string
  @param node: node the command should run on
  @type cmd: string
  @param cmd: command to be executed in the node; if None or empty
      string, no command will be executed
  @type strict: boolean
  @param strict: whether to enable strict host key checking
  @type opts: list
  @param opts: list of additional options
  @type tty: boolean or None
  @param tty: if we should use tty; if None, will be auto-detected
  @type use_multiplexer: boolean
  @param use_multiplexer: if the multiplexer for the node should be used
  @rtype: list
  @return: the full argument vector for the ssh invocation

  """
  args = ["ssh", "-oEscapeChar=none", "-oBatchMode=yes", "-lroot"]
  if tty is None:
    # Auto-detect: only allocate a tty when our own stdout is one
    tty = sys.stdout.isatty()
  if tty:
    args.append("-t")
  args.append("-oStrictHostKeyChecking=%s" % ("yes" if strict else "no", ))
  args.append("-oClearAllForwardings=yes")
  args.append("-oForwardAgent=yes")
  if opts:
    args.extend(opts)
  if node in _MULTIPLEXERS and use_multiplexer:
    # Reuse the persistent control connection opened by StartMultiplexer
    spath = _MULTIPLEXERS[node][0]
    args.append("-oControlPath=%s" % spath)
    args.append("-oControlMaster=no")
  (vcluster_master, vcluster_basedir) = \
    qa_config.GetVclusterSettings()
  if vcluster_master:
    # Virtual cluster: ssh to the host and go through the node's "cmd"
    # wrapper script under the vcluster base directory
    args.append(vcluster_master)
    args.append("%s/%s/cmd" % (vcluster_basedir, node))
    if cmd:
      # For virtual clusters the whole command must be wrapped using the "cmd"
      # script, as that script sets a number of environment variables. If the
      # command contains shell meta characters the whole command needs to be
      # quoted.
      args.append(utils.ShellQuote(cmd))
  else:
    args.append(node)
    if cmd:
      args.append(cmd)
  return args
def StartLocalCommand(cmd, _nolog_opts=False, log_cmd=True, **kwargs):
  """Starts a local command.

  @type cmd: list
  @param cmd: argument vector passed to L{subprocess.Popen}
  @param _nolog_opts: if True, leave option-style arguments (anything
      starting with "-") out of the logged command line
  @param log_cmd: if False, do not print the command at all
  @return: the started L{subprocess.Popen} object

  """
  if log_cmd:
    if _nolog_opts:
      # Hide options (e.g. the long list of SSH settings) from the log
      pcmd = [i for i in cmd if not i.startswith("-")]
    else:
      pcmd = cmd
    print "%s %s" % (colors.colorize("Command:", colors.CYAN),
                     utils.ShellQuoteArgs(pcmd))
  return subprocess.Popen(cmd, shell=False, **kwargs)
def StartSSH(node, cmd, strict=True, log_cmd=True):
  """Starts SSH.

  """
  ssh_cmd = GetSSHCommand(node, cmd, strict=strict)
  # Capture both output streams so callers can inspect them
  return StartLocalCommand(ssh_cmd, _nolog_opts=True, log_cmd=log_cmd,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def StartMultiplexer(node):
  """Starts a multiplexer command.

  @param node: the node for which to open the multiplexer

  """
  if node in _MULTIPLEXERS:
    # A multiplexer is already running for this node
    return
  # Note: yes, we only need mktemp, since we'll remove the file anyway
  sname = tempfile.mktemp(prefix="ganeti-qa-multiplexer.")
  utils.RemoveFile(sname)
  # "-N": run no remote command; the process only holds the control socket
  opts = ["-N", "-oControlPath=%s" % sname, "-oControlMaster=yes"]
  print "Created socket at %s" % sname
  child = StartLocalCommand(GetSSHCommand(node, None, opts=opts))
  # Remember the socket path and child so GetSSHCommand can reuse the
  # connection and CloseMultiplexers can clean it up
  _MULTIPLEXERS[node] = (sname, child)
def CloseMultiplexers():
  """Closes all current multiplexers and cleans up.

  """
  while _MULTIPLEXERS:
    (_node, (sname, child)) = _MULTIPLEXERS.popitem()
    # Terminate the master SSH process, then remove its control socket
    utils.KillProcess(child.pid, timeout=10, waitpid=True)
    utils.RemoveFile(sname)
def _GetCommandStdout(proc):
"""Extract the stored standard error, print it and return it.
"""
out = proc.stdout.read()
sys.stdout.write(out)
return out
def _NoTimeout(state):
"""False iff the command timed out."""
rcode, out = state
return rcode == 0 or not ('TimeoutError' in out or 'timed out' in out)
def GetCommandOutput(node, cmd, tty=False, use_multiplexer=True, log_cmd=True,
                     fail=False):
  """Returns the output of a command executed on the given node.

  @type node: string
  @param node: node the command should run on
  @type cmd: string
  @param cmd: command to be executed in the node (cannot be empty or None)
  @type tty: bool or None
  @param tty: if we should use tty; if None, it will be auto-detected
  @type use_multiplexer: bool
  @param use_multiplexer: if the SSH multiplexer provided by the QA should be
      used or not
  @type log_cmd: bool
  @param log_cmd: if the command should be logged
  @type fail: bool
  @param fail: whether the command is expected to fail

  """
  assert cmd
  def CallCommand():
    # Single attempt: run the command over SSH, collect rc and stdout
    command = GetSSHCommand(node, cmd, tty=tty,
                            use_multiplexer=use_multiplexer)
    p = StartLocalCommand(command, stdout=subprocess.PIPE, log_cmd=log_cmd)
    rcode = p.wait()
    out = _GetCommandStdout(p)
    return rcode, out
  # TODO: make retries configurable
  # Retry as long as the failure looks like a timeout (see _NoTimeout)
  rcode, out = utils.CountRetry(_NoTimeout, CallCommand, _RETRIES)
  _AssertRetCode(rcode, fail, cmd, node)
  return out
def GetObjectInfo(infocmd):
  """Get and parse information about a Ganeti object.

  @type infocmd: list of strings
  @param infocmd: command to be executed, e.g. ["gnt-cluster", "info"]
  @return: the information parsed, appropriately stored in dictionaries,
      lists...

  """
  master = qa_config.GetMasterNode()
  cmdline = utils.ShellQuoteArgs(infocmd)
  info_out = GetCommandOutput(master.primary, cmdline)
  # NOTE(review): yaml.load without an explicit SafeLoader can construct
  # arbitrary Python objects; acceptable here only because the input is
  # produced by our own gnt-* tools.
  return yaml.load(info_out)
def UploadFile(node, src):
  """Uploads a file to a node and returns the filename.

  Caller needs to remove the returned file on the node when it's not needed
  anymore.

  @param node: node to upload to
  @param src: path of the local file to upload
  @return: the remote temporary filename

  """
  # Make sure nobody else has access to it while preserving local permissions
  mode = os.stat(src).st_mode & 0700
  # Remote shell snippet: create a temp file, restrict its mode, fill it
  # from stdin and echo the resulting filename back to us
  cmd = ('tmp=$(mktemp --tmpdir gnt.XXXXXX) && '
         'chmod %o "${tmp}" && '
         '[[ -f "${tmp}" ]] && '
         'cat > "${tmp}" && '
         'echo "${tmp}"') % mode
  f = open(src, "r")
  try:
    # The local file is streamed to the remote 'cat' via stdin
    p = subprocess.Popen(GetSSHCommand(node, cmd), shell=False, stdin=f,
                         stdout=subprocess.PIPE)
    AssertEqual(p.wait(), 0)
    # Return temporary filename
    return _GetCommandStdout(p).strip()
  finally:
    f.close()
def UploadData(node, data, mode=0600, filename=None):
  """Uploads data to a node and returns the filename.

  Caller needs to remove the returned file on the node when it's not needed
  anymore.

  @param data: the file content, sent to the remote file via stdin
  @param mode: permissions for a newly created temporary file (unused
      when an explicit filename is given)
  @param filename: target filename; if not set, a temporary file is used
  @return: the remote filename

  """
  if filename:
    tmp = "tmp=%s" % utils.ShellQuote(filename)
  else:
    # Create a fresh temporary file with the requested permissions
    tmp = ('tmp=$(mktemp --tmpdir gnt.XXXXXX) && '
           'chmod %o "${tmp}"') % mode
  # Fill the file from stdin and echo its name back to us
  cmd = ("%s && "
         "[[ -f \"${tmp}\" ]] && "
         "cat > \"${tmp}\" && "
         "echo \"${tmp}\"") % tmp
  p = subprocess.Popen(GetSSHCommand(node, cmd), shell=False,
                       stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  p.stdin.write(data)
  p.stdin.close()
  AssertEqual(p.wait(), 0)
  # Return temporary filename
  return _GetCommandStdout(p).strip()
def BackupFile(node, path):
  """Creates a backup of a file on the node and returns the filename.

  Caller needs to remove the returned file on the node when it's not needed
  anymore.

  @param node: node holding the file
  @param path: path of the file to back up (vcluster-relative)
  @return: the backup's filename on the node

  """
  vpath = MakeNodePath(node, path)
  # Copy the file to a mktemp-generated name in the same directory and
  # echo the backup's filename back to us
  cmd = ("tmp=$(mktemp .gnt.XXXXXX --tmpdir=$(dirname %s)) && "
         "[[ -f \"$tmp\" ]] && "
         "cp %s $tmp && "
         "echo $tmp") % (utils.ShellQuote(vpath), utils.ShellQuote(vpath))
  # Return temporary filename
  result = GetCommandOutput(node, cmd).strip()
  print "Backup filename: %s" % result
  return result
def ResolveInstanceName(instance):
  """Gets the full name of an instance.

  @type instance: string
  @param instance: Instance name

  """
  # "gnt-instance info" reports the canonical name under "Instance name"
  instance_info = GetObjectInfo(["gnt-instance", "info", instance])
  return instance_info[0]["Instance name"]
def ResolveNodeName(node):
  """Gets the full name of a node.

  """
  # "gnt-node info" reports the canonical name under "Node name"
  node_info = GetObjectInfo(["gnt-node", "info", node.primary])
  return node_info[0]["Node name"]
def GetNodeInstances(node, secondaries=False):
  """Gets a list of instances on a node.

  @param secondaries: if True, return instances having the node as a
      secondary instead of as their primary

  """
  master = qa_config.GetMasterNode()
  node_name = ResolveNodeName(node)
  # Ask for every instance together with its primary and secondary nodes
  cmd = ["gnt-instance", "list", "--separator=:", "--no-headers",
         "--output=name,pnode,snodes"]
  output = GetCommandOutput(master.primary, utils.ShellQuoteArgs(cmd))
  instances = []
  for line in output.splitlines():
    (name, pnode, snodes) = line.split(":", 2)
    if secondaries:
      if node_name in snodes.split(","):
        instances.append(name)
    elif pnode == node_name:
      instances.append(name)
  return instances
def _SelectQueryFields(rnd, fields):
"""Generates a list of fields for query tests.
"""
# Create copy for shuffling
fields = list(fields)
rnd.shuffle(fields)
# Check all fields
yield fields
yield sorted(fields)
# Duplicate fields
yield fields + fields
# Check small groups of fields
while fields:
yield [fields.pop() for _ in range(rnd.randint(2, 10)) if fields]
def _List(listcmd, fields, names):
  """Runs a list command.

  """
  master = qa_config.GetMasterNode()
  cmd = [listcmd, "list", "--separator=|", "--no-headers",
         "--output", ",".join(fields)]
  # Restrict the listing to specific names when requested
  if names:
    cmd.extend(names)
  out = GetCommandOutput(master.primary, utils.ShellQuoteArgs(cmd))
  return out.splitlines()
def GenericQueryTest(cmd, fields, namefield="name", test_unknown=True):
  """Runs a number of tests on query commands.

  @param cmd: Command name
  @param fields: List of field names
  @param namefield: name-like field used for the sort-order checks, or
      None to skip them
  @param test_unknown: whether to verify that listing unknown items fails

  """
  # Seed from the command name so each command gets a stable shuffle
  rnd = random.Random(hash(cmd))
  fields = list(fields)
  rnd.shuffle(fields)
  # Test a number of field combinations
  for testfields in _SelectQueryFields(rnd, fields):
    AssertRedirectedCommand([cmd, "list", "--output", ",".join(testfields)])
  if namefield is not None:
    namelist_fn = compat.partial(_List, cmd, [namefield])
    # When no names were requested, the list must be sorted
    names = namelist_fn(None)
    AssertEqual(names, utils.NiceSort(names))
    # When requesting specific names, the order must be kept
    revnames = list(reversed(names))
    AssertEqual(namelist_fn(revnames), revnames)
    randnames = list(names)
    rnd.shuffle(randnames)
    AssertEqual(namelist_fn(randnames), randnames)
  if test_unknown:
    # Listing unknown items must fail
    AssertCommand([cmd, "list", "this.name.certainly.does.not.exist"],
                  fail=True)
  # Check exit code for listing unknown field
  rcode, _, _ = AssertRedirectedCommand([cmd, "list",
                                         "--output=field/does/not/exist"],
                                        fail=True)
  AssertEqual(rcode, constants.EXIT_UNKNOWN_FIELD)
def GenericQueryFieldsTest(cmd, fields):
  """Runs generic tests on a command's list-fields subcommand.

  @param cmd: Command name
  @param fields: List of field names expected to be reported

  """
  master = qa_config.GetMasterNode()
  # Listing fields
  AssertRedirectedCommand([cmd, "list-fields"])
  AssertRedirectedCommand([cmd, "list-fields"] + fields)
  # Check listed fields (all, must be sorted)
  realcmd = [cmd, "list-fields", "--separator=|", "--no-headers"]
  output = GetCommandOutput(master.primary,
                            utils.ShellQuoteArgs(realcmd)).splitlines()
  AssertEqual([line.split("|", 1)[0] for line in output],
              utils.NiceSort(fields))
  # Check exit code for listing unknown field
  rcode, _, _ = AssertCommand([cmd, "list-fields", "field/does/not/exist"],
                              fail=True)
  AssertEqual(rcode, constants.EXIT_UNKNOWN_FIELD)
def AddToEtcHosts(hostnames):
  """Adds hostnames to /etc/hosts.

  @param hostnames: List of hostnames first used A records, all other CNAMEs

  """
  master = qa_config.GetMasterNode()
  tmp_hosts = UploadData(master.primary, "", mode=0644)
  data = []
  # Map every name onto both the IPv6 and IPv4 loopback addresses
  for localhost in ("::1", "127.0.0.1"):
    data.append("%s %s" % (localhost, " ".join(hostnames)))
  try:
    # Append atomically: write the combined file to the temp path, then
    # rename it over /etc/hosts
    AssertCommand("{ cat %s && echo -e '%s'; } > %s && mv %s %s" %
                  (utils.ShellQuote(pathutils.ETC_HOSTS),
                   "\\n".join(data),
                   utils.ShellQuote(tmp_hosts),
                   utils.ShellQuote(tmp_hosts),
                   utils.ShellQuote(pathutils.ETC_HOSTS)))
  except Exception:
    # Clean up the temp file before propagating the failure
    AssertCommand(["rm", "-f", tmp_hosts])
    raise
def RemoveFromEtcHosts(hostnames):
  """Remove hostnames from /etc/hosts.

  @param hostnames: List of hostnames first used A records, all other CNAMEs

  """
  master = qa_config.GetMasterNode()
  tmp_hosts = UploadData(master.primary, "", mode=0644)
  quoted_tmp_hosts = utils.ShellQuote(tmp_hosts)
  # NOTE(review): the hostnames are embedded into the sed expression
  # unescaped, so regex metacharacters (e.g. the dots) match loosely;
  # fine for the exact lines written by AddToEtcHosts, but verify before
  # reusing this elsewhere.
  sed_data = " ".join(hostnames)
  try:
    # Filter out the loopback lines added by AddToEtcHosts, then move the
    # filtered copy back over /etc/hosts
    AssertCommand((r"sed -e '/^\(::1\|127\.0\.0\.1\)\s\+%s/d' %s > %s"
                   r" && mv %s %s") %
                  (sed_data, utils.ShellQuote(pathutils.ETC_HOSTS),
                   quoted_tmp_hosts, quoted_tmp_hosts,
                   utils.ShellQuote(pathutils.ETC_HOSTS)))
  except Exception:
    # Clean up the temp file before propagating the failure
    AssertCommand(["rm", "-f", tmp_hosts])
    raise
def RunInstanceCheck(instance, running):
  """Check if instance is running or not.

  @param instance: instance name, or an object exposing a "name" attribute
  @type running: bool
  @param running: whether the instance is expected to be running

  """
  instance_name = _GetName(instance, operator.attrgetter("name"))
  script = qa_config.GetInstanceCheckScript()
  if not script:
    # No check script configured; nothing to verify
    return
  master_node = qa_config.GetMasterNode()
  # Build command to connect to master node
  master_ssh = GetSSHCommand(master_node.primary, "--")
  if running:
    running_shellval = "1"
    running_text = ""
  else:
    # An empty INSTANCE_RUNNING value signals "not running"
    running_shellval = ""
    running_text = "not "
  print FormatInfo("Checking if instance '%s' is %srunning" %
                   (instance_name, running_text))
  args = [script, instance_name]
  # The check script gets all its context via environment variables
  env = {
    "PATH": constants.HOOKS_PATH,
    "RUN_UUID": _RUN_UUID,
    "MASTER_SSH": utils.ShellQuoteArgs(master_ssh),
    "INSTANCE_NAME": instance_name,
    "INSTANCE_RUNNING": running_shellval,
    }
  result = os.spawnve(os.P_WAIT, script, args, env)
  if result != 0:
    raise qa_error.Error("Instance check failed with result %s" % result)
def _InstanceCheckInner(expected, instarg, args, result):
  """Helper function used by L{InstanceCheck}.

  """
  # Locate the instance the check refers to
  if instarg == FIRST_ARG:
    instance = args[0]
  elif instarg == RETURN_VALUE:
    instance = result
  else:
    raise Exception("Invalid value '%s' for instance argument" % instarg)
  # Verify the instance state if a check was requested
  if expected is None:
    return
  if expected not in (INST_DOWN, INST_UP):
    raise Exception("Invalid value '%s'" % expected)
  RunInstanceCheck(instance, (expected == INST_UP))
def InstanceCheck(before, after, instarg):
  """Decorator to check instance status before and after test.

  @param before: L{INST_DOWN} if instance must be stopped before test,
    L{INST_UP} if instance must be running before test, L{None} to not check.
  @param after: L{INST_DOWN} if instance must be stopped after test,
    L{INST_UP} if instance must be running after test, L{None} to not check.
  @param instarg: L{FIRST_ARG} to use first argument to test as instance (a
    dictionary), L{RETURN_VALUE} to use return value (disallows pre-checks)

  """
  def decorator(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
      # Pre-check: no return value exists yet
      _InstanceCheckInner(before, instarg, args, NotImplemented)
      retval = fn(*args, **kwargs)
      # Post-check: may inspect the test's return value
      _InstanceCheckInner(after, instarg, args, retval)
      return retval
    return wrapper
  return decorator
def GetNonexistentGroups(count):
  """Gets group names which shouldn't exist on the cluster.

  @param count: Number of groups to get
  @rtype: list

  """
  return GetNonexistentEntityNames(count, "groups", "group")
def GetNonexistentEntityNames(count, name_config, name_prefix):
  """Gets entity names which shouldn't exist on the cluster.

  The actual names can refer to arbitrary entities (for example
  groups, networks).

  @param count: Number of names to get
  @type name_config: string
  @param name_config: name of the leaf in the config containing
    this entity's configuration, including a 'inexistent-'
    element
  @type name_prefix: string
  @param name_prefix: prefix of the entity's names, used to compose
    the default values; for example for groups, the prefix is
    'group' and the generated names are then group1, group2, ...
  @rtype: list

  """
  entities = qa_config.get(name_config, {})
  # Fallback names generated from the prefix: prefix0, prefix1, ...
  fallback = [name_prefix + str(i) for i in range(count)]
  assert count <= len(fallback)
  # Configured names win over the generated defaults
  configured = entities.get("inexistent-" + name_config, fallback)
  candidates = configured[:count]
  if len(candidates) < count:
    raise Exception("At least %s non-existent %s are needed" %
                    (count, name_config))
  return candidates
def MakeNodePath(node, path):
  """Builds an absolute path for a virtual node.

  @type node: string or L{qa_config._QaNode}
  @param node: Node
  @type path: string
  @param path: Path without node-specific prefix

  """
  (_, basedir) = qa_config.GetVclusterSettings()
  if isinstance(node, basestring):
    name = node
  else:
    name = node.primary
  # Outside a virtual cluster the path is used unchanged
  if not basedir:
    return path
  # Virtual clusters keep each node's files under its own root directory
  assert path.startswith("/")
  return "%s%s" % (vcluster.MakeNodeRoot(basedir, name), path)
def _GetParameterOptions(specs):
"""Helper to build policy options."""
values = ["%s=%s" % (par, val)
for (par, val) in specs.items()]
return ",".join(values)
def TestSetISpecs(new_specs=None, diff_specs=None, get_policy_fn=None,
                  build_cmd_fn=None, fail=False, old_values=None):
    """Change instance specs for an object.

    At most one of new_specs or diff_specs can be specified.

    @type new_specs: dict
    @param new_specs: new complete specs, in the same format returned by
        L{ParseIPolicy}.
    @type diff_specs: dict
    @param diff_specs: partial specs, it can be an incomplete specifications, but
        if min/max specs are specified, their number must match the number of the
        existing specs
    @type get_policy_fn: function
    @param get_policy_fn: function that returns the current policy as in
        L{ParseIPolicy}
    @type build_cmd_fn: function
    @param build_cmd_fn: function that return the full command line from the
        options alone
    @type fail: bool
    @param fail: if the change is expected to fail
    @type old_values: tuple
    @param old_values: (old_policy, old_specs), as returned by
        L{ParseIPolicy}
    @return: same as L{ParseIPolicy}

    """
    assert get_policy_fn is not None
    assert build_cmd_fn is not None
    assert new_specs is None or diff_specs is None

    if old_values:
        (old_policy, old_specs) = old_values
    else:
        (old_policy, old_specs) = get_policy_fn()
    if diff_specs:
        # Build complete new specs by overlaying the partial diff on a deep
        # copy of the current specs
        new_specs = copy.deepcopy(old_specs)
        if constants.ISPECS_MINMAX in diff_specs:
            # The number of min/max spec pairs must not change
            AssertEqual(len(new_specs[constants.ISPECS_MINMAX]),
                        len(diff_specs[constants.ISPECS_MINMAX]))
            for (new_minmax, diff_minmax) in zip(new_specs[constants.ISPECS_MINMAX],
                                                 diff_specs[constants.ISPECS_MINMAX]):
                for (key, parvals) in diff_minmax.items():
                    for (par, val) in parvals.items():
                        new_minmax[key][par] = val
        for (par, val) in diff_specs.get(constants.ISPECS_STD, {}).items():
            new_specs[constants.ISPECS_STD][par] = val
    if new_specs:
        cmd = []
        if (diff_specs is None or constants.ISPECS_MINMAX in diff_specs):
            # Serialize all min/max pairs as
            # "min:p=v,...[/max:p=v,...][//min:...]"
            minmax_opt_items = []
            for minmax in new_specs[constants.ISPECS_MINMAX]:
                minmax_opts = []
                for key in ["min", "max"]:
                    keyopt = _GetParameterOptions(minmax[key])
                    minmax_opts.append("%s:%s" % (key, keyopt))
                minmax_opt_items.append("/".join(minmax_opts))
            cmd.extend([
                "--ipolicy-bounds-specs",
                "//".join(minmax_opt_items)
            ])
        # Only pass --ipolicy-std-specs options that were actually requested
        if diff_specs is None:
            std_source = new_specs
        else:
            std_source = diff_specs
        std_opt = _GetParameterOptions(std_source.get("std", {}))
        if std_opt:
            cmd.extend(["--ipolicy-std-specs", std_opt])
        AssertCommand(build_cmd_fn(cmd), fail=fail)

        # Check the new state
        (eff_policy, eff_specs) = get_policy_fn()
        AssertEqual(eff_policy, old_policy)
        if fail:
            # A failed change must leave the specs untouched
            AssertEqual(eff_specs, old_specs)
        else:
            AssertEqual(eff_specs, new_specs)
    else:
        (eff_policy, eff_specs) = (old_policy, old_specs)
    return (eff_policy, eff_specs)
def ParseIPolicy(policy):
    """Parse and split an instance policy.

    @type policy: dict
    @param policy: policy, as returned by L{GetObjectInfo}
    @rtype: tuple
    @return: (policy, specs), where:
        - policy is a dictionary of the policy values, instance specs excluded
        - specs is a dictionary containing only the specs, using the internal
          format (see L{constants.IPOLICY_DEFAULTS} for an example)

    """
    specs = {}
    plain_policy = {}
    for (field, value) in policy.items():
        if field == "bounds specs":
            # Keys look like "min/<param>" or "max/<param>"; split them into
            # nested dicts keyed by "min"/"max"
            minmax_list = []
            for minmax in value:
                parsed_minmax = {}
                for composite_key in minmax:
                    parts = composite_key.split("/", 1)
                    assert len(parts) > 1
                    parsed_minmax[parts[0]] = minmax[composite_key]
                minmax_list.append(parsed_minmax)
            specs[constants.ISPECS_MINMAX] = minmax_list
        elif field == constants.ISPECS_STD:
            specs[field] = value
        else:
            plain_policy[field] = value
    return (plain_policy, specs)
def UsesIPv6Connection(host, port):
    """Returns True if the connection to a given host/port could go through IPv6.

    """
    address_infos = socket.getaddrinfo(host, port)
    return any(info[0] == socket.AF_INET6 for info in address_infos)
def TimedeltaToTotalSeconds(td):
    """Returns the total seconds in a C{datetime.timedelta} object.

    This performs the same task as the C{datetime.timedelta.total_seconds()}
    method which is present in Python 2.7 onwards.

    @type td: datetime.timedelta
    @param td: timedelta object to convert
    @rtype: float
    @return: total seconds in the timedelta object

    """
    micros_per_second = 10 ** 6
    whole_seconds = td.seconds + td.days * 24.0 * 3600.0
    total_micros = td.microseconds + whole_seconds * micros_per_second
    return total_micros / micros_per_second
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by git during git-archive.
    # setup.py/versioneer.py greps for the variable names, so each must stay
    # defined on a line of its own. _version.py just calls get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    The class declares no fields itself; instances are populated
    attribute-by-attribute by get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' creates
    # _version.py
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "digital_rf/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a version-discovery method does not apply to this tree."""
# Template registry for the long _version.py text (kept for versioneer API
# compatibility; not populated in this file).
LONG_VERSION_PY = {}
# Nested mapping of VCS name -> method name -> handler function, populated
# by the register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Run the first available command from *commands* with *args*.

    Returns (stdout, returncode); (None, None) when no command could be
    started, (None, returncode) when the command exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # command not found: try the next candidate
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried_dirs = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried_dirs.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried_dirs), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    wanted = [
        ("git_refnames =", "refnames"),
        ("git_full =", "full"),
        ("git_date =", "date"),
    ]
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, name in wanted:
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[name] = mo.group(1)
        f.close()
    except EnvironmentError:
        # missing/unreadable file simply yields empty keywords
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    keywords is the dict produced by get_keywords()/git_get_keywords()
    ("refnames", "full", optionally "date"). Raises NotThisMethod when the
    keywords are empty or still unexpanded (not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r"\d", r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, error, date. Raises NotThisMethod when git is unavailable or the
    tree is not under git control.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag
        version = tag + ".post%d" % pieces["distance"]
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return (version + "-dirty") if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # An earlier stage failed; surface the error instead of a version
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded git-archive keywords
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ available (e.g. frozen interpreter)
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }
    # Strategy 2: 'git describe' on a checked-out source tree
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: version embedded in the unpacked directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
| |
import base64
import datetime
import json
import re
from bson import json_util, ObjectId
from celery import task
from dateutil import parser
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_delete
from django.utils.translation import ugettext as _
from onadata.apps.logger.models import Instance
from onadata.apps.logger.models import Note
from onadata.apps.restservice.utils import call_service
from onadata.libs.utils.timing import calculate_duration
from onadata.libs.utils.common_tags import ID, UUID, ATTACHMENTS, GEOLOCATION,\
SUBMISSION_TIME, MONGO_STRFTIME, BAMBOO_DATASET_ID, DELETEDAT, TAGS,\
NOTES, SUBMITTED_BY, VERSION, DURATION, START_TIME, END_TIME
from onadata.libs.utils.decorators import apply_form_field_names
from onadata.libs.utils.model_tools import queryset_iterator
# this is Mongo Collection where we will store the parsed submissions
xform_instances = settings.MONGO_DB.instances
# Mongo operator keys that legitimately start with '$' and must not be
# base64-encoded by dict_for_mongo/_encode_for_mongo
key_whitelist = ['$or', '$and', '$exists', '$in', '$gt', '$gte',
                 '$lt', '$lte', '$regex', '$options', '$all']
# ISO-8601-style timestamp format (no timezone component)
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
class ParseError(Exception):
    """Exception for parse failures in this module."""
    pass
def datetime_from_str(text):
    """Parse *text* (e.g. 2011-01-01T09:50:06.966) into a datetime.

    Returns None when *text* is None or cannot be parsed.
    """
    if text is None:
        return None
    try:
        return parser.parse(text)
    except Exception:
        return None
def dict_for_mongo(d):
    """Recursively make dict *d* safe for Mongo storage (mutates *d*).

    List and dict values are processed recursively; '_id' values are
    coerced to int when possible; keys containing '$' or '.' outside the
    operator whitelist are replaced with their base64-encoded form.
    Returns the mutated dict.
    """
    for key, value in d.items():
        if type(value) == list:
            value = [dict_for_mongo(e)
                     if type(e) == dict else e for e in value]
        elif type(value) == dict:
            value = dict_for_mongo(value)
        elif key == '_id':
            try:
                d[key] = int(value)
            except ValueError:
                # if it is not an int don't convert it
                pass
        if _is_invalid_for_mongo(key):
            # NOTE(review): deleting/adding keys while iterating relies on
            # Python 2's items() returning a snapshot list — confirm before
            # porting to Python 3
            del d[key]
            d[_encode_for_mongo(key)] = value
    return d
def _encode_for_mongo(key):
    """Base64-encode a leading '$' and any '.' characters in *key*."""
    for pattern, char in [(r'^\$', '$'), (r'\.', '.')]:
        key = re.sub(pattern, base64.b64encode(char), key)
    return key
def _decode_from_mongo(key):
    """Reverse _encode_for_mongo: restore a leading '$' and '.' in *key*."""
    re_dollar = re.compile(r"^%s" % base64.b64encode("$"))
    re_dot = re.compile(r"\%s" % base64.b64encode("."))
    for pattern, plain in [(re_dollar, '$'), (re_dot, '.')]:
        key = pattern.sub(plain, key)
    return key
def _is_invalid_for_mongo(key):
    """True when *key* must be encoded before being stored in Mongo."""
    if key in key_whitelist:
        return False
    return key.startswith('$') or key.count('.') > 0
@task
def update_mongo_instance(record):
    """Celery task: save (upsert) *record* into the Mongo collection."""
    # since our dict always has an id, save will always result in an upsert op
    # - so we dont need to worry whether its an edit or not
    # http://api.mongodb.org/python/current/api/pymongo/collection.html#pymong\
    # o.collection.Collection.save
    try:
        return xform_instances.save(record)
    except Exception:
        # todo: mail admins about the exception
        pass
class ParsedInstance(models.Model):
    """Parsed form of a submission, kept in sync with a Mongo record."""
    USERFORM_ID = u'_userform_id'
    STATUS = u'_status'
    DEFAULT_LIMIT = 30000
    DEFAULT_BATCHSIZE = 1000
    instance = models.OneToOneField(Instance, related_name="parsed_instance")
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    # TODO: decide if decimal field is better than float field.
    lat = models.FloatField(null=True)
    lng = models.FloatField(null=True)

    class Meta:
        app_label = "viewer"

    @classmethod
    @apply_form_field_names
    def query_mongo(cls, username, id_string, query, fields, sort, start=0,
                    limit=DEFAULT_LIMIT, count=False, hide_deleted=True):
        """Run a find() against the Mongo instances collection.

        query/fields/sort may be JSON strings or already-parsed values.
        Returns a cursor, or [{"count": n}] when count is True.
        """
        fields_to_select = {cls.USERFORM_ID: 0}
        # TODO: give more detailed error messages to 3rd parties
        # using the API when json.loads fails
        if isinstance(query, basestring):
            query = json.loads(query, object_hook=json_util.object_hook)
        query = query if query else {}
        query = dict_for_mongo(query)
        query[cls.USERFORM_ID] = u'%s_%s' % (username, id_string)
        # check if query contains an _uuid and if it's a valid ObjectId
        if '_uuid' in query and ObjectId.is_valid(query['_uuid']):
            query['_uuid'] = ObjectId(query['_uuid'])
        if hide_deleted:
            # display only active elements
            # join existing query with deleted_at_query on an $and
            query = {"$and": [query, {"_deleted_at": None}]}
        # fields must be a string array i.e. '["name", "age"]'
        if isinstance(fields, basestring):
            fields = json.loads(fields, object_hook=json_util.object_hook)
        fields = fields if fields else []
        # TODO: current mongo (2.0.4 of this writing)
        # cant mix including and excluding fields in a single query
        if type(fields) == list and len(fields) > 0:
            fields_to_select = dict(
                [(_encode_for_mongo(field), 1) for field in fields])
        if isinstance(sort, basestring):
            sort = json.loads(sort, object_hook=json_util.object_hook)
        sort = sort if sort else {}
        cursor = xform_instances.find(query, fields_to_select)
        if count:
            return [{"count": cursor.count()}]
        if start < 0 or limit < 0:
            raise ValueError(_("Invalid start/limit params"))
        cursor.skip(start).limit(limit)
        if type(sort) == dict and len(sort) == 1:
            sort_key = sort.keys()[0]
            # TODO: encode sort key if it has dots
            sort_dir = int(sort[sort_key])  # -1 for desc, 1 for asc
            cursor.sort(_encode_for_mongo(sort_key), sort_dir)
        # set batch size
        cursor.batch_size = cls.DEFAULT_BATCHSIZE
        return cursor

    @classmethod
    @apply_form_field_names
    def mongo_aggregate(cls, query, pipeline, hide_deleted=True):
        """Perform mongo aggregate queries

        query - is a dict which is to be passed to $match, a pipeline operator
        pipeline - list of dicts or dict of mongodb pipeline operators,
        http://docs.mongodb.org/manual/reference/operator/aggregation-pipeline
        """
        if isinstance(query, basestring):
            query = json.loads(
                query, object_hook=json_util.object_hook) if query else {}
        if not (isinstance(pipeline, dict) or isinstance(pipeline, list)):
            raise Exception(_(u"Invalid pipeline! %s" % pipeline))
        if not isinstance(query, dict):
            raise Exception(_(u"Invalid query! %s" % query))
        query = dict_for_mongo(query)
        if hide_deleted:
            # display only active elements
            deleted_at_query = {
                "$or": [{"_deleted_at": {"$exists": False}},
                        {"_deleted_at": None}]}
            # join existing query with deleted_at_query on an $and
            query = {"$and": [query, deleted_at_query]}
        # the $match stage always comes first, then the caller's pipeline
        k = [{'$match': query}]
        if isinstance(pipeline, list):
            k.extend(pipeline)
        else:
            k.append(pipeline)
        results = xform_instances.aggregate(k)
        return results['result']

    @classmethod
    @apply_form_field_names
    def query_mongo_minimal(
            cls, query, fields, sort, start=0, limit=DEFAULT_LIMIT,
            count=False, hide_deleted=True):
        """Like query_mongo() but without the username/id_string filter;
        query/fields/sort must be JSON strings (or falsy)."""
        fields_to_select = {cls.USERFORM_ID: 0}
        # TODO: give more detailed error messages to 3rd parties
        # using the API when json.loads fails
        query = json.loads(
            query, object_hook=json_util.object_hook) if query else {}
        query = dict_for_mongo(query)
        if hide_deleted:
            # display only active elements
            # join existing query with deleted_at_query on an $and
            query = {"$and": [query, {"_deleted_at": None}]}
        # fields must be a string array i.e. '["name", "age"]'
        fields = json.loads(
            fields, object_hook=json_util.object_hook) if fields else []
        # TODO: current mongo (2.0.4 of this writing)
        # cant mix including and excluding fields in a single query
        if type(fields) == list and len(fields) > 0:
            fields_to_select = dict(
                [(_encode_for_mongo(field), 1) for field in fields])
        sort = json.loads(
            sort, object_hook=json_util.object_hook) if sort else {}
        cursor = xform_instances.find(query, fields_to_select)
        if count:
            return [{"count": cursor.count()}]
        if start < 0 or limit < 0:
            raise ValueError(_("Invalid start/limit params"))
        cursor.skip(start).limit(limit)
        if type(sort) == dict and len(sort) == 1:
            sort_key = sort.keys()[0]
            # TODO: encode sort key if it has dots
            sort_dir = int(sort[sort_key])  # -1 for desc, 1 for asc
            cursor.sort(_encode_for_mongo(sort_key), sort_dir)
        # set batch size
        cursor.batch_size = cls.DEFAULT_BATCHSIZE
        return cursor

    def to_dict_for_mongo(self):
        """Return the submission dict augmented with metadata for Mongo."""
        d = self.to_dict()
        data = {
            UUID: self.instance.uuid,
            ID: self.instance.id,
            BAMBOO_DATASET_ID: self.instance.xform.bamboo_dataset,
            self.USERFORM_ID: u'%s_%s' % (
                self.instance.xform.user.username,
                self.instance.xform.id_string),
            ATTACHMENTS: _get_attachments_from_instance(self.instance),
            self.STATUS: self.instance.status,
            GEOLOCATION: [self.lat, self.lng],
            SUBMISSION_TIME: self.instance.date_created.strftime(
                MONGO_STRFTIME),
            TAGS: list(self.instance.tags.names()),
            NOTES: self.get_notes(),
            SUBMITTED_BY: self.instance.user.username
            if self.instance.user else None,
            VERSION: self.instance.version,
            DURATION: self.get_duration()
        }
        if isinstance(self.instance.deleted_at, datetime.datetime):
            data[DELETEDAT] = self.instance.deleted_at.strftime(MONGO_STRFTIME)
        d.update(data)
        return dict_for_mongo(d)

    def get_duration(self):
        """Duration between the submission's start and end times."""
        data = self.instance.get_dict()
        _start, _end = data.get(START_TIME, ''), data.get(END_TIME, '')
        return calculate_duration(_start, _end)

    def update_mongo(self, async=True):
        """Push this record to Mongo, via a Celery task when async is True."""
        d = self.to_dict_for_mongo()
        if async:
            update_mongo_instance.apply_async((), {"record": d})
        else:
            update_mongo_instance(d)

    def to_dict(self):
        # cache the parsed submission so repeated calls avoid re-parsing
        if not hasattr(self, "_dict_cache"):
            self._dict_cache = self.instance.get_dict()
        return self._dict_cache

    @classmethod
    def dicts(cls, xform):
        """Yield the parsed dict of every instance belonging to *xform*."""
        qs = cls.objects.filter(instance__xform=xform)
        for parsed_instance in queryset_iterator(qs):
            yield parsed_instance.to_dict()

    def _get_name_for_type(self, type_value):
        """
        We cannot assume that start time and end times always use the same
        XPath. This is causing problems for other peoples' forms.

        This is a quick fix to determine from the original XLSForm's JSON
        representation what the 'name' was for a given
        type_value ('start' or 'end')
        """
        datadict = json.loads(self.instance.xform.json)
        for item in datadict['children']:
            if type(item) == dict and item.get(u'type') == type_value:
                return item['name']

    def get_data_dictionary(self):
        # TODO: fix hack to get around a circular import
        from onadata.apps.viewer.models.data_dictionary import\
            DataDictionary
        return DataDictionary.objects.get(
            user=self.instance.xform.user,
            id_string=self.instance.xform.id_string
        )

    data_dictionary = property(get_data_dictionary)

    # TODO: figure out how much of this code should be here versus
    # data_dictionary.py.
    def _set_geopoint(self):
        # the stored point exposes x/y; they are copied to lng/lat here
        if self.instance.point:
            self.lat = self.instance.point.y
            self.lng = self.instance.point.x

    def save(self, async=False, *args, **kwargs):
        """Save the model row, then mirror the record into Mongo."""
        # start/end_time obsolete: originally used to approximate for
        # instanceID, before instanceIDs were implemented
        self.start_time = None
        self.end_time = None
        self._set_geopoint()
        super(ParsedInstance, self).save(*args, **kwargs)
        # insert into Mongo
        self.update_mongo(async)

    def add_note(self, note):
        """Attach a Note with text *note* to this submission."""
        note = Note(instance=self.instance, note=note)
        note.save()

    def remove_note(self, pk):
        """Delete the note with primary key *pk* from this submission."""
        note = self.instance.notes.get(pk=pk)
        note.delete()

    def get_notes(self):
        """Return this submission's notes with string-formatted timestamps."""
        notes = []
        note_qs = self.instance.notes.values(
            'id', 'note', 'date_created', 'date_modified')
        for note in note_qs:
            note['date_created'] = \
                note['date_created'].strftime(MONGO_STRFTIME)
            note['date_modified'] = \
                note['date_modified'].strftime(MONGO_STRFTIME)
            notes.append(note)
        return notes
def _get_attachments_from_instance(instance):
attachments = []
for a in instance.attachments.all():
attachment = dict()
attachment['download_url'] = a.media_file.url
attachment['mimetype'] = a.mimetype
attachment['filename'] = a.media_file.name
attachment['instance'] = a.instance.pk
attachment['xform'] = instance.xform.id
attachment['id'] = a.id
attachments.append(attachment)
return attachments
def _remove_from_mongo(sender, **kwargs):
    """pre_delete handler: remove the matching record from Mongo."""
    instance_id = kwargs.get('instance').instance.id
    xform_instances.remove(instance_id)

# keep Mongo in sync when a ParsedInstance row is deleted
pre_delete.connect(_remove_from_mongo, sender=ParsedInstance)
def rest_service_form_submission(sender, **kwargs):
    """post_save handler: notify configured REST services of new submissions."""
    parsed_instance = kwargs.get('instance')
    created = kwargs.get('created')
    if created:
        # only fire for newly created submissions, not for edits/re-saves
        call_service(parsed_instance)

post_save.connect(rest_service_form_submission, sender=ParsedInstance)
| |
# -*- coding: utf-8 -*-
# Taken verbatim from
# https://github.com/Pylons/webob/blob/master/tests/test_multidict.py
#
# With the GetDict tests removed
import unittest
from pyesgf import multidict
def text_(s, encoding='latin-1', errors='strict'):
    """Decode *s* to text when it is bytes; return str input unchanged.

    Taken verbatim from
    https://github.com/Pylons/webob/blob/master/webob/compat.py
    """
    return s.decode(encoding, errors) if isinstance(s, bytes) else s
class BaseDictTests(object):
    """Shared test cases for the MultiDict family.

    Concrete subclasses set ``klass`` and also inherit from
    ``unittest.TestCase``; ``_get_instance`` wraps a copy of the shared
    sample data in ``klass`` so every test starts from the same content.
    """
    def setUp(self):
        # Sample data: three values under 'a' (the first non-ASCII) and
        # a single value under 'b'.
        self._list = [('a', text_('\xe9')), ('a', 'e'), ('a', 'f'), ('b', '1')]
        self.data = multidict.MultiDict(self._list)
        self.d = self._get_instance()
    def _get_instance(self, **kwargs):
        # Build a fresh ``klass`` either from keyword data or from a
        # copy of the shared sample data.
        if kwargs:
            data = multidict.MultiDict(kwargs)
        else:
            data = self.data.copy()
        return self.klass(data)
    def test_len(self):
        self.assertEqual(len(self.d), 4)
    def test_getone(self):
        self.assertEqual(self.d.getone('b'), '1')
    def test_getone_missing(self):
        self.assertRaises(KeyError, self.d.getone, 'z')
    def test_getone_multiple_raises(self):
        # getone() must refuse a key that has several values.
        self.assertRaises(KeyError, self.d.getone, 'a')
    def test_getall(self):
        self.assertEqual(list(self.d.getall('b')), ['1'])
    def test_dict_of_lists(self):
        self.assertEqual(
            self.d.dict_of_lists(),
            {'a': [text_('\xe9'), 'e', 'f'], 'b': ['1']})
    def test_dict_api(self):
        # Exercise the plain-dict style accessors in one go.
        self.assertTrue('a' in self.d.mixed())
        self.assertTrue('a' in self.d.keys())
        self.assertTrue('a' in self.d.iterkeys())
        self.assertTrue(('b', '1') in self.d.items())
        self.assertTrue(('b', '1') in self.d.iteritems())
        self.assertTrue('1' in self.d.values())
        self.assertTrue('1' in self.d.itervalues())
        self.assertEqual(len(self.d), 4)
    def test_set_del_item(self):
        d = self._get_instance()
        self.assertTrue('a' in d)
        del d['a']
        self.assertTrue('a' not in d)
    def test_pop(self):
        d = self._get_instance()
        d['a'] = '1'
        self.assertEqual(d.pop('a'), '1')
        self.assertEqual(d.pop('x', '1'), '1')
    def test_pop_wrong_args(self):
        d = self._get_instance()
        self.assertRaises(TypeError, d.pop, 'a', '1', '1')
    def test_pop_missing(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.pop, 'z')
    def test_popitem(self):
        d = self._get_instance()
        self.assertEqual(d.popitem(), ('b', '1'))
    def test_update(self):
        d = self._get_instance()
        d.update(e='1')
        self.assertTrue('e' in d)
        d.update(dict(x='1'))
        self.assertTrue('x' in d)
        d.update([('y', '1')])
        self.assertTrue('y' in d)
    def test_setdefault(self):
        d = self._get_instance()
        d.setdefault('a', '1')
        self.assertNotEqual(d['a'], '1')
        d.setdefault('e', '1')
        self.assertTrue('e' in d)
    def test_add(self):
        # add() appends a new value instead of replacing the old one.
        d = multidict.MultiDict({'a': '1'})
        d.add('a', '2')
        self.assertEqual(list(d.getall('a')), ['1', '2'])
        d = self._get_instance()
        d.add('b', '3')
        self.assertEqual(list(d.getall('b')), ['1', '3'])
    def test_copy(self):
        assert self.d.copy() is not self.d
        if hasattr(self.d, 'multi'):
            # Copying a view must detach it from the underlying multidict.
            self.assertFalse(self.d.copy().multi is self.d.multi)
            self.assertFalse(self.d.copy() is self.d.multi)
    def test_clear(self):
        d = self._get_instance()
        d.clear()
        self.assertEqual(len(d), 0)
    def test_nonzero(self):
        d = self._get_instance()
        self.assertTrue(d)
        d.clear()
        self.assertFalse(d)
    def test_repr(self):
        self.assertTrue(repr(self._get_instance()))
    def test_too_many_args(self):
        from pyesgf.multidict import MultiDict
        self.assertRaises(TypeError, MultiDict, '1', 2)
    def test_no_args(self):
        from pyesgf.multidict import MultiDict
        md = MultiDict()
        self.assertEqual(md._items, [])
    def test_kwargs(self):
        from pyesgf.multidict import MultiDict
        md = MultiDict(kw1='val1')
        self.assertEqual(md._items, [('kw1', 'val1')])
    def test_view_list_not_list(self):
        from pyesgf.multidict import MultiDict
        d = MultiDict()
        self.assertRaises(TypeError, d.view_list, 42)
    def test_view_list(self):
        from pyesgf.multidict import MultiDict
        d = MultiDict()
        self.assertEqual(d.view_list([1, 2])._items, [1, 2])
    def test_from_fieldstorage_with_filename(self):
        # Fields carrying a filename are stored as the field object itself.
        from pyesgf.multidict import MultiDict
        d = MultiDict()
        fs = DummyFieldStorage('a', '1', 'file')
        self.assertEqual(d.from_fieldstorage(fs), MultiDict({'a': fs.list[0]}))
    def test_from_fieldstorage_without_filename(self):
        # Plain fields are stored by value.
        from pyesgf.multidict import MultiDict
        d = MultiDict()
        fs = DummyFieldStorage('a', '1')
        self.assertEqual(d.from_fieldstorage(fs), MultiDict({'a': '1'}))
class MultiDictTestCase(BaseDictTests, unittest.TestCase):
    """Tests for the plain (mutable) MultiDict."""
    klass = multidict.MultiDict
    def test_update_behavior_warning(self):
        import warnings
        class Foo(dict):
            # Misreport the length so MultiDict.update's sanity check
            # is triggered.
            def __len__(self):
                return 0
        foo = Foo()
        foo['a'] = 1
        d = self._get_instance()
        # Exactly one warning is expected from the suspicious update().
        with warnings.catch_warnings(record=True) as w:
            d.update(foo)
        self.assertEqual(len(w), 1)
    def test_repr_with_password(self):
        # repr() must mask values stored under the key 'password'.
        d = self._get_instance(password='pwd')
        self.assertEqual(repr(d), "MultiDict([('password', '******')])")
class NestedMultiDictTestCase(BaseDictTests, unittest.TestCase):
    """Tests for NestedMultiDict, a read-only view over several dicts.

    The mutating tests inherited from BaseDictTests are overridden to
    assert that every mutator raises KeyError instead.
    """
    klass = multidict.NestedMultiDict
    def test_getitem(self):
        d = self.klass({'a': 1})
        self.assertEqual(d['a'], 1)
    def test_getitem_raises(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.__getitem__, 'z')
    def test_contains(self):
        d = self._get_instance()
        assert 'a' in d
        assert 'z' not in d
    def test_add(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.add, 'b', 3)
    def test_set_del_item(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.__delitem__, 'a')
        self.assertRaises(KeyError, d.__setitem__, 'a', 1)
    def test_update(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.update, e=1)
        self.assertRaises(KeyError, d.update, dict(x=1))
        self.assertRaises(KeyError, d.update, [('y', 1)])
    def test_setdefault(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.setdefault, 'a', 1)
    def test_pop(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.pop, 'a')
        self.assertRaises(KeyError, d.pop, 'a', 1)
    def test_popitem(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.popitem, 'a')
    def test_pop_wrong_args(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.pop, 'a', 1, 1)
    def test_clear(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.clear)
    def test_nonzero(self):
        # Truthiness reflects whether any nested dict has content.
        d = self._get_instance()
        self.assertEqual(d.__nonzero__(), True)
        d.dicts = [{}]
        self.assertEqual(d.__nonzero__(), False)
        assert not d
class NoVarsTestCase(unittest.TestCase):
    """Tests for NoVars, which behaves as an always-empty read-only
    mapping: lookups raise KeyError, iteration yields nothing."""
    klass = multidict.NoVars
    def _get_instance(self):
        return self.klass()
    def test_getitem(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.__getitem__, 'a')
    def test_setitem(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.__setitem__, 'a')
    def test_delitem(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.__delitem__, 'a')
    def test_get(self):
        d = self._get_instance()
        self.assertEqual(d.get('a', default='b'), 'b')
    def test_getall(self):
        d = self._get_instance()
        self.assertEqual(d.getall('a'), [])
    def test_getone(self):
        d = self._get_instance()
        self.assertRaises(KeyError, d.getone, 'a')
    def test_mixed(self):
        d = self._get_instance()
        self.assertEqual(d.mixed(), {})
    def test_contains(self):
        d = self._get_instance()
        assert 'a' not in d
    def test_copy(self):
        d = self._get_instance()
        self.assertEqual(d.copy(), d)
    def test_len(self):
        d = self._get_instance()
        self.assertEqual(len(d), 0)
    def test_repr(self):
        d = self._get_instance()
        self.assertEqual(repr(d), '<NoVars: N/A>')
    def test_keys(self):
        d = self._get_instance()
        self.assertEqual(list(d.keys()), [])
    def test_iterkeys(self):
        d = self._get_instance()
        self.assertEqual(list(d.iterkeys()), [])
class DummyField(object):
    """Minimal stand-in for a single cgi.FieldStorage field."""
    def __init__(self, name, value, filename=None):
        self.name, self.value, self.filename = name, value, filename
        # No content-type options or MIME headers by default.
        self.type_options = {}
        self.headers = {}
class DummyFieldStorage(object):
    """Minimal stand-in for cgi.FieldStorage holding a single field."""
    def __init__(self, name, value, filename=None):
        # MultiDict.from_fieldstorage reads the ``.list`` attribute.
        self.list = [DummyField(name, value, filename)]
| |
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
#import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
    """Transfer function to state-space representation.

    Parameters
    ----------
    num, den : array_like
        Sequences representing the numerator and denominator
        polynomials.  ``num`` may be rank-2 for a multiple-output
        system (one row per output, sharing the same denominator).

    Returns
    -------
    A, B, C, D : ndarray
        State space representation of the system, in controller
        canonical form.
    """
    # Controller canonical state-space representation.
    # if M+1 = len(num) and K+1 = len(den) then we must have M <= K
    # states are found by asserting that X(s) = U(s) / D(s)
    # then Y(s) = N(s) * X(s)
    #
    # A, B, C, and D follow quite naturally.
    #
    num, den = normalize(num, den)   # Strips zeros, checks arrays
    nn = len(num.shape)
    if nn == 1:
        num = asarray([num], num.dtype)
    M = num.shape[1]
    K = len(den)
    if M > K:
        raise ValueError("Improper transfer function.")
    if M == 0 or K == 0:  # Null system
        return array([], float), array([], float), array([], float), \
               array([], float)

    # Pad numerator to have the same number of columns as the denominator.
    num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]

    if num.shape[-1] > 0:
        D = num[:, 0]
    else:
        D = array([], float)

    if K == 1:
        # Static (gain-only) system: no states at all.
        return array([], float), array([], float), array([], float), D

    frow = -array([den[1:]])
    A = r_[frow, eye(K - 2, K - 1)]
    B = eye(K - 1, 1)
    # Row k of C is num[k, 1:] - num[k, 0]*den[1:].  Use an outer
    # product so rank-2 (multiple-output) numerators are handled
    # correctly; the previous element-wise product broadcast den[1:]
    # against the wrong axis whenever nout > 1.
    C = num[:, 1:] - numpy.outer(num[:, 0], den[1:])
    return A, B, C, D
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
    """Check state-space matrices and ensure they are rank-2.

    Matrices passed as None are treated as empty and, when the shapes
    of the supplied matrices allow it, inferred as zero matrices.

    Returns
    -------
    A, B, C, D : ndarray
        Rank-2 versions of the input matrices.

    Raises
    ------
    ValueError
        If any matrix is more than rank-2 or the shapes are mutually
        inconsistent.
    """
    # Treat missing matrices as empty; atleast_2d then gives every
    # argument exactly two dimensions.
    A, B, C, D = [[] if m is None else m for m in (A, B, C, D)]
    A, B, C, D = map(atleast_2d, (A, B, C, D))

    if ((len(A.shape) > 2) or (len(B.shape) > 2) or
            (len(C.shape) > 2) or (len(D.shape) > 2)):
        raise ValueError("A, B, C, D arrays can be no larger than rank-2.")

    MA, NA = A.shape
    MB, NB = B.shape
    MC, NC = C.shape
    MD, ND = D.shape

    # Infer the shape of any empty matrix from the others.  NOTE: zeros()
    # takes the shape as a single tuple; the original code passed two
    # ints (zeros(MB, NB)), which makes numpy interpret the second int
    # as the dtype and fail.
    if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
        MC, NC = MD, NA
        C = zeros((MC, NC))
    if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
        MB, NB = MA, ND
        B = zeros((MB, NB))
    if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
        MD, ND = MC, NB
        D = zeros((MD, ND))
    if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
        MA, NA = MB, NC
        A = zeros((MA, NA))

    # Final consistency checks between the four matrices.
    if MA != NA:
        raise ValueError("A must be square.")
    if MA != MB:
        raise ValueError("A and B must have the same number of rows.")
    if NA != NC:
        raise ValueError("A and C must have the same number of columns.")
    if MD != MC:
        raise ValueError("C and D must have the same number of rows.")
    if ND != NB:
        raise ValueError("B and D must have the same number of columns.")
    return A, B, C, D
def ss2tf(A, B, C, D, input=0):
    """State-space to transfer function.

    Parameters
    ----------
    A, B, C, D : ndarray
        State-space representation of linear system.
    input : int, optional
        For multiple-input systems, the index of the input to use.

    Returns
    -------
    num, den : 1D ndarray
        Numerator and denominator polynomials (as sequences)
        respectively.  ``num`` has one row per output.
    """
    # transfer function is C (sI - A)**(-1) B + D
    A, B, C, D = map(asarray, (A, B, C, D))
    # Check consistency and make them all rank-2 arrays.
    A, B, C, D = abcd_normalize(A, B, C, D)
    nout, nin = D.shape
    if input >= nin:
        raise ValueError("System does not have the input specified.")

    # Reduce a possibly multiple-input system to the selected input
    # column, keeping B rank-2.
    if B.shape[-1] != 0:
        B = B[:, input]
        B.shape = (B.shape[0], 1)
    if D.shape[-1] != 0:
        D = D[:, input]

    try:
        den = poly(A)
    except ValueError:
        den = 1

    # Degenerate systems with no states and/or no feedthrough.  The
    # original tested product(X.shape, axis=0) == 0, which is exactly
    # X.size == 0; .size also survives the removal of numpy.product in
    # NumPy 2.0.
    if (B.size == 0) and (C.size == 0):
        num = numpy.ravel(D)
        if (D.size == 0) and (A.size == 0):
            den = []
        return num, den

    num_states = A.shape[0]
    # Sum one row/column of each matrix so the result dtype is the
    # common promotion of all four.
    type_test = A[:, 0] + B[:, 0] + C[0, :] + D
    num = numpy.zeros((nout, num_states + 1), type_test.dtype)
    for k in range(nout):
        Ck = atleast_2d(C[k, :])
        # poly(A - B*Ck) encodes C (sI - A)^-1 B; adding (D[k]-1)*den
        # restores the feedthrough term.
        num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
    return num, den
def zpk2ss(z, p, k):
    """Zero-pole-gain representation to state-space representation.

    Parameters
    ----------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.

    Returns
    -------
    A, B, C, D : ndarray
        State-space matrices.
    """
    # Convert to transfer-function form first, then to state space.
    num, den = zpk2tf(z, p, k)
    return tf2ss(num, den)
def ss2zpk(A, B, C, D, input=0):
    """State-space representation to zero-pole-gain representation.

    Parameters
    ----------
    A, B, C, D : ndarray
        State-space representation of linear system.
    input : int, optional
        For multiple-input systems, the index of the input to use.

    Returns
    -------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.
    """
    # Go through the transfer-function form of the selected input.
    num, den = ss2tf(A, B, C, D, input=input)
    return tf2zpk(num, den)
class lti(object):
    """Linear Time Invariant class which simplifies representation.

    The same system is held in three representations simultaneously --
    transfer function (``num``, ``den``), zero-pole-gain (``zeros``,
    ``poles``, ``gain``) and state-space (``A``, ``B``, ``C``, ``D``).
    Assigning to an attribute of any one representation recomputes the
    other two (see ``__setattr__``).
    """
    def __init__(self,*args,**kwords):
        """Initialize the LTI system using either:

           (numerator, denominator)
           (zeros, poles, gain)
           (A, B, C, D) -- state-space.

        The number of positional arguments selects the representation.
        """
        N = len(args)
        # NOTE: assignments below go through self.__dict__ directly so
        # that __setattr__ does not redundantly recompute the
        # representations being set explicitly here.
        if N == 2:  # Numerator denominator transfer function input
            self.__dict__['num'], self.__dict__['den'] = normalize(*args)
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = tf2zpk(*args)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = tf2ss(*args)
            self.inputs = 1
            # A rank-2 numerator means one row per output.
            if len(self.num.shape) > 1:
                self.outputs = self.num.shape[0]
            else:
                self.outputs = 1
        elif N == 3:  # Zero-pole-gain form
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = args
            self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = zpk2ss(*args)
            self.inputs = 1
            if len(self.zeros.shape) > 1:
                self.outputs = self.zeros.shape[0]
            else:
                self.outputs = 1
        elif N == 4:  # State-space form
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = abcd_normalize(*args)
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = ss2zpk(*args)
            self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
            self.inputs = self.B.shape[-1]
            self.outputs = self.C.shape[0]
        else:
            raise ValueError("Needs 2, 3, or 4 arguments.")
    def __setattr__(self, attr, val):
        # Keep the three representations in sync: setting an attribute
        # of one form recomputes the other two from it.
        if attr in ['num','den']:
            self.__dict__[attr] = val
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = \
                tf2zpk(self.num, self.den)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = \
                tf2ss(self.num, self.den)
        elif attr in ['zeros', 'poles', 'gain']:
            self.__dict__[attr] = val
            self.__dict__['num'], self.__dict__['den'] = \
                zpk2tf(self.zeros,
                       self.poles, self.gain)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = \
                zpk2ss(self.zeros,
                       self.poles, self.gain)
        elif attr in ['A', 'B', 'C', 'D']:
            self.__dict__[attr] = val
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = \
                ss2zpk(self.A, self.B,
                       self.C, self.D)
            self.__dict__['num'], self.__dict__['den'] = \
                ss2tf(self.A, self.B,
                      self.C, self.D)
        else:
            # Any other attribute (e.g. inputs/outputs) is stored as-is.
            self.__dict__[attr] = val
    def impulse(self, X0=None, T=None, N=None):
        # Convenience wrapper around the module-level impulse().
        return impulse(self, X0=X0, T=T, N=N)
    def step(self, X0=None, T=None, N=None):
        # Convenience wrapper around the module-level step().
        return step(self, X0=X0, T=T, N=N)
    def output(self, U, T, X0=None):
        # Convenience wrapper around the module-level lsim().
        return lsim(self, U, T, X0=X0)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
    """
    Simulate output of a continuous-time linear system, by using
    the ODE solver `scipy.integrate.odeint`.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like (1D or 2D), optional
        An input array describing the input at each time T.  Linear
        interpolation is used between given times.  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.  If U is not given, the input is assumed
        to be zero.
    T : array_like (1D or 2D), optional
        The time steps at which the input is defined and at which the
        output is desired.  The default is 101 evenly spaced points on
        the interval [0,10.0].
    X0 : array_like (1D), optional
        The initial condition of the state vector.  If `X0` is not
        given, the initial conditions are assumed to be 0.
    kwargs : dict
        Additional keyword arguments are passed on to the function
        odeint.  See the notes below for more details.

    Returns
    -------
    T : 1D ndarray
        The time values for the output.
    yout : ndarray
        The response of the system.
    xout : ndarray
        The time-evolution of the state-vector.

    Notes
    -----
    This function uses :func:`scipy.integrate.odeint` to solve the
    system's differential equations.  Additional keyword arguments
    given to `lsim2` are passed on to `odeint`.  See the documentation
    for :func:`scipy.integrate.odeint` for the full list of arguments.
    """
    if isinstance(system, lti):
        sys = system
    else:
        sys = lti(*system)

    if X0 is None:
        X0 = zeros(sys.B.shape[0], sys.A.dtype)

    if T is None:
        # XXX T should really be a required argument, but U was
        # changed from a required positional argument to a keyword,
        # and T is after U in the argument list.  So we either: change
        # the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
        # here.  This code implements the latter.
        T = linspace(0, 10.0, 101)

    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")

    if U is not None:
        U = atleast_1d(U)
        if len(U.shape) == 1:
            U = U.reshape(-1, 1)
        sU = U.shape
        if sU[0] != len(T):
            raise ValueError("U must have the same number of rows "
                             "as elements in T.")
        if sU[1] != sys.inputs:
            raise ValueError("The number of inputs in U (%d) is not "
                             "compatible with the number of system "
                             "inputs (%d)" % (sU[1], sys.inputs))
        # The module-level import of scipy.interpolate is commented out
        # at the top of this file, so import it lazily here; previously
        # this branch raised NameError on the first use of
        # ``interpolate``.
        import scipy.interpolate as interpolate
        # Create a callable that uses linear interpolation to
        # calculate the input at any time.
        ufunc = interpolate.interp1d(T, U, kind='linear',
                                     axis=0, bounds_error=False)

        def fprime(x, t, sys, ufunc):
            """The vector field of the linear system."""
            return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))

        xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
        yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
    else:
        def fprime(x, t, sys):
            """The vector field of the linear system."""
            return dot(sys.A, x)

        xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
        yout = dot(sys.C, transpose(xout))

    return T, squeeze(transpose(yout)), xout
def lsim(system, U, T, X0=None, interp=1):
    """
    Simulate output of a continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like
        An input array describing the input at each time `T`
        (interpolation is assumed between given times).  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.
    T : array_like
        The time steps at which the input is defined and at which the
        output is desired.
    X0 : array_like, optional
        The initial conditions on the state vector (zero by default).
    interp : {1, 0}
        Whether to use linear (1) or zero-order hold (0) interpolation.

    Returns
    -------
    T : 1D ndarray
        Time values for the output.
    yout : 1D ndarray
        System response.
    xout : ndarray
        Time-evolution of the state-vector.

    Notes
    -----
    The state is propagated with the matrix exponential of ``A`` over
    each step, computed through the eigendecomposition of ``A`` and
    recomputed only when the step size changes.
    """
    # system is an lti system or a sequence
    #  with 2 (num, den)
    #       3 (zeros, poles, gain)
    #       4 (A, B, C, D)
    #  describing the system
    # U is an input vector at times T
    #   if system describes multiple inputs
    #   then U can be a rank-2 array with the number of columns
    #   being the number of inputs
    if isinstance(system, lti):
        sys = system
    else:
        sys = lti(*system)
    U = atleast_1d(U)
    T = atleast_1d(T)
    if len(U.shape) == 1:
        U = U.reshape((U.shape[0],1))
    sU = U.shape
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")
    if sU[0] != len(T):
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")
    if sU[1] != sys.inputs:
        raise ValueError("System does not define that many inputs.")
    if X0 is None:
        X0 = zeros(sys.B.shape[0], sys.A.dtype)
    xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
    xout[0] = X0
    A = sys.A
    AT, BT = transpose(sys.A), transpose(sys.B)
    dt = T[1]-T[0]
    # Discretize via the eigendecomposition: GT is the transpose of
    # expm(A*dt).
    lam, v = linalg.eig(A)
    vt = transpose(v)
    vti = linalg.inv(vt)
    GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
    ATm1 = linalg.inv(AT)
    ATm2 = dot(ATm1,ATm1)
    I = eye(A.shape[0],dtype=A.dtype)
    GTmI = GT-I
    # F1T (and F2T, used only for linear interpolation) carry the
    # input's contribution over one step.
    F1T = dot(dot(BT,GTmI),ATm1)
    if interp:
        F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
    # Was ``xrange``: Python 2 only.  ``range`` behaves identically in
    # this loop and also works on Python 3.
    for k in range(1,len(T)):
        dt1 = T[k] - T[k-1]
        if dt1 != dt:
            # Step size changed: recompute the discretized matrices.
            dt = dt1
            GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
            GTmI = GT-I
            F1T = dot(dot(BT,GTmI),ATm1)
            if interp:
                F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
        xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
        if interp:
            xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
    yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
    return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : LTI class or tuple
        If specified as a tuple, the system is described as
        ``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
    X0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    T : array_like, optional
        Time points.  Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system
        (except for singularities at zero).
    """
    if isinstance(system, lti):
        sys = system
    else:
        sys = lti(*system)
    # An impulse at t=0 is equivalent to starting from the state B
    # (plus any user-supplied initial condition).
    if X0 is None:
        B = sys.B
    else:
        B = sys.B + X0
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Evaluate h(t) = C exp(A t) B through the eigendecomposition of A.
    ev, vecs = linalg.eig(sys.A)
    vecs_inv = linalg.inv(vecs)
    C = sys.C
    h = zeros(T.shape, sys.A.dtype)
    for k, tk in enumerate(T):
        eA = dot(dot(vecs, diag(numpy.exp(ev * tk))), vecs_inv)
        eA = eA.astype(h.dtype)
        h[k] = squeeze(dot(dot(C, eA), B))
    return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            2 (num, den)
            3 (zeros, poles, gain)
            4 (A, B, C, D)
    X0 : 1-D array_like, optional
        The initial condition of the state vector.  Default: 0 (the
        zero vector).
    T : 1-D array_like, optional
        The time steps at which the input is defined and at which the
        output is desired.  If `T` is not given, the function will
        generate a set of time samples automatically.
    N : int, optional
        Number of time points to compute.  Default: 100.
    kwargs : various types
        Additional keyword arguments are passed on to
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`; see the latter's documentation for
        information about these arguments.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, integrate.odeint

    Notes
    -----
    The solution is generated by calling `scipy.signal.lsim2`, which
    uses the differential equation solver `scipy.integrate.odeint`.

    .. versionadded:: 0.8.0

    Examples
    --------
    Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)

    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = impulse2(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    sys = system if isinstance(system, lti) else lti(*system)
    B = sys.B
    if B.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    B = B.squeeze()
    if X0 is None:
        X0 = zeros_like(B)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Move the impulse in the input to the initial conditions (state B
    # plus any user-supplied X0), then solve with zero input via lsim2().
    response_t, response_y, _ = lsim2(sys, zeros_like(T), T, B + X0, **kwargs)
    return response_t, response_y
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation.
            2 (num, den)
            3 (zeros, poles, gain)
            4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2
    """
    sys = system if isinstance(system, lti) else lti(*system)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Drive the system with a unit input at every sample.
    U = ones(T.shape, sys.A.dtype)
    tout, yout, _ = lsim(sys, U, T, X0=X0)
    return tout, yout
def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    This function is functionally the same as `scipy.signal.step`, but
    it uses the function `scipy.signal.lsim2` to compute the step
    response.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation.
            2 (num, den)
            3 (zeros, poles, gain)
            4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int
        Number of time points to compute if `T` is not given.
    **kwargs :
        Additional keyword arguments are passed on to the function
        `scipy.signal.lsim2`, which in turn passes them on to
        :func:`scipy.integrate.odeint`.  See the documentation for
        :func:`scipy.integrate.odeint` for information about these
        arguments.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    sys = system if isinstance(system, lti) else lti(*system)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Drive the system with a unit input at every sample.
    U = ones(T.shape, sys.A.dtype)
    tout, yout, _ = lsim2(sys, U, T, X0=X0, **kwargs)
    return tout, yout
| |
"""
Support for MQTT climate devices.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/climate.mqtt/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import mqtt
from homeassistant.components.climate import (
STATE_HEAT, STATE_COOL, STATE_DRY, STATE_FAN_ONLY, ClimateDevice,
PLATFORM_SCHEMA as CLIMATE_PLATFORM_SCHEMA, STATE_AUTO,
ATTR_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE,
SUPPORT_SWING_MODE, SUPPORT_FAN_MODE, SUPPORT_AWAY_MODE, SUPPORT_HOLD_MODE,
SUPPORT_AUX_HEAT, DEFAULT_MIN_TEMP, DEFAULT_MAX_TEMP)
from homeassistant.const import (
STATE_ON, STATE_OFF, ATTR_TEMPERATURE, CONF_NAME, CONF_VALUE_TEMPLATE)
from homeassistant.components.mqtt import (
CONF_AVAILABILITY_TOPIC, CONF_QOS, CONF_RETAIN, CONF_PAYLOAD_AVAILABLE,
CONF_PAYLOAD_NOT_AVAILABLE, MQTT_BASE_PLATFORM_SCHEMA, MqttAvailability)
import homeassistant.helpers.config_validation as cv
from homeassistant.components.fan import (SPEED_LOW, SPEED_MEDIUM,
SPEED_HIGH)
_LOGGER = logging.getLogger(__name__)

DEPENDENCIES = ['mqtt']

DEFAULT_NAME = 'MQTT HVAC'

# Power switch (on/off) topics and template.
CONF_POWER_COMMAND_TOPIC = 'power_command_topic'
CONF_POWER_STATE_TOPIC = 'power_state_topic'
CONF_POWER_STATE_TEMPLATE = 'power_state_template'
# Operation mode (heat/cool/...) topics and template.
CONF_MODE_COMMAND_TOPIC = 'mode_command_topic'
CONF_MODE_STATE_TOPIC = 'mode_state_topic'
CONF_MODE_STATE_TEMPLATE = 'mode_state_template'
# Target temperature topics and template.
CONF_TEMPERATURE_COMMAND_TOPIC = 'temperature_command_topic'
CONF_TEMPERATURE_STATE_TOPIC = 'temperature_state_topic'
CONF_TEMPERATURE_STATE_TEMPLATE = 'temperature_state_template'
# Fan mode topics and template.
CONF_FAN_MODE_COMMAND_TOPIC = 'fan_mode_command_topic'
CONF_FAN_MODE_STATE_TOPIC = 'fan_mode_state_topic'
CONF_FAN_MODE_STATE_TEMPLATE = 'fan_mode_state_template'
# Swing mode topics and template.
CONF_SWING_MODE_COMMAND_TOPIC = 'swing_mode_command_topic'
CONF_SWING_MODE_STATE_TOPIC = 'swing_mode_state_topic'
CONF_SWING_MODE_STATE_TEMPLATE = 'swing_mode_state_template'
# Away mode topics and template.
CONF_AWAY_MODE_COMMAND_TOPIC = 'away_mode_command_topic'
CONF_AWAY_MODE_STATE_TOPIC = 'away_mode_state_topic'
CONF_AWAY_MODE_STATE_TEMPLATE = 'away_mode_state_template'
# Hold mode topics and template.
CONF_HOLD_COMMAND_TOPIC = 'hold_command_topic'
CONF_HOLD_STATE_TOPIC = 'hold_state_topic'
CONF_HOLD_STATE_TEMPLATE = 'hold_state_template'
# Auxiliary heat topics and template.
CONF_AUX_COMMAND_TOPIC = 'aux_command_topic'
CONF_AUX_STATE_TOPIC = 'aux_state_topic'
CONF_AUX_STATE_TEMPLATE = 'aux_state_template'
# Current (measured) temperature topic and template.
CONF_CURRENT_TEMPERATURE_TEMPLATE = 'current_temperature_template'
CONF_CURRENT_TEMPERATURE_TOPIC = 'current_temperature_topic'
# Payloads and selectable option lists.
CONF_PAYLOAD_ON = 'payload_on'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_FAN_MODE_LIST = 'fan_modes'
CONF_MODE_LIST = 'modes'
CONF_SWING_MODE_LIST = 'swing_modes'
# Miscellaneous behavioural options.
CONF_INITIAL = 'initial'
CONF_SEND_IF_OFF = 'send_if_off'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
# Base schema: the standard climate platform options extended with the
# shared MQTT platform options (qos, retain, ...).
SCHEMA_BASE = CLIMATE_PLATFORM_SCHEMA.extend(MQTT_BASE_PLATFORM_SCHEMA.schema)
PLATFORM_SCHEMA = SCHEMA_BASE.extend({
    # Command topics: where the desired state is published.
    vol.Optional(CONF_POWER_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_TEMPERATURE_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_FAN_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_SWING_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_AWAY_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_HOLD_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_AUX_COMMAND_TOPIC): mqtt.valid_publish_topic,
    # State topics: where the device reports its actual state.
    vol.Optional(CONF_POWER_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_TEMPERATURE_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_FAN_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_SWING_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_AWAY_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_HOLD_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_AUX_STATE_TOPIC): mqtt.valid_subscribe_topic,
    # Templates used to extract values from state-topic payloads; a
    # bare CONF_VALUE_TEMPLATE acts as the default for all of them.
    vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_POWER_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_MODE_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_TEMPERATURE_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_FAN_MODE_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_SWING_MODE_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_AWAY_MODE_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_HOLD_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_AUX_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_CURRENT_TEMPERATURE_TEMPLATE): cv.template,
    vol.Optional(CONF_CURRENT_TEMPERATURE_TOPIC):
        mqtt.valid_subscribe_topic,
    # Selectable mode lists and remaining options.
    vol.Optional(CONF_FAN_MODE_LIST,
                 default=[STATE_AUTO, SPEED_LOW,
                          SPEED_MEDIUM, SPEED_HIGH]): cv.ensure_list,
    vol.Optional(CONF_SWING_MODE_LIST,
                 default=[STATE_ON, STATE_OFF]): cv.ensure_list,
    vol.Optional(CONF_MODE_LIST,
                 default=[STATE_AUTO, STATE_OFF, STATE_COOL, STATE_HEAT,
                          STATE_DRY, STATE_FAN_ONLY]): cv.ensure_list,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_INITIAL, default=21): cv.positive_int,
    vol.Optional(CONF_SEND_IF_OFF, default=True): cv.boolean,
    vol.Optional(CONF_PAYLOAD_ON, default="ON"): cv.string,
    vol.Optional(CONF_PAYLOAD_OFF, default="OFF"): cv.string,
    vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): vol.Coerce(float),
    vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): vol.Coerce(float)
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Set up the MQTT climate devices.

    Supports both static YAML configuration and MQTT discovery; a
    discovered payload is validated against the same PLATFORM_SCHEMA.
    """
    if discovery_info is not None:
        config = PLATFORM_SCHEMA(discovery_info)
    template_keys = (
        CONF_POWER_STATE_TEMPLATE,
        CONF_MODE_STATE_TEMPLATE,
        CONF_TEMPERATURE_STATE_TEMPLATE,
        CONF_FAN_MODE_STATE_TEMPLATE,
        CONF_SWING_MODE_STATE_TEMPLATE,
        CONF_AWAY_MODE_STATE_TEMPLATE,
        CONF_HOLD_STATE_TEMPLATE,
        CONF_AUX_STATE_TEMPLATE,
        CONF_CURRENT_TEMPERATURE_TEMPLATE
    )
    value_templates = {}
    # A generic 'value_template' seeds the template for every state
    # topic; per-topic templates configured below override it.
    if CONF_VALUE_TEMPLATE in config:
        value_template = config.get(CONF_VALUE_TEMPLATE)
        value_template.hass = hass
        value_templates = {key: value_template for key in template_keys}
    for key in template_keys & config.keys():
        value_templates[key] = config.get(key)
        value_templates[key].hass = hass
    async_add_entities([
        MqttClimate(
            hass,
            config.get(CONF_NAME),
            # Map every command/state topic option to its configured
            # value (or None when absent).
            {
                key: config.get(key) for key in (
                    CONF_POWER_COMMAND_TOPIC,
                    CONF_MODE_COMMAND_TOPIC,
                    CONF_TEMPERATURE_COMMAND_TOPIC,
                    CONF_FAN_MODE_COMMAND_TOPIC,
                    CONF_SWING_MODE_COMMAND_TOPIC,
                    CONF_AWAY_MODE_COMMAND_TOPIC,
                    CONF_HOLD_COMMAND_TOPIC,
                    CONF_AUX_COMMAND_TOPIC,
                    CONF_POWER_STATE_TOPIC,
                    CONF_MODE_STATE_TOPIC,
                    CONF_TEMPERATURE_STATE_TOPIC,
                    CONF_FAN_MODE_STATE_TOPIC,
                    CONF_SWING_MODE_STATE_TOPIC,
                    CONF_AWAY_MODE_STATE_TOPIC,
                    CONF_HOLD_STATE_TOPIC,
                    CONF_AUX_STATE_TOPIC,
                    CONF_CURRENT_TEMPERATURE_TOPIC
                )
            },
            value_templates,
            config.get(CONF_QOS),
            config.get(CONF_RETAIN),
            config.get(CONF_MODE_LIST),
            config.get(CONF_FAN_MODE_LIST),
            config.get(CONF_SWING_MODE_LIST),
            # Positional order must match MqttClimate.__init__:
            # target_temperature, then initial values for away, hold,
            # current_fan_mode, current_swing_mode, current_operation
            # and aux.
            config.get(CONF_INITIAL),
            False, None, SPEED_LOW,
            STATE_OFF, STATE_OFF, False,
            config.get(CONF_SEND_IF_OFF),
            config.get(CONF_PAYLOAD_ON),
            config.get(CONF_PAYLOAD_OFF),
            config.get(CONF_AVAILABILITY_TOPIC),
            config.get(CONF_PAYLOAD_AVAILABLE),
            config.get(CONF_PAYLOAD_NOT_AVAILABLE),
            config.get(CONF_MIN_TEMP),
            config.get(CONF_MAX_TEMP))
    ])
class MqttClimate(MqttAvailability, ClimateDevice):
    """Representation of an MQTT climate device.

    Each climate attribute (mode, temperature, fan, swing, away, hold,
    aux) has an optional MQTT state topic and command topic.  When the
    state topic for an attribute is not configured, the entity works
    optimistically for that attribute: the local state is updated as soon
    as the command is published.
    """

    def __init__(self, hass, name, topic, value_templates, qos, retain,
                 mode_list, fan_mode_list, swing_mode_list,
                 target_temperature, away, hold, current_fan_mode,
                 current_swing_mode, current_operation, aux, send_if_off,
                 payload_on, payload_off, availability_topic,
                 payload_available, payload_not_available,
                 min_temp, max_temp):
        """Initialize the climate device."""
        super().__init__(availability_topic, qos, payload_available,
                         payload_not_available)
        self.hass = hass
        self._name = name
        # dict: CONF_*_TOPIC key -> topic string or None when unconfigured.
        self._topic = topic
        # dict: CONF_*_TEMPLATE key -> Template used to parse raw payloads.
        self._value_templates = value_templates
        self._qos = qos
        self._retain = retain
        self._target_temperature = target_temperature
        self._unit_of_measurement = hass.config.units.temperature_unit
        self._away = away
        self._hold = hold
        self._current_temperature = None
        self._current_fan_mode = current_fan_mode
        self._current_operation = current_operation
        self._aux = aux
        self._current_swing_mode = current_swing_mode
        self._fan_list = fan_mode_list
        self._operation_list = mode_list
        self._swing_list = swing_mode_list
        self._target_temperature_step = 1
        # When False, commands are suppressed while operation mode is off.
        self._send_if_off = send_if_off
        self._payload_on = payload_on
        self._payload_off = payload_off
        self._min_temp = min_temp
        self._max_temp = max_temp

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Handle being added to home assistant.

        Subscribes a callback per configured state topic; each callback
        optionally runs the matching value template, validates the payload
        and schedules a state update.
        """
        yield from super().async_added_to_hass()

        @callback
        def handle_current_temp_received(topic, payload, qos):
            """Handle current temperature coming via MQTT."""
            if CONF_CURRENT_TEMPERATURE_TEMPLATE in self._value_templates:
                payload =\
                    self._value_templates[CONF_CURRENT_TEMPERATURE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

            try:
                self._current_temperature = float(payload)
                self.async_schedule_update_ha_state()
            except ValueError:
                _LOGGER.error("Could not parse temperature from %s", payload)

        if self._topic[CONF_CURRENT_TEMPERATURE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_CURRENT_TEMPERATURE_TOPIC],
                handle_current_temp_received, self._qos)

        @callback
        def handle_mode_received(topic, payload, qos):
            """Handle receiving mode via MQTT."""
            if CONF_MODE_STATE_TEMPLATE in self._value_templates:
                payload = self._value_templates[CONF_MODE_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

            # Only accept modes the entity was configured with.
            if payload not in self._operation_list:
                _LOGGER.error("Invalid mode: %s", payload)
            else:
                self._current_operation = payload
                self.async_schedule_update_ha_state()

        if self._topic[CONF_MODE_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_MODE_STATE_TOPIC],
                handle_mode_received, self._qos)

        @callback
        def handle_temperature_received(topic, payload, qos):
            """Handle target temperature coming via MQTT."""
            if CONF_TEMPERATURE_STATE_TEMPLATE in self._value_templates:
                payload = \
                    self._value_templates[CONF_TEMPERATURE_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

            try:
                self._target_temperature = float(payload)
                self.async_schedule_update_ha_state()
            except ValueError:
                _LOGGER.error("Could not parse temperature from %s", payload)

        if self._topic[CONF_TEMPERATURE_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_TEMPERATURE_STATE_TOPIC],
                handle_temperature_received, self._qos)

        @callback
        def handle_fan_mode_received(topic, payload, qos):
            """Handle receiving fan mode via MQTT."""
            if CONF_FAN_MODE_STATE_TEMPLATE in self._value_templates:
                payload = \
                    self._value_templates[CONF_FAN_MODE_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

            if payload not in self._fan_list:
                _LOGGER.error("Invalid fan mode: %s", payload)
            else:
                self._current_fan_mode = payload
                self.async_schedule_update_ha_state()

        if self._topic[CONF_FAN_MODE_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_FAN_MODE_STATE_TOPIC],
                handle_fan_mode_received, self._qos)

        @callback
        def handle_swing_mode_received(topic, payload, qos):
            """Handle receiving swing mode via MQTT."""
            if CONF_SWING_MODE_STATE_TEMPLATE in self._value_templates:
                payload = \
                    self._value_templates[CONF_SWING_MODE_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

            if payload not in self._swing_list:
                _LOGGER.error("Invalid swing mode: %s", payload)
            else:
                self._current_swing_mode = payload
                self.async_schedule_update_ha_state()

        if self._topic[CONF_SWING_MODE_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_SWING_MODE_STATE_TOPIC],
                handle_swing_mode_received, self._qos)

        @callback
        def handle_away_mode_received(topic, payload, qos):
            """Handle receiving away mode via MQTT."""
            if CONF_AWAY_MODE_STATE_TEMPLATE in self._value_templates:
                payload = \
                    self._value_templates[CONF_AWAY_MODE_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)
                # Templates rendering Python booleans come back as the
                # strings "True"/"False"; map them onto the payloads.
                if payload == "True":
                    payload = self._payload_on
                elif payload == "False":
                    payload = self._payload_off

            if payload == self._payload_on:
                self._away = True
            elif payload == self._payload_off:
                self._away = False
            else:
                _LOGGER.error("Invalid away mode: %s", payload)

            self.async_schedule_update_ha_state()

        if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_AWAY_MODE_STATE_TOPIC],
                handle_away_mode_received, self._qos)

        @callback
        def handle_aux_mode_received(topic, payload, qos):
            """Handle receiving aux mode via MQTT."""
            if CONF_AUX_STATE_TEMPLATE in self._value_templates:
                payload = self._value_templates[CONF_AUX_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)
                # Same "True"/"False" mapping as away mode above.
                if payload == "True":
                    payload = self._payload_on
                elif payload == "False":
                    payload = self._payload_off

            if payload == self._payload_on:
                self._aux = True
            elif payload == self._payload_off:
                self._aux = False
            else:
                _LOGGER.error("Invalid aux mode: %s", payload)

            self.async_schedule_update_ha_state()

        if self._topic[CONF_AUX_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_AUX_STATE_TOPIC],
                handle_aux_mode_received, self._qos)

        @callback
        def handle_hold_mode_received(topic, payload, qos):
            """Handle receiving hold mode via MQTT."""
            if CONF_HOLD_STATE_TEMPLATE in self._value_templates:
                payload = self._value_templates[CONF_HOLD_STATE_TEMPLATE].\
                    async_render_with_possible_json_value(payload)

            # Hold mode is free-form; any payload is accepted.
            self._hold = payload
            self.async_schedule_update_ha_state()

        if self._topic[CONF_HOLD_STATE_TOPIC] is not None:
            yield from mqtt.async_subscribe(
                self.hass, self._topic[CONF_HOLD_STATE_TOPIC],
                handle_hold_mode_received, self._qos)

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return self._current_operation

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return self._operation_list

    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        return self._target_temperature_step

    @property
    def is_away_mode_on(self):
        """Return if away mode is on."""
        return self._away

    @property
    def current_hold_mode(self):
        """Return hold mode setting."""
        return self._hold

    @property
    def is_aux_heat_on(self):
        """Return true if auxiliary heat is on."""
        return self._aux

    @property
    def current_fan_mode(self):
        """Return the fan setting."""
        return self._current_fan_mode

    @property
    def fan_list(self):
        """Return the list of available fan modes."""
        return self._fan_list

    @asyncio.coroutine
    def async_set_temperature(self, **kwargs):
        """Set new target temperatures."""
        if kwargs.get(ATTR_OPERATION_MODE) is not None:
            operation_mode = kwargs.get(ATTR_OPERATION_MODE)
            yield from self.async_set_operation_mode(operation_mode)

        if kwargs.get(ATTR_TEMPERATURE) is not None:
            if self._topic[CONF_TEMPERATURE_STATE_TOPIC] is None:
                # optimistic mode
                self._target_temperature = kwargs.get(ATTR_TEMPERATURE)

            if self._send_if_off or self._current_operation != STATE_OFF:
                mqtt.async_publish(
                    self.hass, self._topic[CONF_TEMPERATURE_COMMAND_TOPIC],
                    kwargs.get(ATTR_TEMPERATURE), self._qos, self._retain)

        self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_set_swing_mode(self, swing_mode):
        """Set new swing mode."""
        if self._send_if_off or self._current_operation != STATE_OFF:
            mqtt.async_publish(
                self.hass, self._topic[CONF_SWING_MODE_COMMAND_TOPIC],
                swing_mode, self._qos, self._retain)

        # Optimistic update when there is no state topic to confirm it.
        if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
            self._current_swing_mode = swing_mode
            self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_set_fan_mode(self, fan_mode):
        """Set new fan mode."""
        if self._send_if_off or self._current_operation != STATE_OFF:
            mqtt.async_publish(
                self.hass, self._topic[CONF_FAN_MODE_COMMAND_TOPIC],
                fan_mode, self._qos, self._retain)

        if self._topic[CONF_FAN_MODE_STATE_TOPIC] is None:
            self._current_fan_mode = fan_mode
            self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_set_operation_mode(self, operation_mode) -> None:
        """Set new operation mode."""
        # A dedicated power topic gets ON/OFF when crossing the off
        # boundary, in addition to the mode command itself.
        if self._topic[CONF_POWER_COMMAND_TOPIC] is not None:
            if (self._current_operation == STATE_OFF and
                    operation_mode != STATE_OFF):
                mqtt.async_publish(
                    self.hass, self._topic[CONF_POWER_COMMAND_TOPIC],
                    self._payload_on, self._qos, self._retain)
            elif (self._current_operation != STATE_OFF and
                  operation_mode == STATE_OFF):
                mqtt.async_publish(
                    self.hass, self._topic[CONF_POWER_COMMAND_TOPIC],
                    self._payload_off, self._qos, self._retain)

        if self._topic[CONF_MODE_COMMAND_TOPIC] is not None:
            mqtt.async_publish(
                self.hass, self._topic[CONF_MODE_COMMAND_TOPIC],
                operation_mode, self._qos, self._retain)

        if self._topic[CONF_MODE_STATE_TOPIC] is None:
            self._current_operation = operation_mode
            self.async_schedule_update_ha_state()

    @property
    def current_swing_mode(self):
        """Return the swing setting."""
        return self._current_swing_mode

    @property
    def swing_list(self):
        """List of available swing modes."""
        return self._swing_list

    @asyncio.coroutine
    def async_turn_away_mode_on(self):
        """Turn away mode on."""
        if self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None:
            mqtt.async_publish(self.hass,
                               self._topic[CONF_AWAY_MODE_COMMAND_TOPIC],
                               self._payload_on, self._qos, self._retain)

        if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is None:
            self._away = True
            self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_turn_away_mode_off(self):
        """Turn away mode off."""
        if self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None:
            mqtt.async_publish(self.hass,
                               self._topic[CONF_AWAY_MODE_COMMAND_TOPIC],
                               self._payload_off, self._qos, self._retain)

        if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is None:
            self._away = False
            self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_set_hold_mode(self, hold_mode):
        """Update hold mode on."""
        if self._topic[CONF_HOLD_COMMAND_TOPIC] is not None:
            mqtt.async_publish(self.hass,
                               self._topic[CONF_HOLD_COMMAND_TOPIC],
                               hold_mode, self._qos, self._retain)

        if self._topic[CONF_HOLD_STATE_TOPIC] is None:
            self._hold = hold_mode
            self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_turn_aux_heat_on(self):
        """Turn auxiliary heater on."""
        if self._topic[CONF_AUX_COMMAND_TOPIC] is not None:
            mqtt.async_publish(self.hass, self._topic[CONF_AUX_COMMAND_TOPIC],
                               self._payload_on, self._qos, self._retain)

        if self._topic[CONF_AUX_STATE_TOPIC] is None:
            self._aux = True
            self.async_schedule_update_ha_state()

    @asyncio.coroutine
    def async_turn_aux_heat_off(self):
        """Turn auxiliary heater off."""
        if self._topic[CONF_AUX_COMMAND_TOPIC] is not None:
            mqtt.async_publish(self.hass, self._topic[CONF_AUX_COMMAND_TOPIC],
                               self._payload_off, self._qos, self._retain)

        if self._topic[CONF_AUX_STATE_TOPIC] is None:
            self._aux = False
            self.async_schedule_update_ha_state()

    @property
    def supported_features(self):
        """Return the list of supported features.

        A feature is advertised when either its state topic or its
        command topic is configured.
        """
        support = 0

        if (self._topic[CONF_TEMPERATURE_STATE_TOPIC] is not None) or \
                (self._topic[CONF_TEMPERATURE_COMMAND_TOPIC] is not None):
            support |= SUPPORT_TARGET_TEMPERATURE

        if (self._topic[CONF_MODE_COMMAND_TOPIC] is not None) or \
                (self._topic[CONF_MODE_STATE_TOPIC] is not None):
            support |= SUPPORT_OPERATION_MODE

        if (self._topic[CONF_FAN_MODE_STATE_TOPIC] is not None) or \
                (self._topic[CONF_FAN_MODE_COMMAND_TOPIC] is not None):
            support |= SUPPORT_FAN_MODE

        if (self._topic[CONF_SWING_MODE_STATE_TOPIC] is not None) or \
                (self._topic[CONF_SWING_MODE_COMMAND_TOPIC] is not None):
            support |= SUPPORT_SWING_MODE

        if (self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None) or \
                (self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None):
            support |= SUPPORT_AWAY_MODE

        if (self._topic[CONF_HOLD_STATE_TOPIC] is not None) or \
                (self._topic[CONF_HOLD_COMMAND_TOPIC] is not None):
            support |= SUPPORT_HOLD_MODE

        if (self._topic[CONF_AUX_STATE_TOPIC] is not None) or \
                (self._topic[CONF_AUX_COMMAND_TOPIC] is not None):
            support |= SUPPORT_AUX_HEAT

        return support

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self._min_temp

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self._max_temp
| |
"""Utility functions used by projects.
"""
import fnmatch
import os
import re
import subprocess
import traceback
import logging
from httplib2 import Http
from django.conf import settings
from distutils2.version import NormalizedVersion, suggest_normalized_version
import redis
log = logging.getLogger(__name__)
def version_from_slug(slug, version):
    """Resolve a Version object for project *slug* and version *version*.

    Goes through the API when DONT_HIT_DB is set (the default), otherwise
    queries the ORM directly.
    """
    from projects import tasks
    from builds.models import Version
    from tastyapi import apiv2 as api
    if not getattr(settings, 'DONT_HIT_DB', True):
        return Version.objects.get(project__slug=slug, slug=version)
    data = api.version().get(project=slug, slug=version)['results'][0]
    return tasks.make_api_version(data)
def symlink(project, version='latest'):
    """Create subproject, cname and translation symlinks for a version."""
    from projects import symlinks
    ver = version_from_slug(project, version)
    log.info("Symlinking %s" % ver)
    for make_link in (symlinks.symlink_subprojects,
                      symlinks.symlink_cnames,
                      symlinks.symlink_translations):
        make_link(ver)
def update_static_metadata(project_pk):
    """Trigger the static-metadata rebuild task for one project."""
    log.info("Updating static metadata")
    from projects import tasks
    tasks.update_static_metadata(project_pk)
def find_file(file):
    """Return the paths of files under the cwd whose name matches *file*.

    *file* is an ``fnmatch``-style pattern; the search recurses through
    the current directory and every subdirectory.
    """
    return [os.path.join(base, name)
            for base, _dirs, names in os.walk('.')
            for name in fnmatch.filter(names, file)]
def run(*commands, **kwargs):
    """
    Run one or more commands, and return ``(status, out, err)``.

    If more than one command is given, then this is equivalent to
    chaining them together with ``&&``; if all commands succeed, then
    ``(status, out, err)`` will represent the last successful command.
    If one command failed, then ``(status, out, err)`` will represent
    the failed command.

    Keyword arguments:
        shell: run each command through the shell instead of splitting
            it into an argv list (default False).
    """
    environment = os.environ.copy()
    environment['READTHEDOCS'] = 'True'
    # Keep the spawned commands from inheriting our Django/Python context.
    if 'DJANGO_SETTINGS_MODULE' in environment:
        del environment['DJANGO_SETTINGS_MODULE']
    if 'PYTHONPATH' in environment:
        del environment['PYTHONPATH']
    cwd = os.getcwd()
    if not commands:
        raise ValueError("run() requires one or more command-line strings")
    shell = kwargs.get('shell', False)

    for command in commands:
        if shell:
            log.info("Running commands in a shell")
            run_command = command
        else:
            run_command = command.split()
        log.info("Running: '%s' [%s]" % (command, cwd))
        try:
            p = subprocess.Popen(run_command, shell=shell, cwd=cwd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, env=environment)
            out, err = p.communicate()
            ret = p.returncode
        # Narrowed from a bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt).
        except Exception:
            out = ''
            err = traceback.format_exc()
            ret = -1
            log.error("Command failed", exc_info=True)
        # Honour the documented ``&&`` semantics: stop at the first
        # failure so the returned triple describes the failed command.
        if ret != 0:
            break

    return (ret, out, err)
def safe_write(filename, contents):
    """Write ``contents`` to the given ``filename``. If the filename's
    directory does not exist, it is created. Contents are written as UTF-8,
    ignoring any characters that cannot be encoded as UTF-8.
    """
    dirname = os.path.dirname(filename)
    # Guard against a bare filename (dirname == ''), which would make
    # os.makedirs('') raise.
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    # Open in binary mode: we write already-encoded UTF-8 bytes.  The
    # original opened in text mode, which raises TypeError on Python 3
    # when handed bytes; 'wb' behaves identically on Python 2 and 3.
    with open(filename, 'wb') as fh:
        fh.write(contents.encode('utf-8', 'ignore'))
CUSTOM_SLUG_RE = re.compile(r'[^-._\w]+$')
def _custom_slugify(data):
return CUSTOM_SLUG_RE.sub('', data)
def slugify_uniquely(model, initial, field, max_length, **filters):
    """Return a slug for *initial*, truncated to *max_length*.

    NOTE(review): despite the name, this currently does NOT enforce
    uniqueness against *model* -- the de-duplication loop below has been
    disabled by turning it into an unused string literal.  Confirm
    whether that is intentional before relying on unique slugs.
    """
    slug = _custom_slugify(initial)[:max_length]
    current = slug
    # Disabled uniquification loop (dead string literal, kept verbatim):
    """
    base_qs = model.objects.filter(**filters)
    index = 0
    while base_qs.filter(**{field: current}).exists():
        suffix = '-%s' % index
        current = '%s%s' % (slug, suffix)
        index += 1
    """
    return current
def mkversion(version_obj):
    """Return a ``NormalizedVersion`` built from *version_obj*'s slug.

    *version_obj* may be a model instance (``.slug`` attribute) or a
    plain dict with a ``'slug'`` key.  Returns ``None`` when the slug
    cannot be normalised.
    """
    try:
        if hasattr(version_obj, 'slug'):
            slug = version_obj.slug
        else:
            slug = version_obj['slug']
        return NormalizedVersion(suggest_normalized_version(slug))
    except TypeError:
        return None
def highest_version(version_list):
    """Return ``[version, normalized]`` for the highest parseable entry.

    Entries whose slug cannot be normalised are skipped; ``[None, None]``
    is returned when nothing in *version_list* parses.
    """
    best_obj, best_ver = None, None
    for candidate in version_list:
        parsed = mkversion(candidate)
        if not parsed:
            # Unparseable slug -- ignore this candidate entirely.
            continue
        if best_ver is None or parsed > best_ver:
            best_obj, best_ver = candidate, parsed
    return [best_obj, best_ver]
def purge_version(version, mainsite=False, subdomain=False, cname=False):
    """Send PURGE requests to every configured Varnish server for *version*.

    The flags select which host variants are purged: the project
    subdomain, the main readthedocs.org site, and/or any CNAMEs
    registered for the project in redis.  No-op when VARNISH_SERVERS is
    not configured.
    """
    varnish_servers = getattr(settings, 'VARNISH_SERVERS', None)
    h = Http()
    if varnish_servers:
        for server in varnish_servers:
            if subdomain:
                # Send a request to the Server, to purge the URL of the Host.
                host = "%s.readthedocs.org" % version.project.slug
                headers = {'Host': host}
                url = "/en/%s/*" % version.slug
                to_purge = "http://%s%s" % (server, url)
                log.info("Purging %s on %s" % (url, host))
                h.request(to_purge, method="PURGE", headers=headers)
            if mainsite:
                headers = {'Host': "readthedocs.org"}
                url = "/docs/%s/en/%s/*" % (version.project.slug, version.slug)
                to_purge = "http://%s%s" % (server, url)
                log.info("Purging %s on readthedocs.org" % url)
                h.request(to_purge, method="PURGE", headers=headers)
                # Also purge the project's docs root.
                root_url = "/docs/%s/" % version.project.slug
                to_purge = "http://%s%s" % (server, root_url)
                log.info("Purging %s on readthedocs.org" % root_url)
                h.request(to_purge, method="PURGE", headers=headers)
            if cname:
                redis_conn = redis.Redis(**settings.REDIS)
                # Each CNAME host registered for the project gets its own
                # purge of the version path and of the site root.
                for cnamed in redis_conn.smembers('rtd_slug:v1:%s'
                                                  % version.project.slug):
                    headers = {'Host': cnamed}
                    url = "/en/%s/*" % version.slug
                    to_purge = "http://%s%s" % (server, url)
                    log.info("Purging %s on %s" % (url, cnamed))
                    h.request(to_purge, method="PURGE", headers=headers)
                    root_url = "/"
                    to_purge = "http://%s%s" % (server, root_url)
                    log.info("Purging %s on %s" % (root_url, cnamed))
                    h.request(to_purge, method="PURGE", headers=headers)
class DictObj(object):
    """Attribute-access wrapper: unknown attributes resolve to ``None``."""

    def __getattr__(self, attr):
        # Only invoked for attributes not found through normal lookup, so
        # missing names fall back to None instead of AttributeError.
        return vars(self).get(attr)
# Prevent saving the temporary Project instance
def _new_save(*args, **kwargs):
    """No-op stand-in for ``Model.save`` on temporary API-built objects."""
    log.warning("Called save on a non-real object.")
    return 0
def make_api_version(version_data):
    """Build an unsaved ``Version`` model from an API payload dict."""
    from builds.models import Version
    # API bookkeeping fields are not model fields; drop them if present.
    for extra in ('resource_uri', 'absolute_url'):
        version_data.pop(extra, None)
    version_data['project'] = make_api_project(version_data['project'])
    ver = Version(**version_data)
    # Block accidental database writes of this synthetic object.
    ver.save = _new_save
    return ver
def make_api_project(project_data):
    """Build an unsaved ``Project`` model from an API payload dict."""
    from projects.models import Project
    # Strip API-only fields that the model constructor would reject.
    for extra in ('users', 'resource_uri', 'absolute_url', 'downloads',
                  'main_language_project', 'related_projects'):
        project_data.pop(extra, None)
    project = Project(**project_data)
    # Block accidental database writes of this synthetic object.
    project.save = _new_save
    return project
| |
import re, hashlib, random, json, csv, sys
from datetime import datetime, timedelta, tzinfo
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.cache import caches
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.files.uploadhandler import MemoryFileUploadHandler
from django.core.validators import validate_email
from django.db.models import ProtectedError
from django.forms import ValidationError
from django.forms.models import modelformset_factory, inlineformset_factory
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import RequestContext
from django.utils.datastructures import MultiValueDictKeyError
from django.views.defaults import page_not_found, permission_denied, bad_request
from itertools import chain
from polls import models
from polls.includes import forms, email_messages
from pprint import pprint
#################################################
#### CACHE-BASED MESSAGE / PARAMETER PASSING ####
#################################################
# Store a flash message to be displayed on the next page the user loads.
def set_cache_message(user, msg_type, msg):
    """Store *msg* in the cache so the user's next page view can show it.

    *msg_type* selects the slot: 'error', 'warning', 'success'; anything
    else is treated as 'info'.  Anonymous users are silently ignored.
    """
    if not user.is_authenticated():
        return
    slot = {
        'error': 'error_msg',
        'warning': 'warning_msg',
        'success': 'success_msg',
    }.get(msg_type, 'info_msg')
    # Per-user key, hashed so cache keys stay opaque and uniform.
    cache_key = hashlib.sha256(
        ('%d_%s' % (user.pk, slot)).encode('utf-8')).hexdigest()
    caches['default'].set(cache_key, msg)
# Read and clear the flash-message variables, if any exist.
def caches_messages(user):
    """Pop the four flash-message slots for *user* from the cache.

    Returns ``(error_msg, warning_msg, success_msg, info_msg)``; each is
    ``None`` when unset.  Slots are cleared after reading so every
    message is shown exactly once.  Returns ``None`` (implicitly) for
    anonymous users.
    """
    if not user.is_authenticated():
        return
    cache = caches['default']
    messages = []
    # One loop replaces four copy-pasted key/get/set stanzas.
    for slot in ('error_msg', 'warning_msg', 'success_msg', 'info_msg'):
        key = hashlib.sha256(
            ('%d_%s' % (user.pk, slot)).encode('utf-8')).hexdigest()
        messages.append(cache.get(key, None))
        # Clear by overwriting with None (mirrors the original writer's
        # behaviour) rather than cache.delete().
        cache.set(key, None)
    return tuple(messages)
def set_cache_param(user, name, value):
    """Stash *value* under a per-user cache key derived from *name*."""
    if not user.is_authenticated():
        return
    digest = hashlib.sha256(
        ('%d_%s' % (user.pk, name)).encode('utf-8')).hexdigest()
    caches['default'].set(digest, value)
def caches_param(user, name):
    """Pop the per-user cache parameter *name* (``None`` when unset)."""
    if not user.is_authenticated():
        return
    cache = caches['default']
    digest = hashlib.sha256(
        ('%d_%s' % (user.pk, name)).encode('utf-8')).hexdigest()
    value = cache.get(digest, None)
    # One-shot semantics: clear the slot after reading.
    cache.set(digest, None)
    return value
#################################################
#################################################
def login_view(request):
    """Combined login / registration page.

    GET renders the page; POST dispatches on the hidden ``wichform``
    field: 'registration' creates an inactive account and emails an
    activation link, anything else attempts a login.  Already-active
    users are redirected straight through to ``next`` (or home).
    """
    login_active = "active"
    login_form = forms.LoginForm()
    reg_form = forms.RegisterForm()
    # Default: keep the registration modal closed.
    js_actions = "$('#registerForm').modal('hide')"
    error_msg = ''
    register_error = ''
    info_msg = ''
    # Already logged-in users skip the form entirely.
    if request.user is not None and request.user.is_active:
        try:
            redir = request.GET['next'];
        except MultiValueDictKeyError:
            redir = '/polls/home/';
        return HttpResponseRedirect(redir)
    if (request.method == 'POST'):
        if (request.POST['wichform'] == 'registration'):
            reg_form = forms.RegisterForm(request.POST)
            if reg_form.is_valid():
                password = request.POST['password']
                first_name = request.POST['first_name']
                last_name = request.POST['last_name']
                email = request.POST['email']
                # The email doubles as the username.
                new_user = User.objects.create_user(username=email, password=password, first_name=first_name, last_name=last_name, email=email)
                # Account stays inactive until the emailed key is used.
                new_user.is_active = False
                new_user.save()
                # Send activation email
                salt = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).hexdigest()[:5]
                activation_key = hashlib.sha256((salt+email).encode('utf-8')).hexdigest()
                # Activation link is valid for 48 hours.
                key_expires = datetime.now() + timedelta(2)
                new_user_profile = models.UserProfile(user=new_user, activation_key=activation_key, key_expires=key_expires)
                new_user_profile.save()
                new_user_profile.send_activation_email()
                reg_form = forms.RegisterForm()
                info_msg = "Thank you for your registration. You will now receive an activation email. Please activate your account within the next 48 hours."
            else:
                # Re-open the modal so validation errors are visible.
                js_actions = "$('#registerForm').modal('show')"
        else:
            login_form = forms.LoginForm(request.POST)
            email = request.POST['email']
            password = request.POST['password']
            user = authenticate(username=email, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    try:
                        redir = request.GET['next'];
                    except MultiValueDictKeyError:
                        redir = '/polls/home/';
                    return HttpResponseRedirect(redir)
                else:
                    info_msg = 'Your user has not been activated yet. If the problem persist, please contact us.'
            else:
                error_msg = 'Wrong username or password. Please try again.'
    return render(
        request,
        'polls/login.html',
        context={
            'login_form': login_form,
            'reg_form': reg_form,
            'error_msg': error_msg,
            'info_msg': info_msg,
            'js_actions': js_actions,
            'login_active': login_active,
            'register_error': register_error
        }
    )
def logout_view(request):
    """Log the user out, then send them back to the login page."""
    logout(request)
    return redirect('/polls/login/')
def activate_account(request, activation_key):
    """Activate the account matching *activation_key* and render the result.

    Shows an error when the key is unknown (or already used) or when the
    activation window has expired.
    """
    msg = ''
    user_profile = None
    status = False
    try:
        user_profile = models.UserProfile.objects.get(activation_key=activation_key)
        status = user_profile.activate_account(activation_key)
        if not status:
            msg = 'Sorry, your activation link has expired. Please register again.'
        else:
            # Fixed user-facing typos ("Congratulatins", "succesfully").
            msg = 'Congratulations! You have activated your account successfully. You can now login into BBPolls.'
    except ObjectDoesNotExist:
        msg = "Sorry, your account could not be found or you have already activated your account."
    return render(request, 'polls/activate_account.html',
                  {'user_profile': user_profile, 'msg': msg, 'status': status})
@login_required(login_url='/polls/login')
def polls_index(request):
    """Render the pollster dashboard (published/draft/archived polls).

    Only members of the ``sys_pollsters`` group may view this page;
    anyone else is redirected home with an error flash message.
    """
    mypolls_active = 'active'  # highlights the "My polls" nav entry
    js_file = "polls_index.js"
    # Group membership lookup doubles as the permission gate.
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    published_polls = models.Poll.objects.filter(user=request.user, poll_status=models.Poll.ST_PUBLISHED).order_by("publish_date")
    draft_polls = models.Poll.objects.filter(user=request.user, poll_status=models.Poll.ST_DRAFT).order_by("-last_modified")
    archived_polls = models.Poll.objects.filter(user=request.user, poll_status=models.Poll.ST_ARCHIVED).order_by("-archive_date")
    # send_poll_form = forms.SendPollForm()
    # Pop any flash messages left by a previous request.
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/polls_index.html',
                  {'published_polls': published_polls,
                   'username': request.user.username,
                   'draft_polls': draft_polls,
                   'archived_polls': archived_polls,
                   'error_msg': error_msg,
                   'warning_msg': warning_msg,
                   'success_msg': success_msg,
                   'info_msg': info_msg,
                   'js_file': js_file,
                   'send_poll_form': forms.SendPollForm(),
                   'is_pollster': is_pollster,
                   'mypolls_active': mypolls_active});
def _extract_emails(text):
    """Parse free-form *text* into a deduplicated list of valid emails.

    Addresses may be separated by newlines, commas, semicolons or
    whitespace -- in any combination.  Invalid tokens are skipped.
    """
    emails = []
    for token in re.split(r'[,;\s]+', text):
        token = token.strip()
        if not token:
            continue
        try:
            validate_email(token)
        except ValidationError:
            continue
        emails.append(token)
    return list(set(emails))


@login_required(login_url='/polls/login')
def send_poll(request, poll_id):
    """Send poll invitations to a list of emails pasted by the pollster.

    The original parser chose exactly ONE delimiter per line (',' else
    ';' else space), so a line mixing delimiters silently dropped valid
    addresses; ``_extract_emails`` now splits on any combination.
    """
    # Only members of the sys_pollsters group may send polls.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    if (request.method == 'POST'):
        emails = _extract_emails(request.POST["emails"])
        if not emails:
            set_cache_message(request.user, "warning", "No emails were found")
        else:
            poll.send_poll(emails)
            set_cache_message(request.user, "success", "Invitations sent!")
    return HttpResponseRedirect("/polls/my-polls/")
@login_required(login_url='/polls/login')
def publish_poll(request, poll_id):
    """Move a draft poll into the published state.

    Archived polls cannot be re-published, and a poll must be doable
    (every question answerable) before publishing.
    """
    # Only members of the sys_pollsters group may publish.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        if (poll.poll_status == models.Poll.ST_ARCHIVED):
            set_cache_message(request.user, "error", "Sorry! An archived poll cannot be unarchived")
            return HttpResponseRedirect("/polls/my-polls/")
        elif (not poll.is_doable):
            # Fixed "proeprly" typo in the user-facing message.
            set_cache_message(request.user, "error", "Sorry! Is not possible to publish this poll. At least one question in this poll that cannot be properly answered")
            return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    # (Removed leftover pprint() debugging output to stdout.)
    poll.poll_status = models.Poll.ST_PUBLISHED
    poll.publish_date = datetime.now()
    poll.save()
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def archive_poll(request, poll_id):
    """Archive a published poll, discarding its incomplete responses."""
    # Only members of the sys_pollsters group may archive.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        if (poll.poll_status == models.Poll.ST_DRAFT):
            set_cache_message(request.user, "error", "Sorry! Only published polls may be archived")
            return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    # Incomplete responses are meaningless once archived; drop them.
    models.Response.objects.filter(poll=poll, is_completed=False).delete()
    # (Removed leftover pprint() debugging output to stdout.)
    poll.poll_status = models.Poll.ST_ARCHIVED
    poll.archive_date = datetime.now()
    poll.save()
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def unpublish_poll(request, poll_id):
    """Return a published poll to the draft state.

    Refused when the requester is not a pollster, when the poll is
    archived, or when any completed response exists (the collected
    answers would otherwise be orphaned).  Incomplete responses are
    discarded before the status change.
    """
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        if (poll.poll_status == models.Poll.ST_ARCHIVED):
            set_cache_message(request.user, "error", "Sorry! An archived poll cannot be unarchived")
            return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    # exists() instead of queryset truthiness: issues a cheap EXISTS query
    # rather than fetching the rows.
    if models.Response.objects.filter(poll=poll, is_completed=True).exists():
        # Fixed grammar of the user-facing message ("unpublish" -> "unpublished").
        set_cache_message(request.user, "error", "Sorry! This poll has already been answered and cannot be unpublished.")
        return HttpResponseRedirect("/polls/my-polls/")
    models.Response.objects.filter(poll=poll, is_completed=False).delete()
    pprint("UNPUBLISH Current status: %s" % poll.poll_status, sys.stdout)
    poll.poll_status = models.Poll.ST_DRAFT
    poll.save()
    pprint("UNPUBLISH New status: %s" % poll.poll_status, sys.stdout)
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def create_poll(request):
    """Create a new poll, either empty (from a name) or imported from a
    previously exported JSON file.

    Fix: ``json.loads`` on the uploaded file could raise an uncaught
    ``ValueError`` on malformed JSON, producing a server error; the
    parse error is now reported on the import form instead.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    mypolls_active = 'active'
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true})"
    create_form = forms.PollCreateForm(request.POST or None, prefix="create")
    import_form = forms.PollImportForm(request.POST or None, request.FILES or None, prefix="import")
    if (request.method == 'POST'):
        if ('create' in request.POST):  # Create an empty poll from its name.
            if create_form.is_valid():
                poll_name = request.POST['create-name']
                p = models.Poll(name=poll_name, user=request.user)
                p.save()
                return HttpResponseRedirect('/polls/manage-poll/%d/' % p.pk)
        elif ('import' in request.POST):  # Import an exported poll.
            if (import_form.is_valid()):
                # Read the upload in chunks so large files are not pulled
                # in through a single read() call.
                data = b''
                for chunk in request.FILES['import-import_file'].chunks():
                    data += chunk
                try:
                    json_data = json.loads(data)
                    poll = models.Poll.import_poll(json_data, request.user)
                    return HttpResponseRedirect('/polls/manage-poll/%d/' % poll.pk)
                except ValidationError as ve:
                    import_form.errors["import_file"] = [ve.messages[0]]
                except ValueError:
                    # Malformed JSON upload: surface it on the form.
                    import_form.errors["import_file"] = ["The uploaded file is not valid JSON."]
    return render(request, 'polls/create-poll.html',
            {'create_form':create_form,
            'username':request.user.username,
            'import_form':import_form,
            'js_actions':js_actions,
            'is_pollster':is_pollster,
            'mypolls_active':mypolls_active})
@login_required(login_url='/polls/login')
def manage_poll(request, poll_id):
    """Edit page for one of the requesting user's polls.

    Shows the poll form plus its ordered questions.  The form is only
    writable while the poll is a draft; otherwise it is rendered disabled.
    A valid POST saves the poll and redirects to the poll list.
    """
    mypolls_active = 'active'
    js_file = "manage-poll.js"
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true});"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    # Restore the scroll offset saved by the question-reorder views, if any,
    # so the page reopens at the same position.
    scroll = caches_param(request.user, "scroll")
    pprint(scroll, sys.stderr)
    if scroll:
        js_actions += "$('body').scrollTop(%s)" % scroll
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        poll_form = forms.PollForm(instance = poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to access does not exist anymore.");
        return HttpResponseRedirect("/polls/my-polls/")
    # Only drafts may be edited.
    can_edit = poll.poll_status == models.Poll.ST_DRAFT;
    if not can_edit:
        poll_form.disable()
    question_queryset = models.Question.objects.filter(poll=poll).order_by('order');
    if (request.method == 'POST' and can_edit):
        # Rebind the form to the submitted data before validating/saving.
        poll_form = forms.PollForm(request.POST, instance=poll)
        if poll_form.is_valid():
            poll_form.save()
            return HttpResponseRedirect('/polls/my-polls/')
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/manage-poll.html',
            {'poll_form':poll_form,
            'username':request.user.username,
            'question_queryset':question_queryset,
            'poll':poll,
            'js_file':js_file,
            'js_actions':js_actions,
            'error_msg':error_msg,
            'warning_msg':warning_msg,
            'success_msg':success_msg,
            'info_msg':info_msg,
            'mypolls_active':mypolls_active,
            'is_pollster':is_pollster,
            'can_edit':can_edit});
@login_required(login_url='/polls/login')
def add_question(request, poll_id):
    """Create a new question (with its choices) inside one of the user's polls.

    POST 'submit' values:
      * 'Save'             -- save question + choices, back to the poll page.
      * 'Save and add new' -- save, then reopen an empty question form.
      * anything else      -- re-render with 'number-choices' extra choice rows.
    """
    mypolls_active = 'active'
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true})"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The poll you are trying to create a question within, does not exist anymore.");
        return HttpResponseRedirect("/polls/my-polls/")
    # A poll with at least one completed response is frozen for editing.
    try:
        response = models.Response.objects.get(poll=poll, is_completed=True)
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be edited.");
        return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        pass
    question_form = forms.AddQuestionForm(request.POST or None)
    # Start with three empty choice rows; more can be requested below.
    BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=3, can_delete=False)
    choice_formset = BaseChoiceFormset()
    if (request.method == 'POST'):
        # NOTE(review): request.POST['submit'] raises KeyError when the key is
        # absent from the POST body -- confirm every submitting form sends it.
        if (request.POST['submit'] == 'Save'):
            if question_form.is_valid():
                new_question = question_form.save(commit=False)
                new_question.poll = poll
                new_question.save()
                choice_formset = BaseChoiceFormset(request.POST, instance=new_question)
                if choice_formset.is_valid():
                    choice_formset.save()
                set_cache_message(request.user, 'success', 'New question created')
                return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
        elif(request.POST['submit'] == 'Save and add new'):
            if question_form.is_valid():
                new_question = question_form.save(commit=False)
                new_question.poll = poll
                new_question.save()
                choice_formset = BaseChoiceFormset(request.POST, instance=new_question)
                if choice_formset.is_valid():
                    choice_formset.save()
                # NOTE(review): this uses forms.QuestionForm while the view
                # otherwise uses AddQuestionForm, and the assignment is
                # immediately followed by a redirect, so it appears unused
                # -- confirm intent.
                question_form = forms.QuestionForm(None)
                set_cache_message(request.user, 'success', 'New question created')
                return HttpResponseRedirect('/polls/manage-poll/%s/add-question/' % poll_id)
        else:
            # Any other submit value: the user asked for more choice rows.
            more_choices = request.POST['number-choices']
            if not more_choices:
                more_choices=0
            try:
                more_choices = int(more_choices)
                if more_choices < 0:
                    more_choices = 0
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=more_choices+3, can_delete=False)
            except ValueError:
                # Non-numeric input: fall back to the default three rows.
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=3, can_delete=False)
            choice_formset = BaseChoiceFormset()
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/manage-question.html',
            {'question_form':question_form,
            'username':request.user.username,
            'poll':poll, 'choice_formset':choice_formset,
            'question_index':poll.number_questions+1,
            'create_question':True,
            'js_actions':js_actions,
            'error_msg':error_msg,
            'warning_msg':warning_msg,
            'success_msg':success_msg,
            'info_msg':info_msg,
            'is_pollster':is_pollster,
            'mypolls_active':mypolls_active});
@login_required(login_url='/polls/login')
def increase_question_order(request, poll_id, question_id, scroll):
    """Move a question one position later, then bounce back to the poll page."""
    # Only pollsters may reorder questions.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        owner_poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        target = models.Question.objects.get(pk=question_id, poll=owner_poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The question you are trying to increase order to does not exist anymore");
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    pprint(scroll, sys.stderr)
    if scroll:
        # Remember the scroll offset so the poll page reopens at the same spot.
        set_cache_param(request.user, "scroll", scroll)
    target.increase_order()
    return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
@login_required(login_url='/polls/login')
def decrease_question_order(request, poll_id, question_id, scroll):
    """Move a question one position earlier, then bounce back to the poll page."""
    # Only pollsters may reorder questions.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        owner_poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        target = models.Question.objects.get(pk=question_id, poll=owner_poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The question you are trying to decrease order to does not exist anymore");
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    pprint(scroll, sys.stderr)
    if scroll:
        # Remember the scroll offset so the poll page reopens at the same spot.
        set_cache_param(request.user, "scroll", scroll)
    target.decrease_order()
    return HttpResponseRedirect('/polls/manage-poll/%s/'% poll_id)
@login_required(login_url='/polls/login')
def manage_question(request, poll_id, question_id):
    """Edit page for a single question of one of the user's polls.

    GET renders the question form, its choice formset and the multimedia
    source summary.  POST 'Save' persists question + choices; any other
    submit value re-renders the formset with 'number-choices' extra rows.
    Editing is disabled unless the poll is still a draft.
    """
    mypolls_active = 'active'
    manage_only = 'manage-only'
    js_file = "manage-question.js"
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true})"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
    except ObjectDoesNotExist:
        # NOTE(review): message says "delete" but this is the manage view --
        # looks copy-pasted from remove_question; confirm wording.
        set_cache_message(request.user, 'error', "The question you are trying to delete does not exist anymore.")
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    # Only drafts may be edited.
    can_edit = poll.poll_status == models.Poll.ST_DRAFT;
    # Compute the question's 1-based position within the poll for display.
    i = 0;
    for q in models.Question.objects.filter(poll=poll):
        i +=1;
        if (q.pk == question.pk):
            break;
    BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=0)
    multimedia_sources = models.MultimediaSource.objects.filter(question=question).order_by('media_type')
    choice_formset = BaseChoiceFormset(request.POST or None, instance=question)
    if (request.method == 'POST' and can_edit):
        # NOTE(review): request.POST['submit'] raises KeyError if the key is
        # missing from the POST body -- confirm all submitting forms send it.
        if (request.POST['submit'] == 'Save'):
            question_form = forms.QuestionForm(request.POST, instance=question)
            if question_form.is_valid():
                question = question_form.save()
                if choice_formset.is_valid():
                    choice_formset.save();
                    return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
        else:
            # Any other submit value: the user asked for extra choice rows.
            more_choices = request.POST['number-choices']
            if not more_choices:
                more_choices=0
            try:
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=int(more_choices))
            except ValueError:
                # Non-numeric input: no extra rows.
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=0)
            question_form = forms.QuestionForm(instance=question)
            choice_formset = BaseChoiceFormset(instance=question)
    if not can_edit:
        # Render everything read-only for published/archived polls.
        question_form.disable()
        for choice_form in choice_formset:
            choice_form.disable()
    # Availability banners for each media type attached to the question.
    video_message = "You have %d video sources available" % question.number_video_srcs
    if (question.number_video_srcs > 0):
        video_class = "alert-success"
    else:
        video_class = "alert-danger"
    audio_message = "You have %d audio sources available" % question.number_audio_srcs
    if (question.number_audio_srcs > 0):
        audio_class = "alert-success"
    else:
        audio_class = "alert-danger"
    image_message = "You have %d image sources available" % question.number_image_srcs
    if (question.number_image_srcs > 0):
        image_class = "alert-success"
    else:
        image_class = "alert-danger"
    iframe_message = "You have %d iframe sources available" % question.number_iframe_srcs
    if (question.number_iframe_srcs > 0):
        iframe_class = "alert-success"
    else:
        iframe_class = "alert-danger"
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/manage-question.html',
            {'question_form':question_form,
            'username':request.user.username,
            'poll':poll,
            'question_index':i,
            'question_pk':question_id,
            'choice_formset':choice_formset,
            'multimedia_sources':multimedia_sources,
            'manage_only':manage_only,
            'mypolls_active':mypolls_active,
            'create_question':False,
            'error_msg':error_msg,
            'warning_msg':warning_msg,
            'success_msg':success_msg,
            'info_msg':info_msg,
            'image_message': image_message,
            'image_class': image_class,
            'audio_message': audio_message,
            'audio_class': audio_class,
            'video_message': video_message,
            'video_class': video_class,
            'iframe_message': iframe_message,
            'iframe_class': iframe_class,
            'js_file': js_file,
            'js_actions' : js_actions,
            'is_pollster':is_pollster,
            'can_edit':can_edit});
@login_required(login_url='/polls/login')
def clone_poll(request, poll_id):
    """Duplicate one of the requesting user's polls.

    Fix: the original returned a response only on the error path; a
    successful clone fell off the end of the function and returned None,
    which is not a valid Django view response.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        poll.clone()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to clone does not exist anymore.")
    # Redirect on success as well as on error (previously only the error
    # branch returned a response).
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def remove_poll(request, poll_id):
    """Delete one of the requesting user's polls.

    Fix: the not-found message said "trying to clone" (copy-pasted from
    clone_poll); it now says "remove".
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        poll.delete()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to remove does not exist anymore.")
    except ProtectedError:
        # Responses protect their poll at the DB level.
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be removed.")
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def remove_question(request, poll_id, question_id):
    """Delete a question from one of the requesting user's polls.

    Fix: the answered-poll guard previously ran *after*
    ``question.delete()``, so it could never prevent the deletion.  The
    check now runs first, and uses ``exists()`` so multiple completed
    responses cannot raise MultipleObjectsReturned.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The question you are trying to delete does not exist anymore.")
        return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
    # Guard BEFORE deleting: an answered poll must stay intact.
    if models.Response.objects.filter(poll=poll, is_completed=True).exists():
        set_cache_message(request.user, 'error', "Sorry! The poll have been already answered and cannot be edited.")
        return HttpResponseRedirect('/polls/my-polls/')
    question.delete()
    set_cache_message(request.user, 'success', "Question successfully removed")
    return HttpResponseRedirect('/polls/manage-poll/%s/' % poll.pk)
@login_required(login_url='/polls/login')
def add_multimedia_source(request, poll_id, question_id, source='url'):
    """Attach a multimedia source (currently URL-based only) to a question.

    Fix: when ``source`` was not 'url' (the 'file' branch is a
    placeholder), ``multimedia_form`` was never bound and the final
    ``render()`` raised NameError.  The form is now pre-initialized.
    """
    mypolls_active = "active"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The question you are trying to delete does not exist anymore.")
        return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
    # A poll with at least one completed response is frozen for editing.
    try:
        response = models.Response.objects.get(poll=poll, is_completed=True)
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be edited.");
        return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        pass
    # Compute the question's 1-based position within the poll for display.
    i = 0
    for q in models.Question.objects.filter(poll=poll):
        i += 1
        if (q.pk == question.pk):
            break
    # Default form instance: guarantees multimedia_form is bound on every
    # path (previously NameError for source != 'url').
    multimedia_form = forms.MultimediaSourceFormURL()
    if (source == 'url'):
        if (request.method == 'POST'):
            multimedia_form = forms.MultimediaSourceFormURL(request.POST)
            if multimedia_form.is_valid():
                try:
                    mmsrc = multimedia_form.save(commit=False)
                    mmsrc.question = question
                    mmsrc.validate_mime_type()
                    mmsrc.save()
                    set_cache_message(request.user, 'success', "Multimedia source successfully created")
                    return HttpResponseRedirect('/polls/manage-poll/%s/manage-question/%s/' % (poll.pk, question.pk))
                except ValidationError as ve:
                    # Keep the submitted data and surface the MIME error on the field.
                    multimedia_form = forms.MultimediaSourceFormURL(request.POST)
                    multimedia_form.errors["url_source"] = [ve.messages[0]]
    elif (source == 'file'):
        pass  # File uploads are not implemented yet.
    else:
        pass  # Unknown source kinds fall through to the default form.
    return render(request, 'polls/add-multimedia-source.html',
            {'multimedia_form':multimedia_form,
            'username':request.user.username,
            'poll':poll,
            'question':question,
            'question_index':i,
            'mypolls_active':mypolls_active})
@login_required(login_url='/polls/login')
def remove_multimedia_source(request, poll_id, question_id, mmsrc_id):
    """Delete one multimedia source attached to a question.

    Fix: the error path redirected using ``poll.pk``/``question.pk``,
    which are unbound (NameError) when the poll or question lookup itself
    failed; the URL parameters are used instead.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
        mmsrc = models.MultimediaSource.objects.get(pk=mmsrc_id, question=question)
        mmsrc.delete()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The source you are trying to delete does not exist anymore.")
        # Use the URL parameters: the local poll/question variables are
        # unbound when their own lookup raised.
        return HttpResponseRedirect("/polls/manage-poll/%s/manage-question/%s/" % (poll_id, question_id))
    set_cache_message(request.user, 'success', "Multimedia source successfully removed")
    return HttpResponseRedirect("/polls/manage-poll/%s/manage-question/%s/" % (poll.pk, question.pk))
def do_survey(request, poll_id, try_poll=False, invitation_key=None):
    """Render and process a poll being answered.

    Entry modes:
      * authenticated user answering a poll (access checked against the
        poll's access type, allowed users/groups and ownership);
      * anonymous respondent carrying an ``invitation_key``;
      * ``try_poll=True``: the pollster previews the poll; POSTs are ignored.

    A POST replaces any previous (incomplete) response with the submitted
    choices and verbatims, then marks the response complete only when every
    question has at least one selected choice.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    try:
        poll = models.Poll.objects.get(pk=poll_id)
    except ObjectDoesNotExist:
        if try_poll:
            set_cache_message(request.user, 'error', "Poll not found.")
            return HttpResponseRedirect('/polls/my-polls/')
        else:
            set_cache_message(request.user, 'error', "Poll not found.")
            return HttpResponseRedirect('/polls/home/')
    # Check that the user has permission to access the poll.
    if (invitation_key is not None):
        # Anonymous access through an invitation link.
        try:
            poll_invitation = models.AnonymousInvitation.objects.get(poll=poll, key=invitation_key)
            anonymous_poll = True
            # A used-up invitation cannot answer again.
            if (poll_invitation.response is not None and poll_invitation.response.is_completed):
                return HttpResponseRedirect('/polls/login/')
        except ObjectDoesNotExist:
            return HttpResponseRedirect('/polls/login/?next=/polls/do-poll/%d/' % poll.pk)
    elif (request.user.is_authenticated()):
        print("auth user")
        if (poll.access_type != models.Poll.AT_PUBLIC):
            # NOTE(review): `request.user.groups not in poll.allowed_groups.all()`
            # compares the related *manager* object against the group list, not
            # the user's individual groups -- confirm this check is intended.
            if ((request.user not in poll.allowed_users.all()
                    and request.user.groups not in poll.allowed_groups.all())
                    and (request.user != poll.user and not try_poll)):
                print("not allowed user")
                set_cache_message(request.user, 'error', "Sorry! You don't have permission to access this poll.")
                return HttpResponseRedirect('/polls/home/')
        anonymous_poll = False;
    else:
        print("neither invitation_key, neither allowed_user")
        return HttpResponseRedirect('/polls/login/?next=%s' % request.path)
    if (poll.randomize_questions):
        # '?' ordering shuffles the questions per request.
        questions = models.Question.objects.filter(poll=poll).order_by('?')
    else:
        questions = models.Question.objects.filter(poll=poll).order_by('order')
    choices = models.Choice.objects.filter(question__in=questions)
    # Locate any existing response: per-user for logged-in respondents,
    # via the invitation for anonymous ones.
    if (not anonymous_poll):
        try:
            response = models.Response.objects.get(poll=poll, user=request.user)
        except ObjectDoesNotExist:
            response = None
    else:
        response = poll_invitation.response
    error_msg = None;
    if (request.method == 'POST') and not try_poll:
        if response is not None:
            # Resubmission: drop the previously selected choices/verbatims.
            response.choices.clear()
            models.Verbatim.objects.filter(response=response).delete()
        else:
            try:
                response = models.Response(poll=poll, user=request.user)
            except ValueError:
                if anonymous_poll:
                    # Anonymous responses carry no user and hang off the invitation.
                    response = models.Response(poll=poll, user=None)
                    poll_invitation.response = response
                    poll_invitation.save()
                else:
                    set_cache_message(request.user, 'error', "Unexpected error occurred when attempting to save your response. Please contact the administrator.")
                    return HttpResponseRedirect(request.path)
        response.save()
        # Collect the submitted answers; fields look like 'q<N>_choice<M>'.
        for field, value in request.POST.items():
            if re.match('^q\d*_choice\d*$', field) == None:
                continue
            try:
                choice = models.Choice.objects.get(pk=int(value), question__poll=poll)
                response.choices.add(choice)
                if not choice.is_fixed:
                    # Free-text choices carry an accompanying verbatim field.
                    v = models.Verbatim(response=response, choice=choice, verbatim=request.POST['%s_verbatim' % choice.pk])
                    v.save()
            except (ObjectDoesNotExist, ValueError):
                error_msg = "Corrupted data, please try again."
                break
        if error_msg:
            # Discard the partially-built response on corrupted input.
            set_cache_message(request.user, "error", error_msg)
            response.delete();
        else:
            if request.user.is_authenticated():
                error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
            # Completed only if every question has at least one chosen choice:
            # repeatedly exclude each question's choices and check non-emptiness.
            cs = response.choices.all()
            completed = True
            for q in models.Question.objects.filter(poll=poll):
                if not cs.exists():
                    completed = False
                    break
                cs = cs.exclude(question=q)
            if completed: # Complete also saves the Response
                set_cache_message(request.user, "success", "You have successfully completed the poll. Thank you!")
                response.set_complete()
            else:
                set_cache_message(request.user, "info", "The poll has not been completed. You may finish it in the \"Ongoing Polls\" section at the home page.")
            return HttpResponseRedirect('/polls/home/')
    elif request.user.is_authenticated():
        error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    # Anonymous respondents get the stripped-down template without navigation.
    if anonymous_poll:
        template = "non-auth-do_survey.html"
        username = None
    else:
        template = "do_survey.html"
        username = request.user.username
    return render(request, 'polls/%s' % template,
            {'poll':poll,
            'username':username,
            'questions':questions,
            'choices':choices,
            'response':response,
            'try_poll':try_poll,
            'error_msg':error_msg,
            'anonymous_poll':anonymous_poll,
            'is_pollster':is_pollster});
@login_required(login_url='/polls/login')
def review_survey(request, poll_id):
    """Read-only view of the user's completed response to a poll."""
    # The pollster flag only drives extra navigation entries.
    try:
        request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        response = models.Response.objects.get(poll=poll, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "You have not completed this poll yet.")
        return HttpResponseRedirect('/polls/home/')
    if not response.is_completed:
        # A response exists but was never finished: nothing to review.
        set_cache_message(request.user, 'error', "You have not completed this poll yet.")
        return HttpResponseRedirect('/polls/home/')
    questions = models.Question.objects.filter(poll=poll)
    choices = models.Choice.objects.filter(question__in=questions)
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    context = {'response': response,
               'username': request.user.username,
               'choices': choices,
               'questions': questions,
               'poll': poll,
               'error_msg': error_msg,
               'is_pollster': is_pollster}
    return render(request, 'polls/review_survey.html', context)
@login_required(login_url='/polls/login')
def remove_response(request, poll_id):
    """Delete the requesting user's response to the given poll.

    Fix: a successful delete used to fall off the end of the function and
    return None (an invalid view response); it now redirects home like
    the error path does.
    """
    try:
        response = models.Response.objects.get(poll__pk=poll_id, user=request.user)
        response.delete()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "You have not completed this poll yet.")
    return HttpResponseRedirect('/polls/home/')
@login_required(login_url='/polls/login')
def home(request):
    """Landing page: polls the user can answer, plus their completed and
    ongoing responses."""
    home_active = 'active'
    # Checking pollster permission
    try:
        g = request.user.groups.get(name='sys_pollsters')
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    # Published, unfinished polls visible to this user, excluding their own:
    # public polls, group-restricted polls, and explicitly shared private polls.
    public_polls = models.Poll.objects.filter(poll_status=models.Poll.ST_PUBLISHED, access_type=models.Poll.AT_PUBLIC, is_finished=False).exclude(user=request.user)
    restricted_polls = models.Poll.objects.filter(poll_status=models.Poll.ST_PUBLISHED, access_type=models.Poll.AT_RESTRICTED, allowed_groups__in=request.user.groups.all(), is_finished=False).exclude(user=request.user)
    private_polls = models.Poll.objects.filter(poll_status=models.Poll.ST_PUBLISHED, access_type=models.Poll.AT_PRIVATE, allowed_users=request.user, is_finished=False).exclude(user=request.user)
    if (public_polls or restricted_polls or private_polls):
        available_polls = list(chain(public_polls, private_polls, restricted_polls))
    else:
        available_polls = None
    responses = models.Response.objects.filter(user=request.user)
    completed_polls = responses.exclude(is_completed=False)
    # NOTE(review): exclude(is_completed=True, poll__is_finished=False) removes
    # only rows matching BOTH conditions -- confirm this is the intended
    # definition of "ongoing" (it keeps completed responses to finished polls).
    ongoing_polls = responses.exclude(is_completed=True, poll__is_finished=False)
    pprint("available_polls before: ", stream=sys.stderr)
    pprint(available_polls, stream=sys.stderr)
    # Hide polls the user has already started or completed.
    if responses.exists() and available_polls is not None:
        for response in responses:
            if response.poll in available_polls:
                available_polls.remove(response.poll)
    pprint("available_polls after: ", stream=sys.stderr)
    pprint(available_polls, stream=sys.stderr)
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/home.html',
            {'available_polls':available_polls,
            'username':request.user.username,
            'completed_polls':completed_polls,
            'ongoing_polls':ongoing_polls,
            'error_msg':error_msg,
            'warning_msg':warning_msg,
            'success_msg':success_msg,
            'info_msg':info_msg,
            'home_active':home_active,
            'is_pollster':is_pollster});
@login_required(login_url='/polls/login')
def view_stats(request, poll_id):
    """Render the statistics page for one of the requesting user's polls."""
    mypolls_active = "active"
    css_file = "view_stats.css"
    js_file = "view_stats.js"
    # Permission gate: pollsters only.
    try:
        request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to see the statistics from, does not exists anymore.")
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    questions = models.Question.objects.filter(poll=poll)
    choices = models.Choice.objects.filter(question__in=questions)
    verbatims = models.Verbatim.objects.filter(choice__in=choices)
    # Debug counters (messages kept verbatim from the original code).
    print("Preguntas: %d" % questions.count())
    print("Opciones: %d" % choices.count())
    print("Verbatims: %d" % verbatims.count())
    context = {'poll': poll,
               'username': request.user.username,
               'questions': questions,
               'choices': choices,
               'verbatims': verbatims,
               'css_file': css_file,
               'mypolls_active': mypolls_active,
               'is_pollster': is_pollster,
               'js_file': js_file}
    return render(request, 'polls/view_stats.html', context)
@login_required(login_url='/polls/login')
def account(request):
    """Account page: edit profile data or change the password.

    POST 'submit' == 'Save' updates the profile; any other POST is treated
    as a password-change submission.
    """
    account_active = 'active'
    password_error = None
    user_error = None
    user_form = forms.UserProfileForm(instance=request.user)
    password_form = forms.PasswordChangeForm()
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    if (request.method == 'POST'):
        # NOTE(review): request.POST['submit'] raises KeyError when the key is
        # missing from the POST body -- confirm both forms always send it.
        if (request.POST['submit'] == 'Save'):
            user_form = forms.UserProfileForm(request.POST, instance=request.user)
            if user_form.is_valid():
                user_form.save()
        else:
            password_form = forms.PasswordChangeForm(request.POST)
            if password_form.is_valid():
                old_password = password_form.cleaned_data['old_password']
                password = password_form.cleaned_data['password']
                cpassword = password_form.cleaned_data['confirm_password']
                # The current password must be correct and the two new
                # passwords must agree before anything is changed.
                if not (request.user.check_password(old_password)):
                    password_error = 'Wrong password. Please try again.'
                elif (password != cpassword):
                    password_error = "New passwords don't match. Please try again."
                else:
                    # NOTE(review): the session is not refreshed after
                    # set_password; on Django >= 1.7 the user is logged out
                    # unless update_session_auth_hash() is called -- confirm.
                    request.user.set_password(password)
                    request.user.save()
    return render(request, 'polls/account.html',
            {'user_form':user_form,
            'password_form': password_form,
            'account_active':account_active,
            'username':request.user.username,
            'password_error':password_error,
            'user_error':user_error,
            'is_pollster':is_pollster});
def about(request):
    """Render the about page, picking the template by authentication state."""
    about_active = 'active'
    # Defaults cover the anonymous visitor; overridden below when logged in.
    is_pollster = False
    username = ''
    template = 'polls/non-auth-about.html'
    if request.user.is_authenticated():
        template = 'polls/about.html'
        username = request.user.username
        # Pollster membership only toggles extra navigation entries.
        try:
            request.user.groups.get(name="sys_pollsters")
            is_pollster = True
        except ObjectDoesNotExist:
            is_pollster = False
    return render(request, template,
                  {'about_active': about_active,
                   'is_pollster': is_pollster,
                   'username': username})
def contact(request):
    """Render the contact page, picking the template by authentication state."""
    contact_active = 'active'
    # Defaults cover the anonymous visitor; overridden below when logged in.
    is_pollster = False
    username = ''
    template = 'polls/non-auth-contact.html'
    if request.user.is_authenticated():
        template = 'polls/contact.html'
        username = request.user.username
        # Pollster membership only toggles extra navigation entries.
        try:
            request.user.groups.get(name="sys_pollsters")
            is_pollster = True
        except ObjectDoesNotExist:
            pass
    return render(request, template,
                  {'contact_active': contact_active,
                   'is_pollster': is_pollster,
                   'username': username})
@login_required(login_url='/polls/login')
def export_poll(request, poll_id):
    """Download one of the user's polls as an attached JSON document."""
    # Permission gate: pollsters only.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to export does not exist anymore.")
        return HttpResponseRedirect("/polls/my-polls/%s/" % poll_id)
    exported = JsonResponse(poll.get_json(), safe=False)
    # Serve as a download named after the poll.
    exported['Content-Disposition'] = 'attachment; filename=%s.json' % poll.name
    return exported
@login_required(login_url='/polls/login')
def get_csv_stats(request, poll_id, delimiter=','):
    """Download response statistics for one of the user's polls as CSV.

    ``delimiter`` selects the CSV field separator (default comma).
    Non-pollsters are redirected home; a missing poll yields a 404 page.
    """
    # Only members of sys_pollsters may download statistics.
    try:
        request.user.groups.get(name="sys_pollsters")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        poll_csv = poll.get_responses_csv()
        csv_response = HttpResponse(content_type='text/csv')
        # Quote the filename so names containing spaces or special
        # characters do not break the Content-Disposition header.
        csv_response['Content-Disposition'] = 'attachment; filename="%s_stats.csv"' % poll.name
        writer = csv.writer(csv_response, delimiter=delimiter)
        writer.writerows(poll_csv)
        return csv_response
    except ObjectDoesNotExist:
        return page_not_found(request)
| |
from __future__ import absolute_import, unicode_literals
import datetime
import time
import warnings
from email.header import Header
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.conf import settings
from django.core import signals
from django.core import signing
from django.core.exceptions import SuspiciousOperation
from django.http.cookie import SimpleCookie
from django.utils import six, timezone
from django.utils.encoding import force_bytes, iri_to_uri
from django.utils.http import cookie_date
from django.utils.six.moves import map
class BadHeaderError(ValueError):
    """Raised when a header value contains characters that would allow
    HTTP response-splitting (newlines)."""
    pass
class HttpResponseBase(six.Iterator):
    """
    An HTTP response base class with dictionary-accessed headers.
    This class doesn't handle content. It should not be used directly.
    Use the HttpResponse and StreamingHttpResponse subclasses instead.
    """
    # Default status; subclasses override (404, 500, ...) and __init__ may
    # replace it per-instance via the `status` argument.
    status_code = 200
    def __init__(self, content_type=None, status=None, mimetype=None):
        # _headers is a mapping of the lower-case name to the original case of
        # the header (required for working with legacy systems) and the header
        # value. Both the name of the header and its value are ASCII strings.
        self._headers = {}
        self._charset = settings.DEFAULT_CHARSET
        # Objects with a close() method registered by content setters;
        # closed by close() when the WSGI server finishes the request.
        self._closable_objects = []
        # This parameter is set by the handler. It's necessary to preserve the
        # historical behavior of request_finished.
        self._handler_class = None
        # `mimetype` is a deprecated alias for `content_type`.
        if mimetype:
            warnings.warn("Using mimetype keyword argument is deprecated, use"
                          " content_type instead",
                          DeprecationWarning, stacklevel=2)
            content_type = mimetype
        if not content_type:
            content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
                    self._charset)
        self.cookies = SimpleCookie()
        if status:
            self.status_code = status
        self['Content-Type'] = content_type
    def serialize_headers(self):
        """HTTP headers as a bytestring."""
        headers = [
            ('%s: %s' % (key, value)).encode('us-ascii')
            for key, value in self._headers.values()
        ]
        return b'\r\n'.join(headers)
    # bytes(response) on Python 3 / str(response) on Python 2 both render
    # the serialized header block.
    if six.PY3:
        __bytes__ = serialize_headers
    else:
        __str__ = serialize_headers
    def _convert_to_charset(self, value, charset, mime_encode=False):
        """Converts headers key/value to ascii/latin1 native strings.
        `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
        `value` value can't be represented in the given charset, MIME-encoding
        is applied.
        """
        if not isinstance(value, (bytes, six.text_type)):
            value = str(value)
        try:
            if six.PY3:
                if isinstance(value, str):
                    # Ensure string is valid in given charset
                    value.encode(charset)
                else:
                    # Convert bytestring using given charset
                    value = value.decode(charset)
            else:
                if isinstance(value, str):
                    # Ensure string is valid in given charset
                    value.decode(charset)
                else:
                    # Convert unicode string to given charset
                    value = value.encode(charset)
        except UnicodeError as e:
            if mime_encode:
                # Wrapping in str() is a workaround for #12422 under Python 2.
                value = str(Header(value, 'utf-8').encode())
            else:
                e.reason += ', HTTP response headers must be in %s format' % charset
                raise
        # Reject embedded newlines to prevent HTTP response-splitting.
        if str('\n') in value or str('\r') in value:
            raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
        return value
    def __setitem__(self, header, value):
        header = self._convert_to_charset(header, 'ascii')
        value = self._convert_to_charset(value, 'latin1', mime_encode=True)
        # Keyed by lower-cased name; original casing preserved in the value.
        self._headers[header.lower()] = (header, value)
    def __delitem__(self, header):
        # Deleting a missing header is a silent no-op.
        try:
            del self._headers[header.lower()]
        except KeyError:
            pass
    def __getitem__(self, header):
        return self._headers[header.lower()][1]
    def __getstate__(self):
        # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we
        # serialise to a string instead
        state = self.__dict__.copy()
        state['cookies'] = str(state['cookies'])
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        # Re-parse the stringified cookies saved by __getstate__.
        self.cookies = SimpleCookie(self.cookies)
    def has_header(self, header):
        """Case-insensitive check for a header."""
        return header.lower() in self._headers
    __contains__ = has_header
    def items(self):
        # (original-case name, value) pairs for every header.
        return self._headers.values()
    def get(self, header, alternate=None):
        # Case-insensitive lookup with a default, mirroring dict.get().
        return self._headers.get(header.lower(), (None, alternate))[1]
    def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
                   domain=None, secure=False, httponly=False):
        """
        Sets a cookie.
        ``expires`` can be:
        - a string in the correct format,
        - a naive ``datetime.datetime`` object in UTC,
        - an aware ``datetime.datetime`` object in any time zone.
        If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
        """
        self.cookies[key] = value
        if expires is not None:
            if isinstance(expires, datetime.datetime):
                if timezone.is_aware(expires):
                    expires = timezone.make_naive(expires, timezone.utc)
                # NOTE: expires.utcnow() is datetime.datetime.utcnow()
                # reached through the instance — "now", not the expiry.
                delta = expires - expires.utcnow()
                # Add one second so the date matches exactly (a fraction of
                # time gets lost between converting to a timedelta and
                # then the date string).
                delta = delta + datetime.timedelta(seconds=1)
                # Just set max_age - the max_age logic will set expires.
                expires = None
                max_age = max(0, delta.days * 86400 + delta.seconds)
            else:
                self.cookies[key]['expires'] = expires
        if max_age is not None:
            self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if hasn't been already.
            if not expires:
                self.cookies[key]['expires'] = cookie_date(time.time() +
                                                           max_age)
        if path is not None:
            self.cookies[key]['path'] = path
        if domain is not None:
            self.cookies[key]['domain'] = domain
        if secure:
            self.cookies[key]['secure'] = True
        if httponly:
            self.cookies[key]['httponly'] = True
    def set_signed_cookie(self, key, value, salt='', **kwargs):
        # Sign the value (key folded into the salt) before storing it.
        value = signing.get_cookie_signer(salt=key + salt).sign(value)
        return self.set_cookie(key, value, **kwargs)
    def delete_cookie(self, key, path='/', domain=None):
        # Expire the cookie in the past so clients drop it immediately.
        self.set_cookie(key, max_age=0, path=path, domain=domain,
                        expires='Thu, 01-Jan-1970 00:00:00 GMT')
    # Common methods used by subclasses
    def make_bytes(self, value):
        """Turn a value into a bytestring encoded in the output charset."""
        # Per PEP 3333, this response body must be bytes. To avoid returning
        # an instance of a subclass, this function returns `bytes(value)`.
        # This doesn't make a copy when `value` already contains bytes.
        # If content is already encoded (eg. gzip), assume bytes.
        if self.has_header('Content-Encoding'):
            return bytes(value)
        # Handle string types -- we can't rely on force_bytes here because:
        # - under Python 3 it attemps str conversion first
        # - when self._charset != 'utf-8' it re-encodes the content
        if isinstance(value, bytes):
            return bytes(value)
        if isinstance(value, six.text_type):
            return bytes(value.encode(self._charset))
        # Handle non-string types (#16494)
        return force_bytes(value, self._charset)
    def __iter__(self):
        return self
    def __next__(self):
        # Subclasses must define self._iterator for this function.
        return self.make_bytes(next(self._iterator))
    # These methods partially implement the file-like object interface.
    # See http://docs.python.org/lib/bltin-file-objects.html
    # The WSGI server must call this method upon completion of the request.
    # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
    def close(self):
        # Best-effort close of everything registered by the content setters.
        for closable in self._closable_objects:
            try:
                closable.close()
            except Exception:
                pass
        signals.request_finished.send(sender=self._handler_class)
    def write(self, content):
        raise Exception("This %s instance is not writable" % self.__class__.__name__)
    def flush(self):
        pass
    def tell(self):
        raise Exception("This %s instance cannot tell its position" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
    """
    An HTTP response class with a string as content.
    This content that can be read, appended to or replaced.
    """
    streaming = False
    def __init__(self, content='', *args, **kwargs):
        super(HttpResponse, self).__init__(*args, **kwargs)
        # Content is a bytestring. See the `content` property methods.
        self.content = content
    def serialize(self):
        """Full HTTP message, including headers, as a bytestring."""
        return self.serialize_headers() + b'\r\n\r\n' + self.content
    # bytes(response) / str(response) render the whole message.
    if six.PY3:
        __bytes__ = serialize
    else:
        __str__ = serialize
    def _consume_content(self):
        # If the response was instantiated with an iterator, when its content
        # is accessed, the iterator is going be exhausted and the content
        # loaded in memory. At this point, it's better to abandon the original
        # iterator and save the content for later reuse. This is a temporary
        # solution. See the comment in __iter__ below for the long term plan.
        if self._base_content_is_iter:
            self.content = b''.join(self.make_bytes(e) for e in self._container)
    @property
    def content(self):
        self._consume_content()
        return b''.join(self.make_bytes(e) for e in self._container)
    @content.setter
    def content(self, value):
        # Iterables (except strings/bytes) are stored as-is and marked so
        # _consume_content() knows they may be exhausted on first access.
        if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)):
            self._container = value
            self._base_content_is_iter = True
            if hasattr(value, 'close'):
                self._closable_objects.append(value)
        else:
            self._container = [value]
            self._base_content_is_iter = False
    def __iter__(self):
        # Raise a deprecation warning only if the content wasn't consumed yet,
        # because the response may be intended to be streamed.
        # Once the deprecation completes, iterators should be consumed upon
        # assignment rather than upon access. The _consume_content method
        # should be removed. See #6527.
        if self._base_content_is_iter:
            warnings.warn(
                'Creating streaming responses with `HttpResponse` is '
                'deprecated. Use `StreamingHttpResponse` instead '
                'if you need the streaming behavior.',
                DeprecationWarning, stacklevel=2)
        if not hasattr(self, '_iterator'):
            self._iterator = iter(self._container)
        return self
    def write(self, content):
        # Appending forces any lazy iterator content into memory first.
        self._consume_content()
        self._container.append(content)
    def tell(self):
        self._consume_content()
        return len(self.content)
class StreamingHttpResponse(HttpResponseBase):
    """
    A streaming HTTP response class with an iterator as content.
    This should only be iterated once, when the response is streamed to the
    client. However, it can be appended to or replaced with a new iterator
    that wraps the original content (or yields entirely new content).
    """
    streaming = True
    def __init__(self, streaming_content=(), *args, **kwargs):
        super(StreamingHttpResponse, self).__init__(*args, **kwargs)
        # `streaming_content` should be an iterable of bytestrings.
        # See the `streaming_content` property methods.
        self.streaming_content = streaming_content
    @property
    def content(self):
        # Streaming responses deliberately have no `content`; accessing it
        # is a programming error.
        raise AttributeError("This %s instance has no `content` attribute. "
                "Use `streaming_content` instead." % self.__class__.__name__)
    @property
    def streaming_content(self):
        # Lazily encode each chunk as it is streamed out.
        return map(self.make_bytes, self._iterator)
    @streaming_content.setter
    def streaming_content(self, value):
        # Ensure we can never iterate on "value" more than once.
        self._iterator = iter(value)
        if hasattr(value, 'close'):
            self._closable_objects.append(value)
class CompatibleStreamingHttpResponse(StreamingHttpResponse):
    """
    This class maintains compatibility with middleware that doesn't know how
    to handle the content of a streaming response by exposing a `content`
    attribute that will consume and cache the content iterator when accessed.
    These responses will stream only if no middleware attempts to access the
    `content` attribute. Otherwise, they will behave like a regular response,
    and raise a `DeprecationWarning`.
    """
    @property
    def content(self):
        warnings.warn(
            'Accessing the `content` attribute on a streaming response is '
            'deprecated. Use the `streaming_content` attribute instead.',
            DeprecationWarning, stacklevel=2)
        # Exhaust the iterator, then re-seed streaming_content so the body
        # can still be served after this access.
        content = b''.join(self)
        self.streaming_content = [content]
        return content
    @content.setter
    def content(self, content):
        warnings.warn(
            'Accessing the `content` attribute on a streaming response is '
            'deprecated. Use the `streaming_content` attribute instead.',
            DeprecationWarning, stacklevel=2)
        self.streaming_content = [content]
class HttpResponseRedirectBase(HttpResponse):
    """Common base for redirect responses: validates the target's scheme
    and records the destination in the ``Location`` header."""
    allowed_schemes = ['http', 'https', 'ftp']
    def __init__(self, redirect_to, *args, **kwargs):
        scheme = urlparse(redirect_to).scheme
        # Refuse redirects to unexpected protocols (e.g. javascript:).
        if scheme and scheme not in self.allowed_schemes:
            raise SuspiciousOperation(
                "Unsafe redirect to URL with protocol '%s'" % scheme)
        super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
        self['Location'] = iri_to_uri(redirect_to)
    # Convenience read-only accessor for the redirect target.
    url = property(lambda self: self['Location'])
class HttpResponseRedirect(HttpResponseRedirectBase):
    """302 Found — temporary redirect."""
    status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
    """301 Moved Permanently — permanent redirect."""
    status_code = 301
class HttpResponseNotModified(HttpResponse):
    """304 Not Modified — must carry no body and no Content-Type."""
    status_code = 304
    def __init__(self, *args, **kwargs):
        super(HttpResponseNotModified, self).__init__(*args, **kwargs)
        # A 304 response must not advertise a content type.
        del self['content-type']
    @HttpResponse.content.setter
    def content(self, value):
        # Guard against accidentally attaching a body to a 304.
        if value:
            raise AttributeError("You cannot set content to a 304 (Not Modified) response")
        self._container = []
        self._base_content_is_iter = False
class HttpResponseBadRequest(HttpResponse):
    """400 Bad Request."""
    status_code = 400
class HttpResponseNotFound(HttpResponse):
    """404 Not Found."""
    status_code = 404
class HttpResponseForbidden(HttpResponse):
    """403 Forbidden."""
    status_code = 403
class HttpResponseNotAllowed(HttpResponse):
    """405 Method Not Allowed — advertises the permitted methods in the
    ``Allow`` header as required by the HTTP spec."""
    status_code = 405
    def __init__(self, permitted_methods, *args, **kwargs):
        super(HttpResponseNotAllowed, self).__init__(*args, **kwargs)
        self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
    """410 Gone."""
    status_code = 410
class HttpResponseServerError(HttpResponse):
    """500 Internal Server Error."""
    status_code = 500
class Http404(Exception):
    """Raised by view code to signal a 404; converted to an
    HttpResponseNotFound by the request handler."""
    pass
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from six.moves.urllib import parse as urlparse
from webtest.app import AppError
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers.v1 import cluster_template as api_cluster_template
from magnum.common import exception
from magnum.common import policy as magnum_policy
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestClusterTemplateObject(base.TestCase):
    """Unit checks for the ClusterTemplate API object's default values."""
    def test_cluster_template_init(self):
        """Fields omitted from the POST data fall back to their defaults."""
        post_data = apiutils.cluster_template_post_data()
        omitted_fields = ('image_id', 'registry_enabled', 'tls_disabled',
                          'public', 'server_type', 'master_lb_enabled',
                          'floating_ip_enabled', 'hidden')
        for field in omitted_fields:
            del post_data[field]
        tmpl = api_cluster_template.ClusterTemplate(**post_data)
        self.assertEqual(wtypes.Unset, tmpl.image_id)
        self.assertFalse(tmpl.registry_enabled)
        self.assertFalse(tmpl.tls_disabled)
        self.assertFalse(tmpl.public)
        self.assertEqual('vm', tmpl.server_type)
        self.assertFalse(tmpl.master_lb_enabled)
        self.assertTrue(tmpl.floating_ip_enabled)
        self.assertFalse(tmpl.hidden)
class TestListClusterTemplate(api_base.FunctionalTest):
    """Functional tests for GET /clustertemplates (list, detail, by id/name,
    pagination and admin all-projects access)."""
    # Attributes every serialized cluster template is expected to expose.
    _cluster_template_attrs = ('name', 'apiserver_port', 'network_driver',
                               'coe', 'flavor_id', 'fixed_network',
                               'dns_nameserver', 'http_proxy',
                               'docker_volume_size', 'server_type',
                               'cluster_distro', 'external_network_id',
                               'image_id', 'registry_enabled', 'no_proxy',
                               'keypair_id', 'https_proxy', 'tls_disabled',
                               'public', 'labels', 'master_flavor_id',
                               'volume_driver', 'insecure_registry', 'hidden',
                               'tags',)
    def test_empty(self):
        """Listing with no templates yields an empty collection."""
        response = self.get_json('/clustertemplates')
        self.assertEqual([], response['clustertemplates'])
    def test_one(self):
        """A created template shows up in the list with expected attrs."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates')
        self.assertEqual(cluster_template.uuid,
                         response['clustertemplates'][0]["uuid"])
        self._verify_attrs(self._cluster_template_attrs,
                           response['clustertemplates'][0])
    def test_get_one(self):
        """GET by UUID returns the full template."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/%s' %
                                 cluster_template['uuid'])
        self.assertEqual(cluster_template.uuid, response['uuid'])
        self._verify_attrs(self._cluster_template_attrs, response)
    def test_get_one_by_name(self):
        """GET by name resolves to the same template as by UUID."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/%s' %
                                 cluster_template['name'])
        self.assertEqual(cluster_template.uuid, response['uuid'])
        self._verify_attrs(self._cluster_template_attrs, response)
    def test_get_one_by_name_not_found(self):
        """GET by unknown name is a 404 with an error payload."""
        response = self.get_json(
            '/clustertemplates/not_found',
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
    def test_get_one_by_uuid(self):
        """GET by a freshly generated UUID returns that template."""
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid)
        response = self.get_json(
            '/clustertemplates/%s' % temp_uuid)
        self.assertEqual(temp_uuid, response['uuid'])
    def test_get_one_by_uuid_not_found(self):
        """GET by an unknown UUID is a 404 with an error payload."""
        temp_uuid = uuidutils.generate_uuid()
        response = self.get_json(
            '/clustertemplates/%s' % temp_uuid,
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_one_by_uuid_admin(self, mock_context, mock_policy):
        """An admin can fetch a template belonging to another project."""
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid,
                                               project_id=temp_uuid)
        self.context.is_admin = True
        response = self.get_json(
            '/clustertemplates/%s' % temp_uuid)
        self.assertEqual(temp_uuid, response['uuid'])
    def test_get_one_by_name_multiple_cluster_template(self):
        """GET by an ambiguous name (duplicates) is a 409 Conflict."""
        obj_utils.create_test_cluster_template(
            self.context, name='test_clustertemplate',
            uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster_template(
            self.context, name='test_clustertemplate',
            uuid=uuidutils.generate_uuid())
        response = self.get_json(
            '/clustertemplates/test_clustertemplate',
            expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
    def test_get_all_with_pagination_marker(self):
        """limit+marker returns only the entries after the marker."""
        bm_list = []
        for id_ in range(4):
            cluster_template = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(cluster_template)
        response = self.get_json('/clustertemplates?limit=3&marker=%s'
                                 % bm_list[2].uuid)
        self.assertEqual(1, len(response['clustertemplates']))
        self.assertEqual(bm_list[-1].uuid,
                         response['clustertemplates'][0]['uuid'])
    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_with_all_projects(self, mock_context, mock_policy):
        """An admin listing sees templates across all projects."""
        for id_ in range(4):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, project_id=id_,
                uuid=uuidutils.generate_uuid())
        self.context.is_admin = True
        response = self.get_json('/clustertemplates')
        self.assertEqual(4, len(response['clustertemplates']))
    def test_detail(self):
        """The detail listing exposes the full attribute set."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/detail')
        self.assertEqual(cluster_template.uuid,
                         response['clustertemplates'][0]["uuid"])
        self._verify_attrs(self._cluster_template_attrs,
                           response['clustertemplates'][0])
    def test_detail_with_pagination_marker(self):
        """Detail listing honors limit+marker pagination."""
        bm_list = []
        for id_ in range(4):
            cluster_template = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(cluster_template)
        response = self.get_json('/clustertemplates/detail?limit=3&marker=%s'
                                 % bm_list[2].uuid)
        self.assertEqual(1, len(response['clustertemplates']))
        self.assertEqual(bm_list[-1].uuid,
                         response['clustertemplates'][0]['uuid'])
        self._verify_attrs(self._cluster_template_attrs,
                           response['clustertemplates'][0])
    def test_detail_against_single(self):
        """/detail is a collection-only endpoint; 404 on a single item."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/%s/detail' %
                                 cluster_template['uuid'],
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
    def test_many(self):
        """Listing returns every created template exactly once."""
        bm_list = []
        for id_ in range(5):
            cluster_template = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(cluster_template.uuid)
        response = self.get_json('/clustertemplates')
        self.assertEqual(len(bm_list), len(response['clustertemplates']))
        uuids = [bm['uuid'] for bm in response['clustertemplates']]
        self.assertEqual(sorted(bm_list), sorted(uuids))
    def test_links(self):
        """Each template carries self/bookmark links referencing its UUID."""
        uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid)
        response = self.get_json('/clustertemplates/%s' % uuid)
        self.assertIn('links', response.keys())
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        for link in response['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'],
                                               bookmark=bookmark))
    def test_collection_links(self):
        """An explicit limit produces a `next` link at the last entry."""
        for id_ in range(5):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
        response = self.get_json('/clustertemplates/?limit=3')
        self.assertEqual(3, len(response['clustertemplates']))
        next_marker = response['clustertemplates'][-1]['uuid']
        self.assertIn(next_marker, response['next'])
    def test_collection_links_default_limit(self):
        """The configured max_limit caps the page size when none is given."""
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
        response = self.get_json('/clustertemplates')
        self.assertEqual(3, len(response['clustertemplates']))
        next_marker = response['clustertemplates'][-1]['uuid']
        self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
    def setUp(self):
        """Mock the OS-resource validator and create a baseline template."""
        super(TestPatch, self).setUp()
        p = mock.patch.object(attr_validator, 'validate_os_resources')
        self.mock_valid_os_res = p.start()
        self.addCleanup(p.stop)
        # Fixture template every test can patch against.
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context,
            name='cluster_model_example_A',
            image_id='nerdherd',
            apiserver_port=8080,
            fixed_network='private',
            flavor_id='m1.magnum',
            master_flavor_id='m1.magnum',
            external_network_id='public',
            keypair_id='test',
            volume_driver='rexray',
            public=False,
            docker_volume_size=20,
            coe='swarm',
            labels={'key1': 'val1', 'key2': 'val2'},
            hidden=False
        )
    def test_update_not_found(self):
        """Patching a non-existent template is a 404 with error payload."""
        uuid = uuidutils.generate_uuid()
        response = self.patch_json('/clustertemplates/%s' % uuid,
                                   [{'path': '/name',
                                     'value': 'cluster_model_example_B',
                                     'op': 'add'}],
                                   expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
    def test_update_cluster_template_with_cluster(self):
        """Changing a restricted field on an in-use template is a 400."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        obj_utils.create_test_cluster(
            self.context, cluster_template_id=cluster_template.uuid)
        response = self.patch_json('/clustertemplates/%s' %
                                   cluster_template.uuid,
                                   [{'path': '/network_driver',
                                     'value': 'flannel',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
        # The error detail names the offending template.
        self.assertIn(cluster_template.uuid,
                      response.json['errors'][0]['detail'])
    def test_update_cluster_template_name_with_cluster(self):
        """Renaming is allowed even while a cluster uses the template."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        obj_utils.create_test_cluster(
            self.context, cluster_template_id=cluster_template.uuid)
        response = self.patch_json('/clustertemplates/%s' %
                                   cluster_template.uuid,
                                   [{'path': '/name',
                                     'value': 'cluster_model_example_B',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual(200, response.status_int)
    @mock.patch.object(magnum_policy, 'enforce')
    def test_update_public_cluster_template_success(self, mock_policy):
        """With policy allowing it, publishing the template persists."""
        mock_policy.return_value = True
        response = self.patch_json('/clustertemplates/%s' %
                                   self.cluster_template.uuid,
                                   [{'path': '/public', 'value': True,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        response = self.get_json('/clustertemplates/%s' %
                                 self.cluster_template.uuid)
        self.assertTrue(response['public'])
    @mock.patch.object(magnum_policy, 'enforce')
    def test_update_public_cluster_template_fail(self, mock_policy):
        """Policy denial makes publishing raise an application error."""
        mock_policy.return_value = False
        self.assertRaises(AppError, self.patch_json,
                          '/clustertemplates/%s' % self.cluster_template.uuid,
                          [{'path': '/public', 'value': True,
                            'op': 'replace'}])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_cluster_template_with_cluster_allow_update(self,
mock_policy):
mock_policy.return_value = True
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/public',
'value': True,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(response['public'], True)
    @mock.patch.object(magnum_policy, 'enforce')
    def test_update_hidden_cluster_template_success(self, mock_policy):
        """With policy allowing it, hiding the template persists."""
        mock_policy.return_value = True
        response = self.patch_json('/clustertemplates/%s' %
                                   self.cluster_template.uuid,
                                   [{'path': '/hidden', 'value': True,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        response = self.get_json('/clustertemplates/%s' %
                                 self.cluster_template.uuid)
        self.assertTrue(response['hidden'])
    @mock.patch.object(magnum_policy, 'enforce')
    def test_update_hidden_cluster_template_fail(self, mock_policy):
        """Policy denial makes hiding raise an application error."""
        mock_policy.return_value = False
        self.assertRaises(AppError, self.patch_json,
                          '/clustertemplates/%s' % self.cluster_template.uuid,
                          [{'path': '/hidden', 'value': True,
                            'op': 'replace'}])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_cluster_template_hidden_with_cluster_allow_update(
self, mock_policy):
mock_policy.return_value = True
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/hidden',
'value': True,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(response['hidden'], True)
    def test_update_cluster_template_with_devicemapper(self):
        """Selecting devicemapper storage emits a deprecation warning."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        note = 'deprecated in favor of overlay2'
        with self.assertWarnsRegex(DeprecationWarning, note):
            response = self.patch_json('/clustertemplates/%s' %
                                       cluster_template.uuid,
                                       [{'path': '/docker_storage_driver',
                                         'value': 'devicemapper',
                                         'op': 'replace'}],
                                       expect_errors=True)
            self.assertEqual(200, response.status_int)
def test_update_cluster_template_replace_labels_success(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/labels',
'value': '{\'etcd_volume_size\': \'1\'}',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(response['labels'], {'etcd_volume_size': '1'})
    def test_update_cluster_template_with_cluster_not_allow_update(self):
        """Without the policy override, updating an in-use template is 400."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        obj_utils.create_test_cluster(
            self.context, cluster_template_id=cluster_template.uuid)
        response = self.patch_json('/clustertemplates/%s' %
                                   cluster_template.uuid,
                                   [{'path': '/network_driver',
                                     'value': 'calico',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual(400, response.status_code)
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_singular(self, mock_utcnow):
        """Replacing one field updates it (and updated_at) and nothing else."""
        name = 'cluster_model_example_B'
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        response = self.patch_json('/clustertemplates/%s' %
                                   self.cluster_template.uuid,
                                   [{'path': '/name', 'value': name,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        response = self.get_json('/clustertemplates/%s' %
                                 self.cluster_template.uuid)
        self.assertEqual(name, response['name'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)
        # Assert nothing else was changed
        self.assertEqual(self.cluster_template.uuid, response['uuid'])
        self.assertEqual(self.cluster_template.image_id, response['image_id'])
        self.assertEqual(self.cluster_template.apiserver_port,
                         response['apiserver_port'])
        self.assertEqual(self.cluster_template.fixed_network,
                         response['fixed_network'])
        self.assertEqual(self.cluster_template.network_driver,
                         response['network_driver'])
        self.assertEqual(self.cluster_template.volume_driver,
                         response['volume_driver'])
        self.assertEqual(self.cluster_template.docker_volume_size,
                         response['docker_volume_size'])
        self.assertEqual(self.cluster_template.coe,
                         response['coe'])
        self.assertEqual(self.cluster_template.http_proxy,
                         response['http_proxy'])
        self.assertEqual(self.cluster_template.https_proxy,
                         response['https_proxy'])
        self.assertEqual(self.cluster_template.no_proxy,
                         response['no_proxy'])
        self.assertEqual(self.cluster_template.labels,
                         response['labels'])
    def test_replace_cluster_template_with_no_exist_flavor_id(self):
        """An unknown flavor_id is rejected with a 400."""
        self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa")
        response = self.patch_json('/clustertemplates/%s' %
                                   self.cluster_template.uuid,
                                   [{'path': '/flavor_id', 'value': 'aaa',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['errors'])
    def test_replace_cluster_template_with_no_exist_keypair_id(self):
        """An unknown keypair_id is rejected with a 404."""
        self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa")
        response = self.patch_json('/clustertemplates/%s' %
                                   self.cluster_template.uuid,
                                   [{'path': '/keypair_id', 'value': 'aaa',
                                     'op': 'replace'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(404, response.status_code)
        self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_external_network_id(self):
    """Patching external_network_id to an unknown net must yield HTTP 400."""
    self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
        "aaa")
    patch = [{'path': '/external_network_id', 'value': 'aaa',
              'op': 'replace'}]
    resp = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(400, resp.status_code)
    self.assertTrue(resp.json['errors'])
def test_replace_cluster_template_with_no_exist_image_id(self):
    """Patching image_id to an unknown image must yield HTTP 400."""
    self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa")
    patch = [{'path': '/image_id', 'value': 'aaa', 'op': 'replace'}]
    resp = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(400, resp.status_code)
    self.assertTrue(resp.json['errors'])
def test_create_cluster_template_with_no_os_distro_image(self):
    """Patching image_id to an image lacking os_distro must yield HTTP 400.

    NOTE(review): despite the ``create`` in its name, this exercises the
    PATCH (replace) path; the name is kept to preserve test discovery.
    """
    self.mock_valid_os_res.side_effect = (
        exception.OSDistroFieldNotFound('img'))
    patch = [{'path': '/image_id', 'value': 'img', 'op': 'replace'}]
    resp = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(400, resp.status_code)
    self.assertTrue(resp.json['errors'])
def test_remove_singular(self):
    """A JSON-patch ``remove`` clears one field and nothing else."""
    url = '/clustertemplates/%s' % self.cluster_template.uuid
    before = self.get_json(url)
    self.assertIsNotNone(before['dns_nameserver'])

    resp = self.patch_json(url, [{'path': '/dns_nameserver',
                                  'op': 'remove'}])
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(200, resp.status_code)

    after = self.get_json(url)
    self.assertIsNone(after['dns_nameserver'])
    # Assert nothing else was changed.
    for attr in ('uuid', 'name', 'apiserver_port', 'image_id',
                 'fixed_network', 'network_driver', 'volume_driver',
                 'docker_volume_size', 'coe', 'http_proxy', 'https_proxy',
                 'no_proxy', 'labels'):
        self.assertEqual(getattr(self.cluster_template, attr), after[attr])
def test_remove_non_existent_property_fail(self):
    """Removing an unknown property must be rejected with HTTP 400."""
    patch = [{'path': '/non-existent', 'op': 'remove'}]
    resp = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(400, resp.status_code)
    self.assertTrue(resp.json['errors'])
def test_remove_mandatory_property_fail(self):
    """Every mandatory field must refuse a ``remove`` patch with 400."""
    mandatory_properties = ('/image_id', '/coe',
                            '/external_network_id', '/server_type',
                            '/tls_disabled', '/public',
                            '/registry_enabled',
                            '/cluster_distro', '/network_driver')
    url = '/clustertemplates/%s' % self.cluster_template.uuid
    for prop in mandatory_properties:
        resp = self.patch_json(url, [{'path': prop, 'op': 'remove'}],
                               expect_errors=True)
        self.assertEqual('application/json', resp.content_type)
        self.assertEqual(400, resp.status_code)
        self.assertTrue(resp.json['errors'])
def test_add_root_non_existent(self):
    """Adding an unknown root attribute must be rejected with HTTP 400."""
    patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
    resp = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', resp.content_type)
    self.assertEqual(400, resp.status_int)
    self.assertTrue(resp.json['errors'])
def test_remove_uuid(self):
    """The immutable uuid field must refuse a ``remove`` patch with 400."""
    patch = [{'path': '/uuid', 'op': 'remove'}]
    resp = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual(400, resp.status_int)
    self.assertEqual('application/json', resp.content_type)
    self.assertTrue(resp.json['errors'])
@mock.patch("magnum.common.policy.enforce")
@mock.patch("magnum.common.context.make_context")
def test_update_cluster_template_as_admin(self, mock_context, mock_policy):
    """An admin may patch a template owned by a different project."""
    other_project = uuidutils.generate_uuid()
    obj_utils.create_test_cluster_template(self.context,
                                           uuid=other_project,
                                           project_id=other_project)
    self.context.is_admin = True
    patch = [{'path': '/name', 'value': 'cluster_model_example_B',
              'op': 'replace'}]
    resp = self.patch_json('/clustertemplates/%s' % other_project,
                           patch, expect_errors=True)
    self.assertEqual(200, resp.status_int)
class TestPost(api_base.FunctionalTest):
    """Functional tests for POST /clustertemplates (create).

    ``validate_os_resources`` is mocked out in setUp; individual tests set
    ``side_effect`` on the mock to simulate missing OpenStack resources.
    """

    def setUp(self):
        super(TestPost, self).setUp()
        p = mock.patch.object(attr_validator, 'validate_os_resources')
        self.mock_valid_os_res = p.start()
        self.addCleanup(p.stop)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_cluster_template(self, mock_utcnow,
                                     mock_image_data):
        bdict = apiutils.cluster_template_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
        self.assertEqual(expected_location,
                         urlparse.urlparse(response.location).path)
        self.assertEqual(bdict['uuid'], response.json['uuid'])
        # FIX: membership must be tested against the JSON dict, not the
        # bound ``keys`` method object (which raises TypeError under `in`).
        self.assertNotIn('updated_at', response.json)
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_set_project_id_and_user_id(
            self, mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data()
            self.post_json('/clustertemplates', bdict)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(self.context.project_id,
                             cc_mock.call_args[0][0]['project_id'])
            self.assertEqual(self.context.user_id,
                             cc_mock.call_args[0][0]['user_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_doesnt_contain_id(self,
                                                       mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(image_id='my-image')
            response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['image_id'], response.json['image_id'])
            cc_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', cc_mock.call_args[0][0])

    def _create_model_raises_app_error(self, **kwargs):
        """Post a template built with **kwargs and assert it is rejected.

        Also asserts the rejection happened before any DB write.
        """
        # Create mock for db and image data
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock,\
                mock.patch('magnum.api.attr_validator.validate_image')\
                as mock_image_data:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(**kwargs)
            self.assertRaises(AppError, self.post_json, '/clustertemplates',
                              bdict)
            self.assertFalse(cc_mock.called)

    def test_create_cluster_template_with_invalid_long_string(self):
        fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
                  "dns_nameserver", "keypair_id", "external_network_id",
                  "cluster_distro", "fixed_network", "apiserver_port",
                  "docker_volume_size", "http_proxy", "https_proxy",
                  "no_proxy", "network_driver", "labels", "volume_driver"]
        for field in fields:
            self._create_model_raises_app_error(**{field: 'i' * 256})

    def test_create_cluster_template_with_invalid_empty_string(self):
        fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
                  "dns_nameserver", "keypair_id", "external_network_id",
                  "cluster_distro", "fixed_network", "apiserver_port",
                  "docker_volume_size", "labels", "http_proxy", "https_proxy",
                  "no_proxy", "network_driver", "volume_driver", "coe"]
        for field in fields:
            self._create_model_raises_app_error(**{field: ''})

    def test_create_cluster_template_with_invalid_coe(self):
        self._create_model_raises_app_error(coe='k8s')
        self._create_model_raises_app_error(coe='storm')
        self._create_model_raises_app_error(coe='meson')
        self._create_model_raises_app_error(coe='osomatsu')

    def test_create_cluster_template_with_invalid_docker_volume_size(self):
        self._create_model_raises_app_error(docker_volume_size=-1)
        # devicemapper requires a volume size of at least 3GB.
        self._create_model_raises_app_error(
            docker_volume_size=1,
            docker_storage_driver="devicemapper")
        self._create_model_raises_app_error(
            docker_volume_size=2,
            docker_storage_driver="devicemapper")
        self._create_model_raises_app_error(docker_volume_size='notanint')

    def test_create_cluster_template_with_invalid_dns_nameserver(self):
        self._create_model_raises_app_error(dns_nameserver='1.1.2')
        self._create_model_raises_app_error(dns_nameserver='1.1..1')
        self._create_model_raises_app_error(dns_nameserver='openstack.org')

    def test_create_cluster_template_with_invalid_apiserver_port(self):
        self._create_model_raises_app_error(apiserver_port=-12)
        self._create_model_raises_app_error(apiserver_port=65536)
        self._create_model_raises_app_error(apiserver_port=0)
        self._create_model_raises_app_error(apiserver_port=1023)
        self._create_model_raises_app_error(apiserver_port='not an int')

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_labels(self, mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(
                labels={'key1': 'val1', 'key2': 'val2'})
            response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['labels'],
                             response.json['labels'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_docker_volume_size(self,
                                                             mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(docker_volume_size=99)
            response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['docker_volume_size'],
                             response.json['docker_volume_size'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_overlay(self, mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(
                docker_volume_size=1, docker_storage_driver="overlay")
            # The overlay storage driver must emit a deprecation warning.
            note = 'deprecated in favor of overlay2'
            with self.assertWarnsRegex(DeprecationWarning, note):
                response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['docker_volume_size'],
                             response.json['docker_volume_size'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def _test_create_cluster_template_network_driver_attr(
            self,
            cluster_template_dict,
            cluster_template_config_dict,
            expect_errors,
            mock_image_data):
        """Create a template with the given network_driver config/values.

        Asserts a 400 when ``expect_errors`` is set, otherwise checks the
        effective network_driver (falling back to the configured swarm
        default) and that the record was written exactly once.
        """
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        for k, v in cluster_template_config_dict.items():
            cfg.CONF.set_override(k, v, 'cluster_template')
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            bdict = apiutils.cluster_template_post_data(
                **cluster_template_dict)
            response = self.post_json('/clustertemplates', bdict,
                                      expect_errors=expect_errors)
            if expect_errors:
                self.assertEqual(400, response.status_int)
            else:
                expected_driver = bdict.get('network_driver')
                if not expected_driver:
                    expected_driver = (
                        cfg.CONF.cluster_template.swarm_default_network_driver)
                self.assertEqual(expected_driver,
                                 response.json['network_driver'])
                self.assertEqual(bdict['image_id'],
                                 response.json['image_id'])
                cc_mock.assert_called_once_with(mock.ANY)
                self.assertNotIn('id', cc_mock.call_args[0][0])
                self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

    def test_create_cluster_template_with_network_driver(self):
        cluster_template_dict = {'coe': 'kubernetes',
                                 'network_driver': 'flannel'}
        config_dict = {}    # Default config
        expect_errors_flag = False
        self._test_create_cluster_template_network_driver_attr(
            cluster_template_dict,
            config_dict,
            expect_errors_flag)

    def test_create_cluster_template_with_no_network_driver(self):
        cluster_template_dict = {}
        config_dict = {}
        expect_errors_flag = False
        self._test_create_cluster_template_network_driver_attr(
            cluster_template_dict,
            config_dict,
            expect_errors_flag)

    def test_create_cluster_template_with_network_driver_non_def_config(self):
        cluster_template_dict = {'coe': 'kubernetes',
                                 'network_driver': 'flannel'}
        config_dict = {
            'kubernetes_allowed_network_drivers': ['flannel', 'foo']}
        expect_errors_flag = False
        self._test_create_cluster_template_network_driver_attr(
            cluster_template_dict,
            config_dict,
            expect_errors_flag)

    def test_create_cluster_template_with_invalid_network_driver(self):
        cluster_template_dict = {'coe': 'kubernetes',
                                 'network_driver': 'bad_driver'}
        config_dict = {
            'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']}
        expect_errors_flag = True
        self._test_create_cluster_template_network_driver_attr(
            cluster_template_dict,
            config_dict,
            expect_errors_flag)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_volume_driver(self,
                                                        mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(volume_driver='rexray')
            response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['volume_driver'],
                             response.json['volume_driver'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_no_volume_driver(self,
                                                           mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data()
            response = self.post_json('/clustertemplates', bdict)
            self.assertEqual(bdict['volume_driver'],
                             response.json['volume_driver'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_cluster_template_public_success(self, mock_policy,
                                                    mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_policy.return_value = True
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(public=True)
            response = self.post_json('/clustertemplates', bdict)
            self.assertTrue(response.json['public'])
            mock_policy.assert_called_with(mock.ANY,
                                           "clustertemplate:publish",
                                           None, do_raise=False)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertTrue(cc_mock.call_args[0][0]['public'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_cluster_template_public_fail(self, mock_policy,
                                                 mock_image_data):
        with mock.patch.object(self.dbapi, 'create_cluster_template',
                               wraps=self.dbapi.create_cluster_template):
            # make policy enforcement fail
            mock_policy.return_value = False
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(public=True)
            self.assertRaises(AppError, self.post_json, '/clustertemplates',
                              bdict)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_cluster_template_public_not_set(self, mock_policy,
                                                    mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(public=False)
            response = self.post_json('/clustertemplates', bdict)
            self.assertFalse(response.json['public'])
            # policy enforcement is called only once for enforce_wsgi
            self.assertEqual(1, mock_policy.call_count)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertFalse(cc_mock.call_args[0][0]['public'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_cluster_template_hidden_success(self, mock_policy,
                                                    mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_policy.return_value = True
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(hidden=True)
            response = self.post_json('/clustertemplates', bdict)
            self.assertTrue(response.json['hidden'])
            mock_policy.assert_called_with(mock.ANY,
                                           "clustertemplate:publish",
                                           None, do_raise=False)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertTrue(cc_mock.call_args[0][0]['hidden'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_cluster_template_hidden_fail(self, mock_policy,
                                                 mock_image_data):
        with mock.patch.object(self.dbapi, 'create_cluster_template',
                               wraps=self.dbapi.create_cluster_template):
            # make policy enforcement fail
            mock_policy.return_value = False
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data(hidden=True)
            self.assertRaises(AppError, self.post_json, '/clustertemplates',
                              bdict)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_no_os_distro_image(self,
                                                             mock_image_data):
        mock_image_data.side_effect = exception.OSDistroFieldNotFound('img')
        bdict = apiutils.cluster_template_post_data()
        del bdict['uuid']
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_os_distro_image(self,
                                                          mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        del bdict['uuid']
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(201, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_image_name(self,
                                                     mock_image_data):
        mock_image = {'name': 'mock_name',
                      'os_distro': 'fedora-atomic'}
        mock_image_data.return_value = mock_image
        bdict = apiutils.cluster_template_post_data()
        del bdict['uuid']
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(201, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_no_exist_image_name(self,
                                                              mock_image_data):
        mock_image_data.side_effect = exception.ResourceNotFound('test-img')
        bdict = apiutils.cluster_template_post_data()
        del bdict['uuid']
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(404, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_multi_image_name(self,
                                                           mock_image_data):
        mock_image_data.side_effect = exception.Conflict('Multiple images')
        bdict = apiutils.cluster_template_post_data()
        del bdict['uuid']
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(409, response.status_int)

    def test_create_cluster_template_without_image_id(self):
        bdict = apiutils.cluster_template_post_data()
        del bdict['image_id']
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_without_keypair_id(self,
                                                        mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        del bdict['keypair_id']
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_dns(self,
                                              mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        self.assertEqual(bdict['dns_nameserver'],
                         response.json['dns_nameserver'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_no_exist_keypair(self,
                                                           mock_image_data):
        self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test")
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(404, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_flavor(self,
                                                 mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        self.assertEqual(bdict['flavor_id'],
                         response.json['flavor_id'])
        self.assertEqual(bdict['master_flavor_id'],
                         response.json['master_flavor_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_no_exist_flavor(self,
                                                          mock_image_data):
        self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor")
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_with_external_network(self,
                                                           mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        self.assertEqual(bdict['external_network_id'],
                         response.json['external_network_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_no_exist_external_network(
            self, mock_image_data):
        self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
            "test")
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_cluster_template_without_name(self, mock_image_data):
        with mock.patch.object(self.dbapi, 'create_cluster_template',
                               wraps=self.dbapi.create_cluster_template):
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.cluster_template_post_data()
            bdict.pop('name')
            resp = self.post_json('/clustertemplates', bdict)
            self.assertEqual(201, resp.status_int)
            # A name must have been auto-generated server side.
            self.assertIsNotNone(resp.json['name'])

    def test_create_cluster_with_disabled_driver(self):
        cfg.CONF.set_override('disabled_drivers',
                              ['mesos_ubuntu_v1'],
                              group='drivers')
        bdict = apiutils.cluster_template_post_data(coe="mesos")
        self.assertRaises(AppError, self.post_json, '/clustertemplates',
                          bdict)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_cluster_template_with_multi_dns(self, mock_utcnow,
                                                    mock_image_data):
        bdict = apiutils.cluster_template_post_data(
            dns_nameserver="8.8.8.8,114.114.114.114")
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
        self.assertEqual(expected_location,
                         urlparse.urlparse(response.location).path)
        self.assertEqual(bdict['uuid'], response.json['uuid'])
        # FIX: test membership against the JSON dict, not the bound
        # ``keys`` method object.
        self.assertNotIn('updated_at', response.json)
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)
class TestDelete(api_base.FunctionalTest):
    """Functional tests for DELETE /clustertemplates/<ident>."""

    def test_delete_cluster_template(self):
        ct = obj_utils.create_test_cluster_template(self.context)
        self.delete('/clustertemplates/%s' % ct.uuid)
        resp = self.get_json('/clustertemplates/%s' % ct.uuid,
                             expect_errors=True)
        self.assertEqual(404, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    def test_delete_cluster_template_with_cluster(self):
        # A template that still backs a cluster must not be deletable.
        ct = obj_utils.create_test_cluster_template(self.context)
        obj_utils.create_test_cluster(self.context,
                                      cluster_template_id=ct.uuid)
        resp = self.delete('/clustertemplates/%s' % ct.uuid,
                           expect_errors=True)
        self.assertEqual(400, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])
        self.assertIn(ct.uuid, resp.json['errors'][0]['detail'])

    def test_delete_cluster_template_not_found(self):
        missing = uuidutils.generate_uuid()
        resp = self.delete('/clustertemplates/%s' % missing,
                           expect_errors=True)
        self.assertEqual(404, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    def test_delete_cluster_template_with_name(self):
        ct = obj_utils.create_test_cluster_template(self.context)
        resp = self.delete('/clustertemplates/%s' % ct['name'],
                           expect_errors=True)
        self.assertEqual(204, resp.status_int)

    def test_delete_cluster_template_with_name_not_found(self):
        resp = self.delete('/clustertemplates/not_found',
                           expect_errors=True)
        self.assertEqual(404, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    def test_delete_multiple_cluster_template_by_name(self):
        # Two templates share a name -> deletion by name is ambiguous.
        for _ in range(2):
            obj_utils.create_test_cluster_template(
                self.context, name='test_cluster_template',
                uuid=uuidutils.generate_uuid())
        resp = self.delete('/clustertemplates/test_cluster_template',
                           expect_errors=True)
        self.assertEqual(409, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_delete_cluster_template_as_admin(self, mock_context, mock_policy):
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid,
                                               project_id=temp_uuid)
        self.context.is_admin = True
        resp = self.delete('/clustertemplates/%s' % temp_uuid,
                           expect_errors=True)
        self.assertEqual(204, resp.status_int)
class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest):
    """Verify RBAC policy rules are enforced on each clustertemplate API."""

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        """Deny ``rule`` for this project and assert ``func`` returns 403."""
        self.policy.set_rules({rule: "project:non_fake"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # FIX: assertTrue(a, b) treated ``b`` as the failure message, so the
        # check always passed; assertIn actually verifies the error detail.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "cluster_template:get_all", self.get_json, '/clustertemplates',
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        self._common_policy_check(
            "cluster_template:get", self.get_json,
            '/clustertemplates/%s' % cluster_template.uuid,
            expect_errors=True)

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            "cluster_template:detail", self.get_json,
            '/clustertemplates/%s/detail' % uuidutils.generate_uuid(),
            expect_errors=True)

    def test_policy_disallow_update(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context,
            name='example_A',
            uuid=uuidutils.generate_uuid())
        self._common_policy_check(
            "cluster_template:update", self.patch_json,
            '/clustertemplates/%s' % cluster_template.name,
            [{'path': '/name', 'value': "new_name", 'op': 'replace'}],
            expect_errors=True)

    def test_policy_disallow_create(self):
        bdict = apiutils.cluster_template_post_data(
            name='cluster_model_example_A')
        self._common_policy_check(
            "cluster_template:create", self.post_json, '/clustertemplates',
            bdict, expect_errors=True)

    def test_policy_disallow_delete(self):
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        self._common_policy_check(
            "cluster_template:delete", self.delete,
            '/clustertemplates/%s' % cluster_template.uuid, expect_errors=True)

    def _owner_check(self, rule, func, *args, **kwargs):
        """Restrict ``rule`` to the owner and assert ``func`` returns 403."""
        self.policy.set_rules({rule: "user_id:%(user_id)s"})
        response = func(*args, **kwargs)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # FIX: same assertTrue-with-message bug as _common_policy_check.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_only_owner_get_one(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context,
            user_id='another')
        self._owner_check("cluster_template:get", self.get_json,
                          '/clustertemplates/%s' % cluster_template.uuid,
                          expect_errors=True)

    def test_policy_only_owner_update(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context,
            user_id='another')
        self._owner_check(
            "cluster_template:update", self.patch_json,
            '/clustertemplates/%s' % cluster_template.uuid,
            [{'path': '/name', 'value': "new_name", 'op': 'replace'}],
            expect_errors=True)

    def test_policy_only_owner_delete(self):
        cluster_template = obj_utils.create_test_cluster_template(
            self.context,
            user_id='another')
        self._owner_check(
            "cluster_template:delete", self.delete,
            '/clustertemplates/%s' % cluster_template.uuid,
            expect_errors=True)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox import IsA # noqa
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from horizon_lib.workflows import views
from openstack_horizon import api
from openstack_horizon.test import helpers as test
from openstack_horizon.dashboards.project.loadbalancers import workflows
class LoadBalancerTests(test.TestCase):
    """Tests for the project dashboard Load Balancer (LBaaS v1) panel.

    Covers the index tabs (pools/members/monitors), the add/update
    workflows for pools, VIPs, members and health monitors, the
    pool/monitor association workflows, and the delete table actions.
    All neutron, nova and lbaas API calls are stubbed out with mox.
    """

    class AttributeDict(dict):
        # Minimal helper so fixture dicts can be accessed like objects
        # (e.g. ``server.id``), matching the real API objects' interface.
        def __getattr__(self, attr):
            return self[attr]

        def __setattr__(self, attr, value):
            self[attr] = value

    DASHBOARD = 'project'
    INDEX_URL = reverse_lazy('horizon:%s:loadbalancers:index' % DASHBOARD)

    ADDPOOL_PATH = 'horizon:%s:loadbalancers:addpool' % DASHBOARD
    ADDVIP_PATH = 'horizon:%s:loadbalancers:addvip' % DASHBOARD
    ADDMEMBER_PATH = 'horizon:%s:loadbalancers:addmember' % DASHBOARD
    ADDMONITOR_PATH = 'horizon:%s:loadbalancers:addmonitor' % DASHBOARD

    POOL_DETAIL_PATH = 'horizon:%s:loadbalancers:pooldetails' % DASHBOARD
    VIP_DETAIL_PATH = 'horizon:%s:loadbalancers:vipdetails' % DASHBOARD
    MEMBER_DETAIL_PATH = ('horizon:%s:loadbalancers:memberdetails'
                          % DASHBOARD)
    MONITOR_DETAIL_PATH = ('horizon:%s:loadbalancers:monitordetails'
                           % DASHBOARD)

    UPDATEPOOL_PATH = 'horizon:%s:loadbalancers:updatepool' % DASHBOARD
    UPDATEVIP_PATH = 'horizon:%s:loadbalancers:updatevip' % DASHBOARD
    UPDATEMEMBER_PATH = ('horizon:%s:loadbalancers:updatemember'
                         % DASHBOARD)
    UPDATEMONITOR_PATH = ('horizon:%s:loadbalancers:updatemonitor'
                          % DASHBOARD)

    ADDASSOC_PATH = 'horizon:%s:loadbalancers:addassociation' % DASHBOARD
    DELETEASSOC_PATH = ('horizon:%s:loadbalancers:deleteassociation'
                        % DASHBOARD)

    def set_up_expect(self):
        """Record the mox expectations shared by the index/delete tests."""
        # retrieve pools
        api.lbaas.pool_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        # retrieves members
        api.lbaas.member_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.members.list())
        # retrieves monitors
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id).MultipleTimes() \
            .AndReturn(self.monitors.list())

    def set_up_expect_with_exception(self):
        """Record expectations where every lbaas listing call raises."""
        api.lbaas.pool_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndRaise(self.exceptions.neutron)
        api.lbaas.member_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndRaise(self.exceptions.neutron)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndRaise(self.exceptions.neutron)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_pools(self):
        self.set_up_expect()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL)

        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                     % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon_lib/common/_detail_table.html')
        self.assertEqual(len(res.context['table'].data),
                         len(self.pools.list()))

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_members(self):
        self.set_up_expect()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')

        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                     % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon_lib/common/_detail_table.html')
        self.assertEqual(len(res.context['memberstable_table'].data),
                         len(self.members.list()))

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_monitors(self):
        self.set_up_expect()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')

        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                     % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon_lib/common/_detail_table.html')
        self.assertEqual(len(res.context['monitorstable_table'].data),
                         len(self.monitors.list()))

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_exception_pools(self):
        # On API failure the pools table should render empty, not error.
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL)

        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon_lib/common/_detail_table.html')
        self.assertEqual(len(res.context['table'].data), 0)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_exception_members(self):
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')

        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon_lib/common/_detail_table.html')
        self.assertEqual(len(res.context['memberstable_table'].data), 0)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list')})
    def test_index_exception_monitors(self):
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')

        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon_lib/common/_detail_table.html')
        self.assertEqual(len(res.context['monitorstable_table'].data), 0)

    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported'),
                        api.lbaas: ('pool_create',)})
    def test_add_pool_post(self):
        pool = self.pools.first()
        subnet = self.subnets.first()
        networks = [{'subnets': [subnet, ]}, ]

        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'service-type').AndReturn(True)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
        api.neutron.provider_list(IsA(http.HttpRequest)) \
            .AndReturn(self.providers.list())

        form_data = {'name': pool.name,
                     'description': pool.description,
                     'subnet_id': pool.subnet_id,
                     'protocol': pool.protocol,
                     'lb_method': pool.lb_method,
                     'admin_state_up': pool.admin_state_up}

        api.lbaas.pool_create(
            IsA(http.HttpRequest), **form_data).AndReturn(pool)
        self.mox.ReplayAll()

        res = self.client.post(reverse(self.ADDPOOL_PATH), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported')})
    def test_add_pool_get(self):
        self._test_add_pool_get()

    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported')})
    def test_add_pool_get_provider_list_exception(self):
        self._test_add_pool_get(with_provider_exception=True)

    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'is_extension_supported')})
    def test_add_pool_get_without_service_type_support(self):
        self._test_add_pool_get(with_service_type=False)

    def _test_add_pool_get(self, with_service_type=True,
                           with_provider_exception=False):
        """Drive the add-pool workflow GET under the given provider setup.

        :param with_service_type: whether the 'service-type' extension is
            reported as supported (controls whether providers are listed).
        :param with_provider_exception: make provider_list raise, checking
            the fallback message.
        """
        subnet = self.subnets.first()
        default_provider = self.providers.first()['name']
        networks = [{'subnets': [subnet, ]}, ]

        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'service-type').AndReturn(with_service_type)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
        if with_service_type:
            prov_list = api.neutron.provider_list(IsA(http.HttpRequest))
            if with_provider_exception:
                prov_list.AndRaise(self.exceptions.neutron)
            else:
                prov_list.AndReturn(self.providers.list())
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDPOOL_PATH))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddPool.name)
        expected_objs = ['<AddPoolStep: addpoolaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
        if not with_service_type:
            self.assertNotContains(res, default_provider)
            self.assertContains(res, ('Provider for Load Balancer '
                                      'is not supported'))
        elif with_provider_exception:
            self.assertNotContains(res, default_provider)
            self.assertContains(res, 'No provider is available')
        else:
            self.assertContains(res, default_provider)

    def test_add_vip_post(self):
        self._test_add_vip_post()

    def test_add_vip_post_no_connection_limit(self):
        self._test_add_vip_post(with_conn_limit=False)

    def test_add_vip_post_with_diff_subnet(self):
        self._test_add_vip_post(with_diff_subnet=True)

    @test.create_stubs({api.lbaas: ('pool_get', 'vip_create'),
                        api.neutron: (
                            'network_list_for_tenant', 'subnet_get',)})
    def _test_add_vip_post(self, with_diff_subnet=False, with_conn_limit=True):
        """Exercise the add-VIP workflow POST.

        :param with_diff_subnet: post a VIP subnet different from the
            pool's subnet.
        :param with_conn_limit: include a connection limit in the form.
        """
        vip = self.vips.first()
        subnet = self.subnets.first()
        pool = self.pools.first()
        networks = [{'subnets': [subnet, ]}, ]

        api.lbaas.pool_get(
            IsA(http.HttpRequest), pool.id).MultipleTimes().AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)

        params = {'name': vip.name,
                  'description': vip.description,
                  'pool_id': vip.pool_id,
                  'address': vip.address,
                  'subnet_id': pool.subnet_id,
                  'protocol_port': vip.protocol_port,
                  'protocol': vip.protocol,
                  'session_persistence': vip.session_persistence['type'],
                  'cookie_name': vip.session_persistence['cookie_name'],
                  'admin_state_up': vip.admin_state_up,
                  }
        if with_conn_limit:
            params['connection_limit'] = vip.connection_limit
        if with_diff_subnet:
            params['subnet_id'] = vip.subnet_id

        api.lbaas.vip_create(
            IsA(http.HttpRequest), **params).AndReturn(vip)
        self.mox.ReplayAll()

        form_data = {
            'name': vip.name,
            'description': vip.description,
            'pool_id': vip.pool_id,
            'address': vip.address,
            'subnet_id': pool.subnet_id,
            'protocol_port': vip.protocol_port,
            'protocol': vip.protocol,
            'session_persistence': vip.session_persistence['type'].lower(),
            'cookie_name': vip.session_persistence['cookie_name'],
            'admin_state_up': vip.admin_state_up}
        if with_conn_limit:
            form_data['connection_limit'] = vip.connection_limit
        if with_diff_subnet:
            # Bug fix: the POSTed form must carry the alternate subnet.
            # The original mutated ``params`` here, which was a no-op since
            # the vip_create expectation was recorded before ReplayAll().
            form_data['subnet_id'] = vip.subnet_id

        res = self.client.post(
            reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('pool_get',),
                        api.neutron: (
                            'network_list_for_tenant', 'subnet_get',)})
    def test_add_vip_post_with_error(self):
        # Out-of-range port (65536) and connection limit (-2) must both
        # surface as form errors.
        vip = self.vips.first()
        subnet = self.subnets.first()
        pool = self.pools.first()
        networks = [{'subnets': [subnet, ]}, ]

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)

        self.mox.ReplayAll()

        form_data = {
            'name': vip.name,
            'description': vip.description,
            'pool_id': vip.pool_id,
            'address': vip.address,
            'subnet_id': pool.subnet_id,
            'protocol_port': 65536,
            'protocol': vip.protocol,
            'session_persistence': vip.session_persistence['type'].lower(),
            'cookie_name': vip.session_persistence['cookie_name'],
            'connection_limit': -2,
            'admin_state_up': vip.admin_state_up}

        res = self.client.post(
            reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)

        self.assertFormErrors(res, 2)

    def test_add_vip_get(self):
        self._test_add_vip_get()

    def test_add_vip_get_with_diff_subnet(self):
        self._test_add_vip_get(with_diff_subnet=True)

    @test.create_stubs({api.lbaas: ('pool_get',),
                        api.neutron: (
                            'network_list_for_tenant', 'subnet_get',)})
    def _test_add_vip_get(self, with_diff_subnet=False):
        """Drive the add-VIP workflow GET for the first pool."""
        subnet = self.subnets.first()
        pool = self.pools.first()
        networks = [{'subnets': [subnet, ]}, ]

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)

        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDVIP_PATH, args=(pool.id,)))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddVip.name)
        expected_objs = ['<AddVipStep: addvipaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)

        if with_diff_subnet:
            self.assertNotEqual(networks[0], pool.subnet_id)

    @test.create_stubs({api.lbaas: ('pool_health_monitor_create',)})
    def test_add_monitor_post(self):
        monitor = self.monitors.first()

        form_data = {'type': monitor.type,
                     'delay': monitor.delay,
                     'timeout': monitor.timeout,
                     'max_retries': monitor.max_retries,
                     'http_method': monitor.http_method,
                     'url_path': monitor.url_path,
                     'expected_codes': monitor.expected_codes,
                     'admin_state_up': monitor.admin_state_up}

        api.lbaas.pool_health_monitor_create(
            IsA(http.HttpRequest), **form_data).AndReturn(monitor)
        self.mox.ReplayAll()

        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    def test_add_monitor_post_with_error(self):
        # delay=0, timeout=0 and max_retries=11 are all out of range.
        monitor = self.monitors.first()

        form_data = {'type': monitor.type,
                     'delay': 0,
                     'timeout': 0,
                     'max_retries': 11,
                     'http_method': monitor.http_method,
                     'url_path': monitor.url_path,
                     'expected_codes': monitor.expected_codes,
                     'admin_state_up': monitor.admin_state_up}

        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)

        self.assertFormErrors(res, 3)

    def test_add_monitor_post_with_httpmethod_error(self):
        # For HTTP monitors the method, URL path and expected codes are
        # all required.
        monitor = self.monitors.first()

        form_data = {'type': 'http',
                     'delay': monitor.delay,
                     'timeout': monitor.timeout,
                     'max_retries': monitor.max_retries,
                     'http_method': '',
                     'url_path': '',
                     'expected_codes': '',
                     'admin_state_up': monitor.admin_state_up}

        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)

        self.assertFormErrors(res, 3)

    def test_add_monitor_get(self):
        res = self.client.get(reverse(self.ADDMONITOR_PATH))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddMonitor.name)
        expected_objs = ['<AddMonitorStep: addmonitoraction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)

    def test_add_member_post(self):
        self._test_add_member_post()

    def test_add_member_post_without_weight(self):
        self._test_add_member_post(with_weight=False)

    def test_add_member_post_without_server_list(self):
        self._test_add_member_post(with_server_list=False)

    def test_add_member_post_multiple_ports(self):
        self._test_add_member_post(mult_ports=True)

    @test.create_stubs({api.lbaas: ('pool_list', 'pool_get', 'member_create'),
                        api.neutron: ('port_list',),
                        api.nova: ('server_list',)})
    def _test_add_member_post(self, with_weight=True, with_server_list=True,
                              mult_ports=False):
        """Exercise the add-member workflow POST.

        :param with_weight: include a weight in the form.
        :param with_server_list: pick members from the server list rather
            than entering an address directly.
        :param mult_ports: the chosen server exposes multiple ports.
        """
        member = self.members.first()

        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn(
            [[server1, server2], False])

        if with_server_list:
            pool = self.pools.list()[1]
            port1 = self.AttributeDict(
                {'fixed_ips': [{'ip_address': member.address,
                                'subnet_id':
                                'e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'}],
                 'network_id': '82288d84-e0a5-42ac-95be-e6af08727e42'})

            api.lbaas.pool_get(
                IsA(http.HttpRequest), pool.id).AndReturn(pool)
            if mult_ports:
                port2 = self.AttributeDict(
                    {'fixed_ips': [{'ip_address': '172.16.88.12',
                                    'subnet_id':
                                    '3f7c5d79-ee55-47b0-9213-8e669fb03009'}],
                     'network_id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2'})
                api.neutron.port_list(
                    IsA(http.HttpRequest),
                    device_id=server1.id).AndReturn([port1, port2])
            else:
                api.neutron.port_list(
                    IsA(http.HttpRequest),
                    device_id=server1.id).AndReturn([port1, ])

        form_data = {'pool_id': member.pool_id,
                     'protocol_port': member.protocol_port,
                     'members': [server1.id],
                     'admin_state_up': member.admin_state_up}
        if with_weight:
            form_data['weight'] = member.weight
        if with_server_list:
            form_data['member_type'] = 'server_list'
        else:
            form_data['member_type'] = 'member_address'
            form_data['address'] = member.address
        api.lbaas.member_create(IsA(http.HttpRequest),
                                **form_data).AndReturn(member)
        self.mox.ReplayAll()

        res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('pool_list',),
                        api.nova: ('server_list',)})
    def test_add_member_post_with_error(self):
        member = self.members.first()

        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn([[server1,
                                                                server2],
                                                               False])

        self.mox.ReplayAll()

        # try to create member with invalid protocol port and weight
        form_data = {'pool_id': member.pool_id,
                     'address': member.address,
                     'protocol_port': 65536,
                     'weight': -1,
                     'members': [server1.id],
                     'admin_state_up': member.admin_state_up}

        res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)

        self.assertFormErrors(res, 2)

    @test.create_stubs({api.lbaas: ('pool_list',),
                        api.nova: ('server_list',)})
    def test_add_member_get(self):
        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.nova.server_list(
            IsA(http.HttpRequest)).AndReturn([[server1, server2], False])

        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDMEMBER_PATH))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddMember.name)
        expected_objs = ['<AddMemberStep: addmemberaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)

    @test.create_stubs({api.lbaas: ('pool_get', 'pool_update')})
    def test_update_pool_post(self):
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)

        data = {'name': pool.name,
                'description': pool.description,
                'lb_method': pool.lb_method,
                'admin_state_up': pool.admin_state_up}

        api.lbaas.pool_update(IsA(http.HttpRequest), pool.id, pool=data)\
            .AndReturn(pool)

        self.mox.ReplayAll()

        form_data = data.copy()
        form_data['pool_id'] = pool.id

        res = self.client.post(
            reverse(self.UPDATEPOOL_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('pool_get',)})
    def test_update_pool_get(self):
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)

        self.mox.ReplayAll()

        res = self.client.get(reverse(self.UPDATEPOOL_PATH, args=(pool.id,)))

        self.assertTemplateUsed(res, 'project/loadbalancers/updatepool.html')

    @test.create_stubs({api.lbaas: ('pool_list', 'vip_get',
                                    'vip_update')})
    def test_update_vip_post(self):
        vip = self.vips.first()

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)

        data = {'name': vip.name,
                'description': vip.description,
                'pool_id': vip.pool_id,
                'session_persistence': {},
                'connection_limit': vip.connection_limit,
                'admin_state_up': vip.admin_state_up}

        api.lbaas.vip_update(IsA(http.HttpRequest), vip.id, vip=data)\
            .AndReturn(vip)

        self.mox.ReplayAll()

        form_data = data.copy()
        form_data['vip_id'] = vip.id

        res = self.client.post(
            reverse(self.UPDATEVIP_PATH, args=(vip.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('vip_get', 'pool_list')})
    def test_update_vip_get(self):
        vip = self.vips.first()

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)

        self.mox.ReplayAll()

        res = self.client.get(reverse(self.UPDATEVIP_PATH, args=(vip.id,)))

        self.assertTemplateUsed(res, 'project/loadbalancers/updatevip.html')

    @test.create_stubs({api.lbaas: ('pool_list', 'member_get',
                                    'member_update')})
    def test_update_member_post(self):
        member = self.members.first()

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
            .AndReturn(member)

        data = {'pool_id': member.pool_id,
                'weight': member.weight,
                'admin_state_up': member.admin_state_up}

        api.lbaas.member_update(IsA(http.HttpRequest), member.id, member=data)\
            .AndReturn(member)

        self.mox.ReplayAll()

        form_data = data.copy()
        form_data['member_id'] = member.id

        res = self.client.post(
            reverse(self.UPDATEMEMBER_PATH, args=(member.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('member_get', 'pool_list')})
    def test_update_member_get(self):
        member = self.members.first()

        api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
            .AndReturn(self.pools.list())
        api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
            .AndReturn(member)

        self.mox.ReplayAll()

        res = self.client.get(
            reverse(self.UPDATEMEMBER_PATH, args=(member.id,)))

        self.assertTemplateUsed(res, 'project/loadbalancers/updatemember.html')

    @test.create_stubs({api.lbaas: ('pool_health_monitor_get',
                                    'pool_health_monitor_update')})
    def test_update_monitor_post(self):
        monitor = self.monitors.first()

        api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
            .AndReturn(monitor)

        data = {'delay': monitor.delay,
                'timeout': monitor.timeout,
                'max_retries': monitor.max_retries,
                'admin_state_up': monitor.admin_state_up}

        api.lbaas.pool_health_monitor_update(
            IsA(http.HttpRequest),
            monitor.id, health_monitor=data).AndReturn(monitor)

        self.mox.ReplayAll()

        form_data = data.copy()
        form_data['monitor_id'] = monitor.id

        res = self.client.post(
            reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('pool_health_monitor_get',)})
    def test_update_monitor_get(self):
        monitor = self.monitors.first()

        api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
            .AndReturn(monitor)

        self.mox.ReplayAll()

        res = self.client.get(
            reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)))

        self.assertTemplateUsed(
            res, 'project/loadbalancers/updatemonitor.html')

    @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list',
                                    'pool_monitor_association_create')})
    def test_add_pool_monitor_association_post(self):
        pool = self.pools.list()[1]
        monitors = self.monitors.list()
        monitor = self.monitors.list()[1]

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id).AndReturn(monitors)

        form_data = {'monitor_id': monitor.id,
                     'pool_id': pool.id,
                     'pool_monitors': pool.health_monitors,
                     'pool_name': pool.name}

        api.lbaas.pool_monitor_association_create(
            IsA(http.HttpRequest), **form_data).AndReturn(None)

        self.mox.ReplayAll()

        res = self.client.post(
            reverse(self.ADDASSOC_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list')})
    def test_add_pool_monitor_association_get(self):
        pool = self.pools.first()
        monitors = self.monitors.list()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id).AndReturn(monitors)

        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDASSOC_PATH, args=(pool.id,)))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddPMAssociation.name)
        expected_objs = ['<AddPMAssociationStep: addpmassociationaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)

    @test.create_stubs({api.lbaas: ('pool_get',
                                    'pool_health_monitor_list',
                                    'pool_monitor_association_delete')})
    def test_delete_pool_monitor_association_post(self):
        pool = self.pools.first()
        monitors = self.monitors.list()
        monitor = monitors[0]

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        # NOTE(review): unlike the add-association tests, no tenant_id is
        # passed here — confirm whether that is intentional.
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest)).AndReturn(monitors)

        form_data = {'monitor_id': monitor.id,
                     'pool_id': pool.id,
                     'pool_monitors': pool.health_monitors,
                     'pool_name': pool.name}

        api.lbaas.pool_monitor_association_delete(
            IsA(http.HttpRequest), **form_data).AndReturn(None)

        self.mox.ReplayAll()

        res = self.client.post(
            reverse(self.DELETEASSOC_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))

    @test.create_stubs({api.lbaas: ('pool_get',
                                    'pool_health_monitor_list')})
    def test_delete_pool_monitor_association_get(self):
        pool = self.pools.first()
        monitors = self.monitors.list()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitor_list(
            IsA(http.HttpRequest)).AndReturn(monitors)

        self.mox.ReplayAll()

        res = self.client.get(
            reverse(self.DELETEASSOC_PATH, args=(pool.id,)))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.DeletePMAssociation.name)
        expected_objs = [
            '<DeletePMAssociationStep: deletepmassociationaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list',
                                    'pool_delete')})
    def test_delete_pool(self):
        self.set_up_expect()
        pool = self.pools.first()

        api.lbaas.pool_delete(IsA(http.HttpRequest), pool.id)

        self.mox.ReplayAll()

        form_data = {"action": "poolstable__deletepool__%s" % pool.id}
        res = self.client.post(self.INDEX_URL, form_data)

        self.assertNoFormErrors(res)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list',
                                    'pool_get', 'vip_delete')})
    def test_delete_vip(self):
        self.set_up_expect()
        pool = self.pools.first()
        vip = self.vips.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.vip_delete(IsA(http.HttpRequest), vip.id)

        self.mox.ReplayAll()

        form_data = {"action": "poolstable__deletevip__%s" % pool.id}
        res = self.client.post(self.INDEX_URL, form_data)

        self.assertNoFormErrors(res)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list',
                                    'member_delete')})
    def test_delete_member(self):
        self.set_up_expect()
        member = self.members.first()

        api.lbaas.member_delete(IsA(http.HttpRequest), member.id)

        self.mox.ReplayAll()

        form_data = {"action": "memberstable__deletemember__%s" % member.id}
        res = self.client.post(self.INDEX_URL, form_data)

        self.assertNoFormErrors(res)

    @test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                    'pool_health_monitor_list',
                                    'pool_health_monitor_delete')})
    def test_delete_monitor(self):
        self.set_up_expect()
        monitor = self.monitors.first()

        api.lbaas.pool_health_monitor_delete(IsA(http.HttpRequest), monitor.id)

        self.mox.ReplayAll()

        form_data = {"action": "monitorstable__deletemonitor__%s" % monitor.id}
        res = self.client.post(self.INDEX_URL, form_data)

        self.assertNoFormErrors(res)
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
from abc import ABC, abstractmethod
from enum import Enum
from functools import partial
from io import BytesIO
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.runners.worker.bundle_processor import SynchronousBagRuntimeState
from apache_beam.transforms import userstate
from typing import List, Tuple, Any, Dict, Collection
from pyflink.datastream import ReduceFunction
from pyflink.datastream.functions import AggregateFunction
from pyflink.fn_execution.beam.beam_coders import FlinkCoder
from pyflink.fn_execution.coders import FieldCoder
from pyflink.fn_execution.internal_state import InternalKvState, N, InternalValueState, \
InternalListState, InternalReducingState, InternalMergingState, InternalAggregatingState, \
InternalMapState
class LRUCache(object):
    """Least-recently-used cache of internal runtime states.

    An internal runtime state handles the data under a specific key of a
    "public" state, so the number of such states can grow without bound
    while a streaming task runs.  Bounding them with an LRU policy
    prevents unbounded memory growth; an optional callback is invoked
    for every evicted entry.
    """

    def __init__(self, max_entries, default_entry):
        self._max_entries = max_entries
        self._default_entry = default_entry
        self._cache = collections.OrderedDict()
        self._on_evict = None

    def get(self, key):
        entry = self._cache.pop(key, self._default_entry)
        if entry == self._default_entry:
            return entry
        # Re-insert so the key becomes the most recently used one.
        self._cache[key] = entry
        return entry

    def put(self, key, value):
        cache = self._cache
        cache[key] = value
        while len(cache) > self._max_entries:
            evicted_key, evicted_value = cache.popitem(last=False)
            if self._on_evict is not None:
                self._on_evict(evicted_key, evicted_value)

    def evict(self, key):
        # NOTE: the callback fires even when the key is absent, in which
        # case it receives the default entry.
        removed = self._cache.pop(key, self._default_entry)
        if self._on_evict is not None:
            self._on_evict(key, removed)

    def evict_all(self):
        if self._on_evict is not None:
            for cached_key, cached_value in self._cache.items():
                self._on_evict(cached_key, cached_value)
        self._cache.clear()

    def set_on_evict(self, func):
        self._on_evict = func

    def __len__(self):
        return len(self._cache)

    def __iter__(self):
        return iter(self._cache.values())
class SynchronousKvRuntimeState(InternalKvState, ABC):
    """
    Base Class for partitioned State implementation.
    """

    def __init__(self, name: str, remote_state_backend: 'RemoteKeyedStateBackend'):
        self.namespace = None
        self._internal_state = None
        self.name = name
        self._remote_state_backend = remote_state_backend

    def set_current_namespace(self, namespace: N) -> None:
        if namespace != self.namespace:
            if self.namespace is not None:
                # Park the state of the old namespace in the backend cache
                # before switching, so it can be restored later.
                backend = self._remote_state_backend
                backend.cache_internal_state(backend._encoded_current_key, self)
            self.namespace = namespace
            self._internal_state = None

    @abstractmethod
    def get_internal_state(self):
        pass
class SynchronousBagKvRuntimeState(SynchronousKvRuntimeState, ABC):
    """
    Base Class for State implementation backed by a :class:`SynchronousBagRuntimeState`.
    """

    def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
        super().__init__(name, remote_state_backend)
        self._value_coder = value_coder

    def get_internal_state(self):
        # Lazily fetch the backing bag state; the cached reference is
        # dropped (set to None) whenever the namespace changes.
        state = self._internal_state
        if state is None:
            state = self._remote_state_backend._get_internal_bag_state(
                self.name, self.namespace, self._value_coder)
            self._internal_state = state
        return state
class SynchronousValueRuntimeState(SynchronousBagKvRuntimeState, InternalValueState):
    """
    The runtime ValueState implementation backed by a :class:`SynchronousBagRuntimeState`.
    """

    def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
        super(SynchronousValueRuntimeState, self).__init__(name, value_coder, remote_state_backend)

    def value(self):
        """Return the current value, or None if the state is empty."""
        return next(iter(self.get_internal_state().read()), None)

    def update(self, value) -> None:
        """Overwrite the state with ``value``."""
        internal = self.get_internal_state()
        internal.clear()
        internal.add(value)

    def clear(self) -> None:
        self.get_internal_state().clear()
class SynchronousMergingRuntimeState(SynchronousBagKvRuntimeState, InternalMergingState, ABC):
    """
    Base class for the MergingState implementations.
    """

    def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
        super().__init__(name, value_coder, remote_state_backend)

    def merge_namespaces(self, target: N, sources: Collection[N]) -> None:
        """Merge the state of ``sources`` namespaces into ``target`` via the backend."""
        self._remote_state_backend.merge_namespaces(self, target, sources)
class SynchronousListRuntimeState(SynchronousMergingRuntimeState, InternalListState):
    """
    The runtime ListState implementation backed by a :class:`SynchronousBagRuntimeState`.
    """

    def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
        super().__init__(name, value_coder, remote_state_backend)

    def add(self, v):
        """Append a single element."""
        self.get_internal_state().add(v)

    def get(self):
        """Return an iterable over all elements."""
        return self.get_internal_state().read()

    def add_all(self, values):
        """Append all ``values`` directly into the bag's pending elements."""
        self.get_internal_state()._added_elements.extend(values)

    def update(self, values):
        """Replace the whole list with ``values``."""
        self.clear()
        self.add_all(values)

    def clear(self):
        self.get_internal_state().clear()
class SynchronousReducingRuntimeState(SynchronousMergingRuntimeState, InternalReducingState):
    """
    The runtime ReducingState implementation backed by a :class:`SynchronousBagRuntimeState`.
    """

    def __init__(self,
                 name: str,
                 value_coder,
                 remote_state_backend: 'RemoteKeyedStateBackend',
                 reduce_function: ReduceFunction):
        super().__init__(name, value_coder, remote_state_backend)
        self._reduce_function = reduce_function

    def add(self, v):
        """Combine ``v`` into the state using the reduce function."""
        current_value = self.get()
        # self.get() has materialized the internal state already.
        internal = self._internal_state
        if current_value is None:
            internal.add(v)
        else:
            internal.clear()
            internal.add(self._reduce_function.reduce(current_value, v))

    def get(self):
        """Return the reduced value, or None if the state is empty."""
        return next(iter(self.get_internal_state().read()), None)

    def clear(self):
        self.get_internal_state().clear()
class SynchronousAggregatingRuntimeState(SynchronousMergingRuntimeState, InternalAggregatingState):
    """
    The runtime AggregatingState implementation backed by a :class:`SynchronousBagRuntimeState`.
    """

    def __init__(self,
                 name: str,
                 value_coder,
                 remote_state_backend: 'RemoteKeyedStateBackend',
                 agg_function: AggregateFunction):
        super().__init__(name, value_coder, remote_state_backend)
        self._agg_function = agg_function

    def add(self, v):
        """Fold ``v`` into the accumulator; adding ``None`` clears the state."""
        if v is None:
            self.clear()
            return
        accumulator = self._get_accumulator()
        if accumulator is None:
            accumulator = self._agg_function.create_accumulator()
        accumulator = self._agg_function.add(v, accumulator)
        # _get_accumulator() has materialized the internal state already.
        internal = self._internal_state
        internal.clear()
        internal.add(accumulator)

    def get(self):
        """Return the aggregated result, or None if the state is empty."""
        accumulator = self._get_accumulator()
        return None if accumulator is None else self._agg_function.get_result(accumulator)

    def _get_accumulator(self):
        # The bag holds at most one element: the current accumulator.
        return next(iter(self.get_internal_state().read()), None)

    def clear(self):
        self.get_internal_state().clear()
class CachedMapState(LRUCache):
    """
    An LRU cache of ``map_key -> (exists, value)`` entries for a single map state.

    Besides the entries themselves it tracks the set of keys known to exist and
    whether the cache holds the complete content of the map state.
    """

    def __init__(self, max_entries):
        super(CachedMapState, self).__init__(max_entries, None)
        self._all_data_cached = False
        self._cached_keys = set()

        def on_evict(key, value):
            # Entries cached as (False, None) were never added to _cached_keys,
            # so use discard() rather than remove() to avoid a KeyError when
            # such an entry is evicted.
            self._cached_keys.discard(key)
            # Once any entry has been dropped the cache is no longer complete.
            self._all_data_cached = False
        self.set_on_evict(on_evict)

    def set_all_data_cached(self):
        """Mark the cache as containing the complete content of the map state."""
        self._all_data_cached = True

    def is_all_data_cached(self):
        return self._all_data_cached

    def put(self, key, exists_and_value):
        """Cache ``(exists, value)`` for ``key`` and keep the existing-key set in sync."""
        if exists_and_value[0]:
            self._cached_keys.add(key)
        else:
            # A delete marker must also drop the key from the existing-key set,
            # otherwise a stale key would make check_empty() report non-empty.
            self._cached_keys.discard(key)
        super(CachedMapState, self).put(key, exists_and_value)

    def get_cached_keys(self):
        return self._cached_keys
class IterateType(Enum):
    """What a map-state iterator yields: key/value pairs, only keys, or only values."""
    # The numeric values are part of the wire protocol (written by _iterate_raw).
    ITEMS = 0
    KEYS = 1
    VALUES = 2
class IteratorToken(Enum):
    """
    The token indicates the status of the current underlying iterator. Besides
    these two markers, a token may also be a UUID (raw bytes), which represents
    an iterator cached on the Java side.
    """
    NOT_START = 0
    FINISHED = 1
def create_cache_iterator(cache_dict, iterate_type, iterated_keys=None):
    """
    Generate ``(map_key, result)`` pairs from ``cache_dict`` (a mapping of
    ``map_key -> (exists, value)``), skipping deleted entries and keys listed
    in ``iterated_keys``.  ``result`` is the key, the value, or the
    ``(key, value)`` pair depending on ``iterate_type``.
    """
    skip = [] if iterated_keys is None else iterated_keys
    if iterate_type == IterateType.KEYS:
        select = lambda key, value: key
    elif iterate_type == IterateType.VALUES:
        select = lambda key, value: value
    elif iterate_type == IterateType.ITEMS:
        select = lambda key, value: (key, value)
    else:
        # Raised lazily on first iteration, as in any generator.
        raise Exception("Unsupported iterate type: %s" % iterate_type)
    for key, (exists, value) in cache_dict.items():
        if exists and key not in skip:
            yield key, select(key, value)
class CachingMapStateHandler(object):
    """
    Handles map state requests on top of the raw state channel, maintaining a
    client-side read cache (one :class:`CachedMapState` per map state) and a
    count of the iterators currently cached on the Java side.

    The wire protocol is flag based: the constants below are written/read as
    the first byte(s) of each request/response payload.
    """
    # GET request flags
    GET_FLAG = 0
    ITERATE_FLAG = 1
    CHECK_EMPTY_FLAG = 2
    # GET response flags
    EXIST_FLAG = 0
    IS_NONE_FLAG = 1
    NOT_EXIST_FLAG = 2
    IS_EMPTY_FLAG = 3
    NOT_EMPTY_FLAG = 4
    # APPEND request flags
    DELETE = 0
    SET_NONE = 1
    SET_VALUE = 2

    def __init__(self, caching_state_handler, max_cached_map_key_entries):
        # Share the state cache, raw channel and context of the wrapped handler.
        self._state_cache = caching_state_handler._state_cache
        self._underlying = caching_state_handler._underlying
        self._context = caching_state_handler._context
        # Maximum number of map keys cached per map state.
        self._max_cached_map_key_entries = max_cached_map_key_entries
        # Number of iterators currently cached on the Java side.
        self._cached_iterator_num = 0

    def _get_cache_token(self):
        """Return the active cache token, or None when caching is disabled."""
        if not self._state_cache.is_cache_enabled():
            return None
        if self._context.user_state_cache_token:
            return self._context.user_state_cache_token
        else:
            return self._context.bundle_cache_token

    def blocking_get(self, state_key, map_key, map_key_encoder, map_value_decoder):
        """
        Return ``(exists, value)`` for ``map_key``, consulting the read cache
        first and falling back to a blocking remote request on a miss.
        """
        cache_token = self._get_cache_token()
        if not cache_token:
            # cache disabled / no cache token, request from remote directly
            return self._get_raw(state_key, map_key, map_key_encoder, map_value_decoder)
        # lookup cache first
        cache_state_key = self._convert_to_cache_key(state_key)
        cached_map_state = self._state_cache.get(cache_state_key, cache_token)
        if cached_map_state is None:
            # request from remote
            exists, value = self._get_raw(state_key, map_key, map_key_encoder, map_value_decoder)
            cached_map_state = CachedMapState(self._max_cached_map_key_entries)
            cached_map_state.put(map_key, (exists, value))
            self._state_cache.put(cache_state_key, cache_token, cached_map_state)
            return exists, value
        else:
            cached_value = cached_map_state.get(map_key)
            if cached_value is None:
                if cached_map_state.is_all_data_cached():
                    # The complete state is cached, so a cache miss means "absent".
                    return False, None
                # request from remote
                exists, value = self._get_raw(
                    state_key, map_key, map_key_encoder, map_value_decoder)
                cached_map_state.put(map_key, (exists, value))
                return exists, value
            else:
                return cached_value

    def lazy_iterator(self, state_key, iterate_type, map_key_decoder, map_value_decoder,
                      iterated_keys):
        """
        Return a generator of ``(map_key, element)`` pairs over the remote map
        state, served from the read cache when it holds the complete content.
        Keys listed in ``iterated_keys`` are skipped.
        """
        cache_token = self._get_cache_token()
        if cache_token:
            # check if the data in the read cache can be used
            cache_state_key = self._convert_to_cache_key(state_key)
            cached_map_state = self._state_cache.get(cache_state_key, cache_token)
            if cached_map_state and cached_map_state.is_all_data_cached():
                return create_cache_iterator(
                    cached_map_state._cache, iterate_type, iterated_keys)
        # request from remote
        last_iterator_token = IteratorToken.NOT_START
        current_batch, iterator_token = self._iterate_raw(
            state_key, iterate_type,
            last_iterator_token,
            map_key_decoder,
            map_value_decoder)
        if cache_token and \
                iterator_token == IteratorToken.FINISHED and \
                iterate_type != IterateType.KEYS and \
                self._max_cached_map_key_entries >= len(current_batch):
            # Special case: all the data of the map state is contained in current batch,
            # and can be stored in the cached map state.
            cached_map_state = CachedMapState(self._max_cached_map_key_entries)
            cache_state_key = self._convert_to_cache_key(state_key)
            for key, value in current_batch.items():
                cached_map_state.put(key, (True, value))
            cached_map_state.set_all_data_cached()
            self._state_cache.put(cache_state_key, cache_token, cached_map_state)
        return self._lazy_remote_iterator(
            state_key,
            iterate_type,
            map_key_decoder,
            map_value_decoder,
            iterated_keys,
            iterator_token,
            current_batch)

    def _lazy_remote_iterator(
            self,
            state_key,
            iterate_type,
            map_key_decoder,
            map_value_decoder,
            iterated_keys,
            iterator_token,
            current_batch):
        """
        Generator that yields the entries of ``current_batch`` and then keeps
        fetching further batches from the remote side until the iterator token
        signals FINISHED.
        """
        if iterate_type == IterateType.KEYS:
            while True:
                for key in current_batch:
                    if key in iterated_keys:
                        continue
                    yield key, key
                if iterator_token == IteratorToken.FINISHED:
                    break
                current_batch, iterator_token = self._iterate_raw(
                    state_key,
                    iterate_type,
                    iterator_token,
                    map_key_decoder,
                    map_value_decoder)
        elif iterate_type == IterateType.VALUES:
            while True:
                for key, value in current_batch.items():
                    if key in iterated_keys:
                        continue
                    yield key, value
                if iterator_token == IteratorToken.FINISHED:
                    break
                current_batch, iterator_token = self._iterate_raw(
                    state_key,
                    iterate_type,
                    iterator_token,
                    map_key_decoder,
                    map_value_decoder)
        elif iterate_type == IterateType.ITEMS:
            while True:
                for key, value in current_batch.items():
                    if key in iterated_keys:
                        continue
                    yield key, (key, value)
                if iterator_token == IteratorToken.FINISHED:
                    break
                current_batch, iterator_token = self._iterate_raw(
                    state_key,
                    iterate_type,
                    iterator_token,
                    map_key_decoder,
                    map_value_decoder)
        else:
            raise Exception("Unsupported iterate type: %s" % iterate_type)

    def extend(self, state_key, items: List[Tuple[int, Any, Any]],
               map_key_encoder, map_value_encoder):
        """
        Apply a batch of (request_flag, map_key, map_value) mutations remotely,
        mirroring them into the read cache first.  Returns the pending request.
        """
        cache_token = self._get_cache_token()
        if cache_token:
            # Cache lookup
            cache_state_key = self._convert_to_cache_key(state_key)
            cached_map_state = self._state_cache.get(cache_state_key, cache_token)
            if cached_map_state is None:
                cached_map_state = CachedMapState(self._max_cached_map_key_entries)
                self._state_cache.put(cache_state_key, cache_token, cached_map_state)
            for request_flag, map_key, map_value in items:
                if request_flag == self.DELETE:
                    cached_map_state.put(map_key, (False, None))
                elif request_flag == self.SET_NONE:
                    cached_map_state.put(map_key, (True, None))
                elif request_flag == self.SET_VALUE:
                    cached_map_state.put(map_key, (True, map_value))
                else:
                    raise Exception("Unknown flag: " + str(request_flag))
        return self._append_raw(
            state_key,
            items,
            map_key_encoder,
            map_value_encoder)

    def check_empty(self, state_key):
        """Return True if the map state is empty, answering from the cache if possible."""
        cache_token = self._get_cache_token()
        if cache_token:
            # Cache lookup
            cache_state_key = self._convert_to_cache_key(state_key)
            cached_map_state = self._state_cache.get(cache_state_key, cache_token)
            if cached_map_state is not None:
                if cached_map_state.is_all_data_cached() and \
                        len(cached_map_state.get_cached_keys()) == 0:
                    return True
                elif len(cached_map_state.get_cached_keys()) > 0:
                    return False
        # The cache cannot decide; ask the remote side.
        return self._check_empty_raw(state_key)

    def clear(self, state_key):
        """Clear the remote map state and the corresponding read cache entry."""
        self.clear_read_cache(state_key)
        return self._underlying.clear(state_key)

    def clear_read_cache(self, state_key):
        """Evict the read cache entry of ``state_key`` (if caching is enabled)."""
        cache_token = self._get_cache_token()
        if cache_token:
            cache_key = self._convert_to_cache_key(state_key)
            self._state_cache.evict(cache_key, cache_token)

    def get_cached_iterators_num(self):
        # Number of iterators currently cached on the Java side.
        return self._cached_iterator_num

    def _inc_cached_iterators_num(self):
        self._cached_iterator_num += 1

    def _dec_cached_iterators_num(self):
        self._cached_iterator_num -= 1

    def reset_cached_iterators_num(self):
        self._cached_iterator_num = 0

    def _check_empty_raw(self, state_key):
        """Issue a CHECK_EMPTY request and decode the single response flag."""
        output_stream = coder_impl.create_OutputStream()
        output_stream.write_byte(self.CHECK_EMPTY_FLAG)
        continuation_token = output_stream.get()
        data, response_token = self._underlying.get_raw(state_key, continuation_token)
        if data[0] == self.IS_EMPTY_FLAG:
            return True
        elif data[0] == self.NOT_EMPTY_FLAG:
            return False
        else:
            raise Exception("Unknown response flag: " + str(data[0]))

    def _get_raw(self, state_key, map_key, map_key_encoder, map_value_decoder):
        """Issue a GET request for ``map_key`` and return ``(exists, value)``."""
        output_stream = coder_impl.create_OutputStream()
        output_stream.write_byte(self.GET_FLAG)
        map_key_encoder(map_key, output_stream)
        continuation_token = output_stream.get()
        data, response_token = self._underlying.get_raw(state_key, continuation_token)
        input_stream = coder_impl.create_InputStream(data)
        result_flag = input_stream.read_byte()
        if result_flag == self.EXIST_FLAG:
            return True, map_value_decoder(input_stream)
        elif result_flag == self.IS_NONE_FLAG:
            return True, None
        elif result_flag == self.NOT_EXIST_FLAG:
            return False, None
        else:
            raise Exception("Unknown response flag: " + str(result_flag))

    def _iterate_raw(self, state_key, iterate_type, iterator_token,
                     map_key_decoder, map_value_decoder):
        """
        Fetch one batch from the remote iterator and return
        ``(current_batch, new_iterator_token)``.  The batch is a dict for
        ITEMS/VALUES iteration and a list of keys for KEYS iteration.
        """
        output_stream = coder_impl.create_OutputStream()
        output_stream.write_byte(self.ITERATE_FLAG)
        output_stream.write_byte(iterate_type.value)
        if not isinstance(iterator_token, IteratorToken):
            # The iterator token represents a Java iterator
            output_stream.write_bigendian_int32(len(iterator_token))
            output_stream.write(iterator_token)
        else:
            # No Java-side iterator yet: send a zero-length token.
            output_stream.write_bigendian_int32(0)
        continuation_token = output_stream.get()
        data, response_token = self._underlying.get_raw(state_key, continuation_token)
        if len(response_token) != 0:
            # The new iterator token is an UUID which represents a cached iterator at Java
            # side.
            new_iterator_token = response_token
            if iterator_token == IteratorToken.NOT_START:
                # This is the first request but not the last request of current state.
                # It means there is a new iterator has been created and cached at Java side.
                self._inc_cached_iterators_num()
        else:
            new_iterator_token = IteratorToken.FINISHED
            if iterator_token != IteratorToken.NOT_START:
                # This is not the first request but the last request of current state.
                # It means the cached iterator created at Java side has been removed as
                # current iteration has finished.
                self._dec_cached_iterators_num()
        input_stream = coder_impl.create_InputStream(data)
        if iterate_type == IterateType.ITEMS or iterate_type == IterateType.VALUES:
            # decode both key and value
            current_batch = {}
            while input_stream.size() > 0:
                key = map_key_decoder(input_stream)
                # One byte marks whether a (non-None) value follows.
                is_not_none = input_stream.read_byte()
                if is_not_none:
                    value = map_value_decoder(input_stream)
                else:
                    value = None
                current_batch[key] = value
        else:
            # only decode key
            current_batch = []
            while input_stream.size() > 0:
                key = map_key_decoder(input_stream)
                current_batch.append(key)
        return current_batch, new_iterator_token

    def _append_raw(self, state_key, items, map_key_encoder, map_value_encoder):
        """Encode a batch of mutations and append it to the remote state channel."""
        output_stream = coder_impl.create_OutputStream()
        output_stream.write_bigendian_int32(len(items))
        for request_flag, map_key, map_value in items:
            output_stream.write_byte(request_flag)
            # Not all the coder impls will serialize the length of bytes when we set the "nested"
            # param to "True", so we need to encode the length of bytes manually.
            tmp_out = coder_impl.create_OutputStream()
            map_key_encoder(map_key, tmp_out)
            serialized_data = tmp_out.get()
            output_stream.write_bigendian_int32(len(serialized_data))
            output_stream.write(serialized_data)
            if request_flag == self.SET_VALUE:
                tmp_out = coder_impl.create_OutputStream()
                map_value_encoder(map_value, tmp_out)
                serialized_data = tmp_out.get()
                output_stream.write_bigendian_int32(len(serialized_data))
                output_stream.write(serialized_data)
        return self._underlying.append_raw(state_key, output_stream.get())

    @staticmethod
    def _convert_to_cache_key(state_key):
        # The protobuf message itself is not hashable; its serialized form is.
        return state_key.SerializeToString()
# NOTE(review): ``collections.Iterator`` is a deprecated alias removed in
# Python 3.10; it should come from ``collections.abc`` — confirm against the
# file's import section before changing.
class RemovableConcatIterator(collections.Iterator):
    """
    An iterator that first exhausts ``first`` and then ``second``, and supports
    removing the element returned last from the underlying map state.  Both
    sub-iterators must yield ``(map_key, element)`` pairs.
    """

    def __init__(self, internal_map_state, first, second):
        self._first = first
        self._second = second
        # True while we are still consuming the first iterator.
        self._first_not_finished = True
        self._internal_map_state = internal_map_state
        # Snapshot of the state's modification counter, used for fail-fast
        # concurrent-modification detection.
        self._mod_count = self._internal_map_state._mod_count
        # Map key of the element returned last; None when remove() is illegal.
        self._last_key = None

    def __next__(self):
        self._check_modification()
        if self._first_not_finished:
            try:
                self._last_key, element = next(self._first)
                return element
            except StopIteration:
                # First iterator exhausted: fall through to the second one.
                self._first_not_finished = False
                return self.__next__()
        else:
            self._last_key, element = next(self._second)
            return element

    def remove(self):
        """
        Remove the last element returned by this iterator.
        """
        if self._last_key is None:
            raise Exception("You need to call the '__next__' method before calling "
                            "this method.")
        self._check_modification()
        # Bypass the 'remove' method of the map state to avoid triggering the commit of the write
        # cache.
        if self._internal_map_state._cleared:
            # After clear() the write cache is authoritative: dropping the entry
            # suffices, and an empty write cache means the state is empty.
            del self._internal_map_state._write_cache[self._last_key]
            if len(self._internal_map_state._write_cache) == 0:
                self._internal_map_state._is_empty = True
        else:
            # Record a pending delete for the remote side.
            self._internal_map_state._write_cache[self._last_key] = (False, None)
        # Advance both counters together so this removal does not trip the
        # concurrent-modification check.
        self._mod_count += 1
        self._internal_map_state._mod_count += 1
        self._last_key = None

    def _check_modification(self):
        # Fail fast if the map state was modified behind this iterator's back.
        if self._mod_count != self._internal_map_state._mod_count:
            raise Exception("Concurrent modification detected. "
                            "You can not modify the map state when iterating it except using the "
                            "'remove' method of this iterator.")
class InternalSynchronousMapRuntimeState(object):
    """
    Internal map state implementation combining a client-side write cache with
    the read-caching :class:`CachingMapStateHandler`.

    The write cache maps each map key to an ``(exists, value)`` pair, where
    ``exists`` is False for a pending delete.  ``_is_empty`` is a tri-state
    flag: True/False when the emptiness is known locally, None when it has to
    be requested from the remote side.  ``_mod_count`` supports fail-fast
    iteration (see :class:`RemovableConcatIterator`).
    """

    def __init__(self,
                 map_state_handler: CachingMapStateHandler,
                 state_key,
                 map_key_coder,
                 map_value_coder,
                 max_write_cache_entries):
        self._map_state_handler = map_state_handler
        self._state_key = state_key
        self._map_key_coder = map_key_coder
        # FieldCoder instances must be wrapped before an impl can be obtained.
        if isinstance(map_key_coder, FieldCoder):
            map_key_coder_impl = FlinkCoder(map_key_coder).get_impl()
        else:
            map_key_coder_impl = map_key_coder.get_impl()
        self._map_key_encoder, self._map_key_decoder = \
            self._get_encoder_and_decoder(map_key_coder_impl)
        self._map_value_coder = map_value_coder
        if isinstance(map_value_coder, FieldCoder):
            map_value_coder_impl = FlinkCoder(map_value_coder).get_impl()
        else:
            map_value_coder_impl = map_value_coder.get_impl()
        self._map_value_encoder, self._map_value_decoder = \
            self._get_encoder_and_decoder(map_value_coder_impl)
        self._write_cache = dict()
        # Once the write cache reaches this size it is flushed via commit().
        self._max_write_cache_entries = max_write_cache_entries
        self._is_empty = None
        # True when clear() was called and the remote state still awaits clearing.
        self._cleared = False
        self._mod_count = 0

    def get(self, map_key):
        """Return the value under ``map_key``, or None if absent (or mapped to None)."""
        if self._is_empty:
            return None
        # Pending writes take precedence over the remote state.
        if map_key in self._write_cache:
            exists, value = self._write_cache[map_key]
            if exists:
                return value
            else:
                return None
        if self._cleared:
            # Everything outside the write cache has been cleared.
            return None
        exists, value = self._map_state_handler.blocking_get(
            self._state_key, map_key, self._map_key_encoder, self._map_value_decoder)
        if exists:
            return value
        else:
            return None

    def put(self, map_key, map_value):
        """Buffer ``map_key -> map_value``; flush when the write cache is full."""
        self._write_cache[map_key] = (True, map_value)
        self._is_empty = False
        self._mod_count += 1
        if len(self._write_cache) >= self._max_write_cache_entries:
            self.commit()

    def put_all(self, dict_value):
        """
        Buffer all given entries; flush when the write cache is full.

        NOTE(review): despite the name, this iterates ``dict_value`` as an
        iterable of ``(key, value)`` pairs — a plain dict would yield bare
        keys here.  Confirm the expected argument shape against the callers.
        """
        for map_key, map_value in dict_value:
            self._write_cache[map_key] = (True, map_value)
            self._is_empty = False
        self._mod_count += 1
        if len(self._write_cache) >= self._max_write_cache_entries:
            self.commit()

    def remove(self, map_key):
        """Delete ``map_key``; buffered locally until the next commit."""
        if self._is_empty:
            return
        if self._cleared:
            # The write cache is authoritative after clear(): just drop the entry.
            del self._write_cache[map_key]
            if len(self._write_cache) == 0:
                self._is_empty = True
        else:
            # Record a pending delete; emptiness is no longer known locally.
            self._write_cache[map_key] = (False, None)
            self._is_empty = None
        self._mod_count += 1
        if len(self._write_cache) >= self._max_write_cache_entries:
            self.commit()

    def contains(self, map_key):
        """Return True if ``map_key`` maps to a non-None value (see get())."""
        if self._is_empty:
            return False
        if self.get(map_key) is None:
            return False
        else:
            return True

    def is_empty(self):
        """Return True if the map holds no entries, querying remotely when unknown."""
        if self._is_empty is None:
            if len(self._write_cache) > 0:
                # Flush pending writes first so the remote check reflects them.
                self.commit()
            self._is_empty = self._map_state_handler.check_empty(self._state_key)
        return self._is_empty

    def clear(self):
        """Mark the whole map for clearing; applied remotely at the next commit."""
        self._cleared = True
        self._is_empty = True
        self._mod_count += 1
        self._write_cache.clear()

    def items(self):
        """Iterate (key, value) pairs: write cache first, then remote data."""
        return RemovableConcatIterator(
            self,
            self.write_cache_iterator(IterateType.ITEMS),
            self.remote_data_iterator(IterateType.ITEMS))

    def keys(self):
        """Iterate keys: write cache first, then remote data."""
        return RemovableConcatIterator(
            self,
            self.write_cache_iterator(IterateType.KEYS),
            self.remote_data_iterator(IterateType.KEYS))

    def values(self):
        """Iterate values: write cache first, then remote data."""
        return RemovableConcatIterator(
            self,
            self.write_cache_iterator(IterateType.VALUES),
            self.remote_data_iterator(IterateType.VALUES))

    def commit(self):
        """Flush the pending clear and writes to the remote side and wait for them."""
        to_await = None
        if self._cleared:
            to_await = self._map_state_handler.clear(self._state_key)
        if self._write_cache:
            append_items = []
            for map_key, (exists, value) in self._write_cache.items():
                if exists:
                    if value is not None:
                        append_items.append(
                            (CachingMapStateHandler.SET_VALUE, map_key, value))
                    else:
                        append_items.append((CachingMapStateHandler.SET_NONE, map_key, None))
                else:
                    append_items.append((CachingMapStateHandler.DELETE, map_key, None))
            self._write_cache.clear()
            to_await = self._map_state_handler.extend(
                self._state_key, append_items, self._map_key_encoder, self._map_value_encoder)
        if to_await:
            # Block until the last issued request has been acknowledged.
            to_await.get()
        self._write_cache.clear()
        self._cleared = False
        self._mod_count += 1

    def write_cache_iterator(self, iterate_type):
        """Iterate the locally buffered entries only."""
        return create_cache_iterator(self._write_cache, iterate_type)

    def remote_data_iterator(self, iterate_type):
        """Iterate the remote entries, skipping keys shadowed by the write cache."""
        if self._cleared or self._is_empty:
            return iter([])
        else:
            return self._map_state_handler.lazy_iterator(
                self._state_key,
                iterate_type,
                self._map_key_decoder,
                self._map_value_decoder,
                self._write_cache)

    @staticmethod
    def _get_encoder_and_decoder(coder):
        # Bind nested=True once so the encoder/decoder can be called with
        # just (value, stream) / (stream).
        encoder = partial(coder.encode_to_stream, nested=True)
        decoder = partial(coder.decode_from_stream, nested=True)
        return encoder, decoder
class SynchronousMapRuntimeState(SynchronousKvRuntimeState, InternalMapState):
    """
    The runtime MapState implementation backed by an
    :class:`InternalSynchronousMapRuntimeState`.
    """

    def __init__(self,
                 name: str,
                 map_key_coder,
                 map_value_coder,
                 remote_state_backend: 'RemoteKeyedStateBackend'):
        super(SynchronousMapRuntimeState, self).__init__(name, remote_state_backend)
        self._map_key_coder = map_key_coder
        self._map_value_coder = map_value_coder

    def get_internal_state(self):
        """Lazily create the internal map state for the current key/namespace."""
        if self._internal_state is None:
            backend = self._remote_state_backend
            self._internal_state = backend._get_internal_map_state(
                self.name, self.namespace, self._map_key_coder, self._map_value_coder)
        return self._internal_state

    def get(self, key):
        return self.get_internal_state().get(key)

    def put(self, key, value):
        self.get_internal_state().put(key, value)

    def put_all(self, dict_value):
        self.get_internal_state().put_all(dict_value)

    def remove(self, key):
        self.get_internal_state().remove(key)

    def contains(self, key):
        return self.get_internal_state().contains(key)

    def items(self):
        return self.get_internal_state().items()

    def keys(self):
        return self.get_internal_state().keys()

    def values(self):
        return self.get_internal_state().values()

    def is_empty(self):
        return self.get_internal_state().is_empty()

    def clear(self):
        self.get_internal_state().clear()
class RemoteKeyedStateBackend(object):
    """
    A keyed state backend provides methods for managing keyed state.
    """

    # Transform id marker recognized by the Java side for merge_namespaces
    # requests.  (The attribute name's "NAMESAPCES" spelling is pre-existing
    # and kept for compatibility.)
    MERGE_NAMESAPCES_MARK = "merge_namespaces"

    def __init__(self,
                 state_handler,
                 key_coder,
                 namespace_coder,
                 state_cache_size,
                 map_state_read_cache_size,
                 map_state_write_cache_size):
        self._state_handler = state_handler
        self._map_state_handler = CachingMapStateHandler(
            state_handler, map_state_read_cache_size)
        self._key_coder_impl = key_coder.get_impl()
        self.namespace_coder = namespace_coder
        if namespace_coder:
            self._namespace_coder_impl = namespace_coder.get_impl()
        else:
            self._namespace_coder_impl = None
        self._state_cache_size = state_cache_size
        self._map_state_write_cache_size = map_state_write_cache_size
        # All state wrappers ever requested, keyed by state name.
        self._all_states = {}  # type: Dict[str, SynchronousKvRuntimeState]
        # Internal states of inactive (key, namespace) pairs; evicted entries
        # are committed so no pending writes are lost.
        self._internal_state_cache = LRUCache(self._state_cache_size, None)
        self._internal_state_cache.set_on_evict(
            lambda key, value: self.commit_internal_state(value))
        self._current_key = None
        self._encoded_current_key = None
        # Sentinel state key used to ask the Java side to drop cached iterators;
        # its 'key' field is refreshed in clear_cached_iterators().
        self._clear_iterator_mark = beam_fn_api_pb2.StateKey(
            multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
                transform_id="clear_iterators",
                side_input_id="clear_iterators",
                key=self._encoded_current_key))

    def get_list_state(self, name, element_coder):
        """Return (creating if needed) the ListState registered under ``name``."""
        return self._wrap_internal_bag_state(
            name, element_coder, SynchronousListRuntimeState, SynchronousListRuntimeState)

    def get_value_state(self, name, value_coder):
        """Return (creating if needed) the ValueState registered under ``name``."""
        return self._wrap_internal_bag_state(
            name, value_coder, SynchronousValueRuntimeState, SynchronousValueRuntimeState)

    def get_map_state(self, name, map_key_coder, map_value_coder):
        """Return (creating if needed) the MapState registered under ``name``."""
        if name in self._all_states:
            self.validate_map_state(name, map_key_coder, map_value_coder)
            return self._all_states[name]
        map_state = SynchronousMapRuntimeState(name, map_key_coder, map_value_coder, self)
        self._all_states[name] = map_state
        return map_state

    def get_reducing_state(self, name, coder, reduce_function):
        """Return (creating if needed) the ReducingState registered under ``name``."""
        return self._wrap_internal_bag_state(
            name, coder, SynchronousReducingRuntimeState,
            partial(SynchronousReducingRuntimeState, reduce_function=reduce_function))

    def get_aggregating_state(self, name, coder, agg_function):
        """Return (creating if needed) the AggregatingState registered under ``name``."""
        return self._wrap_internal_bag_state(
            name, coder, SynchronousAggregatingRuntimeState,
            partial(SynchronousAggregatingRuntimeState, agg_function=agg_function))

    def validate_state(self, name, coder, expected_type):
        """Raise if ``name`` is already registered with another type or coder."""
        if name in self._all_states:
            state = self._all_states[name]
            if not isinstance(state, expected_type):
                raise Exception("The state name '%s' is already in use and not a %s."
                                % (name, expected_type))
            if state._value_coder != coder:
                raise Exception("State name corrupted: %s" % name)

    def validate_map_state(self, name, map_key_coder, map_value_coder):
        """Raise if ``name`` is already registered with another type or coders."""
        if name in self._all_states:
            state = self._all_states[name]
            if not isinstance(state, SynchronousMapRuntimeState):
                raise Exception("The state name '%s' is already in use and not a map state."
                                % name)
            if state._map_key_coder != map_key_coder or \
                    state._map_value_coder != map_value_coder:
                raise Exception("State name corrupted: %s" % name)

    def _wrap_internal_bag_state(self, name, element_coder, wrapper_type, wrap_method):
        """Return an existing wrapper for ``name`` or create one via ``wrap_method``."""
        if name in self._all_states:
            self.validate_state(name, element_coder, wrapper_type)
            return self._all_states[name]
        wrapped_state = wrap_method(name, element_coder, self)
        self._all_states[name] = wrapped_state
        return wrapped_state

    def _get_internal_bag_state(self, name, namespace, element_coder):
        """Return a (possibly cached) internal bag state for the current key."""
        encoded_namespace = self._encode_namespace(namespace)
        cached_state = self._internal_state_cache.get(
            (name, self._encoded_current_key, encoded_namespace))
        if cached_state is not None:
            return cached_state
        # The created internal state would not be put into the internal state cache
        # at once. The internal state cache is only updated when the current key changes.
        # The reason is that the state cache size may be smaller that the count of activated
        # state (i.e. the state with current key).
        state_spec = userstate.BagStateSpec(name, element_coder)
        internal_state = self._create_bag_state(state_spec, encoded_namespace)
        return internal_state

    def _get_internal_map_state(self, name, namespace, map_key_coder, map_value_coder):
        """Return a (possibly cached) internal map state for the current key."""
        encoded_namespace = self._encode_namespace(namespace)
        cached_state = self._internal_state_cache.get(
            (name, self._encoded_current_key, encoded_namespace))
        if cached_state is not None:
            return cached_state
        internal_map_state = self._create_internal_map_state(
            name, encoded_namespace, map_key_coder, map_value_coder)
        return internal_map_state

    def _create_bag_state(self, state_spec: userstate.StateSpec, encoded_namespace) \
            -> userstate.AccumulatingRuntimeState:
        """Create a fresh internal bag state for ``state_spec``."""
        if isinstance(state_spec, userstate.BagStateSpec):
            bag_state = SynchronousBagRuntimeState(
                self._state_handler,
                state_key=self.get_bag_state_key(
                    state_spec.name, self._encoded_current_key, encoded_namespace),
                value_coder=state_spec.coder)
            return bag_state
        else:
            raise NotImplementedError(state_spec)

    def _create_internal_map_state(self, name, encoded_namespace, map_key_coder, map_value_coder):
        """Create a fresh internal map state for the current key."""
        # Currently the `beam_fn_api.proto` does not support MapState, so we use the
        # the `MultimapSideInput` message to mark the state as a MapState for now.
        state_key = beam_fn_api_pb2.StateKey(
            multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
                transform_id="",
                window=encoded_namespace,
                side_input_id=name,
                key=self._encoded_current_key))
        return InternalSynchronousMapRuntimeState(
            self._map_state_handler,
            state_key,
            map_key_coder,
            map_value_coder,
            self._map_state_write_cache_size)

    def _encode_namespace(self, namespace):
        """Encode ``namespace`` to bytes; ``None`` maps to the empty byte string."""
        if namespace is not None:
            encoded_namespace = self._namespace_coder_impl.encode(namespace)
        else:
            encoded_namespace = b''
        return encoded_namespace

    def cache_internal_state(self, encoded_key, internal_kv_state: SynchronousKvRuntimeState):
        """Park the internal state of ``internal_kv_state`` in the LRU cache."""
        encoded_old_namespace = self._encode_namespace(internal_kv_state.namespace)
        self._internal_state_cache.put(
            (internal_kv_state.name, encoded_key, encoded_old_namespace),
            internal_kv_state.get_internal_state())

    def set_current_key(self, key):
        """Switch the backend to ``key``, caching the states of the previous key."""
        if key == self._current_key:
            return
        encoded_old_key = self._encoded_current_key
        self._current_key = key
        self._encoded_current_key = self._key_coder_impl.encode(self._current_key)
        for state_name, state_obj in self._all_states.items():
            if self._state_cache_size > 0:
                # cache old internal state
                self.cache_internal_state(encoded_old_key, state_obj)
            # Detach the wrapper from the old key's internal state.
            state_obj.namespace = None
            state_obj._internal_state = None

    def get_current_key(self):
        return self._current_key

    def commit(self):
        """Commit all cached and active internal states (flush pending writes)."""
        for internal_state in self._internal_state_cache:
            self.commit_internal_state(internal_state)
        for name, state in self._all_states.items():
            # NOTE(review): LRUCache defines no __contains__, so this membership
            # test falls back to __iter__, which yields cached VALUES — the key
            # tuple never matches, making the condition effectively always true
            # and possibly committing a state twice (harmless, commit is
            # idempotent).  Confirm before relying on this check.
            if (name, self._encoded_current_key, self._encode_namespace(state.namespace)) \
                    not in self._internal_state_cache:
                self.commit_internal_state(state._internal_state)

    def clear_cached_iterators(self):
        """Ask the Java side to drop all iterators it cached for this bundle."""
        if self._map_state_handler.get_cached_iterators_num() > 0:
            self._clear_iterator_mark.multimap_side_input.key = self._encoded_current_key
            self._map_state_handler.clear(self._clear_iterator_mark)

    def merge_namespaces(self, state: SynchronousMergingRuntimeState, target, sources):
        """Merge the state under ``sources`` into ``target`` on the Java side."""
        state.set_current_namespace(target)
        # Flush pending writes of the target before the remote merge.
        self.commit_internal_state(state.get_internal_state())
        encoded_target_namespace = self._encode_namespace(target)
        encoded_namespaces = [encoded_target_namespace]
        for source in sources:
            encoded_namespaces.append(self._encode_namespace(source))
        self.clear_state_cache(state, encoded_namespaces)
        state_key = self.get_bag_state_key(
            state.name, self._encoded_current_key, encoded_target_namespace)
        state_key.bag_user_state.transform_id = self.MERGE_NAMESAPCES_MARK
        # Payload: 4-byte big-endian source count, then the target namespace
        # followed by each source namespace.
        encoded_namespaces_writer = BytesIO()
        encoded_namespaces_writer.write(len(sources).to_bytes(4, 'big'))
        for encoded_namespace in encoded_namespaces:
            encoded_namespaces_writer.write(encoded_namespace)
        sources_bytes = encoded_namespaces_writer.getvalue()
        to_await = self._map_state_handler._underlying.append_raw(state_key, sources_bytes)
        if to_await:
            to_await.get()

    def clear_state_cache(self, state: SynchronousMergingRuntimeState, encoded_namespaces):
        """Evict and commit the caches of ``state`` for all given namespaces."""
        name = state.name
        for encoded_namespace in encoded_namespaces:
            if (name, self._encoded_current_key, encoded_namespace) in self._internal_state_cache:
                # commit and clear the write cache
                self._internal_state_cache.evict(
                    (name, self._encoded_current_key, encoded_namespace))
            # currently all the SynchronousMergingRuntimeState is based on bag state
            state_key = self.get_bag_state_key(
                name, self._encoded_current_key, encoded_namespace)
            # clear the read cache, the read cache is shared between map state handler and bag
            # state handler. So we can use the map state handler instead.
            self._map_state_handler.clear_read_cache(state_key)

    @staticmethod
    def get_bag_state_key(name, encoded_key, encoded_namespace):
        """Build the BagUserState state key for (name, key, namespace)."""
        return beam_fn_api_pb2.StateKey(
            bag_user_state=beam_fn_api_pb2.StateKey.BagUserState(
                transform_id="",
                window=encoded_namespace,
                user_state_id=name,
                key=encoded_key))

    @staticmethod
    def commit_internal_state(internal_state):
        """Commit ``internal_state`` (if any) and reset it for bundle reuse."""
        if internal_state is not None:
            internal_state.commit()
        # reset the status of the internal state to reuse the object cross bundle
        if isinstance(internal_state, SynchronousBagRuntimeState):
            internal_state._cleared = False
            internal_state._added_elements = []
| |
'''Wrapper for demolib.h
Generated with:
../ctypesgen.py -o pydemolib.py -l demolib.so demolib.h
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
# Begin preamble
import ctypes, os, sys
from ctypes import *
# ctypes has no ptrdiff_t of its own; pick the signed integer type whose
# size matches c_size_t and publish it as c_ptrdiff_t.
_candidates = [c_int16, c_int32]
if hasattr(ctypes, 'c_int64'):
    # Some builds of ctypes do not define c_int64; such builds almost
    # certainly lack 64-bit pointers as well, so only consider it if present.
    _candidates.append(c_int64)
for _candidate in _candidates:
    if sizeof(_candidate) == sizeof(c_size_t):
        c_ptrdiff_t = _candidate
del _candidate
del _candidates
class c_void(Structure):
    # c_void_p is a buggy return type, converting to int, so
    # POINTER(None) == c_void_p is actually written as
    # POINTER(c_void), so it can be treated as a real pointer.
    # The single dummy field simply gives the structure a nonzero size.
    _fields_ = [('dummy', c_int)]
def POINTER(obj):
    """Wrap ctypes.POINTER so that passing None for a pointer argument
    produces a genuine NULL pointer, working around ctypes' handling of
    None on 64-bit platforms.
    """
    pointer_type = ctypes.POINTER(obj)

    # A classmethod from_param means this pointer type was already patched.
    if isinstance(pointer_type.from_param, classmethod):
        return pointer_type

    def from_param(cls, value):
        # Translate None into a real NULL pointer of this type.
        return cls() if value is None else value

    pointer_type.from_param = classmethod(from_param)
    return pointer_type
class UserString:
    """Python 2 stand-in for the stdlib UserString: wraps a string in
    ``self.data`` and forwards the full str interface to it.

    NOTE: relies on Python-2-only names (``basestring``, ``sys.maxint``,
    ``cmp``, ``long``, ``__getslice__``); this module targets Python 2.
    """
    def __init__(self, seq):
        if isinstance(seq, basestring):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __long__(self): return long(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)
    # Python 2 three-way comparison protocol (not used by Python 3).
    def __cmp__(self, string):
        if isinstance(string, UserString):
            return cmp(self.data, string.data)
        else:
            return cmp(self.data, string)
    def __contains__(self, char):
        return char in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])
    # Python 2 slicing protocol; negative indices are clamped to 0 here.
    def __getslice__(self, start, end):
        start = max(start, 0); end = max(end, 0)
        return self.__class__(self.data[start:end])
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, basestring):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, basestring):
            return self.__class__(other + self.data)
        else:
            return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)
    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=sys.maxint):
        return self.data.count(sub, start, end)
    def decode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.decode(encoding, errors))
            else:
                return self.__class__(self.data.decode(encoding))
        else:
            return self.__class__(self.data.decode())
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=sys.maxint):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=sys.maxint):
        return self.data.find(sub, start, end)
    def index(self, sub, start=0, end=sys.maxint):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=sys.maxint):
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=sys.maxint):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=sys.maxint):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString):
    """mutable string objects

    Python strings are immutable objects. This has the advantage that
    strings may be used as dictionary keys. If this property isn't needed
    and you insist on changing string values in place instead, you may cheat
    and use MutableString.

    But the purpose of this class is an educational one: to prevent
    people from inventing their own mutable string class derived
    from UserString and then forgetting to remove (override) the
    __hash__ method inherited from UserString. This would lead to
    errors that would be very hard to track down.

    A faster and better solution is to rewrite your program using lists."""
    def __init__(self, string=""):
        self.data = string
    def __hash__(self):
        # Mutable objects must not be hashable.
        raise TypeError("unhashable type (it is mutable)")
    def __setitem__(self, index, sub):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data): raise IndexError
        self.data = self.data[:index] + sub + self.data[index+1:]
    def __delitem__(self, index):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data): raise IndexError
        self.data = self.data[:index] + self.data[index+1:]
    # Python 2 slice-assignment protocol (removed in Python 3).
    def __setslice__(self, start, end, sub):
        start = max(start, 0); end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start]+sub.data+self.data[end:]
        elif isinstance(sub, basestring):
            self.data = self.data[:start]+sub+self.data[end:]
        else:
            self.data = self.data[:start]+str(sub)+self.data[end:]
    def __delslice__(self, start, end):
        start = max(start, 0); end = max(end, 0)
        self.data = self.data[:start] + self.data[end:]
    def immutable(self):
        # Snapshot the current contents as an immutable (hashable) UserString.
        return UserString(self.data)
    def __iadd__(self, other):
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, basestring):
            self.data += other
        else:
            self.data += str(other)
        return self
    def __imul__(self, n):
        self.data *= n
        return self
class String(MutableString, Union):
    """Mutable string usable as a C ``char *`` argument: a ctypes Union of a
    raw POINTER(c_char) and a c_char_p sharing the same storage."""
    _fields_ = [('raw', POINTER(c_char)),
                ('data', c_char_p)]
    def __init__(self, obj=""):
        if isinstance(obj, (str, unicode, UserString)):
            self.data = str(obj)
        else:
            # Anything else is assumed to be a raw char pointer.
            self.raw = obj
    def __len__(self):
        # NULL pointer (falsy data) counts as length 0.
        return self.data and len(self.data) or 0
    def from_param(cls, obj):
        # Convert None or 0 (NULL pointer)
        if obj is None or obj == 0:
            return cls(POINTER(c_char)())
        # Convert from String
        elif isinstance(obj, String):
            return obj
        # Convert from str
        elif isinstance(obj, str):
            return cls(obj)
        # Convert from c_char_p
        elif isinstance(obj, c_char_p):
            return obj
        # Convert from POINTER(c_char)
        elif isinstance(obj, POINTER(c_char)):
            return obj
        # Convert from raw pointer (an integer address)
        elif isinstance(obj, int):
            return cls(cast(obj, POINTER(c_char)))
        # Convert from any object exposing the ctypes _as_parameter_ protocol
        else:
            return String.from_param(obj._as_parameter_)
    from_param = classmethod(from_param)
def ReturnString(obj, func=None, arguments=None):
    # errcheck-style hook: normalize a ctypes return value into a String.
    # The unused func/arguments parameters match ctypes' errcheck signature.
    return String.from_param(obj)
# As of ctypes 1.0, ctypes does not support custom error-checking
# functions on callbacks, nor does it support custom datatypes on
# callbacks, so we must ensure that all callbacks return
# primitive datatypes.
#
# Non-primitive return values wrapped with UNCHECKED won't be
# typechecked, and will be converted to c_void_p.
def UNCHECKED(type):
    """Return a callback-safe version of *type*.

    ctypes callbacks support neither custom error checking nor custom
    datatypes, so any non-primitive (or pointer-like, ``_type_ == "P"``)
    return type is downgraded to c_void_p; primitive ctypes types pass
    through unchanged.
    """
    type_code = getattr(type, "_type_", None)
    if isinstance(type_code, str) and type_code != "P":
        return type
    return c_void_p
# ctypes doesn't have direct support for variadic functions, so we have to write
# our own wrapper class
class _variadic_function(object):
def __init__(self,func,restype,argtypes):
self.func=func
self.func.restype=restype
self.argtypes=argtypes
def _as_parameter_(self):
# So we can pass this variadic function as a function pointer
return self.func
def __call__(self,*args):
fixed_args=[]
i=0
for argtype in self.argtypes:
# Typecheck what we can
fixed_args.append(argtype.from_param(args[i]))
i+=1
return self.func(*fixed_args+list(args[i:]))
# End preamble
# Map of library name -> loaded library object, populated in "Begin libraries".
_libs = {}
# Extra directories consulted by the loader (see add_library_search_dirs).
_libdirs = []
# Begin loader
# ----------------------------------------------------------------------------
# Copyright (c) 2008 David James
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import os.path, re, sys, glob
import ctypes
import ctypes.util
def _environ_path(name):
if name in os.environ:
return os.environ[name].split(":")
else:
return []
class LibraryLoader(object):
    """Base class for the platform-specific library loaders below.

    Subclasses override getplatformpaths() to supply candidate paths for a
    library name; load_library() tries each candidate in order.
    """
    def __init__(self):
        # Extra directories to search, settable via add_library_search_dirs().
        self.other_dirs=[]
    def load_library(self,libname):
        """Given the name of a library, load it."""
        paths = self.getpaths(libname)
        for path in paths:
            if os.path.exists(path):
                return self.load(path)
        raise ImportError("%s not found." % libname)
    def load(self,path):
        """Given a path to a library, load it."""
        try:
            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
            # of the default RTLD_LOCAL. Without this, you end up with
            # libraries not being loadable, resulting in "Symbol not found"
            # errors
            if sys.platform == 'darwin':
                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
            else:
                return ctypes.cdll.LoadLibrary(path)
        except OSError,e:
            # NOTE: Python 2 'except ..., e' syntax; this module targets py2.
            raise ImportError(e)
    def getpaths(self,libname):
        """Return a list of paths where the library might be found."""
        if os.path.isabs(libname):
            # An absolute path is used verbatim.
            yield libname
        else:
            # FIXME / TODO return '.' and os.path.dirname(__file__)
            for path in self.getplatformpaths(libname):
                yield path
            # Fall back to whatever ctypes' own search can find.
            path = ctypes.util.find_library(libname)
            if path: yield path
    def getplatformpaths(self, libname):
        # Overridden by platform-specific subclasses; the base yields nothing.
        return []
# Darwin (Mac OS X)
class DarwinLibraryLoader(LibraryLoader):
    """Loader implementing the dyld search semantics for Mac OS X."""
    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
                    "%s.so", "%s.bundle", "%s"]
    def getplatformpaths(self,libname):
        # NOTE(review): this tests os.path.pathsep (':'), which looks like it
        # was meant to be a path-separator ('/') check — confirm upstream.
        if os.path.pathsep in libname:
            names = [libname]
        else:
            names = [format % libname for format in self.name_formats]
        for dir in self.getdirs(libname):
            for name in names:
                yield os.path.join(dir,name)
    def getdirs(self,libname):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
        DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''
        dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
        if not dyld_fallback_library_path:
            dyld_fallback_library_path = [os.path.expanduser('~/lib'),
                                          '/usr/local/lib', '/usr/lib']
        dirs = []
        if '/' in libname:
            # Names containing a slash only honor DYLD_LIBRARY_PATH.
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
        else:
            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
        dirs.extend(self.other_dirs)
        dirs.append(".")
        dirs.append(os.path.dirname(__file__))
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            # Running inside a frozen .app bundle: search its Frameworks dir.
            dirs.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks'))
        dirs.extend(dyld_fallback_library_path)
        return dirs
# Posix
class PosixLibraryLoader(LibraryLoader):
    """Loader for generic POSIX systems: emulates the ld.so search path and
    caches a scan of the usual library directories."""
    # Lazily-built map of library name / file name -> path (shared per class).
    _ld_so_cache = None
    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so. This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date). Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
        directories = []
        for name in ("LD_LIBRARY_PATH",
                     "SHLIB_PATH", # HPUX
                     "LIBPATH", # OS/2, AIX
                     "LIBRARY_PATH", # BE/OS
                     ):
            if name in os.environ:
                directories.extend(os.environ[name].split(os.pathsep))
        directories.extend(self.other_dirs)
        directories.append(".")
        directories.append(os.path.dirname(__file__))
        # /etc/ld.so.conf supplies the distro-configured directories.
        try: directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
        except IOError: pass
        directories.extend(['/lib', '/usr/lib', '/lib64', '/usr/lib64'])
        cache = {}
        lib_re = re.compile(r'lib(.*)\.s[ol]')
        ext_re = re.compile(r'\.s[ol]$')  # NOTE(review): appears unused
        for dir in directories:
            try:
                for path in glob.glob("%s/*.s[ol]*" % dir):
                    file = os.path.basename(path)
                    # Index by filename
                    if file not in cache:
                        cache[file] = path
                    # Index by library name
                    match = lib_re.match(file)
                    if match:
                        library = match.group(1)
                        if library not in cache:
                            cache[library] = path
            except OSError:
                pass
        self._ld_so_cache = cache
    def getplatformpaths(self, libname):
        if self._ld_so_cache is None:
            self._create_ld_so_cache()
        result = self._ld_so_cache.get(libname)
        if result: yield result
        path = ctypes.util.find_library(libname)
        # NOTE(review): find_library usually returns a bare soname, so this
        # join yields "/lib/<soname>" — a heuristic fallback; confirm intent.
        if path: yield os.path.join("/lib",path)
# Windows
class _WindowsLibrary(object):
    """Loads a DLL under both calling conventions (cdecl and stdcall) and
    resolves symbols against whichever view exposes them."""

    def __init__(self, path):
        self.cdll = ctypes.cdll.LoadLibrary(path)
        self.windll = ctypes.windll.LoadLibrary(path)

    def __getattr__(self, name):
        # Prefer the cdecl view; fall back to stdcall. A symbol missing from
        # both propagates the stdcall lookup's AttributeError.
        try:
            return getattr(self.cdll, name)
        except AttributeError:
            return getattr(self.windll, name)
class WindowsLibraryLoader(LibraryLoader):
    """Loader for Windows and Cygwin: tries the standard path search first,
    then falls back to DLL name patterns via ctypes.cdll attribute access."""
    name_formats = ["%s.dll", "lib%s.dll", "%slib.dll"]
    def load_library(self, libname):
        try:
            result = LibraryLoader.load_library(self, libname)
        except ImportError:
            result = None
            if os.path.sep not in libname:
                # Bare name: try each DLL naming pattern in turn.
                for name in self.name_formats:
                    try:
                        result = getattr(ctypes.cdll, name % libname)
                        if result:
                            break
                    except WindowsError:
                        # WindowsError only exists on Windows, where this
                        # loader runs.
                        result = None
            if result is None:
                # Last resort: the name exactly as given.
                try:
                    result = getattr(ctypes.cdll, libname)
                except WindowsError:
                    result = None
            if result is None:
                raise ImportError("%s not found." % libname)
        return result
    def load(self, path):
        # Wrap so symbols resolve under both cdecl and stdcall conventions.
        return _WindowsLibrary(path)
    def getplatformpaths(self, libname):
        if os.path.sep not in libname:
            for name in self.name_formats:
                # Check the current directory, then ctypes' own search.
                dll_in_current_dir = os.path.abspath(name % libname)
                if os.path.exists(dll_in_current_dir):
                    yield dll_in_current_dir
                path = ctypes.util.find_library(name % libname)
                if path:
                    yield path
# Platform switching
# If your value of sys.platform does not appear in this dict, please contact
# the Ctypesgen maintainers.
# Pick the loader class for this platform; anything not listed (linux,
# the BSDs, ...) falls back to the generic POSIX loader.
loaderclass = {
    "darwin": DarwinLibraryLoader,
    "cygwin": WindowsLibraryLoader,
    "win32": WindowsLibraryLoader
}
loader = loaderclass.get(sys.platform, PosixLibraryLoader)()
def add_library_search_dirs(other_dirs):
    # Public hook for callers to extend the library search path.
    loader.other_dirs = other_dirs
load_library = loader.load_library
del loaderclass
# End loader
add_library_search_dirs([])
# Begin libraries
# Load the wrapped shared library; raises ImportError if it cannot be found.
_libs["demolib.so"] = load_library("demolib.so")
# 1 libraries
# End libraries
# No modules
# /home/clach04/dev/python/ctypesgen/demo/demolib.h: 6
if hasattr(_libs['demolib.so'], 'trivial_add'):
    # int trivial_add(int, int) — bind the symbol and declare its prototype.
    trivial_add = _libs['demolib.so'].trivial_add
    trivial_add.argtypes = [c_int, c_int]
    trivial_add.restype = c_int
# No inserted files
| |
#!/usr/bin/python3
import sys
import json
import os
import re
import time
import xml.etree.cElementTree as cET
from scrape_affiliation import scrape_affiliation
class DBLPEncoder(json.JSONEncoder):
    """JSON encoder that serializes our model objects via their to_dict().

    Objects without a to_dict() are deferred to the base class so they
    raise the standard TypeError (the original raised AttributeError).
    """

    def default(self, o):
        """Return a JSON-serializable representation of ``o``."""
        to_dict = getattr(o, "to_dict", None)
        if callable(to_dict):
            return to_dict()
        return super().default(o)
class Author():
    """An author with a unique id, their articles and (lazy) affiliation."""

    def __init__(self, name, id):
        self.name = name
        self.id = id
        self.articles = []
        # Filled in later from the scrape/cache step; None means unknown.
        self.affiliation = None

    def to_dict(self):
        """Reduce the author to a plain dict for JSON serialization."""
        return dict(name=self.name,
                    id=self.id,
                    articles=self.articles,
                    affiliation=self.affiliation)
class Article():
    """A single article: title, DOI, author ids and publication year."""

    def __init__(self, id):
        self.id = id
        self.title = ""
        self.doi = ""
        self.authors = []
        # -1 marks an article whose <year> element was never seen.
        self.year = -1

    def to_dict(self):
        """Reduce the article to a plain dict for JSON serialization."""
        return dict(title=self.title,
                    doi=self.doi,
                    authors=self.authors,
                    year=self.year,
                    id=self.id)
class Journal():
    """A journal plus the reduced set of articles and author ids parsed for it."""

    def __init__(self, title="", short_name=""):
        # A single constructor with defaults replaces the two duplicated
        # __init__ definitions in the original (Python keeps only the last
        # definition of a name in a class body, so the first was dead code).
        self.title = title
        self.short_name = short_name
        self.num_articles = 0
        # Sentinel years: any real year shrinks first_year / grows latest_year.
        self.first_year = 9999
        self.latest_year = 0
        self.authors = []
        self.articles = []

    # Format the journal information to a dict so we can dump it to JSON
    def to_dict(self):
        return {
            "title": self.title,
            "short_name": self.short_name,
            "num_authors": len(self.authors),
            "num_articles": self.num_articles,
            "first_year": self.first_year,
            "latest_year": self.latest_year,
            "authors": self.authors,
            "articles": self.articles,
        }
# Check that the data and data/authors directories exist. If not, create them
def check_directories(d):
    """Create directory ``d`` (including any missing parents) if needed.

    Uses os.makedirs with exist_ok=True so nested output paths work and
    repeated calls are harmless; the original os.mkdir failed whenever the
    parent directory did not exist.
    """
    os.makedirs(d, exist_ok=True)
if __name__ == "__main__":
    if len(sys.argv) < 4:
        print("Usage: ./dblp_to_json <dblp file> <journal_filters.json> <output dir>")
        print("\tThe journal filters JSON file specifies the short names of journals that")
        print("\tyou're interested in getting the reduced JSON data for")
        sys.exit(1)
    out_dir = sys.argv[3]
    check_directories(out_dir)
    desired_journals = {}
    with open(sys.argv[2], "r") as f:
        desired_journals = json.load(f)
    print("Journal Filters: {}".format(desired_journals))
    # Raw string: the original non-raw literal relied on "\/" and "\w"
    # escapes surviving verbatim, which Python 3 flags as invalid escapes.
    match_journal_key = re.compile(r"journals/(\w+)/")
    journal = None
    # We want to keep the author information across journals, otherwise we would lose
    # information that one author published in many journals. Instead we would see them as
    # multiple authors, one for each journal they published in.
    authors = {}
    next_author_id = 0
    # We also keep a single article id counter
    next_article_id = 0
    # The top-level record tags in the DBLP dump that mark a complete entry.
    record_tags = ("article", "inproceedings", "proceedings", "book",
                   "incollection", "phdthesis", "mastersthesis", "www")
    # NOTE(review): xml.etree.cElementTree was removed in Python 3.9; the
    # module-level import should migrate to xml.etree.ElementTree.
    context = iter(cET.iterparse(sys.argv[1], events=("start", "end")))
    event, root = next(context)
    for event, elem in context:
        # For each end tag that closes a full record we dump it, since we don't need to
        # keep the data around. If it's an article we first check whether it belongs to
        # a journal we want and save it to our reduced data set.
        # Bug fix: the original condition mixed "and"/"or" without parentheses,
        # so "start" events of non-article records also entered this branch.
        if event == "end" and elem.tag in record_tags:
            # If it's an article see if it's in our journal filters
            if elem.tag == "article":
                # We also track which journal we're currently parsing so we can dump it
                # once we hit a new journal and avoid keeping all the data around for the
                # entire run
                key = match_journal_key.match(elem.get("key"))
                if key and key.group(1) in desired_journals:
                    # If this is the first journal or we're done reading this journal
                    if journal is None or not key.group(1) == journal.short_name:
                        # If we were previously reading a journal save it out
                        if journal:
                            print("Saving out {}\n\tnum_authors = {}\n\tnum_articles = {}".format(
                                journal.title, len(journal.authors), journal.num_articles))
                            with open(out_dir + "/" + journal.short_name + ".json", "w") as fp:
                                json.dump(journal, fp, cls=DBLPEncoder)
                        # Setup the new journal we're reading
                        title = ""
                        # Find the journal's full title. Element.getiterator was
                        # removed in Python 3.9; iter() is the replacement.
                        for child in elem.iter("journal"):
                            title = child.text
                        journal = Journal(title, key.group(1))
                    # Add this article to the journal
                    journal.num_articles += 1
                    # Parse the article information
                    article = Article(next_article_id)
                    next_article_id += 1
                    for child in list(elem):
                        if child.tag == "title":
                            article.title = child.text
                        elif child.tag == "author":
                            # First time we've seen this author: add a new Author entry
                            auth = None
                            if child.text not in authors:
                                auth = Author(child.text, next_author_id)
                                authors[child.text] = auth
                                next_author_id += 1
                            else:
                                auth = authors[child.text]
                            article.authors.append(auth.id)
                            auth.articles.append(article)
                            # If this author isn't already recorded for this journal, add their id
                            if auth.id not in journal.authors:
                                journal.authors.append(auth.id)
                        elif child.tag == "year":
                            year = int(child.text)
                            journal.first_year = min(journal.first_year, year)
                            journal.latest_year = max(journal.latest_year, year)
                            article.year = year
                        elif child.tag == "ee":
                            article.doi = child.text
                    journal.articles.append(article)
            # Dump the node we just parsed and any references to it so we don't explode
            # our memory
            root.clear()
    # Save out the last journal we read in, since it won't be dumped by encountering a new one
    if journal:
        print("Saving out {}\n\tnum_authors = {}\n\tnum_articles = {}".format(
            journal.title, len(journal.authors), journal.num_articles))
        with open(out_dir + "/" + journal.short_name + ".json", "w") as fp:
            json.dump(journal, fp, cls=DBLPEncoder)
    # Save out the author information we've read in
    print("Saving out {} authors".format(len(authors)))
    authors_array = [None] * len(authors)
    for _, author in authors.items():
        authors_array[author.id] = author
    # Now go through and sort out everyone's affiliation, also use any cached information
    # we might have in affiliation_cache.json
    affiliation_cache = {}
    if os.path.isfile(out_dir + "/affiliation_cache.json"):
        with open(out_dir + "/affiliation_cache.json", "r") as fp:
            affiliation_cache = json.load(fp)
    for a in authors_array:
        # If we've cached this author's affiliation information just re-use it
        if a.affiliation is None and a.name in affiliation_cache:
            a.affiliation = affiliation_cache[a.name]
        # If we don't have affiliation information pick their most recent article and load it
        elif a.affiliation is None and a.name not in affiliation_cache:
            # Pick the most recent article by sorting the articles by year
            a.articles.sort(key=lambda x: x.year)
            article = a.articles[-1]
            print("Fetching affiliations from {}".format(article.doi))
            affiliations = scrape_affiliation(article.doi)
            if affiliations is None:
                print("Skipping affiliation for unhandled DOI site")
                affiliation_cache[a.name] = "Missing"
                a.affiliation = "Missing"
            else:
                # There are multiple authors per paper typically, so update everyone who
                # needs an affiliation
                for idx, auth in enumerate(article.authors):
                    author = authors_array[auth]
                    affil = "None"
                    if idx < len(affiliations):
                        affil = affiliations[idx]
                    if author.name not in affiliation_cache:
                        affiliation_cache[author.name] = affil
                # Set this author's affiliation now that we've updated the cache
                a.affiliation = affiliation_cache[a.name]
            # Sleep a bit to not overload the server we're hitting
            time.sleep(0.005)
    with open(out_dir + "/authors.json", "w") as fp:
        json.dump(authors_array, fp, cls=DBLPEncoder)
    # Also dump our updated affiliation cache
    with open(out_dir + "/affiliation_cache.json", "w") as fp:
        json.dump(affiliation_cache, fp)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.util import compat
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types) and
self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
  def __int__(self):
    # For an unknown dimension this returns None, which the caller's int()
    # call will then reject; known dimensions convert to their value.
    return self._value
  def __long__(self):
    # Python 2 long() support; mirrors __int__.
    return self._value
  def __index__(self):
    # Allow use in Python 3 range and anywhere else an index is required.
    return self._value
  @property
  def value(self):
    """The value of this dimension, or None if it is unknown."""
    # Read-only: no setter is defined, so the value is fixed at construction.
    return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
other = as_dimension(other)
return (self._value is None or other.value is None or
self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible" % (self,
other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
Dimension(n) .merge_with(Dimension(n)) == Dimension(n)
Dimension(n) .merge_with(Dimension(None)) == Dimension(n)
Dimension(None).merge_with(Dimension(n)) == Dimension(n)
Dimension(None).merge_with(Dimension(None)) == Dimension(None)
Dimension(n) .merge_with(Dimension(m)) raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
other = as_dimension(other)
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
Dimension(m) + Dimension(n) == Dimension(m + n)
Dimension(m) + Dimension(None) == Dimension(None)
Dimension(None) + Dimension(n) == Dimension(None)
Dimension(None) + Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
Dimension(m) - Dimension(n) == Dimension(m - n)
Dimension(m) - Dimension(None) == Dimension(None)
Dimension(None) - Dimension(n) == Dimension(None)
Dimension(None) - Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the subtraction of sum of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
Dimensions are summed as follows:
```
Dimension(m) * Dimension(n) == Dimension(m * n)
Dimension(m) * Dimension(None) == Dimension(None)
Dimension(None) * Dimension(n) == Dimension(None)
Dimension(None) * Dimension(None) == Dimension(None)
```
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
Dimension(m) // Dimension(n) == Dimension(m // n)
Dimension(m) // Dimension(None) == Dimension(None)
Dimension(None) // Dimension(n) == Dimension(None)
Dimension(None) // Dimension(None) == Dimension(None)
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
  def __div__(self, other):
    """DEPRECATED: Use `__floordiv__` via `x // y` instead.

    This function exists only for backwards compatibility purposes; new code
    should use `__floordiv__` via the syntax `x // y`. Using `x // y`
    communicates clearly that the result rounds down, and is forward compatible
    to Python 3.

    Args:
      other: Another `Dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    return self // other
def __mod__(self, other):
"""Returns `self` modulo `other.
Dimension moduli are computed as follows:
Dimension(m) % Dimension(n) == Dimension(m % n)
Dimension(m) % Dimension(None) == Dimension(None)
Dimension(None) % Dimension(n) == Dimension(None)
Dimension(None) % Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
Dimension(m) < Dimension(n) == m < n
Dimension(m) < Dimension(None) == None
Dimension(None) < Dimension(n) == None
Dimension(None) < Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) <= Dimension(n) == m <= n
Dimension(m) <= Dimension(None) == None
Dimension(None) <= Dimension(n) == None
Dimension(None) <= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
Dimension(m) > Dimension(n) == m > n
Dimension(m) > Dimension(None) == None
Dimension(None) > Dimension(n) == None
Dimension(None) > Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) >= Dimension(n) == m >= n
Dimension(m) >= Dimension(None) == None
Dimension(None) >= Dimension(n) == None
Dimension(None) >= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def as_dimension(value):
  """Converts the given value to a Dimension.
  A Dimension input is returned unchanged; `None` becomes an unknown
  Dimension; an integer becomes a Dimension of that size.
  Args:
    value: The value to be converted.
  Returns:
    A Dimension corresponding to the given value.
  """
  return value if isinstance(value, Dimension) else Dimension(value)
class TensorShape(object):
  """Represents the shape of a `Tensor`.
  A `TensorShape` represents a possibly-partial shape specification for a
  `Tensor`. It may be one of the following:
  * *Fully-known shape:* has a known number of dimensions and a known size
    for each dimension. e.g. `TensorShape([16, 256])`
  * *Partially-known shape:* has a known number of dimensions, and an unknown
    size for one or more dimension. e.g. `TensorShape([None, 256])`
  * *Unknown shape:* has an unknown number of dimensions, and an unknown
    size in all dimensions. e.g. `TensorShape(None)`
  If a tensor is produced by an operation of type `"Foo"`, its shape
  may be inferred if there is a registered shape function for
  `"Foo"`. See @{$adding_an_op#shape-functions-in-c$`Shape functions in C++`}
  for details of shape functions and how to register them. Alternatively,
  the shape may be set explicitly using @{tf.Tensor.set_shape}.
  """
  def __init__(self, dims):
    """Creates a new TensorShape with the given dimensions.
    Args:
      dims: A list of Dimensions, or None if the shape is unspecified.
        DEPRECATED: A single integer is treated as a singleton list.
    Raises:
      TypeError: If dims cannot be converted to a list of dimensions.
    """
    # Internally, an unknown-rank shape is represented by self._dims is None;
    # otherwise self._dims is a list of Dimension objects.
    # TODO(irving): Eliminate the single integer special case.
    if dims is None:
      self._dims = None
    elif isinstance(dims, compat.bytes_or_text_types):
      # Strings are iterable, so without this check a string would be
      # silently treated as a list of characters below.
      raise TypeError("A string has ambiguous TensorShape, please wrap in a "
                      "list or convert to an int: %s" % dims)
    elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
      if dims.unknown_rank:
        self._dims = None
      else:
        self._dims = [
            # Protos store variable-size dimensions as -1
            as_dimension(dim.size if dim.size != -1 else None)
            for dim in dims.dim
        ]
    elif isinstance(dims, TensorShape):
      self._dims = dims.dims
    else:
      try:
        dims_iter = iter(dims)
      except TypeError:
        # Treat as a singleton dimension
        self._dims = [as_dimension(dims)]
      else:
        # Got a list of dimensions
        self._dims = [as_dimension(d) for d in dims_iter]
  def __repr__(self):
    return "TensorShape(%r)" % self._dims
  def __str__(self):
    # Renders like a Python tuple: "<unknown>", "(2,)", or "(2, 3)".
    if self.ndims is None:
      return "<unknown>"
    elif self.ndims == 1:
      return "(%s,)" % self._dims[0]
    else:
      return "(%s)" % ", ".join(str(d) for d in self._dims)
  @property
  def dims(self):
    """Returns a list of Dimensions, or None if the shape is unspecified."""
    return self._dims
  @property
  def ndims(self):
    """Returns the rank of this shape, or None if it is unspecified."""
    if self._dims is None:
      return None
    else:
      return len(self._dims)
  def __len__(self):
    """Returns the rank of this shape, or raises ValueError if unspecified."""
    if self._dims is None:
      raise ValueError("Cannot take the length of Shape with unknown rank.")
    return len(self._dims)
  def __bool__(self):
    """Returns True if this shape contains non-zero information."""
    # i.e. True iff the rank is known; a known-rank shape with all-unknown
    # dimension sizes is still truthy.
    return self._dims is not None
  # Python 3 wants __bool__, Python 2.7 wants __nonzero__
  __nonzero__ = __bool__
  def __iter__(self):
    """Returns `self.dims` if the rank is known, otherwise raises ValueError."""
    if self._dims is None:
      raise ValueError("Cannot iterate over a shape with unknown rank.")
    else:
      return iter(self._dims)
  def __getitem__(self, key):
    """Returns the value of a dimension or a shape, depending on the key.
    Args:
      key: If `key` is an integer, returns the dimension at that index;
        otherwise if `key` is a slice, returns a TensorShape whose
        dimensions are those selected by the slice from `self`.
    Returns:
      A dimension if `key` is an integer, or a `TensorShape` if `key` is a
      slice.
    Raises:
      ValueError: If `key` is a slice, and any of its elements are negative, or
        if `self` is completely unknown and the step is set.
    """
    if self._dims is not None:
      if isinstance(key, slice):
        return TensorShape(self._dims[key])
      else:
        return self._dims[key]
    else:
      # Unknown rank: the best we can do is report unknown results of the
      # appropriate kind (a Dimension for int keys, a shape for slices).
      if isinstance(key, slice):
        start = key.start if key.start is not None else 0
        stop = key.stop
        if key.step is not None:
          # TODO(mrry): Handle these maybe.
          raise ValueError("Steps are not yet handled")
        if stop is None:
          # NOTE(mrry): This implies that TensorShape(None) is compatible with
          # TensorShape(None)[1:], which is obviously not true. It would be
          # possible to track the number of dimensions symbolically,
          # and perhaps we should do that.
          return unknown_shape()
        elif start < 0 or stop < 0:
          # TODO(mrry): Handle this better, as it will be useful for handling
          # suffixes of otherwise unknown shapes.
          return unknown_shape()
        else:
          return unknown_shape(ndims=stop - start)
      else:
        return Dimension(None)
  def num_elements(self):
    """Returns the total number of elements, or none for incomplete shapes."""
    if self.is_fully_defined():
      size = 1
      for dim in self._dims:
        size *= dim.value
      return size
    else:
      return None
  def merge_with(self, other):
    """Returns a `TensorShape` combining the information in `self` and `other`.
    The dimensions in `self` and `other` are merged elementwise,
    according to the rules defined for `Dimension.merge_with()`.
    Args:
      other: Another `TensorShape`.
    Returns:
      A `TensorShape` containing the combined information of `self` and
      `other`.
    Raises:
      ValueError: If `self` and `other` are not compatible.
    """
    other = as_shape(other)
    if self._dims is None:
      # Unknown rank contributes no information; `other` wins outright.
      return other
    else:
      try:
        self.assert_same_rank(other)
        new_dims = []
        for i, dim in enumerate(self._dims):
          new_dims.append(dim.merge_with(other[i]))
        return TensorShape(new_dims)
      except ValueError:
        raise ValueError("Shapes %s and %s are not compatible" % (self, other))
  def concatenate(self, other):
    """Returns the concatenation of the dimension in `self` and `other`.
    *N.B.* If either `self` or `other` is completely unknown,
    concatenation will discard information about the other shape. In
    future, we might support concatenation that preserves this
    information for use with slicing.
    Args:
      other: Another `TensorShape`.
    Returns:
      A `TensorShape` whose dimensions are the concatenation of the
      dimensions in `self` and `other`.
    """
    # TODO(mrry): Handle the case where we concatenate a known shape with a
    # completely unknown shape, so that we can use the partial information.
    other = as_shape(other)
    if self._dims is None or other.dims is None:
      return unknown_shape()
    else:
      return TensorShape(self._dims + other.dims)
  def assert_same_rank(self, other):
    """Raises an exception if `self` and `other` do not have compatible ranks.
    Args:
      other: Another `TensorShape`.
    Raises:
      ValueError: If `self` and `other` do not represent shapes with the
        same rank.
    """
    other = as_shape(other)
    # An unknown rank on either side is compatible with any rank.
    if self.ndims is not None and other.ndims is not None:
      if self.ndims != other.ndims:
        raise ValueError("Shapes %s and %s must have the same rank" % (self,
                                                                       other))
  def assert_has_rank(self, rank):
    """Raises an exception if `self` is not compatible with the given `rank`.
    Args:
      rank: An integer.
    Raises:
      ValueError: If `self` does not represent a shape with the given `rank`.
    """
    if self.ndims not in (None, rank):
      raise ValueError("Shape %s must have rank %d" % (self, rank))
  def with_rank(self, rank):
    """Returns a shape based on `self` with the given rank.
    This method promotes a completely unknown shape to one with a
    known rank.
    Args:
      rank: An integer.
    Returns:
      A shape that is at least as specific as `self` with the given rank.
    Raises:
      ValueError: If `self` does not represent a shape with the given `rank`.
    """
    try:
      return self.merge_with(unknown_shape(ndims=rank))
    except ValueError:
      raise ValueError("Shape %s must have rank %d" % (self, rank))
  def with_rank_at_least(self, rank):
    """Returns a shape based on `self` with at least the given rank.
    Args:
      rank: An integer.
    Returns:
      A shape that is at least as specific as `self` with at least the given
      rank.
    Raises:
      ValueError: If `self` does not represent a shape with at least the given
        `rank`.
    """
    if self.ndims is not None and self.ndims < rank:
      raise ValueError("Shape %s must have rank at least %d" % (self, rank))
    else:
      return self
  def with_rank_at_most(self, rank):
    """Returns a shape based on `self` with at most the given rank.
    Args:
      rank: An integer.
    Returns:
      A shape that is at least as specific as `self` with at most the given
      rank.
    Raises:
      ValueError: If `self` does not represent a shape with at most the given
        `rank`.
    """
    if self.ndims is not None and self.ndims > rank:
      raise ValueError("Shape %s must have rank at most %d" % (self, rank))
    else:
      return self
  def is_compatible_with(self, other):
    """Returns True iff `self` is compatible with `other`.
    Two possibly-partially-defined shapes are compatible if there
    exists a fully-defined shape that both shapes can represent. Thus,
    compatibility allows the shape inference code to reason about
    partially-defined shapes. For example:
    * TensorShape(None) is compatible with all shapes.
    * TensorShape([None, None]) is compatible with all two-dimensional
      shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
      not compatible with, for example, TensorShape([None]) or
      TensorShape([None, None, None]).
    * TensorShape([32, None]) is compatible with all two-dimensional shapes
      with size 32 in the 0th dimension, and also TensorShape([None, None])
      and TensorShape(None). It is not compatible with, for example,
      TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
    * TensorShape([32, 784]) is compatible with itself, and also
      TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
      None]) and TensorShape(None). It is not compatible with, for example,
      TensorShape([32, 1, 784]) or TensorShape([None]).
    The compatibility relation is reflexive and symmetric, but not
    transitive. For example, TensorShape([32, 784]) is compatible with
    TensorShape(None), and TensorShape(None) is compatible with
    TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
    TensorShape([4, 4]).
    Args:
      other: Another TensorShape.
    Returns:
      True iff `self` is compatible with `other`.
    """
    other = as_shape(other)
    if self._dims is not None and other.dims is not None:
      if self.ndims != other.ndims:
        return False
      for x_dim, y_dim in zip(self._dims, other.dims):
        if not x_dim.is_compatible_with(y_dim):
          return False
    return True
  def assert_is_compatible_with(self, other):
    """Raises exception if `self` and `other` do not represent the same shape.
    This method can be used to assert that there exists a shape that both
    `self` and `other` represent.
    Args:
      other: Another TensorShape.
    Raises:
      ValueError: If `self` and `other` do not represent the same shape.
    """
    if not self.is_compatible_with(other):
      raise ValueError("Shapes %s and %s are incompatible" % (self, other))
  def is_fully_defined(self):
    """Returns True iff `self` is fully defined in every dimension."""
    return (self._dims is not None and all(dim.value is not None
                                           for dim in self._dims))
  def assert_is_fully_defined(self):
    """Raises an exception if `self` is not fully defined in every dimension.
    Raises:
      ValueError: If `self` does not have a known value for every dimension.
    """
    if not self.is_fully_defined():
      raise ValueError("Shape %s is not fully defined" % self)
  def as_list(self):
    """Returns a list of integers or `None` for each dimension.
    Returns:
      A list of integers or `None` for each dimension.
    Raises:
      ValueError: If `self` is an unknown shape with an unknown rank.
    """
    if self._dims is None:
      raise ValueError("as_list() is not defined on an unknown TensorShape.")
    return [dim.value for dim in self._dims]
  def as_proto(self):
    """Returns this shape as a `TensorShapeProto`."""
    if self._dims is None:
      return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
    else:
      return tensor_shape_pb2.TensorShapeProto(dim=[
          # Unknown dimension sizes are serialized as -1, mirroring __init__.
          tensor_shape_pb2.TensorShapeProto.Dim(size=-1
                                                if d.value is None else d.value)
          for d in self._dims
      ])
  # NOTE(review): __eq__ is value-based but __hash__ is not overridden, so
  # hashing stays identity-based; confirm TensorShapes are not used as dict
  # keys or set members anywhere that relies on value equality.
  def __eq__(self, other):
    """Returns True if `self` is equivalent to `other`."""
    try:
      other = as_shape(other)
    except TypeError:
      return NotImplemented
    return self._dims == other.dims
  def __ne__(self, other):
    """Returns True if `self` is known to be different from `other`."""
    try:
      other = as_shape(other)
    except TypeError:
      return NotImplemented
    if self.ndims is None or other.ndims is None:
      raise ValueError("The inequality of unknown TensorShapes is undefined.")
    if self.ndims != other.ndims:
      return True
    return self._dims != other.dims
def as_shape(shape):
  """Converts the given object to a TensorShape.
  A TensorShape input is returned unchanged; anything else is passed to the
  TensorShape constructor.
  """
  return shape if isinstance(shape, TensorShape) else TensorShape(shape)
def unknown_shape(ndims=None):
  """Returns an unknown TensorShape, optionally with a known rank.
  Args:
    ndims: (Optional) If specified, the number of dimensions in the shape.
  Returns:
    An unknown TensorShape: fully unknown when `ndims` is None, otherwise of
    rank `ndims` with every dimension size unknown.
  """
  if ndims is not None:
    return TensorShape([Dimension(None)] * ndims)
  return TensorShape(None)
def scalar():
  """Returns a shape representing a scalar (rank-0 tensor)."""
  return TensorShape(dims=[])
def vector(length):
  """Returns a shape representing a vector.
  Args:
    length: The length of the vector, which may be None if unknown.
  Returns:
    A rank-1 TensorShape with the given length as its only dimension.
  """
  return TensorShape(dims=[length])
def matrix(rows, cols):
  """Returns a shape representing a matrix.
  Args:
    rows: The number of rows in the matrix, which may be None if unknown.
    cols: The number of columns in the matrix, which may be None if unknown.
  Returns:
    A rank-2 TensorShape of the given size.
  """
  return TensorShape(dims=[rows, cols])
| |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Widgets for advanced display of files."""
import json
from django import http
from grr.gui import renderers
from grr.lib import utils
class HexView(renderers.TemplateRenderer):
  """Display a HexView of a file.
  Internal State:
    - aff4_path: The name of the aff4 object we are viewing now.
    - age: The version of the AFF4 object to display.
  """
  # Number of bytes rendered per hex row.
  table_width = 32
  total_size = 0
  # The state of this widget.
  # NOTE(review): class-level mutable dict, shared across instances; Layout()
  # overwrites both keys on every request, but confirm no other writers exist.
  state = {}
  # This is the template used by the js to build the hex viewer html.
  table_jquery_template = """
<script id="HexTableTemplate" type="text/x-jquery-tmpl">
<table class="monospace">
 <tbody>
  <tr id="hex_header" class="ui-state-default">
   <th id="offset">offset</th>
   <th id="data_column"></th>
  </tr>
  <tr>
   <td id="offset_area">
     <table>
     </table>
   </td>
   <td id="hex_area">
     <table>
     </table>
   </td>
   <td id="data_area" class="data_area">
     <table>
     </table>
   </td>
   <td class='slider_area'><div id=slider></div></td>
  </tr>
 </tbody>
</table>
</script>
"""
  layout_template = renderers.Template("""
<div id="{{unique|escape}}" style="position: absolute; top: 45px;
  right: 0; bottom: 0; left: 0"></div> """ + table_jquery_template + """
<script>
  $("#{{unique|escapejs}}").resize(function() {
    grr.hexview.HexViewer("{{renderer|escapejs}}", "{{unique|escapejs}}",
      {{this.table_width|escapejs}}, {{this.state_json|safe}});
  });
  $("#{{unique|escapejs}}").resize();
</script>
""")
  def Layout(self, request, response):
    """Render the content of the tab or the container tabset."""
    # Stash the request parameters so the subsequent AJAX calls can find the
    # same AFF4 object/version.
    self.state["aff4_path"] = request.REQ.get("aff4_path")
    self.state["age"] = request.REQ.get("age")
    encoder = json.JSONEncoder()
    self.state_json = encoder.encode(self.state)
    return super(HexView, self).Layout(request, response)
  def RenderAjax(self, request, response):
    """Return the contents of the hex viewer in JSON."""
    try:
      row_count = int(request.REQ.get("hex_row_count", 10))
    except ValueError:
      row_count = 2
    try:
      offset = int(request.REQ.get("offset", 0))
    except ValueError:
      offset = 0
    encoder = json.JSONEncoder()
    # ReadBuffer returns a byte string/char sequence; convert to ints for JSON.
    data = [ord(x) for x in self.ReadBuffer(
        request, offset, row_count * self.table_width)]
    response = dict(offset=offset, values=data)
    response["total_size"] = self.total_size
    return http.HttpResponse(encoder.encode(response),
                             content_type="text/json")
  def ReadBuffer(self, request, offset, length):
    """Should be overriden by derived classes to satisfy read requests.
    Args:
      request: The original request object.
      offset: The offset inside the file we should read from.
      length: The number of bytes to return.
    Returns:
      A sequence of single-character strings (bytes), as consumed by
      RenderAjax via ord().
    """
    # BUG FIX: RenderAjax applies ord() to each element, so this stub must
    # yield characters; the previous version returned raw ints, which made
    # ord() raise TypeError.
    return [chr(x % 255) for x in xrange(offset, offset + length)]
class TextView(renderers.TemplateRenderer):
  """Display a TextView of a file."""
  # The state of this widget.
  # NOTE(review): class-level mutable dict, shared across instances; Layout()
  # overwrites both keys on every request, but confirm no other writers exist.
  state = {}
  total_size = 0
  default_codec = "utf_8"
  # Whitelist of codecs the user may select; _Decode refuses anything else.
  allowed_codecs = ["base64_codec", "big5", "big5hkscs", "cp037", "cp1006",
                    "cp1026", "cp1140", "cp1250", "cp1251", "cp1252",
                    "cp1253", "cp1254", "cp1255", "cp1256", "cp1257",
                    "cp1258", "cp424", "cp437", "cp500", "cp737",
                    "cp775", "cp850", "cp852", "cp855", "cp856", "cp857",
                    "cp860", "cp861", "cp862", "cp863", "cp864", "cp865",
                    "cp866", "cp869", "cp874", "cp875", "cp932", "cp949",
                    # BUG FIX: a missing comma used to fuse "cp950" and "idna"
                    # into the bogus entry "cp950idna", so neither codec could
                    # ever be selected.
                    "cp950", "idna", "rot_13", "utf_16", "utf_16_be",
                    "utf_16_le", "utf_32", "utf_32_be", "utf_32_le",
                    "utf_7", "utf_8", "utf_8_sig", "uu_codec", "zlib_codec"]
  layout_template = renderers.Template("""
<div id="{{unique|escape}}">
<div id="text_viewer">
  offset <input id="text_viewer_offset" name="offset" type=text value=0 size=6>
  size <input id="text_viewer_data_size" name="text_data_size"
    type=text value=0 size=6>
  encoding <select id="text_encoding" name="text_encoding">
    {% for encoder in this.allowed_codecs %}
      <option value={{encoder|escape}}>{{encoder|escape}}</option>
    {% endfor %}
  </select>
  <div id="text_viewer_slider"></div>
  <div id="text_viewer_data" total_size=0>
    <div id="text_viewer_data_content" total_size=0></div>
  </div>
<script>
  grr.textview.TextViewer("{{renderer|escapejs}}", "{{unique|escapejs}}",
                          "{{this.default_codec|escapejs}}",
                          {{this.state_json|safe}});
</script>
</div>
</div>
""")
  action_template = renderers.Template("""
<div id="text_viewer_data_content" total_size="{{this.total_size|escape}}">
  {% if this.error %}
    <div class="errormsg">{{this.error|escape}}</div>
  {% else %}
    <pre class="monospace">
      {{this.data|escape}}
    </pre>
  {% endif %}
</div>
""")
  def Layout(self, request, response):
    """Render the content of the tab or the container tabset."""
    # Stash the request parameters so the subsequent AJAX calls can find the
    # same AFF4 object/version.
    self.state["aff4_path"] = request.REQ.get("aff4_path")
    self.state["age"] = request.REQ.get("age")
    encoder = json.JSONEncoder()
    self.state_json = encoder.encode(self.state)
    return super(TextView, self).Layout(request, response)
  def RenderAjax(self, request, response):
    """Return the contents of the text viewer."""
    try:
      self.data_size = int(request.REQ.get("data_size", 10000))
      self.offset = int(request.REQ.get("offset", 0))
    except ValueError:
      self.error = "Invalid data_size or offset given."
      return renderers.TemplateRenderer.Layout(self, request, response,
                                               self.action_template)
    text_encoding = request.REQ.get("text_encoding", self.default_codec)
    try:
      buf = self.ReadBuffer(request, self.offset, self.data_size)
      self.data = self._Decode(text_encoding, buf)
    except RuntimeError as e:
      self.error = "Failed to decode: %s" % utils.SmartStr(e)
    # Success and decode-failure paths both render action_template; the
    # template branches on self.error.
    return renderers.TemplateRenderer.Layout(self, request, response,
                                             self.action_template)
  def _Decode(self, codec_name, data):
    """Decode data with the given codec name.
    Raises:
      RuntimeError: If the codec is not whitelisted, cannot be found, or
        fails to decode the buffer.
    """
    if codec_name not in self.allowed_codecs:
      raise RuntimeError("Invalid encoding requested.")
    try:
      return data.decode(codec_name, "replace")
    except LookupError:
      raise RuntimeError("Codec could not be found.")
    except AssertionError:
      raise RuntimeError("Codec failed to decode")
  def ReadBuffer(self, request, offset, length):
    """Should be overriden by derived classes to satisfy read requests.
    Args:
      request: The original request object.
      offset: The offset inside the file we should read from.
      length: The number of bytes to return.
    Returns:
      A byte string of `length` bytes, as consumed by _Decode().
    """
    # BUG FIX: "".join() requires string elements; the previous version
    # joined raw ints and raised TypeError on every call.
    return "".join(chr(x % 255) for x in xrange(offset, offset + length))
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from pants.base.build_environment import (get_buildroot, get_default_pants_config_file,
get_pants_cachedir, get_pants_configdir, pants_version)
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.optionable import Optionable
from pants.option.scope import ScopeInfo
class GlobalOptionsRegistrar(Optionable):
  """Registers options in the global scope.
  Split into bootstrap options (registered first, usable while registering
  everything else) and regular global options.
  """
  options_scope = GLOBAL_SCOPE
  options_scope_category = ScopeInfo.GLOBAL
  @classmethod
  def register_bootstrap_options(cls, register):
    """Register bootstrap options.
    "Bootstrap options" are a small set of options whose values are useful when registering other
    options. Therefore we must bootstrap them early, before other options are registered, let
    alone parsed.
    Bootstrap option values can be interpolated into the config file, and can be referenced
    programatically in registration code, e.g., as register.bootstrap.pants_workdir.
    Note that regular code can also access these options as normal global-scope options. Their
    status as "bootstrap options" is only pertinent during option registration.
    """
    buildroot = get_buildroot()
    # Although logging supports the WARN level, its not documented and could conceivably be yanked.
    # Since pants has supported 'warn' since inception, leave the 'warn' choice as-is but explicitly
    # setup a 'WARN' logging level name that maps to 'WARNING'.
    logging.addLevelName(logging.WARNING, 'WARN')
    register('-l', '--level', choices=['debug', 'info', 'warn'], default='info', recursive=True,
             help='Set the logging level.')
    register('-q', '--quiet', type=bool, recursive=True,
             help='Squelches most console output.')
    # Not really needed in bootstrap options, but putting it here means it displays right
    # after -l and -q in help output, which is conveniently contextual.
    register('--colors', type=bool, default=True, recursive=True,
             help='Set whether log messages are displayed in color.')
    # Pants code uses this only to verify that we are of the requested version. However
    # setup scripts, runner scripts, IDE plugins, etc., may grep this out of pants.ini
    # and use it to select the right version.
    # Note that to print the version of the pants instance you're running, use -v, -V or --version.
    register('--pants-version', advanced=True, default=pants_version(),
             help='Use this pants version.')
    register('--plugins', advanced=True, type=list, help='Load these plugins.')
    register('--plugin-cache-dir', advanced=True,
             default=os.path.join(get_pants_cachedir(), 'plugins'),
             help='Cache resolved plugin requirements here.')
    register('--backend-packages', advanced=True, type=list,
             default=['pants.backend.graph_info',
                      'pants.backend.python',
                      'pants.backend.jvm',
                      'pants.backend.codegen.antlr.java',
                      'pants.backend.codegen.antlr.python',
                      'pants.backend.codegen.jaxb',
                      'pants.backend.codegen.protobuf.java',
                      'pants.backend.codegen.ragel.java',
                      'pants.backend.codegen.thrift.java',
                      'pants.backend.codegen.thrift.python',
                      'pants.backend.codegen.wire.java',
                      'pants.backend.project_info'],
             help='Load backends from these packages that are already on the path. '
                  'Add contrib and custom backends to this list.')
    # Directory layout options: cache, config, workdir, supportdir, distdir.
    register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(),
             help='Use this dir for global cache.')
    register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(),
             help='Use this dir for global config files.')
    register('--pants-workdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, '.pants.d'),
             help='Write intermediate output files to this dir.')
    register('--pants-supportdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, 'build-support'),
             help='Use support files from this dir.')
    register('--pants-distdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, 'dist'),
             help='Write end-product artifacts to this dir.')
    register('--pants-subprocessdir', advanced=True, default=os.path.join(buildroot, '.pids'),
             help='The directory to use for tracking subprocess metadata, if any. This should '
                  'live outside of the dir used by `--pants-workdir` to allow for tracking '
                  'subprocesses that outlive the workdir data (e.g. `./pants server`).')
    register('--pants-config-files', advanced=True, type=list,
             default=[get_default_pants_config_file()], help='Paths to Pants config files.')
    # TODO: Deprecate --config-override in favor of --pants-config-files.
    # But only once we're able to both append and override list-valued options, as there are
    # use-cases for both here.
    # TODO: Deprecate the --pantsrc/--pantsrc-files options? This would require being able
    # to set extra config file locations in an initial bootstrap config file.
    register('--config-override', advanced=True, type=list, metavar='<path>',
             help='A second config file, to override pants.ini.')
    register('--pantsrc', advanced=True, type=bool, default=True,
             help='Use pantsrc files.')
    register('--pantsrc-files', advanced=True, type=list, metavar='<path>',
             default=['/etc/pantsrc', '~/.pants.rc'],
             help='Override config with values from these files. '
                  'Later files override earlier ones.')
    register('--pythonpath', advanced=True, type=list,
             help='Add these directories to PYTHONPATH to search for plugins.')
    register('--target-spec-file', type=list, dest='target_spec_files',
             help='Read additional specs from this file, one per line')
    register('--verify-config', type=bool, default=True,
             help='Verify that all config file values correspond to known options.')
    # These logging options are registered in the bootstrap phase so that plugins can log during
    # registration and not so that their values can be interpolated in configs.
    register('-d', '--logdir', advanced=True, metavar='<dir>',
             help='Write logs to files under this directory.')
    # This facilitates bootstrap-time configuration of pantsd usage such that we can
    # determine whether or not to use the Pailgun client to invoke a given pants run
    # without resorting to heavier options parsing.
    register('--enable-pantsd', advanced=True, type=bool, default=False,
             help='Enables use of the pants daemon (and implicitly, the v2 engine). (Beta)')
    # This facilitates use of the v2 engine for BuildGraph construction, sans daemon.
    register('--enable-v2-engine', advanced=True, type=bool, default=False,
             help='Enables use of the v2 engine. (Beta)')
  @classmethod
  def register_options(cls, register):
    """Register options not tied to any particular task or subsystem."""
    # The bootstrap options need to be registered on the post-bootstrap Options instance, so it
    # won't choke on them on the command line, and also so we can access their values as regular
    # global-scope options, for convenience.
    cls.register_bootstrap_options(register)
    register('-x', '--time', type=bool,
             help='Output a timing report at the end of the run.')
    register('-e', '--explain', type=bool,
             help='Explain the execution of goals.')
    register('--tag', type=list, metavar='[+-]tag1,tag2,...',
             help="Include only targets with these tags (optional '+' prefix) or without these "
                  "tags ('-' prefix). Useful with ::, to find subsets of targets "
                  "(e.g., integration tests.)")
    register('-t', '--timeout', advanced=True, type=int, metavar='<seconds>',
             help='Number of seconds to wait for http connections.')
    # TODO: After moving to the new options system these abstraction leaks can go away.
    register('-k', '--kill-nailguns', advanced=True, type=bool,
             help='Kill nailguns before exiting')
    register('-i', '--interpreter', advanced=True, default=[], type=list,
             metavar='<requirement>',
             removal_version='1.5.0.dev0',
             removal_hint='Use --interpreter-constraints in scope python-setup instead.',
             help="Constrain what Python interpreters to use. Uses Requirement format from "
                  "pkg_resources, e.g. 'CPython>=2.6,<3' or 'PyPy'. By default, no constraints "
                  "are used. Multiple constraints may be added. They will be ORed together.")
    register('--exclude-target-regexp', advanced=True, type=list, default=[],
             metavar='<regexp>',
             help='Exclude targets that match these regexes.',
             recursive=True)  # TODO: Does this need to be recursive? What does that even mean?
    # Relative pants_distdir to buildroot. Requires --pants-distdir to be bootstrapped above first.
    # e.g. '/dist/'
    rel_distdir = '/{}/'.format(os.path.relpath(register.bootstrap.pants_distdir, get_buildroot()))
    register('--ignore-patterns', advanced=True, type=list, fromfile=True,
             default=['.*', rel_distdir, 'bower_components', 'node_modules', '*.egg-info'],
             removal_version='1.3.0.dev0', removal_hint='Use --build-ignore instead.',
             mutually_exclusive_group='build_ignore', help='See help for --build-ignore.')
    register('--build-ignore', advanced=True, type=list, fromfile=True,
             default=['.*', rel_distdir, 'bower_components', 'node_modules', '*.egg-info'],
             help='Paths to ignore when identifying BUILD files. '
                  'This does not affect any other filesystem operations. '
                  'Patterns use the gitignore pattern syntax (https://git-scm.com/docs/gitignore).')
    register('--pants-ignore', advanced=True, type=list, fromfile=True, default=['.*', rel_distdir],
             help='Paths to ignore for all filesystem operations performed by pants '
                  '(e.g. BUILD file scanning, glob matching, etc). '
                  'Patterns use the gitignore syntax (https://git-scm.com/docs/gitignore). '
                  'This currently only affects the v2 engine. '
                  'To experiment with v2 engine, try --enable-v2-engine option.')
    register('--fail-fast', advanced=True, type=bool, recursive=True,
             help='Exit as quickly as possible on error, rather than attempting to continue '
                  'to process the non-erroneous subset of the input.')
    register('--cache-key-gen-version', advanced=True, default='200', recursive=True,
             help='The cache key generation. Bump this to invalidate every artifact for a scope.')
    register('--workdir-max-build-entries', advanced=True, type=int, default=None,
             help='Maximum number of previous builds to keep per task target pair in workdir. '
                  'If set, minimum 2 will always be kept to support incremental compilation.')
    register('--max-subprocess-args', advanced=True, type=int, default=100, recursive=True,
             help='Used to limit the number of arguments passed to some subprocesses by breaking '
                  'the command up into multiple invocations.')
    register('--print-exception-stacktrace', advanced=True, type=bool,
             help='Print to console the full exception stack trace if encountered.')
    register('--build-file-rev', advanced=True,
             removal_hint='Lightly used feature, scheduled for removal.', removal_version='1.5.0.dev0',
             help='Read BUILD files from this scm rev instead of from the working tree. This is '
                  'useful for implementing pants-aware sparse checkouts.')
    register('--lock', advanced=True, type=bool, default=True,
             help='Use a global lock to exclude other versions of pants from running during '
                  'critical operations.')
    register('--subproject-roots', type=list, advanced=True, fromfile=True, default=[],
             help='Paths that correspond with build roots for any subproject that this '
                  'project depends on.')
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def record_gradient_callback(inputs, attrs, results):
  """Callback handed to TFE_Py_FastPathExecute; records a MatMul gradient.

  Forwards to backprop's private gradient-recording hook so the fast-path
  execution stays differentiable.
  """
  op_name = "MatMul"
  return backprop._record_gradient(op_name, inputs, attrs, results, None)
def c_tfe_py_fastpath_execute(a, b, transpose_a=False, transpose_b=False):
  """Runs a single MatMul through the TFE_Py_FastPathExecute C API.

  Only works in eager mode; the fast-path prototype has no graph-building
  code path.
  """
  eager_ctx = context.context()
  assert not eager_ctx.in_graph_mode(
  ), "The prototype doesn't contain C code for graph construction"
  outputs = pywrap_tensorflow.TFE_Py_FastPathExecute(
      eager_ctx._handle,  # pylint: disable=protected-access
      None, "MatMul", record_gradient_callback, a, b,
      "transpose_a", transpose_a, "transpose_b", transpose_b)
  return outputs[0]
class MicroBenchmarks(test.Benchmark):
  """Micro-benchmarks for eager-execution primitives.

  Each `benchmark_*` method times one small operation (tensor creation,
  elementwise multiply, or matmul) through progressively lower-level call
  paths (numpy, tf ops, generated ops, fast-path C API, raw TFE_Py_Execute,
  defun) so per-call framework overhead can be compared across layers.
  """

  def __init__(self):
    # used for multiply benchmarks
    self._m_2 = random_ops.random_uniform([2])
    # used for matmul benchmarks
    self._m_2_by_2 = random_ops.random_uniform((2, 2))
    self._m_100_by_784 = random_ops.random_uniform((100, 784))
    self._num_iters_2_by_2 = 30000
    self._num_iters_100_by_784 = 1000

  def _run(self, func, num_iters):
    """Times `num_iters` calls of `func()`; reports mean wall time in us."""
    # call func to maybe warm up the GPU
    func()
    start = time.time()
    for _ in xrange(num_iters):
      func()
    end = time.time()
    mean_us = (end - start) * 1e6 / num_iters
    self.report_benchmark(iters=num_iters, wall_time=mean_us,
                          extras={"examples_per_sec": num_iters/(end-start)})

  def benchmark_create_np_array(self):
    func = lambda: np.array([3.0])
    self._run(func, 30000)

  def _benchmark_create_tensor(self, value, dtype, device):
    """Benchmark overheads of creating a Tensor object."""
    ctx = context.context()
    handle = ctx._handle  # pylint: disable=protected-access
    if device == GPU:
      # Warmup the GPU
      ops.EagerTensor(value, context=handle, device=device)

    def func():
      ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
    self._run(func, 30000)

  def benchmark_create_float_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)

  def benchmark_create_float_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        CPU)

  def benchmark_create_int32_tensor_from_list_CPU(self):
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)

  def benchmark_create_int32_tensor_from_np_array_CPU(self):
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)

  def benchmark_create_float_tensor_from_list_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)

  def benchmark_create_float_tensor_from_np_array_GPU(self):
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
        GPU)

  def benchmark_create_int32_tensor_from_list_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)

  def benchmark_create_int32_tensor_from_np_array_GPU(self):
    # int32's are kept on host memory even when executing on GPU.
    if not context.num_gpus():
      return
    self._benchmark_create_tensor(
        np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)

  def _benchmark_np_multiply(self, m, num_iters):
    a = m.cpu().numpy()
    func = lambda: a * a
    self._run(func, num_iters)

  def _benchmark_tf_multiply(self, m, num_iters):
    func = lambda: m * m
    self._run(func, num_iters)

  def _benchmark_tf_multiply_op(self, m, num_iters):
    func = lambda: math_ops.multiply(m, m)
    self._run(func, num_iters)

  def benchmark_np_multiply(self):
    self._benchmark_np_multiply(self._m_2, 30000)

  def benchmark_tf_multiply_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply(m, 30000)

  def benchmark_tf_multiply_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply(m, 30000)

  def benchmark_tf_multiply_op_CPU(self):
    with context.device(CPU):
      m = self._m_2.cpu()
      self._benchmark_tf_multiply_op(m, 30000)

  def benchmark_tf_multiply_op_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2.gpu()
      self._benchmark_tf_multiply_op(m, 30000)

  def benchmark_tf_identity(self):
    m = self._m_2
    self._run(lambda: gen_array_ops.identity(m), 30000)

  def benchmark_tfe_py_execute_identity(self):
    m = self._m_2
    ctx_handle = context.context()._handle  # pylint: disable=protected-access
    attrs = ("T", self._m_2.dtype.as_datatype_enum)
    inputs = [m]

    def f():
      pywrap_tensorflow.TFE_Py_Execute(
          ctx_handle, None, "Identity", inputs, attrs, 1)

    self._run(f, 30000)

  def benchmark_tf_gradient_function_identity(self):
    m = self._m_2
    self._run(
        lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
        30000)

  def benchmark_tf_gradient_forward_identity(self):
    with backprop.GradientTape() as tape:
      m = self._m_2
      tape.watch(m)
      self._run(lambda: gen_array_ops.identity(m), 30000)

  def benchmark_tf_gradient_tape_push_pop(self):

    def f():
      with backprop.GradientTape():
        pass

    self._run(f, 30000)

  def benchmark_tf_gradient_function_no_op(self):
    m = self._m_2
    self._run(
        lambda: backprop.gradients_function(lambda x: x, [0])(m),
        30000)

  def _benchmark_np_matmul(self, m, transpose_b, num_iters):
    a = m.cpu().numpy()
    b = a.T if transpose_b else a
    func = lambda: np.dot(a, b)
    self._run(func, num_iters)

  def _benchmark_tf_matmul(self, m, transpose_b, num_iters):
    func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
    self._run(func, num_iters)

  def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):

    def func():
      gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)

    self._run(func, num_iters)

  def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
                                                num_iters):

    def func():
      c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)

    self._run(func, num_iters)

  def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
    inputs = [m, m]
    # pylint: disable=protected-access
    ctx_handle = context.context()._handle
    # pylint: enable=protected-access
    attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
             m.dtype.as_datatype_enum)

    def func():
      pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul", inputs,
                                       attrs, 1)

    self._run(func, num_iters)

  def _benchmark_defun_matmul(self, m, transpose_b, num_iters):
    f = function.defun(math_ops.matmul)
    # BUGFIX: pass transpose_b by keyword.  Passed positionally it would
    # bind to math_ops.matmul's third parameter, transpose_a, so the defun
    # benchmarks would time a different op than every other matmul variant.
    func = lambda: f(m, m, transpose_b=transpose_b)
    self._run(func, num_iters)

  # Benchmarks for A^2, A of dimension 2 by 2.
  def benchmark_np_matmul_2_by_2(self):
    self._benchmark_np_matmul(
        self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tf_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_2_by_2_CPU(self):
    with context.device(CPU):
      m = self._m_2_by_2.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tf_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  def benchmark_defun_matmul_2_by_2_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_2_by_2.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=False, num_iters=self._num_iters_2_by_2)

  # Benchmarks for AA.T, A of dimension 100 by 784.
  def benchmark_np_matmul_100_by_784(self):
    self._benchmark_np_matmul(
        self._m_100_by_784,
        transpose_b=True,
        num_iters=self._num_iters_100_by_784)

  def benchmark_tf_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_fastpath_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_defun_matmul_100_by_784_CPU(self):
    with context.device(CPU):
      m = self._m_100_by_784.cpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tf_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tf_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_gen_math_ops_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_tfe_py_execute_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)

  def benchmark_defun_matmul_100_by_784_GPU(self):
    if not context.num_gpus():
      return
    with context.device(GPU):
      m = self._m_100_by_784.gpu()
      self._benchmark_defun_matmul(
          m, transpose_b=True, num_iters=self._num_iters_100_by_784)
if __name__ == "__main__":
  # Runs all benchmark_* methods via TensorFlow's test/benchmark harness.
  test.main()
| |
# 1. Something that actually "writes" an integer to memory and can "read" an integer from a memory address
# 2. Value - something allowing us to get several integers from memory and interpret them as a thing, or
# write a thing out as several integers into memory
# 3. A specific type of value: "pointer". Interpretation: location in underlying address space of
# whatever type this is pointing to.
# 2+3. A function to get a pointer value from any value.
# 4. "Reference". System for managing "references", which at least must give us a value for a reference
# when we ask for it. Name. Scope.
# 5. Scope. ?? Functions. Calling things.
# Reference: "name" "scope" "value"
# asdf = [1, 2, 3]
# My memory manager makes some space for a list with 1, 2, 3 in it. Value X
# Now create a reference with name "asdf", the current scope, and value X.
# qwerty = asdf
# Create a reference with name "qwerty", the current scope, and value...? X.
# qwerty[1] = 'hi' ====> qwerty is a reference with value X. Change X to have its second element be 'hi'.
# asdf[1] = ? it returns [1, 'hi', 3]
# asdf = ['nope'] ====> asdf's value is now Y
#
# def F(r, s):
# r = s + 1
# return r + s
#
# a = [1,1,1,1,1,1,1,1,1]
# pa = a.value.getPointer()
# b = 4
# a = F(a, b)
#
# create val(addressX, [thousand element list])
# create ref("a", 0, X)
# create val(addressY, 4)
# create ref("b", 0, Y)
# Call F! *enter scope* [optional: make values X' and Y', copies of X and Y]
# create ref("r", 1, X)
# --- create ref("r", 1, lookup("a",0))
# --- r = 1
# create ref("s", 1, Y)
# create val(addressZ, the value in s, which is at Y and is 4, plus 1, or 5)
# update ref("r", 1, Z)
# create val(addressA, the value in r (5) plus the value in s (4), or 9)
# return
# *leave scope*
# update ref("a", 0, A)
class Memory(object):
    """Abstract interface for a flat, integer-addressed memory.

    Concrete subclasses decide how cells are stored and how free space
    is found (see PythonFixedSizeListOfIntegersMemory later in this file).
    """
    def getAddressOfUnusedMemoryOfSizeN(self, N):
        """Return the start address of N contiguous unused cells."""
        pass
    def write(self, address, listOfIntegers):
        """Store the given integers starting at `address`."""
        pass
    def read(self, address, N):
        """Return the N integers stored starting at `address`."""
        pass
    def display(self):
        # Debug helper.  NOTE(review): assumes the subclass exposes its
        # cells as self.mem -- Memory itself never sets that attribute.
        print(self.mem)
class Value(object):
    """Represents a fixed size structure written somehow to memory
    A value has a size, which is the size of the list of integers returned by getData.
    getSize returns that size.
    getData queries the underlying memory and builds a list of integers to return.
    getPointer returns a Value of size 1 whose getData returns a list of size 1 whose
    element is the address of the memory backing the Value that this pointer Value points to.
    e.g. if we have a Value X storing a list [3,4] at underlying address 22-23, X.getPointer()
    returns a Value Y storing a list [22] at underlying address ???whoknows it's not up to us.
    """
    def getSize(self):
        """Number of integers that make up this value."""
        pass
    def getData(self):
        """Read this value's integers back from the underlying memory."""
        pass
    def createPointerValue(self):
        """Return a size-1 Value whose sole integer is this value's address."""
        pass
    def free(self):
        """Release this value's cells back to the memory allocator."""
        pass
    def display(self, name):
        # Debug helper.  NOTE(review): relies on self.address, self.size and
        # self.mem, which only concrete subclasses set -- Value itself never
        # defines them.
        print(name + " has index "+str(self.address)+" size "+ str(self.size) + " in:")
        self.mem.display()
    def clone(self):
        """Return an independent copy backed by freshly allocated memory."""
        pass
class Reference(object):
    """Placeholder for a named binding (name, scope, value).

    Bindings are currently kept as plain dict entries inside
    ReferenceManager, so this class has no behavior yet.
    """
    pass
# TODO: decide whether Reference should become a real record type.
class ReferenceManager(object):
    """Tracks name -> value bindings across a stack of scopes.

    The scope stack grows on function entry and shrinks on return; the
    innermost (current) scope is always the last element of ``self.refs``.
    Lookups only consult the current scope -- there is no outer-scope
    fallback.
    """

    def __init__(self, mem):
        # Kept so callers can reach the backing memory through the manager.
        self.mem = mem
        # Stack of scopes; each scope is a dict mapping names to values.
        # Entering a scope pushes, leaving pops.
        self.refs = [{}]

    def setReferenceValue(self, name, value):
        """Bind `name` to `value` in the current (innermost) scope."""
        current_scope = self.refs[-1]
        current_scope[name] = value

    def getReferenceValue(self, name):
        """Look up `name` in the current scope only."""
        return self.refs[-1][name]

    def enterScopeByValue(self, previousScopeNamesOfParameterValues, newScopeParameterNames):
        """Push a scope binding each new name to a *clone* of the caller's value."""
        scope = {
            new_name: self.getReferenceValue(old_name).clone()
            for new_name, old_name in zip(newScopeParameterNames,
                                          previousScopeNamesOfParameterValues)
        }
        self.refs.append(scope)

    def enterScope(self, previousScopeNamesOfParameterValues, newScopeParameterNames):
        """Push a scope binding each new name to the caller's value itself."""
        scope = {
            new_name: self.getReferenceValue(old_name)
            for new_name, old_name in zip(newScopeParameterNames,
                                          previousScopeNamesOfParameterValues)
        }
        self.refs.append(scope)

    def leaveScope(self):
        """Pop the innermost scope.  TODO: free the values it still owns."""
        self.refs.pop()
# a = 1
# aPlus1 = a + 1
# referenceManager.enterScope(["b", "c"], ["a", "aPlus1"])
#
class PythonFixedSizeListOfIntegersMemory(Memory):
    """A Memory backed by a fixed-size Python list of integers.

    Keeps a parallel `used` bitmap so the allocator can find a contiguous
    run of free cells.  Out-of-range accesses raise Exception(self.EOM).
    """

    def __init__(self, size):
        self.mem = [0]*size    # the memory cells themselves
        self.used = [0]*size   # 1 where a cell is allocated, 0 where free
        self.size = size
        # Error message prefixes (typos "fragrmented"/"executive" fixed).
        self.EOM = "EOM Addressing past end of memory"
        self.DEFRAG = "DEFRAG Memory too fragmented; Not enough consecutive blocks"

    def getAddressOfUnusedMemoryOfSizeN(self, N):
        """Return the lowest address of N contiguous unused cells.

        Raises Exception(self.EOM) when no such run exists before the end
        of memory.  (The DEFRAG branch is unreachable as written: the scan
        raises EOM before the loop can exit with unusedSize < N; kept so
        the original control flow is preserved.)
        """
        self.rangeTooLarge(0, N)
        unusedSize = 0
        address = 0
        while unusedSize < N:
            if address == self.size:
                raise Exception(self.EOM)
            if self.used[address] == 0:
                unusedSize += 1
            else:
                unusedSize = 0
            address += 1
        if unusedSize == N:
            return address - N
        else:
            raise Exception(self.DEFRAG)

    def rangeTooLarge(self, address, N):
        """Raise Exception(self.EOM) if [address, address+N) exceeds memory."""
        exclusiveEndAddress = address + N
        if exclusiveEndAddress > self.size:
            raise Exception(self.EOM)

    def markMemoryUsed(self, address, N):
        """Flag N cells starting at `address` as allocated."""
        for i in range(address, address + N):
            self.used[i] = 1

    def markMemoryUnused(self, address, N):
        """Flag N cells starting at `address` as free."""
        for i in range(address, address + N):
            self.used[i] = 0

    def write(self, address, listOfIntegers):
        """Store the integers starting at `address` and mark the cells used."""
        length = len(listOfIntegers)
        self.rangeTooLarge(address, length)
        for i in range(length):
            self.mem[address + i] = listOfIntegers[i]
        self.markMemoryUsed(address, length)

    def read(self, address, N):
        """Return the N integers stored starting at `address`."""
        # Consistency fix: validate the range like write() does, so an
        # out-of-bounds read raises EOM instead of a bare IndexError.
        self.rangeTooLarge(address, N)
        storedData = [0]*N
        for i in range(N):
            storedData[i] = self.mem[address + i]
        return storedData

    def free(self, address, N):
        """Release N cells starting at `address` (contents are left in place)."""
        self.markMemoryUnused(address, N)
class ArbitrarySizeValue(Value):
    """A Value backed by a freshly-allocated contiguous range of memory cells."""

    def __init__(self, mem, data):
        # Allocate len(data) cells in `mem` and copy `data` into them.
        self.mem = mem
        self.size = len(data)
        self.address = mem.getAddressOfUnusedMemoryOfSizeN(self.size)
        mem.write(self.address, data)

    def getSize(self):
        """Number of memory cells this value occupies."""
        return self.size

    def getData(self):
        """Read the value's integers back out of the underlying memory."""
        return self.mem.read(self.address, self.size)

    def createPointerValue(self):
        """Return a size-1 pointer value holding this value's address."""
        return PointerValue(self.mem, [self.address])

    def free(self):
        """Give the occupied cells back to the allocator."""
        self.mem.free(self.address, self.size)

    def clone(self):
        """Copy the current contents into a newly allocated value."""
        return ArbitrarySizeValue(self.mem, self.getData())
# A pointer is itself a value: one cell whose content is the address of the
# value being pointed at.
class PointerValue(ArbitrarySizeValue):
    """A size-1 value whose single stored integer is another value's address."""

    def __init__(self, mem, data):
        # Nothing pointer-specific yet: allocation and the write of `data`
        # (a one-element list, [pointee address]) are handled by the base.
        super(PointerValue, self).__init__(mem, data)
# --- Demo 1: raw memory reads/writes and a first pointer round-trip. ---
m = PythonFixedSizeListOfIntegersMemory(10)
#m.write(3, [1,2,3,4])
#print(m.mem[4]) #returns 2
#r = m.read(3, 4)
#print(r) #returns [1,2,3,4]
##m.write(99,[1,2]) #raises Exception
m.getAddressOfUnusedMemoryOfSizeN(1)
# somehow we have a Value that's a ListOfSizeTwo called v
v = ArbitrarySizeValue(m, [6, 8])
vData = v.getData()
#print(v.mem.mem)
#print(v.mem.used)
# Dereference by hand: the pointer's single cell holds v's address.
vPointer = v.createPointerValue()
vPointerData = vPointer.getData()
vDataAddress = vPointerData[0]
otherVData = m.read(vDataAddress, 2)
#print(otherVData)
# vData and otherVData are equal
#print()
# --- Demo 2: chained pointers, freeing, and allocator reuse of cells. ---
m = PythonFixedSizeListOfIntegersMemory(10)
d = ArbitrarySizeValue(m, [-1])
v = ArbitrarySizeValue(m, [111, 21, 441])
#v.display("v")
vp = v.createPointerValue()
#vp.display("&v")
vpp = vp.createPointerValue()
#vpp.display("&&v")
w = ArbitrarySizeValue(m, [999])
#w.display("w")
wp = w.createPointerValue()
#wp.display("&w")
vp2 = v.createPointerValue()
#vp2.display("&v (2)")
vpp2 = vp2.createPointerValue()
#vpp2.display("&&v (2)")
# Freeing w and vp leaves holes the next allocations can reuse.
w.free()
vp.free()
#m.display()
vppp = vpp.createPointerValue()
#vppp.display("&&&v")
vppp = vpp.createPointerValue()
#print("\r[-1, 111, 21, 441, 5, 4, 5, 6, 1, 8]")
#vppp.display("vppp")
# --- Demo 3: references, aliasing, and pass-by-value scope entry. ---
# asdf = [42, 42]
# qwerty = asdf <---
# qwerty[0] = 1 <---
# print(asdf)
m = PythonFixedSizeListOfIntegersMemory(30)
manager = ReferenceManager(m)
# asdf = 42+42
manager.setReferenceValue("asdf", ArbitrarySizeValue(m, [42, 42]))
# qwerty = <the same reference as> asdf
manager.setReferenceValue("qwerty", manager.getReferenceValue("asdf"))
# qwerty highest order byte becomes 1
# 9 = 1001
# 65537 = 00000001 00000001 00000000 00000001 ----- 0x01010001 ... 0-9a-f or 16 possibilities 2^4
# boolean is 1 bit
# byte is 8 bits
# char 8 bits
# short 8 bits
# int 16 or 32
# Mutating through qwerty is visible through asdf: both names share one value.
v = manager.getReferenceValue("qwerty")
m.write(v.createPointerValue().getData()[0], [1])
# print asdf
print(manager.getReferenceValue("asdf").getData())
# pAsdf = &asdf
manager.setReferenceValue("pAsdf", manager.getReferenceValue("asdf").createPointerValue())
# call F(pAsdf) where F is def F(pB): ...
# enterScopeByValue clones pAsdf, but the clone still holds asdf's address,
# so writing through pB reaches the caller's data.
manager.enterScopeByValue(["pAsdf"], ["pB"])
# *pB = 99
m.write(manager.getReferenceValue("pB").getData()[0],[99])
# print *pB
print(m.read(manager.getReferenceValue("pB").getData()[0],2))
# return
manager.leaveScope()
# print qwerty
print(manager.getReferenceValue("qwerty").getData())
m.display()
# F(a+1,b)
# def F(lol, hi)
# enter scope needs input values and "output" references
| |
"""
FROWNS LICENSE
Copyright (c) 2001-2003, Brian Kelley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Brian Kelley nor the names of frowns
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Default element properties:
The information in an element is:
number == atomic number
symbol == the short symbol (eg, He for helium)
name == the full (American) English name
mass == the atomic mass, in amu, natural propensities
negativity == Pauling negativity value
valences == list of possible valence occupancies
This was pretty much lifted from element.py
Thanks Andrew!
"""
# XXX FIX ME
# Let's make dictionaries instead. That will
# be cleaner
class AtomType:
    """A lightweight record describing one chemical element.

    Every keyword argument becomes an attribute on the instance, so
    AtomType("He", mass=4.003, name="helium") yields an object with
    .symbol, .mass and .name set.  str() of the instance is its symbol.
    """
    def __init__(self, symbol, **args):
        for attribute, attribute_value in args.items():
            setattr(self, attribute, attribute_value)
        self.symbol = symbol

    def __str__(self):
        return str(self.symbol)
# Table of default element properties, keyed by atomic symbol.  Each entry is
# a tuple: (symbol, atomic number, full name, mass (amu), Pauling
# electronegativity, valence tuple or None, equivalence class).
defaultAtomTypes = {}
# these represent valences in various rows of the periodic table.
valence_unknown = None
valence_1 =(1,)
valence_B =(3,)
valence_C =(4,)
# Nitrogen should not be pentavalent
# NOTE(review): the comment above contradicts the data -- valence_N still
# lists 5.  Confirm which is intended before relying on it.
valence_N =(3, 5)
valence_O =(2,)
valence_P =(3, 5)
valence_S =(2, 4, 6)
num = 0
# the big list of properties
# Each row is (symbol, mass, name, electronegativity); the row's position in
# the sequence supplies the atomic number via `num` (the "*" wildcard
# occupies slot 0 so hydrogen lands on 1).
for element in (
    ("*" , 0, "unknown", 0.0),
    ("H" , 1.00794, "hydrogen", 2.20), # 1
    ("He", 4.003, "helium", 0.0),
    ("Li", 6.941, "lithium", 0.98),
    ("Be", 9.0122, "beryllium", 1.57),
    ("B" , 10.81, "boron", 2.04),
    ("C" , 12.011, "carbon", 2.55),
    ("N" , 14.007, "nitrogen", 3.04),
    ("O" , 15.999, "oxygen", 3.44),
    ("F" , 18.998, "fluorine", 3.98),
    ("Ne", 20.179, "neon", 0.0),
    ("Na", 22.990, "sodium", 0.93), # 11
    ("Mg", 24.305, "magnesium", 1.31),
    ("Al", 26.98, "aluminum", 1.61),
    ("Si", 28.086, "silicon", 1.90),
    ("P" , 30.974, "phosphorus", 2.19),
    ("S" , 32.066, "sulfer", 2.58), # NOTE(review): "sulfer" is a typo for "sulfur"; kept, since it is runtime data.
    ("Cl", 35.453, "chlorine", 3.16),
    ("Ar", 39.948, "argon", 0.0),
    ("K" , 39.098, "potassium", 0.82),
    ("Ca", 40.08, "calcium", 1.00),
    ("Sc", 44.956, "scandium", 1.36), # 21
    ("Ti", 47.88, "titanium", 1.54),
    ("V" , 50.94, "vanadium", 1.63),
    ("Cr", 51.996, "chromium", 1.66),
    ("Mn", 54.938, "manganese", 1.55),
    ("Fe", 55.847, "iron", 1.83),
    ("Co", 58.9332, "cobalt", 1.88),
    ("Ni", 58.69, "nickel", 1.91),
    ("Cu", 63.546, "copper", 1.90),
    ("Zn", 65.39, "zinc", 1.65),
    ("Ga", 69.72, "gallium", 1.81), # 31
    ("Ge", 72.59, "germanium", 2.01),
    ("As", 74.922, "arsenic", 2.18),
    ("Se", 78.96, "selenium", 2.55),
    ("Br", 79.904, "bromine", 2.96),
    ("Kr", 83.80, "krypton", 0.0),
    ("Rb", 85.468, "rubidium", 0.82),
    ("Sr", 87.62, "strontium", 0.95),
    ("Y" , 88.9059, "yttrium", 1.22),
    ("Zr", 91.224, "zirconium", 1.33),
    ("Nb", 92.91, "niobium", 1.6), # 41
    ("Mo", 95.94, "molybdenum", 2.16),
    ("Tc", 98., "technetium", 1.9),
    ("Ru", 101.07, "ruthenium", 2.2),
    ("Rh", 102.906, "rhodium", 2.28),
    ("Pd", 106.42, "palladium", 2.20),
    ("Ag", 107.868, "silver", 1.93),
    ("Cd", 112.41, "cadmium", 1.69),
    ("In", 114.82, "indium", 1.78),
    ("Sn", 118.71, "tin", 1.96),
    ("Sb", 121.76, "antimony", 2.05), # 51
    ("Te", 127.60, "tellurium", 2.1),
    ("I" , 126.9045, "iodine", 2.66),
    ("Xe", 131.29, "xenon", 0.0),
    ("Cs", 132.91, "cesium", 0.79),
    ("Ba", 137.33, "barium", 0.89),
    ("La", 138.906, "lanthanum", 1.10),
    ("Ce", 140.12, "cerium", 1.12),
    ("Pr", 140.908, "praseodymium", 1.13),
    ("Nd", 144.24, "neodymium", 1.14),
    ("Pm", 145., "promethium", 0.0), # 61
    ("Sm", 150.36, "samarium", 1.17),
    ("Eu", 151.96, "europium", 0.0),
    ("Gd", 157.25, "gadolinium", 1.20),
    ("Tb", 158.925, "terbium", 0.0),
    ("Dy", 162.50, "dysprosium", 1.22),
    ("Ho", 164.93, "holmium", 1.23),
    ("Er", 167.26, "erbium", 1.24),
    ("Tm", 168.934, "thulium", 1.25),
    ("Yb", 173.04, "ytterbium", 0.0),
    ("Lu", 174.967, "lutetium", 1.27), # 71
    ("Hf", 178.49, "hafnium", 1.3),
    ("Ta", 180.95, "tantalum", 1.5),
    ("W" , 183.84, "tungsten", 2.36),
    ("Re", 186.207, "rhenium", 1.9),
    ("Os", 190.2, "osmium", 2.2),
    ("Ir", 192.22, "iridium", 2.20),
    ("Pt", 195.08, "platinum", 2.28),
    ("Au", 196.967, "gold", 2.54),
    ("Hg", 200.59, "mercury", 2.00),
    ("Tl", 204.383, "thallium", 1.62), # 81
    ("Pb", 207.2, "lead", 1.8),
    ("Bi", 208.98, "bismuth", 2.02),
    ("Po", 209., "polonium", 2.0),
    ("At", 210., "astatine", 2.2),
    ("Rn", 222., "radon", 0.0),
    ("Fr", 223., "francium", 0.7),
    ("Ra", 226.025, "radium", 0.9),
    ("Ac", 227.028, "actinium", 1.1),
    ("Th", 232.038, "thorium", 1.3),
    ("Pa", 231.036, "protactinium", 1.5), # 91
    ("U" , 238.029, "uranium", 1.38),
    ("Np", 237.048, "neptunium", 1.36),
    ("Pu", 244., "plutonium", 1.28),
    ("Am", 243., "americium", 1.3),
    ("Cm", 247., "curium", 1.3),
    ("Bk", 247., "berkelium", 1.3),
    ("Cf", 251., "califorium", 1.3),
    ("Es", 252., "einsteinium", 1.3),
    ("Fm", 257., "fermium", 1.3),
    ("Md", 258., "mendelevium", 1.3), # 101
    ("No", 259., "nobelium", 1.3),
    ("Lr", 260., "lawrencium", 0.0),
    ("Rf", 261., "rutherfordium", 0.0),
    ("Ha", 262., "hahnium", 0.0), # also called "dubnium"
    ("Sg", 263., "seagorbium", 0.0), # once 'unnilhexium'
    ("Ns", 269., "bohrium", 0.0), # or "nielsbohrium"
    ("Hs", 268., "hassium", 0.0), # so what names do you want?
    ("Mt", 266., "meitnerium", 0.0),
    ("Uun", 269., "ununnilium", 0.0),
    ("Uuu", 272., "unununium", 0.0), # 111
    ("Uub", 277., "ununbium", 0.0),
    # ("Uut", 0.0, "ununtrium", 0.0), # enter when they are
    # ("Uuq", 0.0, "ununquadium", 0.0), # discovered
    # ("Uup", 0.0, "", 0.0),
    # ("Uuh", 0.0, "", 0.0),
    # ("Uus", 0.0, "", 0.0),
    # ("Uuo", 0.0, "", 0.0),
    ("R", 0, "R Group", 0.0),
    ):
    # Pick the valence list by atomic number (halogens and H share valence_1);
    # anything not special-cased gets valence_unknown (None).
    if num in (1, 9, 17, 35, 53):
        valences = valence_1
    elif num == 5:
        valences = valence_B
    elif num == 6:
        valences = valence_C
    elif num == 7:
        valences = valence_N
    elif num == 8:
        valences = valence_O
    elif num == 15:
        valences = valence_P
    elif num == 16:
        valences = valence_S
    else:
        valences = valence_unknown
    # ok, we have the info, now create the class and
    # add it to the table
    # (The atomic number doubles as the initial equivalence class.)
    ele = (element[0],
           num, # number
           element[2], # fullname
           element[1], # mass
           element[3], # negativity
           valences, # valences
           num, # equiv_class
           )
    defaultAtomTypes[element[0]] = ele
    num = num + 1
# Drop the loop temporaries so the module namespace only exposes the table.
del valence_unknown
del valence_1
del valence_B
del valence_C
del valence_N
del valence_O
del valence_P
del valence_S
del num
del ele
del valences
| |
# -*-coding:Utf-8 -*
# ====================================================================
# Packages
# ====================================================================
import configparser as cp
import copy
import glob
import muLAn
import muLAn.packages.general_tools as gtools
import numpy as np
import os
import pandas as pd
import sys
# ----------------------------------------------------------------------
# CLASS
# ----------------------------------------------------------------------
class printoption:
    """ANSI escape sequences used to colour and format terminal output."""
    # Foreground colours
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    # Text attributes ("reset" restores the terminal's defaults)
    reset = '\033[0m'
    bright = '\033[1m'
    dim = '\033[2m'
    underscore = '\033[4m'
    blink = '\033[5m'
    reverse = '\033[7m'
    hidden = '\033[8m'
    # Composite styles used for message severities
    level0 = "\033[1m\033[31m"   # bold red
    level1 = "\033[1m"           # bold
    good = "\033[32m"            # green
# ====================================================================
# Class
# ====================================================================
class McmcFiles:
"""A class to read MCMC output files and sort results.
:param str path: Path to the MCMC files.
"""
def __init__(self, path=None):
self._path = path
if path==None:
self._getconfig()
# --------------------------------------------------------------------
    def _getconfig(self):
        """Load the configuration files *.ini.

        Reads setup.ini / advancedsetup.ini / observatories.ini from the
        event directory resolved by muLAn, normalizes the path options so
        they all end with '/', and stores the parser in ``self._cfgsetup``.
        """
        # Path of the event
        path_event = muLAn.mulan.getpath_event()
        # Configuration files
        fname_setup = "{:s}setup.ini".format(path_event)
        fname_advanced = "{:s}advancedsetup.ini".format(path_event)
        # Load configuration files (advanced settings override setup.ini)
        cfgsetup = cp.ConfigParser()
        cfgsetup.read([fname_setup, fname_advanced])
        cfgobs = cp.ConfigParser()
        cfgobs.read(path_event + 'observatories.ini')
        # Add the path to the configuration
        cfgsetup.set('FullPaths', 'Event', path_event)
        # Check the paths: append a trailing '/' where it is missing.
        # NOTE(review): all of the fix-ups below are nested under the
        # "Code is non-blank" check, so a blank FullPaths/Code leaves every
        # other path un-normalized -- confirm this nesting is intentional.
        if cfgsetup.get('FullPaths', 'Code').replace(" ", "") != "":
            if cfgsetup.get('FullPaths', 'Code')[-1] != '/':
                cfgsetup.set('FullPaths', 'Code', cfgsetup.get('FullPaths', 'Code') + '/')
            if cfgsetup.get('FullPaths', 'Event')[-1] != '/':
                cfgsetup.set('FullPaths', 'Event', cfgsetup.get('FullPaths', 'Event') + '/')
            if cfgsetup.get('RelativePaths', 'Data')[-1] != '/':
                cfgsetup.set('RelativePaths', 'Data', cfgsetup.get('RelativePaths', 'Data') + '/')
            if cfgsetup.get('RelativePaths', 'Plots')[-1] != '/':
                cfgsetup.set('RelativePaths', 'Plots', cfgsetup.get('RelativePaths', 'Plots') + '/')
            if cfgsetup.get('RelativePaths', 'Chains')[-1] != '/':
                cfgsetup.set('RelativePaths', 'Chains', cfgsetup.get('RelativePaths', 'Chains') + '/')
            if cfgsetup.get('RelativePaths', 'Outputs')[-1] != '/':
                cfgsetup.set('RelativePaths', 'Outputs', cfgsetup.get('RelativePaths', 'Outputs') + '/')
            if cfgsetup.get('RelativePaths', 'Archives')[-1] != '/':
                cfgsetup.set('RelativePaths', 'Archives', cfgsetup.get('RelativePaths', 'Archives') + '/')
            if cfgsetup.get('RelativePaths', 'ModelsHistory')[-1] != '/':
                cfgsetup.set('RelativePaths', 'ModelsHistory', cfgsetup.get('RelativePaths', 'ModelsHistory') + '/')
        self._cfgsetup = cfgsetup
# --------------------------------------------------------------------
def sort(self, cfgsetup=None):
    """Load the MCMC output files and sort the models.

    All chain files (and the ModelsSummary history file, when present)
    are parsed, merged, sorted by increasing chi^2 and de-duplicated.
    Models coming from chains receive a fresh unique ID, the sorted table
    is written to <Archive>-ModelsSummary.csv and stored in
    ``self.mcmc_samples``. Nothing is done when no chain file is found.

    :param ConfigParser cfgsetup: Content of setup.ini and
        advancedsetup.ini files; defaults to the configuration loaded
        by :meth:`_getconfig`.
    """
    if cfgsetup is None:
        cfgsetup = self._cfgsetup

    # Chain files to load: "*-c*.txt", minus the "*g*.txt" companions.
    path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
    archive = cfgsetup.get('Controls', 'Archive')
    excluded = set(glob.glob(path + archive + "*g*.txt"))
    fnames_chains = [a for a in glob.glob(path + archive + "*-c*.txt")
                     if a not in excluded]
    nb_chains = len(fnames_chains)
    if nb_chains == 0:
        # Keep the original behaviour: no attribute set, nothing written.
        return

    keys = ['chi2', 't0', 'u0', 'tE', 'rho', 'gamma', 'piEE', 'piEN',
            's', 'q', 'alpha', 'dalpha', 'ds', 'chain', 'fullid',
            'date_save', 'time_save', 'id', 'accrate', 'chi2/dof']
    samples_file = {key: [] for key in keys}

    # Physical-parameter columns shared by both file layouts:
    # (dictionary key, converter, column index).
    common = [('t0', float, 1), ('u0', float, 2), ('tE', float, 3),
              ('rho', float, 4), ('gamma', float, 5), ('piEN', float, 6),
              ('piEE', float, 7), ('s', float, 8), ('q', float, 9),
              ('alpha', float, 10), ('dalpha', float, 11),
              ('ds', float, 12), ('chi2', float, 13)]
    # Column layout of the ModelsSummary history file.
    layout_history = ([('fullid', int, 0)] + common +
                      [('chi2/dof', float, 14), ('accrate', float, 15),
                       ('date_save', int, 16), ('time_save', str, 17),
                       ('chain', int, 18), ('id', int, 19)])
    # Column layout of an individual chain file (no fullid/chain columns;
    # note chi2/dof comes last here, unlike in the history file).
    layout_chain = ([('id', int, 0)] + common +
                    [('accrate', float, 14), ('date_save', int, 15),
                     ('time_save', str, 16), ('chi2/dof', float, 17)])

    def load_file(fname, layout, errmsg, extra=()):
        """Append every model line of *fname* to samples_file according to
        *layout*; kill muLAn with *errmsg* when a line cannot be parsed.
        *extra* holds (key, constant) pairs appended once per line."""
        with open(fname, 'r') as f:
            for line in f:
                if line[0] == '#':
                    continue
                try:
                    # Split the line once instead of once per column.
                    fields = [a for a in line.split('\n')[0].split(' ')
                              if a != '']
                    for key, cast, col in layout:
                        samples_file[key].append(cast(fields[col]))
                    for key, value in extra:
                        samples_file[key].append(value)
                except (ValueError, IndexError):
                    sys.exit(errmsg)

    # Test if an history already exists; if so, load it first.
    filename_history = cfgsetup.get('FullPaths', 'Event')\
        + cfgsetup.get('RelativePaths', 'ModelsHistory')\
        + cfgsetup.get('Controls', 'Archive')\
        + '-ModelsSummary.txt'
    if os.path.exists(filename_history):
        errmsg = "\n\033[1m\033[91mThe file\033[0m\n" + filename_history\
            + "\n\033[1m\033[91mis corrupted. muLAn killed.\033[0m"
        load_file(filename_history, layout_history, errmsg)

    # Read the chains; models from chains have no unique ID yet (-1) and
    # take their chain number from the file name suffix.
    for fname in fnames_chains:
        errmsg = "\n\033[1m\033[91mThe file\033[0m\n" + "\033[1m\033[91m" + fname\
            + "\033[0m\n\033[1m\033[91mis corrupted. muLAn killed.\033[0m"
        load_file(fname, layout_chain, errmsg,
                  extra=(('chain', int(fname[-8:-4])), ('fullid', -1)))

    # Order the models: chi2 excess above the best model.
    chi2_min = np.min(samples_file['chi2'])
    samples_file.update({'dchi2': samples_file['chi2'] - chi2_min})

    # Remove duplicates, keeping the copy that already has a unique ID
    # (sort on fullid descending within equal dchi2).
    results = pd.DataFrame({})
    for key in samples_file:
        results[key] = samples_file[key]
    results_sorted = results.sort_values(
        ['dchi2', 'fullid'], ascending=[1, 0]).drop_duplicates(
        subset=['t0', 'u0', 'tE', 'rho', 'gamma', 'piEN', 'piEE',
                's', 'q', 'alpha', 'dalpha', 'ds', 'chi2'])

    # Give a unique ID to models that do not have one yet.
    id_start = np.max(results_sorted['fullid']) + 1
    if id_start == 0:
        id_start = 1
    cond = results_sorted['fullid'] == -1
    results_sorted.loc[cond, 'fullid'] = id_start + np.arange(cond.sum())

    # Save the sorted models as CSV with exponential notation:
    # (header name, dataframe column, format).
    columns = [('UniqueID', 'fullid', '{:}')]\
        + [(key, key, '{:.10e}') for key, _, _ in common]\
        + [('chi2/dof', 'chi2/dof', '{:.10e}'),
           ('accrate', 'accrate', '{:.3f}'),
           ('chain', 'chain', '{:}')]
    filename_history = filename_history[:-3] + 'csv'
    with open(filename_history, 'w') as f:
        f.write('#' + ','.join(name for name, _, _ in columns) + '\n')
        for i in range(len(results_sorted)):
            f.write(','.join(fmt.format(results_sorted[key].values[i])
                             for _, key, fmt in columns) + '\n')

    self.mcmc_samples = results_sorted
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def communicate(cfg, verbose, text, opts=False, prefix=False, newline=False, tab=False):
    """Print *text* on the terminal.

    The decorations ("[muLAn] " prefix and the ANSI codes taken from
    *opts*) are applied only when the configured verbosity is at least
    *verbose*; the message itself is always printed, optionally indented
    (*tab*) and preceded by a blank line (*newline*).
    """
    if cfg.getint('Modelling', 'Verbose') >= verbose:
        if prefix:
            text = "[muLAn] " + text
        if opts != False:
            text = ''.join(opts) + text + printoption.reset
    if tab:
        text = "    " + text
    if newline:
        text = "\n" + text
    print(text)
# ----------------------------------------------------------------------
def unpack_options(cfgsetup, level0, level1):
    """Return the comma-separated option (level0, level1) as a list of
    stripped strings.

    Fixes a Python 3 bug: the original `del a, ...` raised NameError
    because a list-comprehension variable does not leak into the
    function scope.
    """
    return [a.strip() for a in cfgsetup.get(level0, level1).split(',')]
# ----------------------------------------------------------------------
# No command-line behaviour: this module is meant to be imported.
if (__name__ == "__main__"):
    pass
| |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common client utilities"""
import getpass
import logging
import os
import six
import time
from oslo.utils import importutils
from openstackclient.common import exceptions
def find_resource(manager, name_or_id, **kwargs):
    """Helper for the _find_* methods.

    :param manager: A client manager class
    :param name_or_id: The resource we are trying to find
    :param kwargs: To be used in calling .find()
    :rtype: The found resource

    This method will attempt to find a resource in a variety of ways.
    Primarily .get() methods will be called with `name_or_id` as an integer
    value, and tried again as a string value.

    If both fail, then a .find() is attempted, which is essentially calling
    a .list() function with a 'name' query parameter that is set to
    `name_or_id`.

    Lastly, if any kwargs are passed in, they will be treated as additional
    query parameters. This is particularly handy in the case of finding
    resources in a domain.
    """
    # Try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id))
    # FIXME(dtroyer): The exception to catch here is dependent on which
    #                 client library the manager passed in belongs to.
    #                 Eventually this should be pulled from a common set
    #                 of client exceptions.
    except Exception as ex:
        if type(ex).__name__ != 'NotFound':
            raise
    # Try directly using the passed value
    try:
        return manager.get(name_or_id)
    except Exception:
        pass
    # Prepare the kwargs for calling find (any caller-supplied kwargs
    # are kept as additional query parameters).
    if 'NAME_ATTR' in manager.resource_class.__dict__:
        # novaclient does this for oddball resources
        kwargs[manager.resource_class.NAME_ATTR] = name_or_id
    else:
        kwargs['name'] = name_or_id
    # finally try to find entity by name
    try:
        return manager.find(**kwargs)
    # FIXME(dtroyer): The exception to catch here is dependent on which
    #                 client library the manager passed in belongs to.
    #                 Eventually this should be pulled from a common set
    #                 of client exceptions.
    except Exception as ex:
        ex_name = type(ex).__name__
        if ex_name == 'NotFound':
            msg = "No %s with a name or ID of '%s' exists." % \
                (manager.resource_class.__name__.lower(), name_or_id)
            raise exceptions.CommandError(msg)
        if ex_name == 'NoUniqueMatch':
            msg = "More than one %s exists with the name '%s'." % \
                (manager.resource_class.__name__.lower(), name_or_id)
            raise exceptions.CommandError(msg)
        raise
def format_dict(data):
    """Return a formatted string of key value pairs

    :param data: a dict
    :rtype: a string formatted to key='value', sorted by key
    """
    # join avoids the original quadratic string concatenation and the
    # trailing-separator slice; returns '' for an empty dict as before.
    return ", ".join("%s='%s'" % (k, six.text_type(data[k]))
                     for k in sorted(data))
def format_list(data):
    """Return a formatted string

    :param data: a list of strings
    :rtype: a string formatted to a,b,c (sorted)
    """
    items = sorted(data)
    return ', '.join(items)
def get_item_properties(item, fields, mixed_case_fields=(), formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single item resource (e.g. Server, Project, etc)
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: sequence of field names to preserve case
    :param formatters: dictionary mapping field names to callables
       to format the values
    """
    # Avoid the shared-mutable-default pitfall of the original signature.
    if formatters is None:
        formatters = {}
    row = []
    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        # Missing attributes default to an empty string.
        data = getattr(item, field_name, '')
        if field in formatters:
            row.append(formatters[field](data))
        else:
            row.append(data)
    return tuple(row)
def get_dict_properties(item, fields, mixed_case_fields=(), formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single dict resource
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: sequence of field names to preserve case
    :param formatters: dictionary mapping field names to callables
       to format the values
    """
    # Avoid the shared-mutable-default pitfall of the original signature.
    if formatters is None:
        formatters = {}
    row = []
    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        # dict.get replaces the original membership-test-then-index.
        data = item.get(field_name, '')
        if field in formatters:
            row.append(formatters[field](data))
        else:
            row.append(data)
    return tuple(row)
def string_to_bool(arg):
    """Return True when *arg* reads as an affirmative value."""
    normalized = arg.strip().lower()
    return normalized in ('t', 'true', 'yes', '1')
def env(*args, **kwargs):
    """Search for the first defined of possibly many env vars

    Returns the first environment variable defined in args, or
    returns the default defined in kwargs (empty string when absent).
    """
    # NOTE: the varargs parameter was renamed from 'vars' to avoid
    # shadowing the builtin; *args cannot be passed by keyword, so the
    # rename is backward-compatible.
    for name in args:
        value = os.environ.get(name)
        if value:
            return value
    return kwargs.get('default', '')
def get_client_class(api_name, version, version_map):
    """Returns the client class for the requested API version

    :param api_name: the name of the API, e.g. 'compute', 'image', etc
    :param version: the requested API version
    :param version_map: a dict of client classes keyed by version
    :rtype: a client class for the requested API version
    """
    key = str(version)
    try:
        client_path = version_map[key]
    except (KeyError, ValueError):
        supported = ', '.join(version_map.keys())
        msg = "Invalid %s client version '%s'. must be one of: %s" % (
            (api_name, version, supported))
        raise exceptions.UnsupportedVersion(msg)
    return importutils.import_class(client_path)
def wait_for_status(status_f,
                    res_id,
                    status_field='status',
                    success_status=('active',),
                    sleep_time=5,
                    callback=None):
    """Wait for status change on a resource during a long-running operation

    :param status_f: a status function that takes a single id argument
    :param res_id: the resource id to watch
    :param status_field: the status attribute in the returned resource object
    :param success_status: statuses meaning successful completion (the
        resource status is lower-cased before the comparison); the default
        is now an immutable tuple instead of a shared mutable list
    :param sleep_time: wait this long (seconds) between polls
    :param callback: called per sleep cycle, useful to display progress
    :rtype: True on success, False when the resource reports 'error'
    """
    while True:
        res = status_f(res_id)
        status = getattr(res, status_field, '').lower()
        if status in success_status:
            return True
        if status == 'error':
            return False
        if callback:
            progress = getattr(res, 'progress', None) or 0
            callback(progress)
        time.sleep(sleep_time)
def get_effective_log_level():
    """Return the lowest logging level considered by logging handlers

    Computes the smallest log level set among the root logger's
    handlers, capped at logging.CRITICAL when no handler is lower.
    """
    root_log = logging.getLogger()
    levels = [handler.level for handler in root_log.handlers]
    return min(levels + [logging.CRITICAL])
def get_password(stdin, prompt=None, confirm=True):
    """Prompt interactively for a password on *stdin*'s terminal.

    :param stdin: stream checked for a terminal via ``isatty``
    :param prompt: optional prompt text (defaults to "User Password:")
    :param confirm: when True, ask twice until both entries match
    :raises exceptions.CommandError: on EOF or when no terminal is attached
    """
    message = prompt or "User Password:"
    # Guard clause: refuse to prompt when stdin is not a terminal.
    if not (hasattr(stdin, 'isatty') and stdin.isatty()):
        raise exceptions.CommandError("There was a request to be prompted for a"
                                      " password and a terminal was not detected.")
    try:
        while True:
            first_pass = getpass.getpass(message)
            if not confirm:
                return first_pass
            second_pass = getpass.getpass("Repeat " + message)
            if first_pass == second_pass:
                return first_pass
            print("The passwords entered were not the same")
    except EOFError:  # Ctl-D
        raise exceptions.CommandError("Error reading password.")
def read_blob_file_contents(blob_file):
    """Return the stripped contents of *blob_file*.

    :param blob_file: path of the file to read
    :raises exceptions.CommandError: when the file cannot be read
    """
    try:
        # 'f' instead of the original 'file', which shadowed the builtin.
        with open(blob_file) as f:
            return f.read().strip()
    except IOError:
        msg = "Error occurred trying to read from file %s"
        raise exceptions.CommandError(msg % blob_file)
| |
# This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging.handlers
import re
import sys
import types
from builtins import object
from builtins import str
# Regular expression describing a valid (ASCII) Python identifier.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)


def valid_ident(s):
    """Return True when *s* is a valid Python identifier.

    Raises ValueError otherwise; used to vet keyword-argument names
    before they are handed to a factory.
    """
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
try:
    # Recent Python versions expose this normalizer directly.
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        """Fallback: normalize *level* (int or level name) to an int."""
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # NOTE(review): logging._levelNames exists on Python 2 only;
            # on Python 3 this fallback would need logging._nameToLevel —
            # confirm which interpreters still hit this branch.
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Lookups pass the stored value through ``self.configurator.convert``
    so nested dicts/lists/tuples are lazily wrapped on first access.
    The ``configurator`` attribute is assigned externally after creation.
    """

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # Record back-links so converters can navigate the tree.
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            # A popped value is not written back; only back-links are set.
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper.

    Same lazy-conversion behaviour as ConvertingDict, for list items.
    """

    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # Only the parent link is kept; the index is gone anyway.
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper."""

    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        # Tuples are immutable, so the converted value cannot be saved
        # back; only the navigation back-links are recorded.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    # Matches "prefix://suffix" strings such as ext://... and cfg://...
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # Maps the protocol prefix to the converter method name.
    value_converters = {
        'ext': 'ext_convert',
        'cfg': 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = __import__

    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # The attribute may itself be a submodule: import one
                    # level deeper and retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError while preserving cause/traceback.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            # print d, rest
            # Walk the remainder: ".word" or "[index]" accessors.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx)  # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        # rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives.

        NOTE(review): unlike upstream dictconfig, this copy performs no
        string conversion here, so CONVERT_PATTERN / value_converters are
        unused by this method — confirm whether ext://'cfg:// support was
        intentionally dropped.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and \
                isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        # Resolve dotted names; the ClassType check keeps Python 2
        # old-style classes working.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != type:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            # '.' entries are set as attributes on the constructed object.
            for name, value in list(props.items()):
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """

    def configure(self):
        """Do the configuration.

        Order matters: formatters, then filters, then handlers, then
        loggers, then the root logger, all under the logging module lock.
        """
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        # Serialize against concurrent logging configuration calls.
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r' % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except Exception as e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except Exception as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                            formatters[name])
                    except Exception as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except Exception as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except Exception as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                # we don't want to lose the existing loggers,
                # since other threads may have pointers to them.
                # existing is set to contain all existing loggers,
                # and as we go through the new configuration we
                # remove any which are configured. At the end,
                # what's left in existing is the set of loggers
                # which were in the previous configuration but
                # which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict)
                # The list needs to be sorted so that we can
                # avoid disabling child loggers of explicitly
                # named loggers. With a sorted list it is easier
                # to find the child loggers.
                existing.sort()
                # We'll keep the list of existing loggers
                # which are children of named loggers here...
                child_loggers = []
                # now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1  # look at the entry after name
                        while (i < num_existing) and \
                                (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except Exception as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                # Disable any old loggers. There's no point deleting
                # them as other threads may continue to hold references
                # and by disabling them, you stop them doing any logging.
                # However, don't disable children of named loggers, as that's
                # probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()

    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()']  # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                # Name of parameter changed from fmt to format.
                # Retry with old name.
                # This is so that code can be used with older Python versions
                # (e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result

    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result

    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except Exception as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))

    def configure_handler(self, config):
        """Configure a handler from a dictionary."""
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                # Resolve the formatter name to the configured instance.
                formatter = self.config['formatters'][formatter]
            except Exception as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            # ClassType check keeps Python 2 old-style classes working.
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != type:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            # Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and \
                    'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except Exception as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and \
                    'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and \
                    'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            # The argument name changed from strm to stream
            # Retry with old name.
            # This is so that code can be used with older Python versions
            # (e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result

    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except Exception as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))

    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(_checkLevel(level))
        if not incremental:
            # Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)

    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate

    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# Configurator class used by dictConfig(); may be replaced by callers who
# need customised configuration behaviour.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary (PEP 391 schema)."""
    dictConfigClass(config).configure()
| |
# Copyright 2015 Cloudera Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import ibis.util as util
import ibis.common as com
import ibis.expr.rules as rlz
import ibis.expr.datatypes as dt
import ibis.expr.signature as sig
import ibis.expr.operations as ops
import ibis.impala.compiler as comp
__all__ = ['add_operation', 'scalar_function', 'aggregate_function',
'wrap_udf', 'wrap_uda']
class Function(object):
    """Base wrapper for an Impala UDF/UDA.

    Normalises the declared input/output types to Ibis dtypes and builds a
    dedicated Ibis operation class for the function; subclasses supply the
    class-name scheme and the rule signature.
    """

    def __init__(self, inputs, output, name):
        self.inputs = tuple(dt.dtype(t) for t in inputs)
        self.output = dt.dtype(output)
        self.name = name
        self._klass = self._create_operation(name)

    def _create_operation(self, name):
        input_type, output_type = self._type_signature()
        return _create_operation_class(self._get_class_name(name),
                                       input_type, output_type)

    def __repr__(self):
        return '{0}({1}, {2!r}, {3!r})'.format(
            type(self).__name__, self.name, self.inputs, self.output)

    def __call__(self, *args):
        # Instantiate the generated operation and hand back an expression.
        return self._klass(*args).to_expr()

    def register(self, name, database):
        """
        Registers the given operation within the Ibis SQL translation
        toolchain. Can also use add_operation API

        Parameters
        ----------
        name: used in issuing statements to SQL engine
        database: database the relevant operator is registered to
        """
        add_operation(self._klass, name, database)
class ScalarFunction(Function):
    """Scalar (row-wise) UDF flavour of Function."""

    def _get_class_name(self, name):
        # Generate a unique name when the caller did not supply one.
        return 'UDF_{0}'.format(name if name is not None else util.guid())

    def _type_signature(self):
        return (_ibis_signature(self.inputs),
                rlz.shape_like('args', dt.dtype(self.output)))
class AggregateFunction(Function):
    """Aggregate (UDA) flavour of Function; its operation is a reduction."""

    def _create_operation(self, name):
        op_class = Function._create_operation(self, name)
        op_class._reduction = True
        return op_class

    def _get_class_name(self, name):
        # Generate a unique name when the caller did not supply one.
        return 'UDA_{0}'.format(name if name is not None else util.guid())

    def _type_signature(self):
        # Aggregates always collapse to a scalar of the declared output type.
        def output_type(op):
            return dt.dtype(self.output).scalar_type()
        return _ibis_signature(self.inputs), output_type
class ImpalaFunction(object):
    """Mixin holding the Impala library location and the function name
    (auto-generated when not supplied)."""

    def __init__(self, name=None, lib_path=None):
        self.lib_path = lib_path
        self.name = name or util.guid()
        if lib_path is not None:
            self._check_library()

    def _check_library(self):
        # Impala accepts shared objects (.so) and LLVM IR (.ll) only.
        if self.lib_path[-3:] not in ('.so', '.ll'):
            raise ValueError('Invalid file type. Must be .so or .ll ')

    def hash(self):
        raise NotImplementedError
class ImpalaUDF(ScalarFunction, ImpalaFunction):
    """
    Feel free to customize my __doc__ or wrap in a nicer user API

    Scalar UDF backed by an Impala-loadable library (.so or .ll).
    """
    def __init__(self, inputs, output, so_symbol=None, lib_path=None,
                 name=None):
        # C++ symbol name of the UDF entry point inside the library.
        self.so_symbol = so_symbol
        # ImpalaFunction must initialise first: it generates self.name
        # when none was given, and ScalarFunction needs that final name.
        ImpalaFunction.__init__(self, name=name, lib_path=lib_path)
        ScalarFunction.__init__(self, inputs, output, name=self.name)
    def hash(self):
        # TODO: revisit this later
        # from hashlib import sha1
        # val = self.so_symbol
        # for in_type in self.inputs:
        #     val += in_type.name()
        # return sha1(val).hexdigest()
        pass
class ImpalaUDA(AggregateFunction, ImpalaFunction):
    """User-defined aggregate backed by an Impala-loadable .so library."""

    def __init__(self, inputs, output, update_fn=None, init_fn=None,
                 merge_fn=None, finalize_fn=None, serialize_fn=None,
                 lib_path=None, name=None):
        # Symbol names of the UDA lifecycle functions inside the library.
        self.update_fn = update_fn
        self.init_fn = init_fn
        self.merge_fn = merge_fn
        self.finalize_fn = finalize_fn
        self.serialize_fn = serialize_fn
        # ImpalaFunction first so a generated name is available below.
        ImpalaFunction.__init__(self, name=name, lib_path=lib_path)
        AggregateFunction.__init__(self, inputs, output, name=self.name)

    def _check_library(self):
        # UDAs are stricter than UDFs: LLVM IR input is not accepted.
        suffix = self.lib_path[-3:]
        if suffix == '.ll':
            raise com.IbisInputError('LLVM IR UDAs are not yet supported')
        elif suffix != '.so':
            raise ValueError('Invalid file type. Must be .so')
def wrap_uda(hdfs_file, inputs, output, update_fn, init_fn=None,
             merge_fn=None, finalize_fn=None, serialize_fn=None,
             close_fn=None, name=None):
    """
    Creates a callable aggregation function object. Must be created in Impala
    to be used

    Parameters
    ----------
    hdfs_file: .so file that contains relevant UDA
    inputs: list of strings denoting ibis datatypes
    output: string denoting ibis datatype
    update_fn: string
        Library symbol name for update function
    init_fn: string, optional
        Library symbol name for initialization function
    merge_fn: string, optional
        Library symbol name for merge function
    finalize_fn: string, optional
        Library symbol name for finalize function
    serialize_fn : string, optional
        Library symbol name for serialize UDA API function. Not required for all
        UDAs; see documentation for more.
    close_fn : string, optional
    name: string, optional
        Used internally to track function

    Returns
    -------
    container : UDA object
    """
    # NOTE(review): close_fn is accepted but never forwarded to ImpalaUDA;
    # confirm whether that is intentional.
    return ImpalaUDA(inputs, output,
                     update_fn=update_fn, init_fn=init_fn,
                     merge_fn=merge_fn, finalize_fn=finalize_fn,
                     serialize_fn=serialize_fn,
                     name=name, lib_path=hdfs_file)
def wrap_udf(hdfs_file, inputs, output, so_symbol, name=None):
    """
    Creates a callable scalar function object. Must be created in Impala to be
    used

    Parameters
    ----------
    hdfs_file: .so file that contains relevant UDF
    inputs: list of strings or sig.TypeSignature
        Input types to UDF
    output: string
        Ibis data type
    so_symbol: string, C++ function name for relevant UDF
    name: string (optional). Used internally to track function

    Returns
    -------
    container : UDF object
    """
    return ImpalaUDF(inputs, output, so_symbol=so_symbol,
                     name=name, lib_path=hdfs_file)
def scalar_function(inputs, output, name=None):
    """
    Creates an operator class that can be passed to add_operation()

    Parameters:
    inputs: list of strings
        Ibis data type names
    output: string
        Ibis data type
    name: string, optional
        Used internally to track function; auto-generated when omitted

    Returns
    -------
    klass, user_api : class, function
    """
    return ScalarFunction(inputs, output, name=name)
def aggregate_function(inputs, output, name=None):
    """
    Creates an operator class that can be passed to add_operation()

    Parameters:
    inputs: list of strings
        Ibis data type names
    output: string
        Ibis data type
    name: string, optional
        Used internally to track function; auto-generated when omitted

    Returns
    -------
    klass, user_api : class, function
    """
    return AggregateFunction(inputs, output, name=name)
def _ibis_signature(inputs):
    """Coerce *inputs* into a sig.TypeSignature.

    A TypeSignature passes straight through; otherwise each type name
    becomes a positional argument named _0, _1, ...
    """
    if isinstance(inputs, sig.TypeSignature):
        return inputs
    arguments = []
    for position, dtype in enumerate(inputs):
        arguments.append(('_{}'.format(position),
                          sig.Argument(rlz.value(dtype))))
    return sig.TypeSignature(arguments)
def _create_operation_class(name, input_type, output_type):
    """Build a fresh ops.ValueOp subclass carrying the given rule
    signature and output type."""
    return type(name, (ops.ValueOp,), {
        'signature': input_type,
        'output_type': output_type,
    })
def add_operation(op, func_name, db):
    """
    Registers the given operation within the Ibis SQL translation toolchain

    Parameters
    ----------
    op: operator class
    func_name: used in issuing statements to SQL engine
    db: database the relevant operator is registered to
    """
    full_name = '{0}.{1}'.format(db, func_name)
    # TODO
    # if op.input_type is rlz.listof:
    #     translator = comp.varargs(full_name)
    # else:
    # Fixed-arity translator derived from the operation's signature length.
    arity = len(op.signature)
    translator = comp.fixed_arity(full_name, arity)
    comp._operation_registry[op] = translator
def parse_type(t):
    """Map an Impala type string to its Ibis type name.

    Parameterized char types (varchar(n), char(n)) become 'string';
    decimal(p, s) strings are validated via dt.dtype and returned
    unchanged.

    Raises
    ------
    ValueError
        If a decimal type fails validation or the type is unknown.
    """
    t = t.lower()
    if t in _impala_to_ibis_type:
        return _impala_to_ibis_type[t]
    if 'varchar' in t or 'char' in t:
        return 'string'
    if 'decimal' in t:
        # dt.dtype raises on malformed input; a falsy result also counts
        # as an error.
        result = dt.dtype(t)
        if result:
            return t
        # BUG FIX: this previously *returned* a ValueError instance
        # instead of raising it, so callers silently received an
        # exception object as the parsed type.
        raise ValueError(t)
    # Narrowed from bare Exception; ValueError is still caught by any
    # caller handling Exception.
    raise ValueError(t)
_VARCHAR_RE = re.compile('varchar\((\d+)\)')
def _parse_varchar(t):
m = _VARCHAR_RE.match(t)
if m:
return 'string'
def _impala_type_to_ibis(tval):
    """Translate an Impala type name to its Ibis equivalent, passing
    unknown names through unchanged.

    Single dict lookup via .get() replaces the previous membership test
    plus second lookup.
    """
    return _impala_to_ibis_type.get(tval, tval)
def _ibis_string_to_impala(tval):
    """Translate an Ibis type name to the Impala SQL type name.

    Falls back to ``repr`` of the validated Ibis type.  Note: if the
    validated result is falsy this returns None implicitly — presumably
    unreachable for valid types; confirm against dt.validate_type.
    """
    # Local import avoids a circular dependency at module load time.
    from ibis.impala.compiler import _sql_type_names
    if tval in _sql_type_names:
        return _sql_type_names[tval]
    result = dt.validate_type(tval)
    if result:
        return repr(result)
# Mapping of Impala column type names to their Ibis datatype names;
# consulted by parse_type() and _impala_type_to_ibis().
_impala_to_ibis_type = {
    'boolean': 'boolean',
    'tinyint': 'int8',
    'smallint': 'int16',
    'int': 'int32',
    'bigint': 'int64',
    'float': 'float',
    'double': 'double',
    'string': 'string',
    'varchar': 'string',
    'char': 'string',
    'timestamp': 'timestamp',
    'decimal': 'decimal'
}
| |
#!/usr/bin/env python
#
# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on the software developed by:
# Copyright (c) 2008,2016 david decotigny (Pool of threads)
# Copyright (c) 2006-2008, R Oudkerk (multiprocessing.Pool)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# @brief Python Pool implementation based on TBB with monkey-patching
#
# See http://docs.python.org/dev/library/multiprocessing.html
# Differences: added imap_async and imap_unordered_async, and terminate()
# has to be called explicitly (it's not registered by atexit).
#
# The general idea is that we submit works to a workqueue, either as
# single Jobs (one function to call), or JobSequences (batch of
# Jobs). Each Job is associated with an ApplyResult object which has 2
# states: waiting for the Job to complete, or Ready. Instead of
# waiting for the jobs to finish, we wait for their ApplyResult object
# to become ready: an event mechanism is used for that.
# When we apply a function to several arguments in "parallel", we need
# a way to wait for all/part of the Jobs to be processed: that's what
# "collectors" are for; they group and wait for a set of ApplyResult
# objects. Once a collector is ready to be used, we can use a
# CollectorIterator to iterate over the result values it's collecting.
#
# The methods of a Pool object use all these concepts and expose
# them to their caller in a very simple way.
import sys
import threading
import traceback
from .api import *
__all__ = ["Pool", "TimeoutError"]
__doc__ = """
Standard Python Pool implementation based on Python API
for Intel(R) Threading Building Blocks library (Intel(R) TBB)
"""
class TimeoutError(Exception):
    """Signals that a result did not arrive within the allotted time."""
    pass
class Pool(object):
    """
    The Pool class provides standard multiprocessing.Pool interface
    which is mapped onto Intel(R) TBB tasks executing in its thread pool
    """
    def __init__(self, nworkers=0, name="Pool"):
        """
        \param nworkers (integer) number of worker threads to start
        \param name (string) prefix for the worker threads' name

        NOTE(review): both parameters are accepted only for
        multiprocessing.Pool API compatibility and are ignored here —
        TBB controls the actual parallelism; confirm intended.
        """
        self._closed = False
        self._tasks = task_group()
        self._pool = [None,]*default_num_threads() # Dask asks for len(_pool)
    def apply(self, func, args=(), kwds=dict()):
        """Equivalent of the apply() builtin function. It blocks till
        the result is ready."""
        return self.apply_async(func, args, kwds).get()
    def map(self, func, iterable, chunksize=None):
        """A parallel equivalent of the map() builtin function. It
        blocks till the result is ready.
        This method chops the iterable into a number of chunks which
        it submits to the process pool as separate tasks. The
        (approximate) size of these chunks can be specified by setting
        chunksize to a positive integer."""
        return self.map_async(func, iterable, chunksize).get()
    def imap(self, func, iterable, chunksize=1):
        """
        An equivalent of itertools.imap().
        The chunksize argument is the same as the one used by the
        map() method. For very long iterables using a large value for
        chunksize can make the job complete much faster than
        using the default value of 1.
        Also if chunksize is 1 then the next() method of the iterator
        returned by the imap() method has an optional timeout
        parameter: next(timeout) will raise processing.TimeoutError if
        the result cannot be returned within timeout seconds.
        """
        collector = OrderedResultCollector(as_iterator=True)
        self._create_sequences(func, iterable, chunksize, collector)
        return iter(collector)
    def imap_unordered(self, func, iterable, chunksize=1):
        """The same as imap() except that the ordering of the results
        from the returned iterator should be considered
        arbitrary. (Only when there is only one worker process is the
        order guaranteed to be "correct".)"""
        collector = UnorderedResultCollector()
        self._create_sequences(func, iterable, chunksize, collector)
        return iter(collector)
    def apply_async(self, func, args=(), kwds=dict(), callback=None):
        """A variant of the apply() method which returns an
        ApplyResult object.
        If callback is specified then it should be a callable which
        accepts a single argument. When the result becomes ready,
        callback is applied to it (unless the call failed). callback
        should complete immediately since otherwise the thread which
        handles the results will get blocked."""
        assert not self._closed # No lock here. We assume it's atomic...
        apply_result = ApplyResult(callback=callback)
        job = Job(func, args, kwds, apply_result)
        # The job is queued on the TBB task group; its ApplyResult is
        # signalled when the job runs.
        self._tasks.run(job)
        return apply_result
    def map_async(self, func, iterable, chunksize=None, callback=None):
        """A variant of the map() method which returns a ApplyResult
        object.
        If callback is specified then it should be a callable which
        accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed). callback
        should complete immediately since otherwise the thread which
        handles the results will get blocked."""
        apply_result = ApplyResult(callback=callback)
        collector = OrderedResultCollector(apply_result, as_iterator=False)
        if not self._create_sequences(func, iterable, chunksize, collector):
            # Empty iterable: resolve immediately with an empty list.
            apply_result._set_value([])
        return apply_result
    def imap_async(self, func, iterable, chunksize=None, callback=None):
        """A variant of the imap() method which returns an ApplyResult
        object that provides an iterator (next method(timeout)
        available).
        If callback is specified then it should be a callable which
        accepts a single argument. When the resulting iterator becomes
        ready, callback is applied to it (unless the call
        failed). callback should complete immediately since otherwise
        the thread which handles the results will get blocked."""
        apply_result = ApplyResult(callback=callback)
        collector = OrderedResultCollector(apply_result, as_iterator=True)
        if not self._create_sequences(func, iterable, chunksize, collector):
            apply_result._set_value(iter([]))
        return apply_result
    def imap_unordered_async(self, func, iterable, chunksize=None,
                             callback=None):
        """A variant of the imap_unordered() method which returns an
        ApplyResult object that provides an iterator (next
        method(timeout) available).
        If callback is specified then it should be a callable which
        accepts a single argument. When the resulting iterator becomes
        ready, callback is applied to it (unless the call
        failed). callback should complete immediately since otherwise
        the thread which handles the results will get blocked."""
        apply_result = ApplyResult(callback=callback)
        collector = UnorderedResultCollector(apply_result)
        if not self._create_sequences(func, iterable, chunksize, collector):
            apply_result._set_value(iter([]))
        return apply_result
    def close(self):
        """Prevents any more tasks from being submitted to the
        pool. Once all the tasks have been completed the worker
        processes will exit."""
        # No lock here. We assume it's sufficiently atomic...
        self._closed = True
    def terminate(self):
        """Stops the worker processes immediately without completing
        outstanding work. When the pool object is garbage collected
        terminate() will be called immediately."""
        self.close()
        self._tasks.cancel()
    def join(self):
        """Wait for the worker processes to exit. One must call
        close() or terminate() before using join()."""
        self._tasks.wait()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # NOTE(review): only joins; does not close() the pool — confirm
        # whether close-on-exit is wanted.
        self.join()
    def __del__(self):
        self.terminate()
        self.join()
    def _create_sequences(self, func, iterable, chunksize, collector):
        """
        Create callable objects to process and pushes them on the
        work queue. Each work unit is meant to process a slice of
        iterable of size chunksize. If collector is specified, then
        the ApplyResult objects associated with the jobs will notify
        collector when their result becomes ready.
        \return the list callable objects (basically: JobSequences)
        pushed onto the work queue
        """
        assert not self._closed # No lock here. We assume it's atomic...
        it_ = iter(iterable)
        exit_loop = False
        sequences = []
        while not exit_loop:
            seq = []
            # chunksize=None degrades to chunks of one job each.
            for _ in range(chunksize or 1):
                try:
                    arg = next(it_)
                except StopIteration:
                    exit_loop = True
                    break
                apply_result = ApplyResult(collector)
                job = Job(func, (arg,), {}, apply_result)
                seq.append(job)
            if seq:
                sequences.append(JobSequence(seq))
        for t in sequences:
            self._tasks.run(t)
        return sequences
class Job:
    """A work unit: one function call plus the ApplyResult that will
    receive its outcome."""

    def __init__(self, func, args, kwds, apply_result):
        """
        \param func/args/kwds used to call the function
        \param apply_result ApplyResult object that holds the result
        of the function call
        """
        self._func = func
        self._args = args
        self._kwds = kwds
        self._result = apply_result

    def __call__(self):
        """Run the function and publish either its return value or the
        active exception through the associated ApplyResult."""
        try:
            value = self._func(*self._args, **self._kwds)
        except:
            # _set_exception captures sys.exc_info() itself.
            self._result._set_exception()
        else:
            self._result._set_value(value)
class JobSequence:
    """A work unit that runs a batch of Job objects back to back."""

    def __init__(self, jobs):
        self._jobs = jobs

    def __call__(self):
        """Invoke every queued job, in submission order."""
        for each in self._jobs:
            each()
class ApplyResult(object):
    """An object associated with a Job object that holds its result:
    it's available during the whole life the Job and after, even when
    the Job didn't process yet. It's possible to use this object to
    wait for the result/exception of the job to be available.
    The result objects returns by the Pool::*_async() methods are of
    this type"""
    def __init__(self, collector=None, callback=None):
        """
        \param collector when not None, the notify_ready() method of
        the collector will be called when the result from the Job is
        ready
        \param callback when not None, function to call when the
        result becomes available (this is the paramater passed to the
        Pool::*_async() methods.
        """
        self._success = False
        self._event = threading.Event()
        self._data = None
        self._collector = None
        self._callback = callback
        if collector is not None:
            collector.register_result(self)
            self._collector = collector
    def get(self, timeout=None):
        """
        Returns the result when it arrives. If timeout is not None and
        the result does not arrive within timeout seconds then
        TimeoutError is raised. If the remote call raised an exception
        then that exception will be reraised by get().
        """
        if not self.wait(timeout):
            raise TimeoutError("Result not available within %fs" % timeout)
        if self._success:
            return self._data
        # On failure _data holds sys.exc_info(); re-raise preserving the
        # original traceback (Python 2 needs the exec form).
        if sys.version_info[0] == 3:
            raise self._data[0](self._data[1]).with_traceback(self._data[2])
        else:
            exec("raise self._data[0], self._data[1], self._data[2]")
    def wait(self, timeout=None):
        """Waits until the result is available or until timeout
        seconds pass. Returns whether the result arrived."""
        self._event.wait(timeout)
        # is_set() replaces the camelCase isSet() alias, which is
        # deprecated since Python 3.10; is_set() exists since 2.6, so
        # this stays compatible with the Python 2 path above.
        return self._event.is_set()
    def ready(self):
        """Returns whether the call has completed."""
        return self._event.is_set()
    def successful(self):
        """Returns whether the call completed without raising an
        exception. Will raise AssertionError if the result is not
        ready."""
        assert self.ready()
        return self._success
    def _set_value(self, value):
        """Called by a Job object to tell the result is ready, and
        provides the value of this result. The object will become
        ready and successful. The collector's notify_ready() method
        will be called, and the callback method too"""
        assert not self.ready()
        self._data = value
        self._success = True
        self._event.set()
        if self._collector is not None:
            self._collector.notify_ready(self)
        if self._callback is not None:
            try:
                self._callback(value)
            except:
                # A failing callback must not poison the worker thread.
                traceback.print_exc()
    def _set_exception(self):
        """Called by a Job object to tell that an exception occurred
        during the processing of the function. The object will become
        ready but not successful. The collector's notify_ready()
        method will be called, but NOT the callback method"""
        # traceback.print_exc()
        assert not self.ready()
        self._data = sys.exc_info()
        self._success = False
        self._event.set()
        if self._collector is not None:
            self._collector.notify_ready(self)
class AbstractResultCollector(object):
    """Interface for result collectors: objects that know which
    ApplyResult instances they wait on, get notified as each becomes
    ready, and can hand out an iterator over the collected values."""

    def __init__(self, to_notify):
        """
        \param to_notify ApplyResult object to notify when all the
        results we're waiting for become available. Can be None.
        """
        self._to_notify = to_notify

    def register_result(self, apply_result):
        """Record an ApplyResult this collector must wait for. Always
        called BEFORE the jobs are queued and BEFORE __iter__ or
        _get_result() may be used.
        \param apply_result ApplyResult object to add in our collection
        """
        raise NotImplementedError("Children classes must implement it")

    def notify_ready(self, apply_result):
        """Callback from a registered ApplyResult once its value or
        exception becomes available.
        \param apply_result ApplyResult object telling us that the job
        has been processed
        """
        raise NotImplementedError("Children classes must implement it")

    def _get_result(self, idx, timeout=None):
        """Fetch the idx-th result (ordering defined by the subclass),
        waiting up to *timeout* seconds — None meaning forever. Used by
        CollectorIterator.
        """
        raise NotImplementedError("Children classes must implement it")

    def __iter__(self):
        """Return a new CollectorIterator object for this collector"""
        return CollectorIterator(self)
class CollectorIterator(object):
    """Iterator over the values gathered by a collector; its next()
    accepts an optional timeout. Created by
    AbstractResultCollector.__iter__()."""

    def __init__(self, collector):
        """\param AbstractResultCollector instance"""
        self._collector = collector
        self._cursor = 0

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next result value, raising StopIteration at the
        end; exceptions raised by the Job propagate out."""
        try:
            item = self._collector._get_result(self._cursor, timeout)
        except IndexError:
            # Reset so the iterator can be reused after exhaustion.
            self._cursor = 0
            raise StopIteration
        except:
            self._cursor = 0
            raise
        self._cursor += 1
        assert item.ready()
        return item.get(0)

    def __next__(self):
        return self.next()
class UnorderedResultCollector(AbstractResultCollector):
    """An AbstractResultCollector implementation that collects the
    values of the ApplyResult objects in the order they become ready. The
    CollectorIterator object returned by __iter__() will iterate over
    them in the order they become ready"""
    def __init__(self, to_notify=None):
        """
        \param to_notify ApplyResult object to notify when all the
        results we're waiting for become available. Can be None.
        """
        AbstractResultCollector.__init__(self, to_notify)
        self._cond = threading.Condition()
        # Results are appended as they become ready; _expected counts the
        # registered jobs so iteration knows when to stop.
        self._collection = []
        self._expected = 0
    def register_result(self, apply_result):
        """Used to identify which results we're waiting for. Will
        always be called BEFORE the Jobs get submitted to the work
        queue, and BEFORE the __iter__ and _get_result() methods can
        be called
        \param apply_result ApplyResult object to add in our collection
        """
        # Only the count matters: ordering is arrival order, not
        # registration order.
        self._expected += 1
    def _get_result(self, idx, timeout=None):
        """Called by the CollectorIterator object to retrieve the
        result's values one after another, in the order the results have
        become available.
        \param idx The index of the result we want, wrt collector's order
        \param timeout integer telling how long to wait (in seconds)
        for the result at index idx to be available, or None (wait
        forever)
        """
        self._cond.acquire()
        try:
            if idx >= self._expected:
                raise IndexError
            elif idx < len(self._collection):
                return self._collection[idx]
            elif idx != len(self._collection):
                # Violation of the sequence protocol
                raise IndexError()
            else:
                # NOTE(review): a single wait() may wake before the
                # result arrives (spurious wakeup or another consumer
                # taking the notify), in which case this raises
                # TimeoutError even with timeout=None — confirm whether
                # a wait loop is wanted here.
                self._cond.wait(timeout=timeout)
                try:
                    return self._collection[idx]
                except IndexError:
                    # Still not added !
                    raise TimeoutError("Timeout while waiting for results")
        finally:
            self._cond.release()
    def notify_ready(self, apply_result=None):
        """Called by the ApplyResult object (already registered via
        register_result()) that it is now ready (ie. the Job's result
        is available or an exception has been raised).
        \param apply_result ApplyResult object telling us that the job
        has been processed
        """
        first_item = False
        self._cond.acquire()
        try:
            self._collection.append(apply_result)
            first_item = (len(self._collection) == 1)
            self._cond.notifyAll()
        finally:
            self._cond.release()
        # The to_notify ApplyResult is resolved with an iterator as soon
        # as the FIRST result arrives (iterator consumers then block per
        # item), outside the lock to avoid re-entrancy.
        if first_item and self._to_notify is not None:
            self._to_notify._set_value(iter(self))
class OrderedResultCollector(AbstractResultCollector):
    """An AbstractResultCollector implementation that collects the
    values of the ApplyResult objects in the order they have been
    submitted. The CollectorIterator object returned by __iter__()
    will iterate over them in the order they have been submitted"""
    def __init__(self, to_notify=None, as_iterator=True):
        """
        \param to_notify ApplyResult object to notify when all the
        results we're waiting for become available. Can be None.
        \param as_iterator boolean telling whether the result value
        set on to_notify should be an iterator (available as soon as 1
        result arrived) or a list (available only after the last
        result arrived)
        """
        AbstractResultCollector.__init__(self, to_notify)
        # Results kept in submission order; _remaining counts the ones
        # still outstanding.
        self._results = []
        self._lock = threading.Lock()
        self._remaining = 0
        self._as_iterator = as_iterator
    def register_result(self, apply_result):
        """Used to identify which results we're waiting for. Will
        always be called BEFORE the Jobs get submitted to the work
        queue, and BEFORE the __iter__ and _get_result() methods can
        be called
        \param apply_result ApplyResult object to add in our collection
        """
        self._results.append(apply_result)
        self._remaining += 1
    def _get_result(self, idx, timeout=None):
        """Called by the CollectorIterator object to retrieve the
        result's values one after another (order defined by the
        implementation)
        \param idx The index of the result we want, wrt collector's order
        \param timeout integer telling how long to wait (in seconds)
        for the result at index idx to be available, or None (wait
        forever)
        """
        # An out-of-range idx raises IndexError here, which the iterator
        # converts to StopIteration.
        res = self._results[idx]
        res.wait(timeout)
        return res
    def notify_ready(self, apply_result):
        """Called by the ApplyResult object (already registered via
        register_result()) that it is now ready (ie. the Job's result
        is available or an exception has been raised).
        \param apply_result ApplyResult object telling us that the job
        has been processed
        """
        got_first = False
        got_last = False
        self._lock.acquire()
        try:
            assert self._remaining > 0
            # First notification: nothing consumed yet from _remaining.
            got_first = (len(self._results) == self._remaining)
            self._remaining -= 1
            got_last = (self._remaining == 0)
        finally:
            self._lock.release()
        # Resolve to_notify outside the lock: iterator mode resolves on
        # the first arrival, list mode only once every result is in.
        if self._to_notify is not None:
            if self._as_iterator and got_first:
                self._to_notify._set_value(iter(self))
            elif not self._as_iterator and got_last:
                try:
                    lst = [r.get(0) for r in self._results]
                except:
                    # Propagate the job's failure to the waiter instead
                    # of the list of values.
                    self._to_notify._set_exception()
                else:
                    self._to_notify._set_value(lst)
| |
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_dlp_filepattern
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Autouse fixture: patch the module-level Connection class so no real
    FortiOS connection is ever attempted by these tests."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_dlp_filepattern.Connection')
    return connection_class_mock
# NOTE(review): this passes the fixture *function* object (not a mocked
# connection instance) to FortiOSHandler — harmless here because the
# tests patch set()/schema() and never touch the connection; confirm
# intended.
fos_instance = FortiOSHandler(connection_mock)
def test_dlp_filepattern_creation(mocker):
    """A successful POST via FortiOSHandler.set reports changed and no error."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    filepattern = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    module_args = {
        'username': 'admin',
        'state': 'present',
        'dlp_filepattern': dict(filepattern),
        'vdom': 'root'}

    is_error, changed, response = fortios_dlp_filepattern.fortios_dlp(module_args, fos_instance)

    set_mock.assert_called_with('dlp', 'filepattern', data=filepattern, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_dlp_filepattern_creation_fails(mocker):
    """A failed POST (HTTP 500) reports an error and no change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    filepattern = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    module_args = {
        'username': 'admin',
        'state': 'present',
        'dlp_filepattern': dict(filepattern),
        'vdom': 'root'}

    is_error, changed, response = fortios_dlp_filepattern.fortios_dlp(module_args, fos_instance)

    set_mock.assert_called_with('dlp', 'filepattern', data=filepattern, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_dlp_filepattern_removal(mocker):
    """A successful DELETE for state=absent reports changed and no error."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    module_args = {
        'username': 'admin',
        'state': 'absent',
        'dlp_filepattern': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_dlp_filepattern.fortios_dlp(module_args, fos_instance)

    delete_mock.assert_called_with('dlp', 'filepattern', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_dlp_filepattern_deletion_fails(mocker):
    """A failed DELETE (HTTP 500) reports an error and no change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    module_args = {
        'username': 'admin',
        'state': 'absent',
        'dlp_filepattern': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_dlp_filepattern.fortios_dlp(module_args, fos_instance)

    delete_mock.assert_called_with('dlp', 'filepattern', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_dlp_filepattern_idempotent(mocker):
    """A 404 on the underlying call means no work was needed: neither an
    error nor a change is reported."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    filepattern = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    module_args = {
        'username': 'admin',
        'state': 'present',
        'dlp_filepattern': dict(filepattern),
        'vdom': 'root'}

    is_error, changed, response = fortios_dlp_filepattern.fortios_dlp(module_args, fos_instance)

    set_mock.assert_called_with('dlp', 'filepattern', data=filepattern, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_dlp_filepattern_filter_foreign_attributes(mocker):
    """Attributes outside the module schema are stripped before the API call."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    valid_fields = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    module_args = {
        'username': 'admin',
        'state': 'present',
        # The foreign key must not reach the device API.
        'dlp_filepattern': dict(valid_fields, random_attribute_not_valid='tag'),
        'vdom': 'root'}

    is_error, changed, response = fortios_dlp_filepattern.fortios_dlp(module_args, fos_instance)

    set_mock.assert_called_with('dlp', 'filepattern', data=valid_fields, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic type variable for the value produced by a per-call response hook.
T = TypeVar('T')
# Optional per-operation callback ("cls" keyword): receives the pipeline
# response, the deserialized result and the response headers, and may
# substitute its own return value for the operation's.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewayConnectionsOperations:
"""VirtualNetworkGatewayConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # Msrest serializer/deserializer pair for request bodies and responses.
    self._serialize = serializer
    self._deserialize = deserializer
    # Service client configuration (subscription id, polling interval, ...).
    self._config = config
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    parameters: "_models.VirtualNetworkGatewayConnection",
    **kwargs: Any
) -> "_models.VirtualNetworkGatewayConnection":
    """Send the initial PUT request of the create-or-update LRO.

    Returns the deserialized connection from a 200/201 response; any other
    status raises HttpResponseError (after consulting ``error_map``).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayConnection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the model as the PUT body and send the request.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VirtualNetworkGatewayConnection')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 (updated) and 201 (created) both carry the same payload shape.
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)

    # Optional caller hook receives the raw pipeline response as well.
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    parameters: "_models.VirtualNetworkGatewayConnection",
    **kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetworkGatewayConnection"]:
    """Creates or updates a virtual network gateway connection in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the virtual network gateway
     connection.
    :type virtual_network_gateway_connection_name: str
    :param parameters: Parameters supplied to the create or update virtual network gateway
     connection operation.
    :type parameters: ~azure.mgmt.network.v2018_10_01.models.VirtualNetworkGatewayConnection
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualNetworkGatewayConnection or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.VirtualNetworkGatewayConnection]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayConnection"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial request; the polling method must not see them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal polling response (or hand it to the
        # caller-supplied `cls` hook).
        deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # True -> default ARM poller; False -> no polling; anything else is a
    # caller-provided polling method used as-is.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    **kwargs: Any
) -> "_models.VirtualNetworkGatewayConnection":
    """Gets the specified virtual network gateway connection by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the virtual network gateway
     connection.
    :type virtual_network_gateway_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualNetworkGatewayConnection, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_10_01.models.VirtualNetworkGatewayConnection
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayConnection"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"

    # Expand the path template and assemble the GET request.
    url = self._client.format_url(
        self.get.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        virtualNetworkGatewayConnectionName=self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", "application/json", 'str')}  # type: Dict[str, Any]
    request = self._client.get(url, query_parameters, header_parameters)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE of the delete LRO.

    Accepts 200/202/204; any other status raises HttpResponseError after
    consulting ``error_map``.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"

    # Expand the path template and assemble the DELETE request.
    url = self._client.format_url(
        self._delete_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        virtualNetworkGatewayConnectionName=self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]
    request = self._client.delete(url, query_parameters, header_parameters)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # No body to deserialize; only the optional caller hook sees the response.
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified virtual network Gateway connection.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the virtual network gateway
     connection.
    :type virtual_network_gateway_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial request; the polling method must not see them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete produces no body: implicitly returns None unless a
        # caller-supplied `cls` hook is given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # True -> default ARM poller; False -> no polling; anything else is a
    # caller-provided polling method used as-is.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.VirtualNetworkGatewayConnection":
    """Send the initial PATCH of the update-tags LRO.

    Only a 200 response is accepted; it is deserialized into the full
    connection model.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayConnection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the tags object as the PATCH body and send the request.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)

    # Optional caller hook receives the raw pipeline response as well.
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetworkGatewayConnection"]:
    """Updates a virtual network gateway connection tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the virtual network gateway
     connection.
    :type virtual_network_gateway_connection_name: str
    :param parameters: Parameters supplied to update virtual network gateway connection tags.
    :type parameters: ~azure.mgmt.network.v2018_10_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualNetworkGatewayConnection or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.VirtualNetworkGatewayConnection]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayConnection"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PATCH when not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial request; the polling method must not see them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal polling response (or hand it to the
        # caller-supplied `cls` hook).
        deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # True -> default ARM poller (no lro_options here, unlike the other LROs
    # in this class); False -> no polling; anything else is used as-is.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'}  # type: ignore
async def _set_shared_key_initial(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    parameters: "_models.ConnectionSharedKey",
    **kwargs: Any
) -> "_models.ConnectionSharedKey":
    """Send the initial PUT of the set-shared-key LRO (to the /sharedkey
    sub-resource).

    Returns the deserialized shared key from a 200/201 response; any other
    status raises HttpResponseError (after consulting ``error_map``).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionSharedKey"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._set_shared_key_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the shared-key model as the PUT body and send the request.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ConnectionSharedKey')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 and 201 both carry the same payload shape.
    if response.status_code == 200:
        deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)

    # Optional caller hook receives the raw pipeline response as well.
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_set_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'}  # type: ignore
async def begin_set_shared_key(
    self,
    resource_group_name: str,
    virtual_network_gateway_connection_name: str,
    parameters: "_models.ConnectionSharedKey",
    **kwargs: Any
) -> AsyncLROPoller["_models.ConnectionSharedKey"]:
    """The Put VirtualNetworkGatewayConnectionSharedKey operation sets the virtual network gateway
    connection shared key for passed virtual network gateway connection in the specified resource
    group through Network resource provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The virtual network gateway connection name.
    :type virtual_network_gateway_connection_name: str
    :param parameters: Parameters supplied to the Begin Set Virtual Network Gateway connection
     Shared key operation throughNetwork resource provider.
    :type parameters: ~azure.mgmt.network.v2018_10_01.models.ConnectionSharedKey
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ConnectionSharedKey or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ConnectionSharedKey]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionSharedKey"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved token.
    if cont_token is None:
        raw_result = await self._set_shared_key_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial request; the polling method must not see them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal polling response (or hand it to the
        # caller-supplied `cls` hook).
        deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # True -> default ARM poller; False -> no polling; anything else is a
    # caller-provided polling method used as-is.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'}  # type: ignore
async def get_shared_key(
self,
resource_group_name: str,
virtual_network_gateway_connection_name: str,
**kwargs: Any
) -> "_models.ConnectionSharedKey":
"""The Get VirtualNetworkGatewayConnectionSharedKey operation retrieves information about the
specified virtual network gateway connection shared key through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network gateway connection shared
key name.
:type virtual_network_gateway_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionSharedKey, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ConnectionSharedKey
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionSharedKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_shared_key.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualNetworkGatewayConnectionListResult"]:
        """The List VirtualNetworkGatewayConnections operation retrieves all the virtual network gateways
        connections created.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkGatewayConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.VirtualNetworkGatewayConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayConnectionListResult"]
        # Default HTTP status -> exception mapping; callers may extend it via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the first-page request from the URL template, or a plain GET on
            # the service-supplied next_link (which already carries its query string).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (link to the next page or None, items).
            deserialized = self._deserialize('VirtualNetworkGatewayConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page through the pipeline and map HTTP errors.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections'} # type: ignore
async def _reset_shared_key_initial(
self,
resource_group_name: str,
virtual_network_gateway_connection_name: str,
parameters: "_models.ConnectionResetSharedKey",
**kwargs: Any
) -> Optional["_models.ConnectionResetSharedKey"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ConnectionResetSharedKey"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._reset_shared_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionResetSharedKey')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionResetSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'} # type: ignore
    async def begin_reset_shared_key(
        self,
        resource_group_name: str,
        virtual_network_gateway_connection_name: str,
        parameters: "_models.ConnectionResetSharedKey",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ConnectionResetSharedKey"]:
        """The VirtualNetworkGatewayConnectionResetSharedKey operation resets the virtual network gateway
        connection shared key for passed virtual network gateway connection in the specified resource
        group through Network resource provider.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_connection_name: The virtual network gateway connection reset
         shared key Name.
        :type virtual_network_gateway_connection_name: str
        :param parameters: Parameters supplied to the begin reset virtual network gateway connection
         shared key operation through network resource provider.
        :type parameters: ~azure.mgmt.network.v2018_10_01.models.ConnectionResetSharedKey
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ConnectionResetSharedKey or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ConnectionResetSharedKey]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionResetSharedKey"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved continuation
        # token. 'cls' is overridden so the raw pipeline response is preserved
        # for the poller; the caller's 'cls' is applied later on the final result.
        if cont_token is None:
            raw_result = await self._reset_shared_key_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were meant for the initial call only and must not leak
        # into the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO, honoring the caller's 'cls'.
            deserialized = self._deserialize('ConnectionResetSharedKey', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # This operation reports its final state via the 'location' header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'} # type: ignore
| |
r"""
===============
Decoding (MVPA)
===============
.. include:: ../../links.inc
Design philosophy
=================
Decoding (a.k.a. MVPA) in MNE largely follows the machine
learning API of the scikit-learn package.
Each estimator implements ``fit``, ``transform``, ``fit_transform``, and
(optionally) ``inverse_transform`` methods. For more details on this design,
visit scikit-learn_. For additional theoretical insights into the decoding
framework in MNE, see :footcite:`KingEtAl2018`.
For ease of comprehension, we will denote instantiations of the class using
the same name as the class but in small caps instead of camel cases.
Let's start by loading data for a simple two-class problem:
"""
# %%
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Epoch window around each event, in seconds.
tmin, tmax = -0.200, 0.500
event_id = {'Auditory/Left': 1, 'Visual/Left': 3}  # just use two
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here low-pass at 20 so we can more heavily decimate, and allow
# the example to run faster. The 2 Hz high-pass helps improve CSP.
raw.filter(2, 20)
events = mne.find_events(raw, 'STI 014')
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
# Read epochs. EOG is kept at this stage only so the rejection criterion can
# drop eye-artifact trials; it is removed right after.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=('grad', 'eog'), baseline=(None, 0.), preload=True,
                    reject=dict(grad=4000e-13, eog=150e-6), decim=10)
epochs.pick_types(meg=True, exclude='bads')  # remove stim and EOG
del raw
X = epochs.get_data()  # MEG signals: n_epochs, n_meg_channels, n_times
y = epochs.events[:, 2]  # target: auditory left vs visual left
# %%
# Transformation classes
# ======================
#
# Scaler
# ^^^^^^
# The :class:`mne.decoding.Scaler` will standardize the data based on channel
# scales. In the simplest modes ``scalings=None`` or ``scalings=dict(...)``,
# each data channel type (e.g., mag, grad, eeg) is treated separately and
# scaled by a constant. This is the approach used by e.g.,
# :func:`mne.compute_covariance` to standardize channel scales.
#
# If ``scalings='mean'`` or ``scalings='median'``, each channel is scaled using
# empirical measures. Each channel is scaled independently by the mean and
# standard deviation, or median and interquartile range, respectively, across
# all epochs and time points during :class:`~mne.decoding.Scaler.fit`
# (during training). The :meth:`~mne.decoding.Scaler.transform` method is
# called to transform data (training or test set) by scaling all time points
# and epochs on a channel-by-channel basis. To perform both the ``fit`` and
# ``transform`` operations in a single call, the
# :meth:`~mne.decoding.Scaler.fit_transform` method may be used. To invert the
# transform, :meth:`~mne.decoding.Scaler.inverse_transform` can be used. For
# ``scalings='median'``, scikit-learn_ version 0.17+ is required.
#
# .. note:: Using this class is different from directly applying
# :class:`sklearn.preprocessing.StandardScaler` or
# :class:`sklearn.preprocessing.RobustScaler` offered by
# scikit-learn_. These scale each *classification feature*, e.g.
# each time point for each channel, with mean and standard
# deviation computed across epochs, whereas
# :class:`mne.decoding.Scaler` scales each *channel* using mean and
# standard deviation computed across all of its time points
# and epochs.
#
# Vectorizer
# ^^^^^^^^^^
# Scikit-learn API provides functionality to chain transformers and estimators
# by using :class:`sklearn.pipeline.Pipeline`. We can construct decoding
# pipelines and perform cross-validation and grid-search. However scikit-learn
# transformers and estimators generally expect 2D data
# (n_samples * n_features), whereas MNE transformers typically output data
# with a higher dimensionality
# (e.g. n_samples * n_channels * n_frequencies * n_times). A Vectorizer
# therefore needs to be applied between the MNE and the scikit-learn steps
# like:
# Uses all MEG sensors and time points as separate classification
# features, so the resulting filters used are spatio-temporal
clf = make_pipeline(Scaler(epochs.info),
                    Vectorizer(),
                    LogisticRegression(solver='lbfgs'))
# 5-fold cross-validated accuracy on the vectorized epochs.
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
score = np.mean(scores, axis=0)
print('Spatio-temporal: %0.1f%%' % (100 * score,))
# %%
# PSDEstimator
# ^^^^^^^^^^^^
# The :class:`mne.decoding.PSDEstimator`
# computes the power spectral density (PSD) using the multitaper
# method. It takes a 3D array as input, converts it into 2D and computes the
# PSD.
#
# FilterEstimator
# ^^^^^^^^^^^^^^^
# The :class:`mne.decoding.FilterEstimator` filters the 3D epochs data.
#
# Spatial filters
# ===============
#
# Just like temporal filters, spatial filters provide weights to modify the
# data along the sensor dimension. They are popular in the BCI community
# because of their simplicity and ability to distinguish spatially-separated
# neural activity.
#
# Common spatial pattern
# ^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`mne.decoding.CSP` is a technique to analyze multichannel data based
# on recordings from two classes :footcite:`Koles1991` (see also
# https://en.wikipedia.org/wiki/Common_spatial_pattern).
#
# Let :math:`X \in R^{C\times T}` be a segment of data with
# :math:`C` channels and :math:`T` time points. The data at a single time point
# is denoted by :math:`x(t)` such that :math:`X=[x(t), x(t+1), ..., x(t+T-1)]`.
# Common spatial pattern (CSP) finds a decomposition that projects the signal
# in the original sensor space to CSP space using the following transformation:
#
# .. math:: x_{CSP}(t) = W^{T}x(t)
# :label: csp
#
# where each column of :math:`W \in R^{C\times C}` is a spatial filter and each
# row of :math:`x_{CSP}` is a CSP component. The matrix :math:`W` is also
# called the de-mixing matrix in other contexts. Let
# :math:`\Sigma^{+} \in R^{C\times C}` and :math:`\Sigma^{-} \in R^{C\times C}`
# be the estimates of the covariance matrices of the two conditions.
# CSP analysis is given by the simultaneous diagonalization of the two
# covariance matrices
#
# .. math:: W^{T}\Sigma^{+}W = \lambda^{+}
# :label: diagonalize_p
# .. math:: W^{T}\Sigma^{-}W = \lambda^{-}
# :label: diagonalize_n
#
# where :math:`\lambda^{C}` is a diagonal matrix whose entries are the
# eigenvalues of the following generalized eigenvalue problem
#
# .. math:: \Sigma^{+}w = \lambda \Sigma^{-}w
# :label: eigen_problem
#
# Large entries in the diagonal matrix corresponds to a spatial filter which
# gives high variance in one class but low variance in the other. Thus, the
# filter facilitates discrimination between the two classes.
#
# .. topic:: Examples
#
# * :ref:`ex-decoding-csp-eeg`
# * :ref:`ex-decoding-csp-eeg-timefreq`
#
# .. note::
#
# The winning entry of the Grasp-and-lift EEG competition in Kaggle used
# the :class:`~mne.decoding.CSP` implementation in MNE and was featured as
# a `script of the week <sotw_>`_.
#
# .. _sotw: http://blog.kaggle.com/2015/08/12/july-2015-scripts-of-the-week/
#
# We can use CSP with these data with:
# CSP spatial filtering followed by logistic regression on the CSP features.
csp = CSP(n_components=3, norm_trace=False)
clf_csp = make_pipeline(csp, LinearModel(LogisticRegression(solver='lbfgs')))
scores = cross_val_multiscore(clf_csp, X, y, cv=5, n_jobs=1)
print('CSP: %0.1f%%' % (100 * scores.mean(),))
# %%
# Source power comodulation (SPoC)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Source Power Comodulation (:class:`mne.decoding.SPoC`)
# :footcite:`DahneEtAl2014` identifies the composition of
# orthogonal spatial filters that maximally correlate with a continuous target.
#
# SPoC can be seen as an extension of the CSP where the target is driven by a
# continuous variable rather than a discrete variable. Typical applications
# include extraction of motor patterns using EMG power or audio patterns using
# sound envelope.
#
# .. topic:: Examples
#
# * :ref:`ex-spoc-cmc`
#
# xDAWN
# ^^^^^
# :class:`mne.preprocessing.Xdawn` is a spatial filtering method designed to
# improve the signal to signal + noise ratio (SSNR) of the ERP responses
# :footcite:`RivetEtAl2009`. Xdawn was originally
# designed for P300 evoked potential by enhancing the target response with
# respect to the non-target response. The implementation in MNE-Python is a
# generalization to any type of ERP.
#
# .. topic:: Examples
#
# * :ref:`ex-xdawn-denoising`
# * :ref:`ex-xdawn-decoding`
#
# Effect-matched spatial filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The result of :class:`mne.decoding.EMS` is a spatial filter at each time
# point and a corresponding time course :footcite:`SchurgerEtAl2013`.
# Intuitively, the result gives the similarity between the filter at
# each time point and the data vector (sensors) at that time point.
#
# .. topic:: Examples
#
# * :ref:`ex-ems-filtering`
#
# Patterns vs. filters
# ^^^^^^^^^^^^^^^^^^^^
#
# When interpreting the components of the CSP (or spatial filters in general),
# it is often more intuitive to think about how :math:`x(t)` is composed of
# the different CSP components :math:`x_{CSP}(t)`. In other words, we can
# rewrite Equation :eq:`csp` as follows:
#
# .. math:: x(t) = (W^{-1})^{T}x_{CSP}(t)
# :label: patterns
#
# The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns.
# This is also called the mixing matrix. The example :ref:`ex-linear-patterns`
# discusses the difference between patterns and filters.
#
# These can be plotted with:
# Fit CSP on full data and plot both the patterns (mixing-matrix columns)
# and the filters (de-mixing-matrix columns) as topographies.
csp.fit(X, y)
csp.plot_patterns(epochs.info)
csp.plot_filters(epochs.info, scalings=1e-9)
# %%
# Decoding over time
# ==================
#
# This strategy consists in fitting a multivariate predictive model on each
# time instant and evaluating its performance at the same instant on new
# epochs. The :class:`mne.decoding.SlidingEstimator` will take as input a
# pair of features :math:`X` and targets :math:`y`, where :math:`X` has
# more than 2 dimensions. For decoding over time the data :math:`X`
# is the epochs data of shape n_epochs x n_channels x n_times. As the
# last dimension of :math:`X` is the time, an estimator will be fit
# on every time instant.
#
# This approach is analogous to SlidingEstimator-based approaches in fMRI,
# where here we are interested in when one can discriminate experimental
# conditions and therefore figure out when the effect of interest happens.
#
# When working with linear models as estimators, this approach boils
# down to estimating a discriminative spatial filter for each time instant.
#
# Temporal decoding
# ^^^^^^^^^^^^^^^^^
#
# We'll use a Logistic Regression for a binary classification as machine
# learning model.
# We will train the classifier on all left visual vs auditory trials on MEG
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
# One classifier is fit and scored independently at every time point.
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot AUC as a function of time, with the 50% chance level marked.
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')  # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
# %%
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(),
                    LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)
# Extract the spatial patterns back-projected through the preprocessing steps.
coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
# Wrap the patterns as an Evoked so standard MNE plotting can be reused.
evoked_time_gen = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
evoked_time_gen.plot_joint(times=np.arange(0., .500, .100), title='patterns',
                           **joint_kwargs)
# %%
# Temporal generalization
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Temporal generalization is an extension of the decoding over time approach.
# It consists in evaluating whether the model estimated at a particular
# time instant accurately predicts any other time instant. It is analogous to
# transferring a trained model to a distinct learning problem, where the
# problems correspond to decoding the patterns of brain activity recorded at
# distinct time instants.
#
# The object for temporal generalization is
# :class:`mne.decoding.GeneralizingEstimator`. It expects as input :math:`X`
# and :math:`y` (similarly to :class:`~mne.decoding.SlidingEstimator`) but
# generates predictions from each model for all time instants. The class
# :class:`~mne.decoding.GeneralizingEstimator` is generic and will treat the
# last dimension as the one to be used for generalization testing. For
# convenience, here, we refer to it as different tasks. If :math:`X`
# corresponds to epochs data then the last dimension is time.
#
# This runs the analysis used in :footcite:`KingEtAl2014` and further detailed
# in :footcite:`KingDehaene2014`:
# define the Temporal generalization object
# Each model trained at one time point is also evaluated on all other time
# points, yielding a (train time x test time) score matrix.
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc',
                                 verbose=True)
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the diagonal (it's exactly the same as the time-by-time decoding above)
fig, ax = plt.subplots()
ax.plot(epochs.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
# %%
# Plot the full (generalization) matrix:
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',
               extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
cbar = plt.colorbar(im, ax=ax)
cbar.set_label('AUC')
# %%
# Projecting sensor-space patterns to source space
# ================================================
# If you use a linear classifier (or regressor) for your data, you can also
# project these to source space. For example, using our ``evoked_time_gen``
# from before:
# Noise covariance from the pre-stimulus baseline (tmax=0).
cov = mne.compute_covariance(epochs, tmax=0.)
del epochs
fwd = mne.read_forward_solution(
    data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif')
inv = mne.minimum_norm.make_inverse_operator(
    evoked_time_gen.info, fwd, cov, loose=0.)
# Apply the inverse to the pattern "evoked" (1/9 = SNR of 3 assumed).
stc = mne.minimum_norm.apply_inverse(evoked_time_gen, inv, 1. / 9., 'dSPM')
del fwd, inv
# %%
# And this can be visualized using :meth:`stc.plot <mne.SourceEstimate.plot>`:
brain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1,
                 subjects_dir=subjects_dir)
# %%
# Source-space decoding
# =====================
#
# Source space decoding is also possible, but because the number of features
# can be much larger than in the sensor space, univariate feature selection
# using ANOVA f-test (or some other metric) can be done to reduce the feature
# dimension. Interpreting decoding results might be easier in source space as
# compared to sensor space.
#
# .. topic:: Examples
#
# * :ref:`tut-dec-st-source`
#
# Exercise
# ========
#
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# References
# ==========
# .. footbibliography::
| |
#!/usr/bin/env python
"""
jsi.py
Unofficial justseed.it cli client
---
Copyright (c) 2014, Andy Gock. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
JSI_VERSION = "0.0"
import sys
import os
import urllib
import urllib2
import json
import argparse
import poster
import collections
import re
import StringIO
import gzip
import bencode
import math
from colorama import init, Fore, Back, Style
from xml.dom import minidom
from datetime import datetime
import platform
import glob
import csv
import io
def is_number(s):
    """Return True if *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def hexdump(src, length=16):
    """Return a classic hex dump of *src* (a byte string).

    Each line shows the offset, up to *length* bytes as two-digit hex
    values, and a printable rendering where non-printable bytes appear
    as '.'.
    """
    # Map each byte value to itself if its repr is a plain quoted char
    # (printable), otherwise to '.'.
    hdfilter = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
    lines = []
    # range instead of xrange: identical iteration on Python 2 and keeps
    # the function from raising NameError on Python 3.
    for c in range(0, len(src), length):
        chars = src[c:c+length]
        hex_buffer = ' '.join(["%02x" % ord(x) for x in chars])
        # Only ASCII (<= 127) bytes are looked up; everything else is '.'.
        printable = ''.join(["%s" % ((ord(x) <= 127 and hdfilter[ord(x)]) or '.') for x in chars])
        lines.append("%04x %-*s %s\n" % (c, length*3, hex_buffer, printable))
    return ''.join(lines)
def sizeof_fmt(num):
    """Format *num* (a byte count) as a human-readable string, e.g. '3.1 MB'.

    Fix: values of 1024 TB or more previously fell off the end of the loop
    and returned None; they are now reported in PB. Behavior for all
    smaller values is unchanged.
    """
    for x in ['B', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, x)
        num /= 1024.0
    # >= 1024 TB: report in petabytes instead of silently returning None.
    return "%3.1f %s" % (num, 'PB')
class JustSeedIt():
    """Client for the justseed.it seedbox HTTP API.

    Class-level defaults below are resolved once at import time from the
    environment (JSI_OUTPUT_DIR, JSI_RATIO, JSI_ARIA2_OPTIONS) with
    hard-coded fallbacks.
    """
    # Default options
    if os.getenv('JSI_OUTPUT_DIR'):
        DEFAULT_DOWNLOAD_DIR = os.getenv('JSI_OUTPUT_DIR')
        # add trailing slash, if not given
        if DEFAULT_DOWNLOAD_DIR[-1:] != '/':
            DEFAULT_DOWNLOAD_DIR += '/'
    else:
        DEFAULT_DOWNLOAD_DIR = 'd:/Downloads/justseed.it Downloads/'
    if os.getenv('JSI_RATIO'):
        # NOTE(review): this path leaves DEFAULT_RATIO as a *string* while the
        # fallback below is a float -- confirm downstream users tolerate both.
        DEFAULT_RATIO = os.getenv('JSI_RATIO')
    else:
        DEFAULT_RATIO = 1.0
    if os.getenv('JSI_ARIA2_OPTIONS'):
        DEFAULT_ARIA2_OPTIONS = os.getenv('JSI_ARIA2_OPTIONS')
    else:
        DEFAULT_ARIA2_OPTIONS = "--file-allocation=none --check-certificate=false --max-concurrent-downloads=8 " + \
            "--continue --max-connection-per-server=8 --min-split-size=1M"
    DEFAULT_API_SERVER = "https://api.justseed.it"
def __init__(self, api_key=''):
self.api_key = "" # start off blank
if self.api_key != "":
self.api_key = api_key
if self.api_key == "":
# No found in file searches above
sys.stderr.write("Error: Specified API key with --api-key was blank")
sys.exit()
else:
# Get homedir
self.homedir = os.path.expanduser("~")
# Obtain API key
for keyfile in [self.homedir + '/.justseedit_apikey',
os.path.dirname(os.path.realpath(__file__)) + '/.justseedit_apikey']:
# Try different locations for key file
try:
f = open(keyfile, 'r')
key = f.read()
self.api_key = key.strip()
#sys.stderr.write("Read API key from '{}'\n".format(keyfile))
break
except IOError:
# Could not read api key from file
# Use default api_key, which is actually an empty string
continue
if self.api_key == "":
# No found in file searches above
sys.stderr.write("Error: No API key file could be found or was specified")
sys.exit()
# Set default configs, these may be changed later
self.url = self.DEFAULT_API_SERVER
self.aria2_options = self.DEFAULT_ARIA2_OPTIONS
self.output_dir = self.DEFAULT_DOWNLOAD_DIR
self.error = False
self.debug = 0
self.dry_run = 0
self.aria2_log = False
self.xml_mode = False
self.file_data = None
self.compress = True
self.verbose = False
self.xml_response = '' # filled with results every time an api call is made
self.id_to_infohash_map = {} # map id number to infohash
self.infohash_to_name = {} # map infohas to torrent name
self.torrents = None # filled when self.list_update() is called
self.data_remaining_as_bytes = 0 # remaining quota
self.data_remaining_as_string = 0 # remaining quota
self.debug_logfile = 'debug.log'
self.aria2_logfile = 'aria2.log'
self.file_attr = []
# List modifiers
self.list_complete_only = False
self.list_incomplete_only = False
self.list_stopped_only = False
self.list_transfer_only = False
# Values used in --edit operations
self.edit_opts = []
self.ratio = self.DEFAULT_RATIO # this is also used in add, --torrent
self.name = None
self.add_tracker_url = ''
self.delete_tracker_url = ''
self.label = ''
if platform.system() == "Windows":
self._globbing = True
else:
self._globbing = False
@staticmethod
def pretty_print(d):
print(json.dumps(d, indent=4))
    @staticmethod
    def quit(message):
        """Print an error *message* to stdout and terminate the process."""
        print "Error:", message
        print "Quitting."
        sys.exit()
    def edit_append(self, option):
        """Queue an edit operation name (e.g. 'ratio') for a later --edit run."""
        self.edit_opts.append(option)
        return
@staticmethod
def xml_from_file(filename):
""" Experimental use only """
f = open(filename, 'r')
xml = f.read()
return xml
def debug_log(self, data, marker=None):
f = open(self.debug_logfile, 'a')
if marker:
datestr = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
f.write("\n[{} -- {}]\n".format(datestr, marker))
if data != '':
f.write(data + "\n")
f.close()
return
def api(self, page, post_data=None):
""" Make a API call using multipart/form-data POST
Returns XML response on success or False on error
"""
if not post_data:
post_data = {}
post_data['api_key'] = self.api_key
if self.debug:
self.debug_log("Calling {:} with:".format(page), "API CALL")
for key, value in post_data.items():
if key == 'torrent_file':
# don't dump torrent file data into log file
self.debug_log("{:>15}: {:}".format(key, "[binary data]"))
else:
self.debug_log("{:>15}: {:}".format(key, value))
try:
# for application/x-www-form-urlencoded
#post_data = urllib.urlencode( data )
#req = urllib2.Request(self.url + page, post_data)
# for multipart/form-data
poster.streaminghttp.register_openers()
post_data, headers = poster.encode.multipart_encode(post_data)
if self.dry_run:
print "\nHeaders:\n"
for k, v in headers.items():
print "{}: {}".format(k, v)
print "\nBody:\n"
print hexdump("".join(post_data))
return "<data>Dry run mode: This is not actual API server response.</data>"
if self.verbose or self.debug:
sys.stderr.write('Requesting from '+self.url + page + " ... ")
# Form and make the actual request
req = urllib2.Request(self.url + page, post_data, headers)
if self.compress:
# Tell server we can read gzip encoded stream
req.add_header('Accept-Encoding', 'gzip')
response = urllib2.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
# Server sent gzip encoded stream, uncompress it
gzbuffer = StringIO.StringIO(response.read())
f = gzip.GzipFile(fileobj=gzbuffer)
xml_response = f.read()
else:
# Normal uncompressed stream
xml_response = response.read() # Read server response
if self.verbose or self.debug:
# Tell user the response was read
sys.stderr.write("OK\n")
# Store xml for later use, maybe we might use it
self.xml_response = xml_response
if self.debug:
self.debug_log("", "XML RESPONSE")
self.debug_log(xml_response)
except urllib2.URLError, urllib2.HTTPError:
sys.stderr.write("Error: URL or HTTP error, server did not respond. Quitting\n")
sys.exit()
if self.check_server_response(xml_response):
# Server responded with "SUCCESS"
self.error = False
return xml_response
else:
# Server did NOT respond with "SUCCESS"
# self.check_server_response() will already display an error message
self.error = True
sys.stderr.write("API server did not respond with SUCCESS on last call. Quitting.")
#sys.exit()
#print xml_response
return False # do not quit, just skip this call and try next one
    @staticmethod
    def check_server_response(xml_data):
        """ Check server response is valid and return True or False. Error is printed to
        stderr if response is not "SUCCESS".

        xml_data -- raw XML text containing a <status> element and, on
                    failure, a urlencoded <message> element.
        Raises xml parsing errors / IndexError if the expected elements are
        missing (not handled here).
        """
        status = minidom.parseString(xml_data).getElementsByTagName("status")[0].firstChild.nodeValue
        if status == 'SUCCESS':
            return True
        else:
            # Server's message element is urlencoded; decode before display
            error = urllib.unquote(minidom.parseString(xml_data).getElementsByTagName("message")[0].firstChild.nodeValue)
            sys.stderr.write('Warning: '+error+"\n")
            return False
def id_to_infohash(self, torrent_id):
""" Find the info hash, when given a ID, returns info hash
"""
if torrent_id in self.id_to_infohash_map:
# There is a matching info hash found for this ID
return self.id_to_infohash_map[torrent_id]
else:
self.list_update() # Read info from API server
if torrent_id in self.id_to_infohash_map:
return self.id_to_infohash_map[torrent_id]
else:
sys.stderr.write("Error: No info hash available for ID {}\n".format(torrent_id))
return False
    def info(self, infohash):
        """ Grab info about a (single) torrent. Returns XML response.

        infohash -- 40-char info hash or a torrent ID (resolved via
                    id_to_infohash); returns None when the ID is unknown.
        In xml_mode the raw XML is printed and the process exits.
        """
        if len(infohash) != 40:
            # Argument is a torrent ID, not a hash
            infohash = self.id_to_infohash(infohash)
            if not infohash:
                return
        xml_response = self.api("/torrent/information.csp", {'info_hash': infohash})
        if self.xml_mode:
            print xml_response
            sys.exit()
        xml = minidom.parseString(xml_response)
        for element in xml.getElementsByTagName('data')[0].childNodes:
            if element.nodeType == element.ELEMENT_NODE:
                key = element.nodeName
                try:
                    value = xml.getElementsByTagName(element.nodeName)[0].firstChild.nodeValue
                except AttributeError:
                    # Empty element -> firstChild is None
                    value = ""
                # Print all elements and values
                if key == 'name':
                    # Replace unicode chars with '-' for torrent name only
                    print "{:>24}: {:}".format(key, self.urldecode_to_ascii(value, 'replace'))
                else:
                    print "{:>24}: {:}".format(key, self.urldecode_to_ascii(value, 'strict'))
        return xml_response
    @staticmethod
    def urldecode_to_ascii(s, error_opt='replace'):
        """Urldecode *s* (UTF-8) and force the result to ASCII.

        error_opt -- codec error mode for the final encode: 'replace' maps
                     non-ASCII chars to '?', 'strict' raises UnicodeEncodeError.
        """
        output = urllib.unquote(s.encode('ascii')).decode('utf-8').encode('ascii', error_opt)
        # Replace '?' with '-'
        # (cosmetic: in 'replace' mode the codec emits '?' for unknown chars)
        if error_opt == 'replace':
            output = re.sub('\?', '-', output)
        return output
    def pieces(self, infohash):
        """ Display pieces for given info hashes or IDs, returns XML response.

        infohash -- 40-char info hash or torrent ID; returns None when the ID
                    cannot be resolved.  In xml_mode prints the XML and exits.
        The raw response is also stored in self.xml_response by api().
        """
        if len(infohash) != 40:
            # Argument is a torrent ID, not a hash
            infohash = self.id_to_infohash(infohash)
            if not infohash:
                return
        response_xml = self.api("/torrent/pieces.csp", {'info_hash': infohash})
        if self.xml_mode:
            print response_xml
            sys.exit()
        return response_xml
    def pieces_aria2(self, infohash):
        """Print one aria2c command per piece to download each piece file.

        Relies on self.pieces() filling self.xml_response.
        """
        # get pieces info
        self.pieces(infohash)
        pieces = minidom.parseString(self.xml_response).getElementsByTagName("row")
        for piece in pieces:
            # get useful info / metadata for each piece
            piece_number = piece.getElementsByTagName('number')[0].firstChild.nodeValue
            piece_hash = piece.getElementsByTagName('hash')[0].firstChild.nodeValue
            piece_size = piece.getElementsByTagName('size')[0].firstChild.nodeValue
            # URL is urlencoded in the XML; the API key must be appended
            piece_url = urllib.unquote(piece.getElementsByTagName('url')[0].firstChild.nodeValue) + "&api_key=" + self.api_key
            print "aria2c {} -o piece.{} \"{}\"".format(self.aria2_options, piece_number, piece_url)
        # Note: use should perform sha1sum check on each piece
    def pieces_sha1sum(self, infohash):
        """Print sha1sum-format lines ("<hash> *pieces.<n>") for each piece.

        The user redirects this output to a file (e.g. pieces.sha1) and runs
        sha1sum -c against the downloaded piece files.
        """
        # get pieces info, generate sha1sum output, user uses this to redirect to a sha1sum files
        # ... > pieces.sha1
        self.pieces(infohash)
        pieces = minidom.parseString(self.xml_response).getElementsByTagName("row")
        for piece in pieces:
            # get useful info / metadata for each piece
            piece_number = piece.getElementsByTagName('number')[0].firstChild.nodeValue
            piece_hash = piece.getElementsByTagName('hash')[0].firstChild.nodeValue
            piece_size = piece.getElementsByTagName('size')[0].firstChild.nodeValue
            piece_url = urllib.unquote(piece.getElementsByTagName('url')[0].firstChild.nodeValue) + "&api_key=" + self.api_key
            print "{} *pieces.{}".format(piece_hash, piece_number)
    def bitfield(self, infohash):
        """ Display bitfield for given info hashes or IDs, returns XML response.

        Parses the server's '0'/'1' bitfield string into a list of ints and
        stores it in self.bitfield_array (one entry per piece).  Exits the
        process on a malformed bitfield or a piece-count mismatch.
        """
        if len(infohash) != 40:
            # Argument is a torrent ID, not a hash
            infohash = self.id_to_infohash(infohash)
            if not infohash:
                return
        response_xml = self.api("/torrent/bitfield.csp", {'info_hash': infohash})
        if self.xml_mode:
            print response_xml
            sys.exit()
        number_of_pieces = int(minidom.parseString(self.xml_response).getElementsByTagName('pieces')[0].firstChild.nodeValue)
        bitfield_as_string = minidom.parseString(self.xml_response).getElementsByTagName('bitfield')[0].firstChild.nodeValue
        # generate bitfield as an array
        bitfield = []
        for bit in bitfield_as_string:
            if bit == '1':
                bitfield.append(1)
            elif bit == '0':
                bitfield.append(0)
            else:
                # Any other character means the response is corrupt
                sys.stderr.write("Detected invalid bitfield char, expected '0' or '1' but got '{}'".format(bit))
                sys.exit()
        if len(bitfield) != number_of_pieces:
            # Sanity check: server-declared piece count must match the bitfield
            sys.stderr.write("Number of elements in bitfield does not match number of pieces {} != {}".format(len(bitfield),number_of_pieces))
            sys.exit()
        self.bitfield_array = bitfield
        #print "Bitfield for " + Style.BRIGHT + "{}".format(infohash) + Style.RESET_ALL
        #print "   Pieces: " + str(number_of_pieces)
        #print " Bitfield: " + bitfield_as_string
        return response_xml
def trackers(self, infohash):
""" Display list of trackers for given info hashes or IDs, returns XML response
"""
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
return
response_xml = self.api("/torrent/trackers.csp", {'info_hash': infohash})
if self.xml_mode:
print response_xml
sys.exit()
return response_xml
def edit(self, infohashes):
""" Edit torrent. Can change ratio or name. Does not return anything.
"""
parameters = self.edit_opts
self.list_update()
if not isinstance(infohashes, list):
infohashes = [infohashes]
infohashes = self.expand(infohashes)
for infohash in infohashes:
torrent_id = False
if len(infohash) != 40:
torrent_id = infohash
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
if 'ratio' in parameters:
# ratio already set in self.ratio, given my --ratio arg
if torrent_id:
sys.stderr.write("Changing ratio of torrent {} to {}\n".format(torrent_id, self.ratio))
else:
sys.stderr.write("Changing ratio of torrent {} to {}\n".format(infohash, self.ratio))
response_xml = self.api("/torrent/set_maximum_ratio.csp",
{'info_hash': infohash, 'maximum_ratio': self.ratio})
if self.xml_mode:
print response_xml
if 'name' in parameters:
if torrent_id:
sys.stderr.write("Changing name of torrent {} to \"{}\"\n".format(torrent_id, self.name))
else:
sys.stderr.write("Changing name of torrent {} to \"{}\"\n".format(infohash, self.name))
if self.name != "":
response_xml = self.api("/torrent/set_name.csp",
{'info_hash': infohash, 'name': self.name})
else:
sys.stderr.write("Resetting torrent name to default.\n")
response_xml = self.api("/torrent/set_name.csp",
{'info_hash': infohash})
if self.xml_mode:
print response_xml
if 'add_tracker' in parameters:
# add tracker url
if torrent_id:
sys.stderr.write("Adding tracker \"{}\" to torrent {}\n".format(self.add_tracker_url, torrent_id))
else:
sys.stderr.write("Adding tracker \"{}\" to torrent {}\n".format(self.add_tracker_url, infohash))
if self.add_tracker_url != "":
response_xml = self.api("/torrent/add_tracker.csp",
{'info_hash': infohash, 'url': self.add_tracker_url})
if self.xml_mode:
print response_xml
if 'delete_tracker' in parameters:
# delete tracker url
if torrent_id:
sys.stderr.write("Deleting tracker \"{}\" from torrent {}\n".format(self.delete_tracker_url, torrent_id))
else:
sys.stderr.write("Deleting tracker \"{}\" from torrent {}\n".format(self.delete_tracker_url, infohash))
if self.add_tracker_url != "":
response_xml = self.api("/torrent/delete_tracker.csp",
{'info_hash': infohash, 'url': self.add_tracker_url})
if self.xml_mode:
print response_xml
if 'label' in parameters:
# edit label of torrent
if torrent_id:
sys.stderr.write("Adding label \"{}\" to torrent {}\n".format(self.label, torrent_id))
else:
sys.stderr.write("Adding label \"{}\" to torrent {}\n".format(self.label, infohash))
if self.add_tracker_url != "":
response_xml = self.api("/torrent/label.csp",
{'info_hash': infohash, 'url': self.label})
else:
# remove label
response_xml = self.api("/torrent/label.csp",
{'info_hash': infohash})
if self.xml_mode:
print response_xml
if self.xml_mode:
sys.exit()
return
    def peers(self, infohash):
        """ Display list of peers, returns XML response.
        Currently not implemented.

        infohash -- 40-char info hash or torrent ID; returns None when the ID
                    cannot be resolved.  In xml_mode prints the XML and exits.
        """
        if len(infohash) != 40:
            # Argument is a torrent ID, not a hash
            infohash = self.id_to_infohash(infohash)
            if not infohash:
                return
        response_xml = self.api("/torrent/peers.csp", {'info_hash': infohash})
        if self.xml_mode:
            print response_xml
            sys.exit()
        return response_xml
def reset(self, infohashes):
""" Reset downloaded, uploaded, ratio counter for torrent(s)
"""
self.list_update()
infohashes = self.expand(infohashes)
for infohash in infohashes:
torrent_id = infohash
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
sys.stderr.write("Resetting torrent: {}\n".format(torrent_id))
response_xml = self.api("/torrent/reset.csp", {'info_hash': infohash})
if self.xml_mode:
print response_xml
continue
return
def start(self, infohashes):
""" Start torrent(s)
"""
self.list_update()
infohashes = self.expand(infohashes)
for infohash in infohashes:
torrent_id = infohash
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
sys.stderr.write("Starting torrent: {}\n".format(torrent_id))
response_xml = self.api("/torrent/start.csp", {'info_hash': infohash})
if self.xml_mode:
print response_xml
continue
return
def stop(self, infohashes):
""" Stop torrent(s)
"""
self.list_update()
infohashes = self.expand(infohashes)
for infohash in infohashes:
torrent_id = infohash
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
sys.stderr.write("Stopping torrent: {}\n".format(torrent_id))
response_xml = self.api("/torrent/stop.csp", {'info_hash': infohash})
if self.xml_mode:
print response_xml
continue
return
def delete(self, infohashes):
""" Delete torrent(s)
"""
self.list_update()
infohashes = self.expand(infohashes)
for infohash in infohashes:
torrent_id = infohash
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
sys.stderr.write("Deleting torrent: {}\n".format(torrent_id))
response_xml = self.api("/torrent/delete.csp", {'info_hash': infohash})
if self.xml_mode:
print response_xml
continue
return
    def delete_stopped(self):
        """ Delete all torrents in a stopped state.

        Collects the stopped torrents first, then deletes them one by one via
        self.delete() (so the cached list is not mutated while iterating).
        """
        #self.list_stopped_only = True
        stopped_torrents = []
        self.list_update()
        for torrent in self.torrents:
            #self.pretty_print(torrent)
            if torrent.getElementsByTagName('status')[0].firstChild.nodeValue != "stopped":
                # skip anything that is not stopped
                continue
            # Make list of stopped torrents, we'll come back to this later
            stopped_torrents.append(torrent)
        # From our list of stopped torrents, delete each one
        for torrent in stopped_torrents:
            sys.stderr.write("Deleting {} {}\n".format(Fore.RED + torrent.getAttribute('id') + Fore.RESET, torrent.getElementsByTagName('name')[0].firstChild.nodeValue))
            torrent_hash = torrent.getElementsByTagName('info_hash')[0].firstChild.nodeValue
            self.delete(torrent_hash)
        return
def download_links_renew(self, infohashes):
""" Regenerate download links for torrent
"""
self.list_update()
infohashes = self.expand(infohashes)
for infohash in infohashes:
torrent_id = infohash
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
sys.stderr.write("Renewing links for torrent: {}\n".format(torrent_id))
response_xml = self.api("/torrent/links/create.csp", {'info_hash': infohash, 'force': 1})
if self.xml_mode:
print response_xml
continue
return
def files(self, infohash):
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
return
self.bitfield(infohash) # check bitfield for piece information
response_xml = self.api("/torrent/files.csp", {'info_hash': infohash})
if self.xml_mode:
print response_xml
sys.exit()
rows = minidom.parseString(self.xml_response).getElementsByTagName("row")
#sys.stderr.write("Number of files: " + str(len(rows)) + "\n")
for row in rows:
try:
url = urllib.unquote(row.getElementsByTagName('url')[0].firstChild.nodeValue)
except AttributeError:
url = ""
# check whether individual files are completed or not, byt referencing against bitfield
start_piece = int(row.getElementsByTagName('start_piece')[0].firstChild.nodeValue)
end_piece = int(row.getElementsByTagName('end_piece')[0].firstChild.nodeValue)
pieces_complete = 1
for p in range(start_piece, end_piece):
if jsi.bitfield_array[p] != 1:
pieces_complete = 0
data = (
row.getElementsByTagName('torrent_offset')[0].firstChild.nodeValue,
row.getElementsByTagName('start_piece')[0].firstChild.nodeValue,
row.getElementsByTagName('start_piece_offset')[0].firstChild.nodeValue,
row.getElementsByTagName('end_piece')[0].firstChild.nodeValue,
row.getElementsByTagName('end_piece_offset')[0].firstChild.nodeValue,
urllib.unquote(row.getElementsByTagName('path')[0].firstChild.nodeValue),
row.getElementsByTagName('size_as_bytes')[0].firstChild.nodeValue,
row.getElementsByTagName('total_downloaded_as_bytes')[0].firstChild.nodeValue,
url,
str(pieces_complete)
)
# store it internally too
jsi.file_attr.append(data)
return response_xml
def files_csv(self, infohash=None):
# Displayed detailed csv on file information
if infohash:
self.files(infohash)
writer = csv.writer(sys.stdout, dialect='excel')
#csvout = io.StringIO()
#writer = csv.writer(csvout, dialect='excel')
# write heading
writer.writerow([
'torrent_offset',
'start_piece',
'start_piece_offset',
'end_piece',
'end_piece_offset',
'path',
'size_as_bytes',
'total_downloaded_as_bytes',
'url',
'piece_complete' # is the piece complete? 0 or 1
])
# extra blank lines written to stdout on Windows, need to fix!
# can fix by opening stdout as "wb", but not sure if this is possible?
for file in jsi.file_attr:
writer.writerow(file)
#print csvout.getvalue()
def files_pretty(self, infohash):
# Display pretty coloured file info (file path, name and sizes
if infohash:
self.files(infohash)
for file in jsi.file_attr:
# must use index numbers, see self.files_csv for positions
# all fields are string
if file[9] == "1":
# file complete
print "{} {}".format(file[5], Fore.GREEN + file[6] + Fore.RESET)
else:
# file incomplete
print "{} {}".format(file[5], Fore.RED + int(file[6]) + Fore.RESET)
def files_xml(self, infohash):
if len(infohash) != 40:
infohash = self.id_to_infohash(infohash)
if not infohash:
return
response_xml = self.api("/torrent/files.csp", {'info_hash': infohash})
return response_xml
    def download_links(self, infohashes):
        """ Get download links for infohash or ID number.
        Return list of direct download urls.

        infohashes -- list of info hashes / IDs (ranges like '5..8' expand).
        Side effect: stores the last files XML rows in self.file_data.
        In xml_mode prints each raw XML response and exits at the end.
        """
        # grab list info, so we can get the torrent name
        self.list_update()
        url_list = []
        infohashes = self.expand(infohashes)
        for infohash in infohashes:
            if len(infohash) != 40:
                # Argument is a torrent ID, not a hash
                infohash = self.id_to_infohash(infohash)
                if not infohash:
                    continue
            self.api("/torrent/files.csp", {'info_hash': infohash})
            if self.xml_mode:
                print self.xml_response
                continue
            downloads = minidom.parseString(self.xml_response).getElementsByTagName("row")
            self.file_data = downloads
            for download in downloads:
                try:
                    url_list.append(urllib.unquote(download.getElementsByTagName('url')[0].firstChild.nodeValue))
                except AttributeError:
                    # No download link for this file
                    pass
        if self.xml_mode:
            sys.exit()
        return url_list
@staticmethod
def expand(original):
""" Expands ['5..8'] to ['5','6','7','8']
"""
if not isinstance(original, list):
original = [original]
if len(original) == 1:
result = re.match("([0-9]+)\.\.([0-9]+)", original[0])
if result:
matched = result.groups()
if len(matched) == 2:
output = []
for n in range(int(matched[0]), int(matched[1])+1):
output.append(str(n))
return output
else:
return original
else:
return original
else:
# no change
return original
def aria2_script(self, infohashes, options=None):
""" Generate a aria2 download script for selected infohash or id number
"""
infohashes = self.expand(infohashes)
self.list_update()
# grab *all* download links
xml_download_links_all = self.api("/links/list.csp")
for infohash in infohashes:
# get info hash
if len(infohash) != 40:
torrent_id = infohash
infohash = self.id_to_infohash(infohash)
if not infohash:
continue
else:
torrent_id = infohash
# loop through each row of link list output
link_rows = minidom.parseString(jsi.xml_response).getElementsByTagName("row")
for row in link_rows:
link_infohash = row.getElementsByTagName('info_hash')[0].firstChild.nodeValue
# check for missing infohash
if link_infohash == infohash:
# found a matching file for the selected infohash
filename = row.getElementsByTagName('file_name')[0].firstChild.nodeValue # not used
url = self.urldecode_to_ascii(row.getElementsByTagName('url')[0].firstChild.nodeValue)
# get torrent name
name = self.infohash_to_name[infohash]
# prepare aria2 command line
aria2_options = self.aria2_options
file_path = self.urldecode_to_ascii(re.sub('https://download.justseed\.it/.{40}/', '', url))
subdir_name = os.path.dirname(file_path)
file_name = os.path.basename(file_path)
output_dir = self.output_dir + subdir_name
if self.aria2_log:
aria2_options = aria2_options + " --log=\"" + self.aria2_logfile + "\" --log-level=notice"
print "aria2c {} -d \"{}\" -o \"{}\" \"{}\"".format(aria2_options, output_dir, file_name, url)
continue # go to next infohash
return
#def info_map(self):
# self.list_update()
# print " ID INFOHASH"
# for torrent_id, torrent in self.torrents.items():
# print "{:>3} {}".format(torrent_id, torrent['info_hash'])
@staticmethod
def glob_expand(arguments):
""" On Windows console, it does not glob *.torrent - so lets do it manually
"""
globbed_list = []
for item in arguments:
items = glob.glob(item)
if len(items) == 0:
sys.stderr.write("Could not find file '{0}'".format(item))
continue
for x in items:
globbed_list.append(x)
return globbed_list
    def add_magnet(self, magnets):
        """ Add magnet links defined in list 'magnets'.
        Doesn't return anything.

        magnets -- a magnet URI string or a list of them; each is submitted
                   with the current self.ratio as maximum ratio.
        """
        # if argument is single line string, turn it into a single element list
        if isinstance(magnets,str):
            single_magnet = magnets
            magnets = []
            magnets.append(single_magnet)
        for magnet in magnets:
            sys.stderr.write("Adding magnet link \"{}\" with ratio {}\n".format(magnet, self.ratio))
            # Check magnet data is valid
            # @todo
            response_xml = self.api("/torrent/add.csp", {'maximum_ratio': str(self.ratio), 'url': magnet})
            if self.xml_mode:
                print response_xml
        return
    def add_torrent_file(self, filenames):
        """ Add .torrent files to system. 'filenames' is a list of filenames.
        Doesn't return anything.

        Each file is bdecoded first to verify it is a valid .torrent; invalid
        or unreadable files are reported on stderr and skipped.
        """
        if self._globbing:
            # We need to manually glob (Windows console does not)
            filenames = self.glob_expand(filenames)
        for filename in filenames:
            sys.stderr.write("Adding torrent file '{}' with ratio {}\n".format(filename, self.ratio))
            try:
                f = open(filename, 'rb')
                torrent_data = f.read()
            except IOError:
                sys.stderr.write("Could not open file '{0}'".format(filename))
                continue
            # Check .torrent file data is valid
            try:
                bencode.bdecode(torrent_data)
            except bencode.BTFailure:
                sys.stderr.write("Error: Ignoring '{}', not a valid .torrent file!\n".format(filename))
                continue
            self.api("/torrent/add.csp", {'torrent_file': torrent_data, 'maximum_ratio': str(self.ratio)})
            if self.xml_mode:
                print self.xml_response
        return
    def add_infohash(self, infohashes):
        """ Add torrents to system, referenced by infohash.
        Doesn't return anything.

        infohashes -- list of candidate info hashes; each must be a 40-char
                      hexadecimal string, others are reported and skipped.
        """
        for infohash in infohashes:
            sys.stderr.write("Adding infohash '{}' with ratio {}\n".format(infohash, self.ratio))
            # Check infohash is valid string (40 char hexadecimal string)
            match = re.search("^[0-9A-Fa-f]{40}$", infohash)
            if not match:
                sys.stderr.write("Not a valid hash! Skipping \"{}\"...".format(infohash))
                continue
            self.api("/torrent/add.csp", {'info_hash': infohash, 'maximum_ratio': str(self.ratio)})
            if self.xml_mode:
                print self.xml_response
        return
    def list_update(self):
        """ Read list information and save in self.torrents.

        Fetches /torrents/list.csp only once per process run (cached in
        self.torrents).  Also rebuilds self.id_to_infohash_map and
        self.infohash_to_name, and records the remaining quota.
        Returns self.xml_response (None/empty when the API call failed).
        """
        if not self.torrents:
            xml_response = self.api("/torrents/list.csp")
            if not xml_response:
                return
            # Make new map; OrderedDict keeps server-provided torrent order
            self.id_to_infohash_map = collections.OrderedDict()
            # Get all torrent data in xml format
            self.torrents = minidom.parseString(xml_response).getElementsByTagName("row")
            for torrent in self.torrents:
                # Each torrent: record ID -> infohash and infohash -> name
                self.id_to_infohash_map[torrent.getAttribute('id')] = torrent.getElementsByTagName('info_hash')[0].firstChild.nodeValue
                self.infohash_to_name[torrent.getElementsByTagName('info_hash')[0].firstChild.nodeValue] = torrent.getElementsByTagName('name')[0].firstChild.nodeValue
            self.data_remaining_as_bytes = minidom.parseString(xml_response).getElementsByTagName("data_remaining_as_bytes")[0].firstChild.nodeValue
            self.data_remaining_as_string = minidom.parseString(xml_response).getElementsByTagName("data_remaining_as_string")[0].firstChild.nodeValue
        else:
            # list already up to date
            # don't need to do anything
            pass
        return self.xml_response
def list_links(self):
xml_response = self.api("/links/list.csp")
if not xml_response:
return
return self.xml_response
    def list(self):
        """ Show torrents in pretty format.

        Honours the list_* filter flags (complete/incomplete/stopped/transfer)
        and prints per-torrent progress plus download/upload/rate/quota
        totals.  In xml_mode the raw list XML is printed and the process
        exits.  Does not return anything useful (None).
        """
        xml_response = self.list_update()
        if self.xml_mode:
            print xml_response
            sys.exit()
        # count all listed downloads and uploads
        total_downloaded = 0
        total_uploaded = 0
        # count total bandwidth current being used
        total_rate_in = 0
        total_rate_out = 0
        # count number of items listed
        list_count = 0
        for torrent in self.torrents:
            # 'name' is a urlencoded UTF-8 string
            # clean this up, many consoles can't display UTF-8, so lets replace unknown chars
            name = self.urldecode_to_ascii(torrent.getElementsByTagName('name')[0].firstChild.nodeValue)
            torrent_id = torrent.getAttribute("id")
            if self.list_incomplete_only:
                if torrent.getElementsByTagName('percentage_as_decimal')[0].firstChild.nodeValue == "100.0":
                    # skip completed torrents
                    continue
            if self.list_complete_only:
                if torrent.getElementsByTagName('percentage_as_decimal')[0].firstChild.nodeValue != "100.0":
                    # skip incomplete torrents
                    continue
            if self.list_transfer_only:
                # skip torrents with no data currently flowing either way
                if int(torrent.getElementsByTagName('data_rate_in_as_bytes')[0].firstChild.nodeValue) == 0 and \
                        int(torrent.getElementsByTagName('data_rate_out_as_bytes')[0].firstChild.nodeValue) == 0:
                    continue
            if self.list_stopped_only:
                if torrent.getElementsByTagName('status')[0].firstChild.nodeValue != "stopped":
                    # skip anything that is not stopped
                    continue
            # Print torrent name
            print Fore.CYAN + "[" + Fore.RESET + Style.BRIGHT + "{:>3}".format(torrent_id) +\
                Style.RESET_ALL + Fore.CYAN + "] {}".format(name) + Fore.RESET
            # Guard against division by zero when nothing was downloaded yet
            if float(torrent.getElementsByTagName('downloaded_as_bytes')[0].firstChild.nodeValue) == 0:
                ratio = 0.0
            else:
                ratio = float(torrent.getElementsByTagName('uploaded_as_bytes')[0].firstChild.nodeValue) / float(torrent.getElementsByTagName('downloaded_as_bytes')[0].firstChild.nodeValue)
            status = torrent.getElementsByTagName('status')[0].firstChild.nodeValue
            if status == 'stopped':
                # Show progress in RED if stopped
                status = Fore.RED + status + Fore.RESET
            else:
                if torrent.getElementsByTagName('percentage_as_decimal')[0].firstChild.nodeValue != "100.0":
                    # Show status in GREEN, if progress is under 100%
                    status = Fore.GREEN + status + Fore.RESET
            total_downloaded += int(torrent.getElementsByTagName('downloaded_as_bytes')[0].firstChild.nodeValue)
            total_uploaded += int(torrent.getElementsByTagName('uploaded_as_bytes')[0].firstChild.nodeValue)
            total_rate_in += int(torrent.getElementsByTagName('data_rate_in_as_bytes')[0].firstChild.nodeValue)
            total_rate_out += int(torrent.getElementsByTagName('data_rate_out_as_bytes')[0].firstChild.nodeValue)
            # append in/out rate to status string (only when non-zero)
            rate_in = int(torrent.getElementsByTagName('data_rate_in_as_bytes')[0].firstChild.nodeValue)
            rate_out = int(torrent.getElementsByTagName('data_rate_out_as_bytes')[0].firstChild.nodeValue)
            if math.floor(int(rate_in)):
                status += " IN:"+Fore.RED+"{}".format(int(rate_in)/1024)+Fore.RESET+"K"
            if math.floor(int(rate_out)):
                status += " OUT:"+Fore.RED+"{}".format(int(rate_out)/1024)+Fore.RESET+"K"
            #if math.floor(int(rate_in)):
            #    status += " IN:"+Fore.RED+"{}".format(int(rate_in))+Fore.RESET
            #if math.floor(int(rate_out)):
            #    status += " OUT:"+Fore.RED+"{}".format(int(rate_out))+Fore.RESET
            print "{:>13} {:>8} {:>12} {:.2f} {:5.2f} {}".format(torrent.getElementsByTagName('size_as_string')[0].firstChild.nodeValue,
                                                                 torrent.getElementsByTagName('percentage_as_decimal')[0].firstChild.nodeValue + "%",
                                                                 torrent.getElementsByTagName('elapsed_as_string')[0].firstChild.nodeValue,
                                                                 ratio,
                                                                 float(torrent.getElementsByTagName('maximum_ratio_as_decimal')[0].firstChild.nodeValue),
                                                                 status)
            list_count += 1
        print ""
        print "Listed " + Fore.RED + "{}".format(list_count) + Fore.RESET + " torrents"
        print "Downloaded: " + Fore.RED + "{}".format(sizeof_fmt(total_downloaded)) + Fore.RESET +\
            " Uploaded: " + Fore.RED + "{}".format(sizeof_fmt(total_uploaded)) + Fore.RESET
        print "Download rate: " + Fore.RED + "{}/s".format(sizeof_fmt(total_rate_in)) + Fore.RESET +\
            " Upload rate: " + Fore.RED + "{}/s".format(sizeof_fmt(total_rate_out)) + Fore.RESET
        print "Quota remaining: " + Fore.RED + "{}".format(sizeof_fmt(int(self.data_remaining_as_bytes))) + Fore.RESET
        return
if __name__ == "__main__":
# Set up CLI arguments
parser = argparse.ArgumentParser(prog='jsi.py', description='justseed.it cli client, version ' + JSI_VERSION, epilog='When INFO-HASH is asked as a parameter, a torrent ID may also be used. This corresponding ID number is shown in the first column of the --list output.')
# Command-line interface definition.  Options are kept in rough alphabetical
# order; argparse.SUPPRESS hides internal/diagnostic flags from --help.
# Fix: the --label help text read 'edit labelm set to ""' (typo).
parser.add_argument("--add-tracker", type=str, metavar='TRACKER-URL', help='add tracker (use together with -e)')
parser.add_argument("--aria2", type=str, nargs='*', metavar='INFO-HASH', help='generate aria2 script for downloading files')
parser.add_argument("--aria2-options", type=str, metavar='OPTIONS', help='options to pass to aria2c (default: "{}")'.format(JustSeedIt.DEFAULT_ARIA2_OPTIONS))
parser.add_argument("--aria2-log", action='store_true', help='log aria2 messages to aria2.log, used with --aria2')
parser.add_argument("--api-key", type=str, metavar='APIKEY', help='specify 40-char api key')
parser.add_argument("--bitfield", type=str, metavar='INFO-HASH', help='get bitfield info')
parser.add_argument("--debug", action='store_true', help='debug mode, write log file to debug.log')
parser.add_argument("--delete-tracker", type=str, metavar='TRACKER-URL', help='delete tracker (use together with -e)')
parser.add_argument("--delete", type=str, nargs='*', metavar='INFO-HASH', help='delete torrent')
parser.add_argument("--delete-stopped", action='store_true', help='delete all torrents in a stopped state')
parser.add_argument("--download-links", "--dl", type=str, nargs='*', metavar='INFO-HASH', help='get download links')
parser.add_argument("--download-links-renew", type=str, metavar='INFO-HASH', help='generate all new download links')
parser.add_argument("--dry", action='store_true', help='dry run')
parser.add_argument("-e", "--edit", type=str, nargs='*', metavar='INFO-HASH', help='edit torrent, use with --ratio, --name, --add-tracker or --delete-tracker')
parser.add_argument("--files", type=str, metavar='INFO-HASH', help='display file names and sizes')
parser.add_argument("--files-csv", type=str, metavar='INFO-HASH', help='display detailed file information in csv format')
parser.add_argument("-i", "--info", type=str, metavar='INFO-HASH', help='show info for torrent')
parser.add_argument("--infohash", type=str, nargs='*', metavar='INFO-HASH', help='add torrent by infohash')
#parser.add_argument("--infomap", action='store_true', help='show ID to infohash map')
parser.add_argument("--label", type=str, metavar='LABEL', help='edit label, set to "" to remove label')
parser.add_argument("-l", "--list", action='store_true', help='list torrents')
parser.add_argument("--list-complete", action='store_true', help='list only complete torrents')
parser.add_argument("--list-incomplete", action='store_true', help='list only incomplete torrents')
parser.add_argument("--list-links", action='store_true', help='list all download links, xml format')
parser.add_argument("--list-stopped", action='store_true', help='list only stopped torrents')
parser.add_argument("--list-transfer", action='store_true', help='list only torrents with data transfer in progress')
parser.add_argument("--list-tags", action='store_true', help=argparse.SUPPRESS)
parser.add_argument("--list-variables", action='store_true', help=argparse.SUPPRESS)
parser.add_argument("-m", "--magnet", type=str, nargs='*', help="add torrent using magnet link", metavar='MAGNET-TEXT')
parser.add_argument("--name", type=str, help='set name (used with -e), set as a empty string "" to reset to default name')
parser.add_argument("--no-compress", action='store_true', help='request api server to not use gzip encoding')
parser.add_argument("-o", "--output-dir", type=str, help='set output dir for aria2 scripts, always use a trailing slash (default: "{}")'.format(JustSeedIt.DEFAULT_DOWNLOAD_DIR))
parser.add_argument("-p", "--pause", action='store_true', help='pause when finished')
parser.add_argument("--peers", type=str, metavar='INFO-HASH', help='get peers info')
parser.add_argument("--pieces", type=str, metavar='INFO-HASH', help='get pieces info')
parser.add_argument("--pieces-aria2", type=str, metavar='INFO-HASH', help='generate aria2 script to download completed pieces')
parser.add_argument("--pieces-sha1sum", type=str, metavar='INFO-HASH', help='generate sha1sums of individual pieces')
parser.add_argument("--reset", type=str, metavar='INFO-HASH', help='reset downloaded and uploaded counter for torrent, will also reset the ratio')
parser.add_argument("-t", "--torrent-file", type=str, nargs='*', metavar='TORRENT-FILE', help='add torrent with .torrent file')
parser.add_argument("--trackers", type=str, metavar='INFO-HASH', help='get trackers info')
parser.add_argument("-r", "--ratio", type=float, help='set maximum ratio, used in conjunction with -t, -m or -e (default: {})'.format(str(JustSeedIt.DEFAULT_RATIO)))
parser.add_argument("--start", type=str, nargs='*', metavar='INFO-HASH', help='start torrent')
parser.add_argument("--stop", type=str, nargs='*', metavar='INFO-HASH', help='stop torrent')
parser.add_argument("-v", "--verbose", action='store_true', help='verbose mode')
parser.add_argument("--version", action='store_true', help='display version number')
parser.add_argument("--xml", action='store_true', help='display result as XML')
parser.add_argument("-z", "--compress", action='store_true', help='request api server to use gzip encoding (default: True)')
# set up coloring with colorama
terminal = os.getenv('TERM')
if terminal == 'rxvt' or terminal == 'xterm':
# Cygwin, xterm emulators
init(autoreset=True, convert=False, strip=False)
else:
# Standard windows console
init(autoreset=True)
args = parser.parse_args()
if args.api_key:
jsi = JustSeedIt(args.api_key)
else:
jsi = JustSeedIt()
if args.debug:
jsi.debug = 1
if args.verbose:
jsi.verbose = True
if args.xml:
jsi.xml_mode = True
if args.no_compress:
jsi.compress = False
else:
jsi.compress = True
if args.dry:
jsi.dry_run = 1
if args.aria2_log:
jsi.aria2_log = True
if args.aria2_options:
jsi.aria2_options = args.aria2_options
if args.output_dir:
# Add trailing slash if missing
if args.output_dir[-1:] != '/':
args.output_dir += '/'
jsi.output_dir = args.output_dir
# parameters which can be edited for a torrent
if args.ratio:
jsi.ratio = args.ratio
jsi.edit_append('ratio')
if args.add_tracker:
jsi.add_tracker_url = args.add_tracker
jsi.edit_append('add_tracker')
if args.delete_tracker:
jsi.delete_tracker_url = args.delete_tracker
jsi.edit_append('delete_tracker')
if args.name or args.name == "":
jsi.name = args.name
jsi.edit_append('name')
if args.label or args.label == "":
jsi.label = args.label
jsi.edit_append('label')
if args.version:
print "Version", JSI_VERSION
sys.exit()
# Perform main actions
# Check for problems first, like conflicting commands
if (args.list_incomplete or args.list_complete) and args.list:
    sys.stderr.write("Can not use both --list-incomplete or --list-complete together with --list. Use them on its own.\n")
    sys.exit()
if args.list_incomplete and args.list_complete:
    sys.stderr.write("Can not use both --list-incomplete and --list-complete in the one command\n")
    sys.exit()

# No problems: dispatch on the first matching action, one per invocation.
if args.magnet:
    jsi.add_magnet(args.magnet)
elif args.torrent_file:
    jsi.add_torrent_file(args.torrent_file)
elif args.infohash:
    jsi.add_infohash(args.infohash)
elif args.list:
    jsi.list()
elif args.list_incomplete:
    # List only incomplete torrents
    jsi.list_incomplete_only = True
    jsi.list()
elif args.list_complete:
    # List only 100% completed torrents
    jsi.list_complete_only = True
    jsi.list()
elif args.list_stopped:
    # List only torrents in a stopped state
    jsi.list_stopped_only = True
    jsi.list()
elif args.list_transfer:
    # List only torrents with data transfers in progress
    jsi.list_transfer_only = True
    jsi.list()
elif args.list_links:
    # Print every download link, in XML format
    print jsi.list_links()
elif args.info:
    jsi.info(args.info)
elif args.edit:
    jsi.edit(args.edit)
#elif args.infomap:
#    jsi.info_map()
elif args.pieces:
    # show pretty list of pieces for this torrent
    print "Pieces for " + Style.BRIGHT + "{}".format(args.pieces) + Style.RESET_ALL
    jsi.pieces(args.pieces)
    # The client stores the raw API reply in jsi.xml_response; each piece
    # is a <row> element.
    rows = minidom.parseString(jsi.xml_response).getElementsByTagName("row")
    if len(rows):
        print "NUMBER HASH SIZE LINK"
    else:
        sys.stderr.write("No pieces available for this torrent.\n")
    for row in rows:
        # hash, number, size, url
        print "{} {} {} {}".format(
            Fore.WHITE + row.getElementsByTagName('number')[0].firstChild.nodeValue,
            Fore.CYAN + row.getElementsByTagName('hash')[0].firstChild.nodeValue,
            Fore.GREEN + row.getElementsByTagName('size')[0].firstChild.nodeValue,
            Fore.WHITE + urllib.unquote(row.getElementsByTagName('url')[0].firstChild.nodeValue) + "&api_key=" + jsi.api_key + Fore.RESET
        )
elif args.pieces_aria2:
    jsi.pieces_aria2(args.pieces_aria2)
elif args.pieces_sha1sum:
    jsi.pieces_sha1sum(args.pieces_sha1sum)
elif args.start:
    jsi.start(args.start)
elif args.stop:
    jsi.stop(args.stop)
elif args.reset:
    jsi.reset(args.reset)
elif args.delete:
    jsi.delete(args.delete)
elif args.delete_stopped:
    jsi.delete_stopped()
elif args.bitfield:
    # expand() resolves a short ID or partial hash to full infohash(es)
    param = jsi.expand(args.bitfield)
    for infohash in param:
        jsi.bitfield(infohash)
elif args.trackers:
    param = jsi.expand(args.trackers)
    for infohash in param:
        print "Trackers for " + Style.BRIGHT + "{}".format(infohash) + Style.RESET_ALL
        jsi.trackers(infohash)
        rows = minidom.parseString(jsi.xml_response).getElementsByTagName("row")
        for row in rows:
            # Per-tracker seeders/peers/leechers counts, colour coded
            print " " + urllib.unquote(row.getElementsByTagName('url')[0].firstChild.nodeValue) +\
                " S:" + Style.BRIGHT + Fore.GREEN + row.getElementsByTagName('seeders')[0].firstChild.nodeValue + Style.RESET_ALL +\
                " P:" + Style.BRIGHT + Fore.CYAN + row.getElementsByTagName('peers')[0].firstChild.nodeValue + Style.RESET_ALL +\
                " L:" + Style.BRIGHT + Fore.WHITE + row.getElementsByTagName('leechers')[0].firstChild.nodeValue + Style.RESET_ALL
elif args.peers:
    param = jsi.expand(args.peers)
    for infohash in param:
        data = jsi.peers(infohash)
        peers = minidom.parseString(data).getElementsByTagName("row")
        #if len(param):
        #    print "---"
        if len(peers):
            print "Connected Peers for " + Style.BRIGHT + "{}".format(infohash) + Style.RESET_ALL + ": {}".format(len(peers))
            for peer in peers:
                peer_direction = peer.getElementsByTagName('direction')[0].firstChild.nodeValue
                peer_ip = peer.getElementsByTagName('ip_address')[0].firstChild.nodeValue
                peer_port = peer.getElementsByTagName('port')[0].firstChild.nodeValue
                peer_id = jsi.urldecode_to_ascii(peer.getElementsByTagName('peer_id')[0].firstChild.nodeValue)
                peer_percentage = peer.getElementsByTagName('percentage_as_decimal')[0].firstChild.nodeValue
                # Green for a peer with a complete copy, red otherwise
                if float(peer_percentage) == 100.0:
                    print "{:>3} {:>5} ".format(peer_direction, peer_id) +\
                        Fore.GREEN + "{:>6}".format(float(peer_percentage)) + Fore.RESET + "% " +\
                        Style.BRIGHT + Fore.BLUE + "{}".format(peer_ip) + Fore.RESET + Style.RESET_ALL + ":{}".format(peer_port) + Fore.RESET
                else:
                    print "{:>3} {:>5} ".format(peer_direction, peer_id) +\
                        Fore.RED + "{:>6}".format(float(peer_percentage)) + Fore.RESET + "% " +\
                        Style.BRIGHT + Fore.BLUE + "{}".format(peer_ip) + Fore.RESET + Style.RESET_ALL + ":{}".format(peer_port) + Fore.RESET
        else:
            print "Connected Peers for " + Style.BRIGHT + "{}".format(infohash) + Style.RESET_ALL + ": {}".format(len(peers))
            continue
elif args.files:
    # display coloured file name and size info
    jsi.files_pretty(args.files)
elif args.files_csv:
    # write csv output, representing files information
    jsi.files_csv(args.files_csv)
elif args.download_links:
    urls = jsi.download_links(args.download_links)
    for line in urls:
        print line
elif args.download_links_renew:
    jsi.download_links_renew(args.download_links_renew)
elif args.aria2:
    # Explicit --aria2-options wins over the JSI_ARIA2_OPTIONS environment
    # variable.
    if args.aria2_options:
        jsi.aria2_options = args.aria2_options
    elif os.getenv('JSI_ARIA2_OPTIONS'):
        jsi.aria2_options = os.getenv('JSI_ARIA2_OPTIONS')
    jsi.aria2_script(args.aria2)
elif args.list_tags:
    jsi.api("/tags/list.csp")
elif args.list_variables:
    jsi.api("/variables/list.csp")
else:
    # No action given: show usage.
    parser.print_help()

if args.pause:
    raw_input("Press Enter to continue...")

sys.exit()
| |
# -*- coding: utf-8 -*-
"""
DiscoBot the Amazing Chat Companion
"""
import logging, json
from disco import checks
from .config import Config
from .utils import configure_logger, get_destination
import discord, asyncio
from discord.ext import commands
from discord.ext.commands import Context
from discord import Message, Channel, Member, Server, Role
# Send this bot's log output to the console; keep the discord library's
# logger at the same level but without a stream handler (quieter).
configure_logger("disco", stream=True, level=Config.LOGGING_LEVEL)
configure_logger("discord", stream=False, level=Config.LOGGING_LEVEL)
logger = logging.getLogger("disco")
#class DiscoBot(commands.Bot):
# """DiscoBot the Amazing Chat Companion"""
#
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
#
# missing_token = Config.DISCORD_TOKEN is None
# if missing_token:
# raise AttributeError("Missing token.")
#
# self.lockdown = False
#
# #async def go(self):
# # """Go, go, go"""
# #logger.debug(Config.DISCORD_TOKEN)
# #await self.login(Config.DISCORD_TOKEN)
# # super().run()
#
# def register_extensions(self, extension: [str]):
# """Register the required cogs"""
# logger.info("Loading extension...")
# try:
# for ext in extensions:
# logger.info(" loaded cog: {0}".format(ext))
# self.load_extension(ext)
# except Exception as e:
# logger.error("Error loading extension \'{0}\': {1}".format(ext, e))
# Long description shown in the bot's generated help output.
desc = """Disco the Amazing Chat Companion.
The number one Discord bot for useless things.
Written by Syntox32 with Python 3.5.1 and the discord.py API wrapper by rapptz
DiscoBot at GitHub: github.com/Syntox32/DiscoBot
Find me on discord @syn
List of commands by category:
"""

# Cog modules loaded at startup (see register_extensions below).
extensions = [
    "disco.cogs.meme",
    "disco.cogs.reddit",
    "disco.cogs.misc",
    "disco.cogs.tags",
    "disco.cogs.mood",
    "disco.cogs.music",
]

# Fail fast if no token was configured.
if Config.DISCORD_TOKEN is None:
    raise AttributeError("Missing token.")

bot = commands.Bot(command_prefix=["!", "?"], description=desc)
# While True, the bot only answers the owner (toggled by the lockdown
# command; enforced in on_message).
bot.lockdown = False
def register_extensions(extension: [str]):
    """Register the required cogs.

    Bug fixes: iterate over the *extension* argument instead of the
    module-level ``extensions`` list; load each cog in its own try block
    so one broken cog does not abort the rest; log success only after
    the extension actually loaded.
    """
    logger.info("Loading extension...")
    for ext in extension:
        try:
            bot.load_extension(ext)
            logger.info(" loaded cog: {0}".format(ext))
        except Exception as e:
            logger.error("Error loading extension \'{0}\': {1}".format(ext, e))

register_extensions(extensions)
@bot.event
async def on_ready():
    """Log connection details and initialise runtime state."""
    logger.info("Connected!")
    logger.info("Username: {0}".format(bot.user.name))
    logger.info("ID: {0}".format(bot.user.id))

    # Counter bumped by on_command.
    bot.commands_executed = 0
    # Fix: compare with `==` -- `is` on ints relies on CPython interning;
    # the comparison already yields a bool, so no ternary is needed.
    bot.verbose = Config.LOGGING_LEVEL == logging.DEBUG
    logger.info("High verbosity set to {}".format(str(bot.verbose)))
@bot.event
async def on_command(command, ctx: Context):
    """Count the executed command and log who invoked it, and where."""
    bot.commands_executed += 1
    where = get_destination(ctx.message)
    logger.info("{0.author.name} in {1}: {0.content}".format(ctx.message, where))
@bot.event
async def on_message(message: Message):
    """Called when a message is created and sent to a server."""
    # During lockdown, ignore everyone except the owner.
    if bot.lockdown and message.author.id != Config.OWNER_ID:
        return
    # Overriding on_message means we must forward the message to the
    # command processor ourselves, or no commands would ever fire.
    await bot.process_commands(message)
@bot.command(pass_context=True, no_pm=True)
async def id(ctx):
    """Send a message with the user id of the author."""
    # NOTE: shadows the builtin id(), but the function name doubles as
    # the chat command name, so it must stay "id".
    author_id = ctx.message.author.id
    await bot.say("Your user ID is: {0}".format(author_id))
@bot.command(pass_context=True, no_pm=True, hidden=True)
async def servid(ctx):
    """Send a message with the id of the current server."""
    # Fix: docstring was copy-pasted from the `id` command and wrongly
    # described the author's user id.
    await bot.say("This server ID is: {0}".format(ctx.message.server.id))
@bot.command(pass_context=True, hidden=True)
@checks.is_owner()
async def change_game_status(ctx, game : str):
    """Change the playing status to a given label"""
    # An empty label clears the status: discord treats a game name of
    # None as "not playing anything".
    new_game = None if game == "" else game
    await ctx.bot.change_status(discord.Game(name=new_game))
@bot.command(name="lockdown", pass_context=True, hidden=True)
@checks.is_owner()
async def _lockdown(ctx):
    """Locks down the bot to only respond to commands from the owner"""
    # Flip the flag, then report the new state.
    bot.lockdown = not bot.lockdown
    if bot.lockdown:
        reply = "I'm now in lockdown mode."
    else:
        reply = "I'm now out of lockdown mode."
    await bot.say(reply)
@bot.command(pass_context=True, hidden=True)
@checks.is_owner()
async def verbose(ctx):
    """Toggle logging level between info and debug."""
    # Toggle first, then apply the matching level and report it.
    bot.verbose = not bot.verbose
    if bot.verbose:
        logger.setLevel(logging.DEBUG)
        await bot.say("Set log verbosity to DEBUG.")
    else:
        logger.setLevel(logging.INFO)
        await bot.say("Set log verbosity to INFO.")
@bot.command(pass_context=True, hidden=True, aliases=["quit"])
@checks.is_owner()
async def shutdown(ctx):
    """Announce shutdown, then log the bot out."""
    # Bug fix: bot.say() is a coroutine and was never awaited, so the
    # farewell message was never sent; also fixes the "Sutting" typo.
    await bot.say("Shutting down.")
    await bot.logout()
@bot.command(pass_context=True, hidden=True)
@checks.is_owner()
async def debug(ctx, *, code : str):
    """Evaluates code"""
    # Shamelessly stolen from Danny
    # SECURITY NOTE: eval() of arbitrary chat input -- tolerable only
    # because checks.is_owner() restricts this command to the bot owner.
    code = code.strip('` ')
    python = '```py\n{}\n```'

    try:
        outcome = eval(code)
    except Exception as err:
        await bot.say(python.format(type(err).__name__ + ': ' + str(err)))
        return

    # Awaiting a coroutine result happens outside the try block, matching
    # the original behaviour (errors while awaiting propagate).
    if asyncio.iscoroutine(outcome):
        outcome = await outcome
    await bot.say(python.format(outcome))
@bot.command(pass_context=True, no_pm=True, hidden=True)
@checks.is_owner()
async def rescue(ctx, count : int):
    """Saves the last n messages from the log in a file"""
    # Fixes: "the the" typo in the docstring (which doubles as the
    # command's help text); json.dumps() is the idiomatic spelling of
    # json.JSONEncoder().encode().  One JSON object per line.
    with open("shitsave.txt", "w") as f:
        async for message in bot.logs_from(ctx.message.channel, limit=count):
            obj = {
                "auth": message.author.name,
                "msg": message.content
            }
            f.write(json.dumps(obj) + "\n")
@bot.event
async def on_command_error(error, ctx):
    """Called when a command raises an error"""
    # Only NoPrivateMessage gets a user-facing reply; everything else is
    # left to discord.py's default handling.
    if not isinstance(error, commands.NoPrivateMessage):
        return
    await ctx.bot.say("This command cannot be used in private messages.")
# Other events, uncomment as needed
# Having them uncommented all the time might
# cause some wierd behaviour with overrides sometimes(?)
#@bot.event
#async def on_error():
# """Override normal error handling behaviour"""
# pass
#@bot.event
#async def on_channel_update(before: Channel, after: Channel):
# """Called whenever a channel is updated. e.g. changed name, topic, permissions."""
# msg = "Channel changed name from {0} to {1}".format(before.name, after.name)
# await self.bot.send_message(after, msg)
#@bot.event
#async def on_member_update(before: Member, after: Member):
# """Called when a Member updates their profile."""
# pass
#@bot.event
#async def on_server_role_update(before: Role, after: Role):
# """Called when a Role is changed server-wide."""
# pass
#@bot.event
#async def on_voice_state_update(before: Member, after: Member):
# """Called when a Member changes their voice state."""
# pass
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{incremental.update}.
"""
from __future__ import division, absolute_import
import sys
import os
import datetime
from twisted.python.filepath import FilePath
from twisted.python.compat import NativeStringIO
from twisted.trial.unittest import TestCase
from incremental.update import _run, run
class NonCreatedUpdateTests(TestCase):
    """
    Tests for running ``incremental.update`` against a package that does
    not have a ``_version.py`` yet.
    """

    def setUp(self):
        # Build a throwaway package tree <tmp>/inctestpkg/ whose
        # __init__.py carries the "NEXT" placeholders but which has no
        # _version.py file.
        self.srcdir = FilePath(self.mktemp())
        self.srcdir.makedirs()
        packagedir = self.srcdir.child('inctestpkg')
        packagedir.makedirs()
        packagedir.child('__init__.py').setContent(b"""
from incremental import Version
introduced_in = Version('inctestpkg', 'NEXT', 0, 0).short()
next_released_version = "inctestpkg NEXT"
""")
        # _run() discovers the package relative to the cwd we hand it.
        self.getcwd = lambda: self.srcdir.path
        self.packagedir = packagedir

        class Date(object):
            # Fixed "today" so the generated CalVer version (16.8.0 for
            # August 2016) is deterministic.
            year = 2016
            month = 8
        self.date = Date()

    def test_create(self):
        """
        `incremental.update package --create` initialises the version.
        """
        self.assertFalse(self.packagedir.child("_version.py").exists())

        out = []
        _run('inctestpkg', path=None, newversion=None, patch=False, rc=False,
             dev=False, create=True, _date=self.date, _getcwd=self.getcwd,
             _print=out.append)

        self.assertTrue(self.packagedir.child("_version.py").exists())
        self.assertEqual(self.packagedir.child("_version.py").getContent(),
                         b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 16, 8, 0)
__all__ = ["__version__"]
''')
class MissingTests(TestCase):
    """
    Tests for running ``incremental.update`` when the package cannot be
    located.
    """

    def setUp(self):
        # The package lives under "srca/" -- not the cwd and not "src/" --
        # so the automatic package discovery cannot find it.
        self.srcdir = FilePath(self.mktemp())
        self.srcdir.makedirs()
        self.srcdir.child('srca').makedirs()
        packagedir = self.srcdir.child('srca').child('inctestpkg')
        packagedir.makedirs()
        packagedir.child('__init__.py').setContent(b"""
from incremental import Version
introduced_in = Version('inctestpkg', 'NEXT', 0, 0).short()
next_released_version = "inctestpkg NEXT"
""")
        packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3)
__all__ = ["__version__"]
""")
        self.getcwd = lambda: self.srcdir.path
        self.packagedir = packagedir

        class Date(object):
            # Fixed date for deterministic CalVer output.
            year = 2016
            month = 8
        self.date = Date()

    def test_path(self):
        """
        `incremental.update package --dev` raises and quits if it can't find
        the package.
        """
        out = []
        with self.assertRaises(ValueError):
            _run(u'inctestpkg', path=None, newversion=None,
                 patch=False, rc=False, dev=True, create=False,
                 _date=self.date, _getcwd=self.getcwd, _print=out.append)
class CreatedUpdateInSrcTests(TestCase):
    """
    Tests for updating a package that lives under a ``src/`` directory.
    """

    def setUp(self):
        # Package tree <tmp>/src/inctestpkg/ with an existing _version.py
        # at 1.2.3; discovery must look inside "src/" below the cwd.
        self.srcdir = FilePath(self.mktemp())
        self.srcdir.makedirs()
        self.srcdir.child('src').makedirs()
        packagedir = self.srcdir.child('src').child('inctestpkg')
        packagedir.makedirs()
        packagedir.child('__init__.py').setContent(b"""
from incremental import Version
introduced_in = Version('inctestpkg', 'NEXT', 0, 0).short()
next_released_version = "inctestpkg NEXT"
""")
        packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3)
__all__ = ["__version__"]
""")
        self.getcwd = lambda: self.srcdir.path
        self.packagedir = packagedir

        class Date(object):
            # Fixed date for deterministic CalVer output.
            year = 2016
            month = 8
        self.date = Date()

    def test_path(self):
        """
        `incremental.update package --dev` finds the package under the
        ``src/`` directory of the current working directory (no --path is
        given) and increments its dev version on each run: dev=0 on the
        first invocation, dev=1 on the second.
        """
        out = []
        _run(u'inctestpkg', path=None, newversion=None,
             patch=False, rc=False, dev=True, create=False, _date=self.date,
             _getcwd=self.getcwd, _print=out.append)

        self.assertTrue(self.packagedir.child("_version.py").exists())
        self.assertEqual(self.packagedir.child("_version.py").getContent(),
                         b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, dev=0)
__all__ = ["__version__"]
''')

        _run(u'inctestpkg', path=None, newversion=None,
             patch=False, rc=False, dev=True, create=False, _date=self.date,
             _getcwd=self.getcwd, _print=out.append)

        self.assertTrue(self.packagedir.child("_version.py").exists())
        self.assertEqual(self.packagedir.child("_version.py").getContent(),
                         b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, dev=1)
__all__ = ["__version__"]
''')
class CreatedUpdateTests(TestCase):
maxDiff = None
def setUp(self):
self.srcdir = FilePath(self.mktemp())
self.srcdir.makedirs()
packagedir = self.srcdir.child('inctestpkg')
packagedir.makedirs()
packagedir.child('__init__.py').setContent(b"""
from incremental import Version
introduced_in = Version('inctestpkg', 'NEXT', 0, 0).short()
next_released_version = "inctestpkg NEXT"
""")
packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3)
__all__ = ["__version__"]
""")
self.getcwd = lambda: self.srcdir.path
self.packagedir = packagedir
class Date(object):
year = 2016
month = 8
self.date = Date()
def test_path(self):
"""
`incremental.update package --path=<path> --dev` increments the dev
version of the package on the given path
"""
out = []
_run(u'inctestpkg', path=self.packagedir.path, newversion=None,
patch=False, rc=False, dev=True, create=False, _date=self.date,
_print=out.append)
self.assertTrue(self.packagedir.child("_version.py").exists())
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, dev=0)
__all__ = ["__version__"]
''')
def test_dev(self):
"""
`incremental.update package --dev` increments the dev version.
"""
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=False, rc=False,
dev=True, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertTrue(self.packagedir.child("_version.py").exists())
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, dev=0)
__all__ = ["__version__"]
''')
def test_patch(self):
"""
`incremental.update package --patch` increments the patch version.
"""
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=True, rc=False,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 4)
__all__ = ["__version__"]
''')
self.assertEqual(self.packagedir.child("__init__.py").getContent(),
b"""
from incremental import Version
introduced_in = Version('inctestpkg', 1, 2, 4).short()
next_released_version = "inctestpkg 1.2.4"
""")
def test_patch_with_prerelease_and_dev(self):
"""
`incremental.update package --patch` increments the patch version, and
disregards any old prerelease/dev versions.
"""
self.packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, release_candidate=1, dev=2)
__all__ = ["__version__"]
""")
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=True, rc=False,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 4)
__all__ = ["__version__"]
''')
def test_rc_patch(self):
"""
`incremental.update package --patch --rc` increments the patch
version and makes it a release candidate.
"""
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=True, rc=True,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 4, release_candidate=1)
__all__ = ["__version__"]
''')
self.assertEqual(self.packagedir.child("__init__.py").getContent(),
b"""
from incremental import Version
introduced_in = Version('inctestpkg', 1, 2, 4, release_candidate=1).short()
next_released_version = "inctestpkg 1.2.4rc1"
""")
def test_rc_with_existing_rc(self):
"""
`incremental.update package --rc` increments the rc version if the
existing version is an rc, and discards any dev version.
"""
self.packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, release_candidate=1, dev=2)
__all__ = ["__version__"]
""")
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=False, rc=True,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, release_candidate=2)
__all__ = ["__version__"]
''')
self.assertEqual(self.packagedir.child("__init__.py").getContent(),
b"""
from incremental import Version
introduced_in = Version('inctestpkg', 1, 2, 3, release_candidate=2).short()
next_released_version = "inctestpkg 1.2.3rc2"
""")
def test_rc_with_no_rc(self):
"""
`incremental.update package --rc`, when the package is not a release
candidate, will issue a new major/minor rc, and disregards the micro
and dev.
"""
self.packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, dev=2)
__all__ = ["__version__"]
""")
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=False, rc=True,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 16, 8, 0, release_candidate=1)
__all__ = ["__version__"]
''')
self.assertEqual(self.packagedir.child("__init__.py").getContent(),
b"""
from incremental import Version
introduced_in = Version('inctestpkg', 16, 8, 0, release_candidate=1).short()
next_released_version = "inctestpkg 16.8.0rc1"
""")
def test_full_with_rc(self):
"""
`incremental.update package`, when the package is a release
candidate, will issue the major/minor, sans release candidate or dev.
"""
out = []
_run(u'inctestpkg', path=None, newversion=None, patch=False, rc=True,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 16, 8, 0, release_candidate=1)
__all__ = ["__version__"]
''')
self.assertEqual(self.packagedir.child("__init__.py").getContent(),
b"""
from incremental import Version
introduced_in = Version('inctestpkg', 16, 8, 0, release_candidate=1).short()
next_released_version = "inctestpkg 16.8.0rc1"
""")
_run(u'inctestpkg', path=None, newversion=None, patch=False, rc=False,
dev=False, create=False, _date=self.date, _getcwd=self.getcwd,
_print=out.append)
self.assertEqual(self.packagedir.child("_version.py").getContent(),
b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 16, 8, 0)
__all__ = ["__version__"]
''')
self.assertEqual(self.packagedir.child("__init__.py").getContent(),
b"""
from incremental import Version
introduced_in = Version('inctestpkg', 16, 8, 0).short()
next_released_version = "inctestpkg 16.8.0"
""")
def test_full_without_rc(self):
"""
`incremental.update package`, when the package is NOT a release
candidate, will raise an error.
"""
out = []
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion=None, patch=False,
rc=False, dev=False, create=False, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(
e.exception.args[0],
"You need to issue a rc before updating the major/minor")
def test_no_mix_newversion(self):
"""
The `--newversion` flag can't be mixed with --patch, --rc, or --dev.
"""
out = []
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion="1", patch=True,
rc=False, dev=False, create=False, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --newversion")
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion="1", patch=False,
rc=True, dev=False, create=False, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --newversion")
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion="1", patch=False,
rc=False, dev=True, create=False, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --newversion")
def test_no_mix_dev(self):
"""
The `--dev` flag can't be mixed with --patch, or --rc.
"""
out = []
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion=None, patch=True,
rc=False, dev=True, create=False, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --dev")
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion=None, patch=False,
rc=True, dev=True, create=False, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --dev")
def test_no_mix_create(self):
"""
The `--create` flag can't be mixed with --patch, --rc, --dev, or
--newversion.
"""
out = []
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion=None, patch=True,
rc=False, dev=False, create=True, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --create")
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion="1", patch=False,
rc=False, dev=False, create=True, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --create")
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion=None, patch=False,
rc=True, dev=False, create=True, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --create")
with self.assertRaises(ValueError) as e:
_run(u'inctestpkg', path=None, newversion=None, patch=False,
rc=False, dev=True, create=True, _date=self.date,
_getcwd=self.getcwd, _print=out.append)
self.assertEqual(e.exception.args[0], "Only give --create")
    def test_newversion(self):
        """
        `incremental.update package --newversion=1.2.3rc1dev3`, will set that
        version in the package.
        """
        out = []
        # Set an explicit version (including rc and dev segments) instead of
        # deriving one from the calendar date.
        _run(u'inctestpkg', path=None, newversion="1.2.3rc1dev3", patch=False,
             rc=False, dev=False, create=False, _date=self.date,
             _getcwd=self.getcwd, _print=out.append)
        # _version.py is regenerated wholesale; compare byte-for-byte,
        # including the auto-generation banner.
        self.assertEqual(self.packagedir.child("_version.py").getContent(),
                         b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3, release_candidate=1, dev=3)
__all__ = ["__version__"]
''')
        # The 'NEXT' placeholders in __init__.py must be replaced with the
        # concrete version object and its string form.
        self.assertEqual(self.packagedir.child("__init__.py").getContent(),
                         (b"""
from incremental import Version
introduced_in = Version('inctestpkg', 1, 2, 3, """
                          b"""release_candidate=1, dev=3).short()
next_released_version = "inctestpkg 1.2.3rc1dev3"
"""))
    def test_newversion_bare(self):
        """
        `incremental.update package --newversion=1`, will set that
        version in the package.
        """
        out = []
        # A bare major version ("1") should be expanded to 1.0.0.
        _run(u'inctestpkg', path=None, newversion="1", patch=False,
             rc=False, dev=False, create=False, _date=self.date,
             _getcwd=self.getcwd, _print=out.append)
        # _version.py is regenerated wholesale; compare byte-for-byte.
        self.assertEqual(self.packagedir.child("_version.py").getContent(),
                         b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 1, 0, 0)
__all__ = ["__version__"]
''')
        # The 'NEXT' placeholders in __init__.py get the concrete version.
        self.assertEqual(self.packagedir.child("__init__.py").getContent(),
                         b"""
from incremental import Version
introduced_in = Version('inctestpkg', 1, 0, 0).short()
next_released_version = "inctestpkg 1.0.0"
""")
class ScriptTests(TestCase):
    """
    Tests for the command-line entry point (``run``) of incremental.update.
    """
    def setUp(self):
        # Build a throwaway src/inctestpkg package: __init__.py carries
        # 'NEXT' placeholders and _version.py a concrete version, both of
        # which run() is expected to rewrite.
        self.srcdir = FilePath(self.mktemp())
        self.srcdir.makedirs()
        self.srcdir.child('src').makedirs()
        packagedir = self.srcdir.child('src').child('inctestpkg')
        packagedir.makedirs()
        packagedir.child('__init__.py').setContent(b"""
from incremental import Version
introduced_in = Version('inctestpkg', 'NEXT', 0, 0).short()
next_released_version = "inctestpkg NEXT"
""")
        packagedir.child('_version.py').setContent(b"""
from incremental import Version
__version__ = Version('inctestpkg', 1, 2, 3)
__all__ = ["__version__"]
""")
        self.getcwd = lambda: self.srcdir.path
        self.packagedir = packagedir
        # Stand-in for datetime.date with a fixed 2016-08 "today", so the
        # calendar-derived version (16.8.x) is deterministic in tests.
        class Date(object):
            year = 2016
            month = 8
        class DateModule(object):
            def today(self):
                return Date()
        self.date = DateModule()
    def test_run(self):
        """
        Calling run() with no args will cause it to print help.
        """
        stringio = NativeStringIO()
        self.patch(sys, 'stdout', stringio)
        # --help should print usage text and exit with status 0.
        with self.assertRaises(SystemExit) as e:
            run(["--help"])
        self.assertEqual(e.exception.args[0], 0)
        self.assertIn("Show this message and exit", stringio.getvalue())
    def test_insufficient_args(self):
        """
        Calling run() with no args will cause it to print help.
        """
        stringio = NativeStringIO()
        self.patch(sys, 'stdout', stringio)
        self.patch(os, 'getcwd', self.getcwd)
        self.patch(datetime, 'date', self.date)
        # An --rc bump with the patched 2016-08 date yields 16.8.0rc1.
        with self.assertRaises(SystemExit) as e:
            run(["inctestpkg", "--rc"])
        self.assertEqual(e.exception.args[0], 0)
        self.assertIn("Updating codebase", stringio.getvalue())
        # Both generated files must match byte-for-byte.
        self.assertEqual(self.packagedir.child("_version.py").getContent(),
                         b'''"""
Provides inctestpkg version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update inctestpkg` to change this file.
from incremental import Version
__version__ = Version('inctestpkg', 16, 8, 0, release_candidate=1)
__all__ = ["__version__"]
''')
        self.assertEqual(self.packagedir.child("__init__.py").getContent(),
                         b"""
from incremental import Version
introduced_in = Version('inctestpkg', 16, 8, 0, release_candidate=1).short()
next_released_version = "inctestpkg 16.8.0rc1"
""")
| |
import csv
import io
import os
import re
import sys
from national_voter_file.transformers.base import (DATA_DIR,
BasePreparer,
BaseTransformer)
import usaddress
__all__ = ['default_file', 'StatePreparer', 'StateTransformer']
# Default filename of the raw Utah voter extract.
default_file = 'voters.txt'
class StatePreparer(BasePreparer):
    """Opens the raw Utah voter file and yields its rows for transformation."""

    state_path = 'ut'   # Two letter code for state
    state_name = 'Utah' # Name of state with no spaces. Use CamelCase
    sep = ','           # The character used to delimit records

    def __init__(self, input_path, *args, **kwargs):
        super(StatePreparer, self).__init__(input_path, *args, **kwargs)
        # Fall back to this state's transformer when none was supplied.
        if not self.transformer:
            self.transformer = StateTransformer()

    def process(self):
        """Yield raw rows (dicts keyed by header) from the input file.

        Uses a ``with`` block so the file handle is closed when the
        generator is exhausted or closed (the previous version leaked it).
        """
        # 'utf-8-sig' transparently strips a UTF-8 BOM if the file has one.
        with open(self.input_path, 'r', encoding='utf-8-sig') as fp:
            for row in self.dict_iterator(fp):
                yield row
class StateTransformer(BaseTransformer):
    """Maps raw Utah voter-file rows onto the national voter file schema.

    Each ``extract_*`` method receives one raw row (a dict keyed by the
    Utah file's column headers) and returns a dict of national-schema
    columns.
    """

    date_format = '%m/%d/%Y'  # The format used for dates
    input_fields = None       # This can be a list of column names for the input
                              # file. Use None if the file has headers.

    # Straight column renames: national column -> Utah column (None when the
    # Utah file has no equivalent field).
    col_map = {
        'TITLE': None,
        'FIRST_NAME': 'First Name',
        'MIDDLE_NAME': 'Middle Name',
        'LAST_NAME': 'Last Name',
        'NAME_SUFFIX': 'Name Suffix',
        'GENDER': None,
        'RACE': None,
        'BIRTH_STATE': None,
        'LANGUAGE_CHOICE': None,
        'EMAIL': None,
        'PHONE': 'Phone',
        'DO_NOT_CALL_STATUS': None,
        'ABSENTEE_TYPE': 'Absentee',
        'CONGRESSIONAL_DIST': 'Congressional',
        'UPPER_HOUSE_DIST': 'State Senate',
        'LOWER_HOUSE_DIST': 'State House',
        'SCHOOL_BOARD_DIST': 'State Schoolboard',
        'COUNTY_BOARD_DIST': None,
        'COUNTYCODE': 'County ID',
        'COUNTY_VOTER_REF': None,
        'PRECINCT': 'Precinct',
        'PRECINCT_SPLIT': None,
    }

    # Utah party names -> national party codes.
    ut_party_map = {
        # Commented values appeared in the data file but couldn't be mapped
        'Republican': 'REP',
        'Unaffiliated': 'UN',
        'Democratic': 'DEM',
        'Libertarian': 'LIB',
        'Independent American': 'AI',
        'Constitution': 'AMC',
        'Independent': 'UN',
        'Other': 'UN',
        'Green': 'GRN',
        'Personal Choice': 'PCP',
        'Americans Elect': 'AE',
        'Reform': 'REF',
        'Natural Law': 'NLP',
        'Socialist Workers': 'SWP',
        'Socialist': 'SP',
        'Utah Justice Party': 'UJP',
        'U.S. Taxpayers': 'TAX',
        'Peace and Freedom': 'PF',
        'Independent Patriot Party Of Utah': 'IPU',
        'Independent Patriot Party of Utah': 'IPU',
        'Desert Greens': 'GPU',
        #'American': '',
        #'Populist': '',
        #'Independents for Economic Recovery': '',
    }

    col_type_dict = BaseTransformer.col_type_dict.copy()
    # File contains some missing First Name values
    col_type_dict['FIRST_NAME'] = set([str, type(None)])
    col_type_dict['PRECINCT_SPLIT'] = set([str, type(None)])

    #### Demographics methods ##################################################

    def extract_birthdate(self, input_columns):
        """
        Inputs:
            input_columns: name or list of columns
        Outputs:
            Dictionary with following keys
                'BIRTHDATE'
        """
        dob = None
        try:
            dob = self.convert_date(input_columns['DOB'])
        except ValueError:
            # Some rows have invalid date values; leave BIRTHDATE as None.
            pass
        return {
            'BIRTHDATE': dob,
            # Dates come straight from the file, never estimated.
            'BIRTHDATE_IS_ESTIMATE': 'N',
        }

    #### Address methods #######################################################

    def extract_registration_address(self, input_dict):
        """
        Relies on the usaddress package.

        Call the self.convert_usaddress_dict() method on the output of
        usaddress.tag. We provide example code in the method to make this clear.

        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'ADDRESS_NUMBER'
                'ADDRESS_NUMBER_PREFIX'
                'ADDRESS_NUMBER_SUFFIX'
                'BUILDING_NAME'
                'CORNER_OF'
                'INTERSECTION_SEPARATOR'
                'LANDMARK_NAME'
                'NOT_ADDRESS'
                'OCCUPANCY_TYPE'
                'OCCUPANCY_IDENTIFIER'
                'PLACE_NAME'
                'STATE_NAME'
                'STREET_NAME'
                'STREET_NAME_PRE_DIRECTIONAL'
                'STREET_NAME_PRE_MODIFIER'
                'STREET_NAME_PRE_TYPE'
                'STREET_NAME_POST_DIRECTIONAL'
                'STREET_NAME_POST_MODIFIER'
                'STREET_NAME_POST_TYPE'
                'SUBADDRESS_IDENTIFIER'
                'SUBADDRESS_TYPE'
                'USPS_BOX_GROUP_ID'
                'USPS_BOX_GROUP_TYPE'
                'USPS_BOX_ID'
                'USPS_BOX_TYPE'
                'ZIP_CODE'
        """
        # Columns to create the address string from, in street order.
        address_components = [
            'House Number', 'House Number Suffix', 'Direction Prefix', 'Street', 'Direction Suffix',
            'Street Type', 'Unit Type', 'Unit Number',
        ]
        # Create the address string for usaddress.tag, skipping absent parts.
        address_str = ' '.join([
            input_dict[x] for x in address_components if input_dict[x] is not None
        ])
        # Unparsed passthrough columns, kept regardless of parse success.
        raw_dict = {
            'RAW_ADDR1': address_str,
            'RAW_ADDR2': address_str,
            'RAW_CITY': input_dict['City'],
            'RAW_ZIP': input_dict['Zip'],
        }
        # usaddress_tag wraps usaddress.tag and absorbs its parse errors.
        usaddress_dict, usaddress_type = self.usaddress_tag(address_str)
        # convert_usaddress_dict maps usaddress labels onto schema column
        # names and fills in missing values.
        if usaddress_dict:
            converted = self.convert_usaddress_dict(usaddress_dict)
            # TODO(review): confirm what status codes '1'/'2' mean upstream.
            converted['VALIDATION_STATUS'] = '2'
        else:
            converted = self.constructEmptyResidentialAddress()
            converted['VALIDATION_STATUS'] = '1'
        converted.update(raw_dict)
        converted['STATE_NAME'] = 'UT'
        return converted

    def extract_mailing_address(self, input_dict):
        """
        Relies on the usaddress package.

        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'MAIL_ADDRESS_LINE1'
                'MAIL_ADDRESS_LINE2'
                'MAIL_CITY'
                'MAIL_STATE'
                'MAIL_ZIP_CODE'
                'MAIL_COUNTRY'
        """
        columns = ['Mailing Address']
        # BUG FIX: the previous version joined the literal column *names*
        # ("Mailing Address") instead of the row's *values*, so usaddress
        # always parsed the constant string 'Mailing Address'.
        mail_str = ' '.join([input_dict[x] for x in columns if input_dict[x]])
        usaddress_dict, usaddress_type = self.usaddress_tag(mail_str)

        # "City, ST 84xxx" style field; parsed loosely because the data is
        # dirty. Anything unparseable leaves all three parts as None.
        city = state = zipcode = None
        city_state_zip = input_dict['Mailing city, state zip']
        try:
            if ',' in city_state_zip:
                city, state_zip = city_state_zip.split(',')
                state_zip = state_zip.split()
                if len(state_zip) == 2:
                    state, zipcode = state_zip
                elif state_zip:
                    # Only one token after the comma: treat it as the zip.
                    state = None
                    zipcode = state_zip[-1]
        except ValueError:
            # Some rows have weird values for this field (e.g. extra commas).
            pass
        return {
            'MAIL_ADDRESS_LINE1': self.construct_mail_address_1(
                usaddress_dict,
                usaddress_type,
            ),
            'MAIL_ADDRESS_LINE2': self.construct_mail_address_2(usaddress_dict),
            'MAIL_CITY': city,
            'MAIL_ZIP_CODE': zipcode,
            'MAIL_STATE': state,
            'MAIL_COUNTRY': 'US',
        }

    #### Political methods #####################################################

    def extract_state_voter_ref(self, input_dict):
        """Prefix the state voter ID with 'UT' so it is unique nationally."""
        return {'STATE_VOTER_REF': 'UT' + input_dict['Voter ID']}

    def extract_registration_date(self, input_columns):
        """
        Inputs:
            input_columns: name or list of columns
        Outputs:
            Dictionary with following keys
                'REGISTRATION_DATE'
        """
        d = None
        try:
            d = self.convert_date(input_columns['Registration Date'])
        except ValueError:
            # Some rows have invalid date values; leave the date as None.
            pass
        return {'REGISTRATION_DATE': d}

    def extract_registration_status(self, input_columns):
        """
        Inputs:
            input_columns: name or list of columns
        Outputs:
            Dictionary with following keys
                'REGISTRATION_STATUS'
        """
        return {'REGISTRATION_STATUS': input_columns['Status']}

    def extract_party(self, input_columns):
        """
        Inputs:
            input_columns: name or list of columns
        Outputs:
            Dictionary with following keys
                'PARTY'
        """
        # Unmapped party names deliberately fall through to None.
        party = input_columns['Party']
        return {'PARTY': self.ut_party_map.get(party)}
| |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2021 Greg Davill <greg.davill@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
# Build/Use:
# ./gsd_butterstick.py --uart-name=crossover --with-etherbone --csr-csv=csr.csv --build --load
# litex_server --udp
# litex_term crossover
import os
import sys
import argparse
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import butterstick
from litex.build.lattice.trellis import trellis_args, trellis_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litex.soc.cores.gpio import GPIOTristate
from litedram.modules import MT41K64M16,MT41K128M16,MT41K256M16,MT41K512M16
from litedram.phy import ECP5DDRPHY
from liteeth.phy.ecp5rgmii import LiteEthPHYRGMII
# CRG ---------------------------------------------------------------------------------------------
class _CRG(Module):
    """Clock and Reset Generator for the ButterStick board.

    Builds the clock tree from the on-board 30 MHz oscillator:
      - cd_por:     power-on-reset counter domain, clocked directly by clk30.
      - cd_init:    25 MHz domain (wired to the DDR PHY init path by BaseSoC).
      - cd_sys2x_i: free-running PLL output at 2x the system frequency.
      - cd_sys2x:   stoppable copy of cd_sys2x_i (gated via `stop`).
      - cd_sys:     cd_sys2x divided by two -> the system clock.
    """
    def __init__(self, platform, sys_clk_freq):
        # Soft reset request from the SoC (in addition to button/POR).
        self.rst = Signal()
        self.clock_domains.cd_init = ClockDomain()
        self.clock_domains.cd_por = ClockDomain(reset_less=True)
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_sys2x = ClockDomain()
        self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)
        # # #
        # Clock stop / reset controls; BaseSoC drives these from the DDR PHY
        # init sequencer (see the comb assignments there).
        self.stop = Signal()
        self.reset = Signal()
        # Clk / Rst
        clk30 = platform.request("clk30")
        rst_n = platform.request("user_btn", 0)
        # Power on reset: hold the PLL in reset for 2^16 clk30 cycles after
        # configuration, counting down from all-ones to zero.
        por_count = Signal(16, reset=2**16-1)
        por_done = Signal()
        self.comb += self.cd_por.clk.eq(clk30)
        self.comb += por_done.eq(por_count == 0)
        self.sync.por += If(~por_done, por_count.eq(por_count - 1))
        # PLL: released only when POR has finished, the (active-low) user
        # button is not pressed, and no soft reset is requested.
        self.submodules.pll = pll = ECP5PLL()
        self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst)
        pll.register_clkin(clk30, 30e6)
        pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq)
        pll.create_clkout(self.cd_init, 25e6)
        self.specials += [
            # ECLKSYNCB: edge-clock buffer with a STOP input, letting the DDR
            # init logic freeze the 2x clock.
            Instance("ECLKSYNCB",
                i_ECLKI = self.cd_sys2x_i.clk,
                i_STOP = self.stop,
                o_ECLKO = self.cd_sys2x.clk),
            # CLKDIVF: divide the 2x clock by 2 to produce the system clock.
            Instance("CLKDIVF",
                p_DIV = "2.0",
                i_ALIGNWD = 0,
                i_CLKI = self.cd_sys2x.clk,
                i_RST = self.reset,
                o_CDIVX = self.cd_sys.clk),
            # Keep sys/sys2x in reset until the PLL locks (or on request).
            AsyncResetSynchronizer(self.cd_sys, ~pll.locked | self.reset),
            AsyncResetSynchronizer(self.cd_sys2x, ~pll.locked | self.reset),
        ]
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
    """LiteX SoC for the GSD ButterStick (ECP5) board.

    Optional features: DDR3 SDRAM (unless an integrated main RAM is
    configured), RGMII Ethernet/Etherbone, memory-mapped SPI flash, an RGB
    LED chaser and GPIOs on the SYZYGY Port-A breakout.
    """
    def __init__(self, revision="1.0", device="85F", sdram_device="MT41K64M16", sys_clk_freq=int(60e6),
        toolchain="trellis", with_ethernet=False, with_etherbone=False, eth_ip="192.168.1.50",
        eth_dynamic_ip = False,
        with_spi_flash = False,
        with_led_chaser = True,
        with_syzygy_gpio = True,
        **kwargs) :
        platform = butterstick.Platform(revision=revision, device=device ,toolchain=toolchain)
        # SoCCore ----------------------------------------------------------------------------------
        # Remap the default "serial" console to the crossover UART (accessed
        # with litex_server/litex_term, see the Build/Use header).
        if kwargs["uart_name"] == "serial":
            kwargs["uart_name"] = "crossover"
        SoCCore.__init__(self, platform, sys_clk_freq,
            ident = "LiteX SoC on ButterStick",
            **kwargs)
        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)
        # DDR3 SDRAM -------------------------------------------------------------------------------
        if not self.integrated_main_ram_size:
            available_sdram_modules = {
                "MT41K64M16": MT41K64M16,
                "MT41K128M16": MT41K128M16,
                "MT41K256M16": MT41K256M16,
                "MT41K512M16": MT41K512M16,
            }
            # NOTE(review): .get() yields None for an unknown sdram_device and
            # only fails later at sdram_module(...) — confirm this is intended.
            sdram_module = available_sdram_modules.get(sdram_device)
            self.submodules.ddrphy = ECP5DDRPHY(
                platform.request("ddram"),
                sys_clk_freq=sys_clk_freq)
            # Let the DDR PHY init sequencer gate/reset the system clocks
            # through the CRG's stop/reset controls (see _CRG).
            self.comb += self.crg.stop.eq(self.ddrphy.init.stop)
            self.comb += self.crg.reset.eq(self.ddrphy.init.reset)
            self.add_sdram("sdram",
                phy = self.ddrphy,
                module = sdram_module(sys_clk_freq, "1:2"),
                l2_cache_size = kwargs.get("l2_size", 8192)
            )
        # Ethernet / Etherbone ---------------------------------------------------------------------
        if with_ethernet or with_etherbone:
            self.submodules.ethphy = LiteEthPHYRGMII(
                clock_pads = self.platform.request("eth_clocks"),
                pads = self.platform.request("eth"))
            if with_ethernet:
                self.add_ethernet(phy=self.ethphy, dynamic_ip=eth_dynamic_ip)
            if with_etherbone:
                self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)
        # SPI Flash --------------------------------------------------------------------------------
        if with_spi_flash:
            from litespi.modules import W25Q128JV
            from litespi.opcodes import SpiNorFlashOpCodes as Codes
            self.add_spi_flash(mode="4x", module=W25Q128JV(Codes.READ_1_1_4), with_master=False)
        # Leds -------------------------------------------------------------------------------------
        if with_led_chaser:
            self.comb += platform.request("user_led_color").eq(0b010) # Blue.
            self.submodules.leds = LedChaser(
                pads = platform.request_all("user_led"),
                sys_clk_freq = sys_clk_freq)
        # GPIOs ------------------------------------------------------------------------------------
        if with_syzygy_gpio:
            platform.add_extension(butterstick.raw_syzygy_io("SYZYGY0"))
            self.submodules.gpio = GPIOTristate(platform.request("SYZYGY0"))
# Build --------------------------------------------------------------------------------------------
def main():
    """Parse the command line, elaborate the SoC, then optionally build the
    bitstream and/or load it onto the board."""
    parser = argparse.ArgumentParser(description="LiteX SoC on ButterStick")
    parser.add_argument("--build", action="store_true", help="Build bitstream.")
    parser.add_argument("--load", action="store_true", help="Load bitstream.")
    parser.add_argument("--toolchain", default="trellis", help="FPGA toolchain (trellis or diamond).")
    # NOTE(review): the CLI default (75e6) overrides BaseSoC's 60e6 default
    # whenever this script is used — confirm the mismatch is intentional.
    parser.add_argument("--sys-clk-freq", default=75e6, help="System clock frequency.")
    parser.add_argument("--revision", default="1.0", help="Board Revision (1.0).")
    parser.add_argument("--device", default="85F", help="ECP5 device (25F, 45F, 85F).")
    parser.add_argument("--sdram-device", default="MT41K64M16", help="SDRAM device (MT41K64M16, MT41K128M16, MT41K256M16 or MT41K512M16).")
    ethopts = parser.add_mutually_exclusive_group()
    ethopts.add_argument("--with-ethernet", action="store_true", help="Add Ethernet.")
    ethopts.add_argument("--with-etherbone", action="store_true", help="Add EtherBone.")
    parser.add_argument("--eth-ip", default="192.168.1.50", help="Ethernet/Etherbone IP address.")
    parser.add_argument("--eth-dynamic-ip", action="store_true", help="Enable dynamic Ethernet IP addresses setting.")
    parser.add_argument("--with-spi-flash", action="store_true", help="Enable SPI Flash (MMAPed).")
    sdopts = parser.add_mutually_exclusive_group()
    sdopts.add_argument("--with-spi-sdcard", action="store_true", help="Enable SPI-mode SDCard support.")
    sdopts.add_argument("--with-sdcard", action="store_true", help="Enable SDCard support.")
    parser.add_argument("--with-syzygy-gpio",action="store_true", help="Enable GPIOs through SYZYGY Breakout on Port-A.")
    # Register the standard LiteX builder / SoC core / Trellis option groups.
    builder_args(parser)
    soc_core_args(parser)
    trellis_args(parser)
    args = parser.parse_args()
    # Etherbone needs a fixed IP address; dynamic IP applies to Ethernet only.
    assert not (args.with_etherbone and args.eth_dynamic_ip)
    soc = BaseSoC(
        toolchain = args.toolchain,
        revision = args.revision,
        device = args.device,
        sdram_device = args.sdram_device,
        sys_clk_freq = int(float(args.sys_clk_freq)),
        with_ethernet = args.with_ethernet,
        with_etherbone = args.with_etherbone,
        eth_ip = args.eth_ip,
        eth_dynamic_ip = args.eth_dynamic_ip,
        with_spi_flash = args.with_spi_flash,
        with_syzygy_gpio = args.with_syzygy_gpio,
        **soc_core_argdict(args))
    # SD card options are mutually exclusive (see sdopts group above).
    if args.with_spi_sdcard:
        soc.add_spi_sdcard()
    if args.with_sdcard:
        soc.add_sdcard()
    builder = Builder(soc, **builder_argdict(args))
    # Trellis-specific build arguments only apply to the trellis toolchain.
    builder_kargs = trellis_argdict(args) if args.toolchain == "trellis" else {}
    builder.build(**builder_kargs, run=args.build)
    # Load the generated bitstream through the board's programmer.
    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
# Script entry point.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.