gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import httplib
import logging
import os
import tempfile
import time
import android_commands
from chrome_test_server_spawner import SpawningServer
import constants
from flag_changer import FlagChanger
from forwarder import Forwarder
import lighttpd_server
import ports
from valgrind_tools import CreateTool
# A file on device to store ports of net test server. The format of the file is
# test-spawner-server-port:test-server-port
NET_TEST_SERVER_PORT_INFO_FILE = '/data/local/tmp/net-test-server-ports'
class BaseTestRunner(object):
  """Base class for running tests on a single device.

  A subclass should implement RunTests() with no parameter, so that calling
  the Run() method will set up tests, run them and tear them down.
  """

  def __init__(self, device, tool, shard_index, build_type):
    """
    Args:
      device: Tests will run on the device of this ID.
      tool: Name of the Valgrind tool to wrap the run with (handed to
          valgrind_tools.CreateTool).
      shard_index: Index number of the shard on which the test suite will run.
      build_type: 'Release' or 'Debug'.
    """
    self.device = device
    self.adb = android_commands.AndroidCommands(device=device)
    self.tool = CreateTool(tool, self.adb)
    # Synchronize date/time between host and device. Otherwise the same file on
    # host and device may have different timestamps, which may cause
    # AndroidCommands.PushIfNeeded to fail, or break a test which compares a
    # timestamp got from an HTTP head with local time.
    self.adb.SynchronizeDateTime()
    self._http_server = None
    self._forwarder = None
    # Fixed device-side port the HTTP-server forwarder listens on.
    self._forwarder_device_port = 8000
    self.forwarder_base_url = ('http://localhost:%d' %
        self._forwarder_device_port)
    self.flags = FlagChanger(self.adb)
    self.shard_index = shard_index
    self.flags.AddFlags(['--disable-fre'])
    self._spawning_server = None
    self._spawner_forwarder = None
    # We will allocate port for test server spawner when calling method
    # LaunchChromeTestServerSpawner and allocate port for test server when
    # starting it in TestServerThread.
    self.test_server_spawner_port = 0
    self.test_server_port = 0
    self.build_type = build_type

  def _PushTestServerPortInfoToDevice(self):
    """Pushes the latest port information to device.

    Writes 'spawner-port:server-port' to NET_TEST_SERVER_PORT_INFO_FILE so
    device-side code can locate both servers.
    """
    self.adb.SetFileContents(NET_TEST_SERVER_PORT_INFO_FILE,
                             '%d:%d' % (self.test_server_spawner_port,
                                        self.test_server_port))

  def Run(self):
    """Calls subclass functions to set up tests, run them and tear them down.

    Returns:
      Test results returned from RunTests().
    """
    if not self.HasTests():
      return True
    self.SetUp()
    try:
      return self.RunTests()
    finally:
      # Always tear down, even when RunTests() raises.
      self.TearDown()

  def SetUp(self):
    """Called before tests run."""
    pass

  def HasTests(self):
    """Whether the test suite has tests to run."""
    return True

  def RunTests(self):
    """Runs the tests. Need to be overridden."""
    raise NotImplementedError

  def TearDown(self):
    """Called when tests finish running."""
    self.ShutdownHelperToolsForTestSuite()

  def CopyTestData(self, test_data_paths, dest_dir):
    """Copies |test_data_paths| list of files/directories to |dest_dir|.

    Args:
      test_data_paths: A list of files or directories relative to |dest_dir|
          which should be copied to the device. The paths must exist in
          |CHROME_DIR|.
      dest_dir: Absolute path to copy to on the device.
    """
    for p in test_data_paths:
      self.adb.PushIfNeeded(
          os.path.join(constants.CHROME_DIR, p),
          os.path.join(dest_dir, p))

  def LinkSdCardPathsToTempDir(self, paths):
    """Link |paths| which are under sdcard to /data/local/tmp.

    For example, the test data '/sdcard/my_data' will be linked to
    '/data/local/tmp/my_data'.

    Args:
      paths: A list of files and directories relative to /sdcard.
    """
    links = set()
    for path in paths:
      link_name = os.path.dirname(path)
      assert link_name, 'Linked paths must be in a subdir of /sdcard/.'
      # Only the first path component under /sdcard needs a link.
      link_name = link_name.split('/')[0]
      if link_name not in links:
        mapped_device_path = '/data/local/tmp/' + link_name
        # Unlink the mapped_device_path at first in case it was mapped to
        # a wrong path. Add option '-r' because the old path could be a dir.
        self.adb.RunShellCommand('rm -r %s' % mapped_device_path)
        self.adb.RunShellCommand(
            'ln -s /sdcard/%s %s' % (link_name, mapped_device_path))
        links.add(link_name)

  def LaunchTestHttpServer(self, document_root, port=None,
                           extra_config_contents=None):
    """Launches an HTTP server to serve HTTP tests.

    Args:
      document_root: Document root of the HTTP server.
      port: Port on which we want the http server to bind.
      extra_config_contents: Extra config contents for the HTTP server.
    """
    self._http_server = lighttpd_server.LighttpdServer(
        document_root, port=port, extra_config_contents=extra_config_contents)
    if self._http_server.StartupHttpServer():
      logging.info('http server started: http://localhost:%s',
                   self._http_server.port)
    else:
      # NOTE(review): startup failure is only logged; the forwarder below is
      # still started against self._http_server.port.
      logging.critical('Failed to start http server')
    self.StartForwarderForHttpServer()

  def StartForwarder(self, port_pairs):
    """Starts TCP traffic forwarding for the given |port_pairs|.

    Args:
      port_pairs: A list of (device_port, local_port) tuples to forward.
    """
    # Sometimes the forwarder device port may be already used. We have to kill
    # all forwarder processes to ensure that the forwarder can be started since
    # currently we can not associate the specified port to related pid.
    self.adb.KillAll('forwarder')
    if self._forwarder:
      self._forwarder.Close()
    self._forwarder = Forwarder(
        self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)

  def StartForwarderForHttpServer(self):
    """Starts a forwarder for the HTTP server.

    The forwarder forwards HTTP requests and responses between host and device.
    """
    self.StartForwarder([(self._forwarder_device_port, self._http_server.port)])

  def RestartHttpServerForwarderIfNecessary(self):
    """Restarts the forwarder if it's not open."""
    # Checks to see if the http server port is being used. If not forwards the
    # request.
    # TODO(dtrainor): This is not always reliable because sometimes the port
    # will be left open even after the forwarder has been killed.
    if not ports.IsDevicePortUsed(self.adb,
        self._forwarder_device_port):
      self.StartForwarderForHttpServer()

  def ShutdownHelperToolsForTestSuite(self):
    """Shuts down the server and the forwarder."""
    # Forwarders should be killed before the actual servers they're forwarding
    # to as they are clients potentially with open connections and to allow for
    # proper hand-shake/shutdown.
    if self._forwarder or self._spawner_forwarder:
      # Kill all forwarders on the device and then kill the process on the host
      # (if it exists)
      self.adb.KillAll('forwarder')
      if self._forwarder:
        self._forwarder.Close()
      if self._spawner_forwarder:
        self._spawner_forwarder.Close()
    if self._http_server:
      self._http_server.ShutdownHttpServer()
    if self._spawning_server:
      self._spawning_server.Stop()
    self.flags.Restore()

  def LaunchChromeTestServerSpawner(self):
    """Launches test server spawner.

    Retries up to three times, allocating a fresh port each attempt and
    polling /ping until the spawner answers 'ready'.

    Raises:
      Exception: If the spawner could not be started after three attempts.
    """
    server_ready = False
    error_msgs = []
    # Try 3 times to launch test spawner server.
    for i in xrange(0, 3):
      # Do not allocate port for test server here. We will allocate
      # different port for individual test in TestServerThread.
      self.test_server_spawner_port = ports.AllocateTestServerPort()
      self._spawning_server = SpawningServer(self.test_server_spawner_port,
                                             self.test_server_port)
      self._spawning_server.Start()
      server_ready, error_msg = ports.IsHttpServerConnectable(
          '127.0.0.1', self.test_server_spawner_port, path='/ping',
          expected_read='ready')
      if server_ready:
        break
      else:
        error_msgs.append(error_msg)
        self._spawning_server.Stop()
        # Wait for 2 seconds then restart.
        time.sleep(2)
    if not server_ready:
      logging.error(';'.join(error_msgs))
      raise Exception('Can not start the test spawner server.')
    # Let device-side code know where both servers live.
    self._PushTestServerPortInfoToDevice()
    self._spawner_forwarder = Forwarder(
        self.adb,
        [(self.test_server_spawner_port, self.test_server_spawner_port)],
        self.tool, '127.0.0.1', self.build_type)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.operators.campaign_manager import (
GoogleCampaignManagerDeleteReportOperator, GoogleCampaignManagerDownloadReportOperator,
GoogleCampaignManagerInsertReportOperator, GoogleCampaignManagerRunReportOperator,
)
# Placeholder API version passed to every operator under test.
API_VERSION = "api_version"
# Connection id the hooks are expected to be constructed with by default.
GCP_CONN_ID = "google_cloud_default"
class TestGoogleCampaignManagerDeleteReportOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    def test_execute(self, mock_base_op, hook_mock):
        """execute() builds the hook once and forwards both ids to delete_report."""
        test_profile_id = "PROFILE_ID"
        test_report_id = "REPORT_ID"

        operator = GoogleCampaignManagerDeleteReportOperator(
            profile_id=test_profile_id,
            report_id=test_report_id,
            api_version=API_VERSION,
            task_id="test_task",
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.delete_report.assert_called_once_with(
            profile_id=test_profile_id, report_id=test_report_id
        )
class TestGoogleCampaignManagerGetReportOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.http"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.tempfile"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GCSHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerDownloadReportOperator.xcom_push"
    )
    def test_execute(
        self,
        xcom_mock,
        mock_base_op,
        gcs_hook_mock,
        hook_mock,
        tempfile_mock,
        http_mock,
    ):
        """execute() downloads the report file to a temp file, uploads it
        gzipped to GCS and publishes the resulting object name via XCom."""
        test_profile_id = "PROFILE_ID"
        test_report_id = "REPORT_ID"
        test_file_id = "FILE_ID"
        target_bucket = "test_bucket"
        target_report_name = "test_report.csv"
        tmp_name = "TEST"
        gzipped_name = target_report_name + ".gz"

        # A single next_chunk() call reports the media download as finished.
        downloader = http_mock.MediaIoBaseDownload.return_value
        downloader.next_chunk.return_value = (None, True)
        named_tmp = tempfile_mock.NamedTemporaryFile.return_value
        named_tmp.__enter__.return_value.name = tmp_name

        operator = GoogleCampaignManagerDownloadReportOperator(
            profile_id=test_profile_id,
            report_id=test_report_id,
            file_id=test_file_id,
            bucket_name=target_bucket,
            report_name=target_report_name,
            api_version=API_VERSION,
            task_id="test_task",
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.get_report_file.assert_called_once_with(
            profile_id=test_profile_id,
            report_id=test_report_id,
            file_id=test_file_id,
        )
        gcs_hook_mock.assert_called_once_with(
            google_cloud_storage_conn_id=GCP_CONN_ID, delegate_to=None
        )
        gcs_hook_mock.return_value.upload.assert_called_once_with(
            bucket_name=target_bucket,
            object_name=gzipped_name,
            gzip=True,
            filename=tmp_name,
            mime_type="text/csv",
        )
        xcom_mock.assert_called_once_with(
            None, key="report_name", value=gzipped_name
        )
class TestGoogleCampaignManagerInsertReportOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerInsertReportOperator.xcom_push"
    )
    def test_execute(self, xcom_mock, mock_base_op, hook_mock):
        """execute() inserts the report body and pushes the returned id to XCom."""
        test_profile_id = "PROFILE_ID"
        report_body = {"report": "test"}
        returned_report_id = "test"

        hook_mock.return_value.insert_report.return_value = {
            "id": returned_report_id
        }
        operator = GoogleCampaignManagerInsertReportOperator(
            profile_id=test_profile_id,
            report=report_body,
            api_version=API_VERSION,
            task_id="test_task",
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.insert_report.assert_called_once_with(
            profile_id=test_profile_id, report=report_body
        )
        xcom_mock.assert_called_once_with(
            None, key="report_id", value=returned_report_id
        )
class TestGoogleCampaignManagerRunReportOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.BaseOperator"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerRunReportOperator.xcom_push"
    )
    def test_execute(self, xcom_mock, mock_base_op, hook_mock):
        """execute() runs the report and publishes the produced file id via XCom."""
        test_profile_id = "PROFILE_ID"
        test_report_id = "REPORT_ID"
        produced_file_id = "FILE_ID"
        run_synchronously = True

        hook_mock.return_value.run_report.return_value = {
            "id": produced_file_id
        }
        operator = GoogleCampaignManagerRunReportOperator(
            profile_id=test_profile_id,
            report_id=test_report_id,
            synchronous=run_synchronously,
            api_version=API_VERSION,
            task_id="test_task",
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID, delegate_to=None, api_version=API_VERSION
        )
        hook_mock.return_value.run_report.assert_called_once_with(
            profile_id=test_profile_id,
            report_id=test_report_id,
            synchronous=run_synchronously,
        )
        xcom_mock.assert_called_once_with(
            None, key="file_id", value=produced_file_id
        )
| |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import pickle
import subprocess
import uuid
from .voc_eval import voc_eval
from model.config import cfg
class pascal_voc(imdb):
  """PASCAL VOC detection dataset (imdb subclass).

  Wraps a VOCdevkit directory: loads the image index and ground-truth
  annotations, writes detection results in the devkit's format, and runs
  the Python (and optionally MATLAB) VOC evaluation.
  """

  def __init__(self, image_set, year, devkit_path=None):
    """
    Args:
      image_set: Split name, e.g. 'train', 'val', 'trainval' or 'test'.
      year: VOC release year as a string, e.g. '2007'.
      devkit_path: Optional path to VOCdevkit; defaults to
          cfg.DATA_DIR/VOCdevkit<year>.
    """
    imdb.__init__(self, 'voc_' + year + '_' + image_set)
    self._year = year
    self._image_set = image_set
    self._devkit_path = self._get_default_path() if devkit_path is None \
      else devkit_path
    self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
    self._classes = ('__background__',  # always index 0
                     'aeroplane', 'bicycle', 'bird', 'boat',
                     'bottle', 'bus', 'car', 'cat', 'chair',
                     'cow', 'diningtable', 'dog', 'horse',
                     'motorbike', 'person', 'pottedplant',
                     'sheep', 'sofa', 'train', 'tvmonitor')
    # Map class name -> contiguous label index.
    self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
    self._image_ext = '.jpg'
    self._image_index = self._load_image_set_index()
    # Default to roidb handler
    self._roidb_handler = self.gt_roidb
    # Random salt keeps concurrent result files from clobbering each other.
    self._salt = str(uuid.uuid4())
    self._comp_id = 'comp4'

    # PASCAL specific config options
    self.config = {'cleanup': True,
                   'use_salt': True,
                   'use_diff': False,
                   'matlab_eval': False,
                   'rpn_file': None}

    assert os.path.exists(self._devkit_path), \
      'VOCdevkit path does not exist: {}'.format(self._devkit_path)
    assert os.path.exists(self._data_path), \
      'Path does not exist: {}'.format(self._data_path)

  def image_path_at(self, i):
    """Return the absolute path to image i in the image sequence."""
    return self.image_path_from_index(self._image_index[i])

  def image_path_from_index(self, index):
    """Construct an image path from the image's "index" identifier."""
    image_path = os.path.join(self._data_path, 'JPEGImages',
                              index + self._image_ext)
    assert os.path.exists(image_path), \
      'Path does not exist: {}'.format(image_path)
    return image_path

  def _load_image_set_index(self):
    """Load the indexes listed in this dataset's image set file."""
    # Example path to image set file:
    # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
    image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
                                  self._image_set + '.txt')
    assert os.path.exists(image_set_file), \
      'Path does not exist: {}'.format(image_set_file)
    with open(image_set_file) as f:
      image_index = [x.strip() for x in f.readlines()]
    return image_index

  def _get_default_path(self):
    """Return the default path where PASCAL VOC is expected to be installed."""
    return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)

  def gt_roidb(self):
    """Return the database of ground-truth regions of interest.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(cfg.TRAIN.CACHE_PATH, self.name + '_gt_roidb.pkl')
    if os.path.exists(cache_file):
      with open(cache_file, 'rb') as fid:
        try:
          roidb = pickle.load(fid)
        except Exception:
          # Cache was likely written by Python 2: rewind (the failed load
          # consumed part of the stream) and re-read with bytes encoding.
          fid.seek(0)
          roidb = pickle.load(fid, encoding='bytes')
      print('{} gt roidb loaded from {}'.format(self.name, cache_file))
      return roidb

    gt_roidb = [self._load_pascal_annotation(index)
                for index in self.image_index]
    with open(cache_file, 'wb') as fid:
      pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
    print('wrote gt roidb to {}'.format(cache_file))
    return gt_roidb

  def rpn_roidb(self):
    """Return the roidb built from precomputed RPN proposals.

    For the 2007 test set the ground truth is merged in as well.
    """
    if int(self._year) == 2007 or self._image_set != 'test':
      gt_roidb = self.gt_roidb()
      rpn_roidb = self._load_rpn_roidb(gt_roidb)
      roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
    else:
      roidb = self._load_rpn_roidb(None)
    return roidb

  def _load_rpn_roidb(self, gt_roidb):
    """Load pickled RPN box proposals from config['rpn_file']."""
    filename = self.config['rpn_file']
    print('loading {}'.format(filename))
    assert os.path.exists(filename), \
      'rpn data not found at: {}'.format(filename)
    with open(filename, 'rb') as f:
      box_list = pickle.load(f)
    return self.create_roidb_from_box_list(box_list, gt_roidb)

  def _load_pascal_annotation(self, index):
    """Load image and bounding boxes info from XML file in the PASCAL VOC
    format.
    """
    filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
    tree = ET.parse(filename)
    objs = tree.findall('object')
    if not self.config['use_diff']:
      # Exclude the samples labeled as difficult
      non_diff_objs = [
        obj for obj in objs if int(obj.find('difficult').text) == 0]
      objs = non_diff_objs
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
    # "Seg" area for pascal is just the box area
    seg_areas = np.zeros((num_objs), dtype=np.float32)

    # Load object bounding boxes into a data frame.
    for ix, obj in enumerate(objs):
      bbox = obj.find('bndbox')
      # Make pixel indexes 0-based
      x1 = float(bbox.find('xmin').text) - 1
      y1 = float(bbox.find('ymin').text) - 1
      x2 = float(bbox.find('xmax').text) - 1
      y2 = float(bbox.find('ymax').text) - 1
      cls = self._class_to_ind[obj.find('name').text.lower().strip()]
      boxes[ix, :] = [x1, y1, x2, y2]
      gt_classes[ix] = cls
      overlaps[ix, cls] = 1.0
      seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)

    overlaps = scipy.sparse.csr_matrix(overlaps)

    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'seg_areas': seg_areas}

  def _get_comp_id(self):
    """Competition id, optionally salted to avoid result-file collisions."""
    comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
               else self._comp_id)
    return comp_id

  def _get_voc_results_file_template(self):
    """Per-class results file template in the devkit layout."""
    # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
    filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
    path = os.path.join(
      self._devkit_path,
      'results',
      'VOC' + self._year,
      'Main',
      filename)
    return path

  def _write_voc_results_file(self, all_boxes):
    """Write detections (one file per class) in the devkit's text format.

    Args:
      all_boxes: all_boxes[cls_ind][im_ind] is an (N, 5) array of
          [x1, y1, x2, y2, score] rows (or an empty list/array).
    """
    for cls_ind, cls in enumerate(self.classes):
      if cls == '__background__':
        continue
      print('Writing {} VOC results file'.format(cls))
      filename = self._get_voc_results_file_template().format(cls)
      folder_name = os.path.dirname(os.path.abspath(filename))
      if not os.path.exists(folder_name):
        # Bug fix: os.mkdirs() does not exist (AttributeError); makedirs also
        # creates intermediate directories.
        os.makedirs(folder_name)
      with open(filename, 'wt') as f:
        for im_ind, index in enumerate(self.image_index):
          dets = all_boxes[cls_ind][im_ind]
          # Bug fix: `dets == []` is ambiguous/element-wise for numpy arrays;
          # len() handles both empty lists and empty arrays.
          if len(dets) == 0:
            continue
          # the VOCdevkit expects 1-based indices
          for k in range(dets.shape[0]):
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                    format(index, dets[k, -1],
                           dets[k, 0] + 1, dets[k, 1] + 1,
                           dets[k, 2] + 1, dets[k, 3] + 1))

  def _do_python_eval(self, output_dir='output'):
    """Run the (unofficial) Python VOC evaluation and print per-class AP."""
    annopath = os.path.join(
      self._devkit_path,
      'VOC' + self._year,
      'Annotations',
      '{:s}.xml')
    imagesetfile = os.path.join(
      self._devkit_path,
      'VOC' + self._year,
      'ImageSets',
      'Main',
      self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = int(self._year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
      os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
      if cls == '__background__':
        continue
      filename = self._get_voc_results_file_template().format(cls)
      rec, prec, ap = voc_eval(
        filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
        use_07_metric=use_07_metric)
      aps += [ap]
      print(('AP for {} = {:.4f}'.format(cls, ap)))
      with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
        pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print(('Mean AP = {:.4f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
      print(('{:.3f}'.format(ap)))
    print(('{:.3f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')

  def _do_matlab_eval(self, output_dir='output'):
    """Shell out to the official MATLAB evaluation wrapper."""
    print('-----------------------------------------------------')
    print('Computing results with the official MATLAB eval code.')
    print('-----------------------------------------------------')
    path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                        'VOCdevkit-matlab-wrapper')
    cmd = 'cd {} && '.format(path)
    cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
    cmd += '-r "dbstop if error; '
    cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
      .format(self._devkit_path, self._get_comp_id(),
              self._image_set, output_dir)
    print(('Running:\n{}'.format(cmd)))
    subprocess.call(cmd, shell=True)

  def evaluate_detections(self, all_boxes, output_dir):
    """Write result files, evaluate them, and optionally clean up."""
    self._write_voc_results_file(all_boxes)
    self._do_python_eval(output_dir)
    if self.config['matlab_eval']:
      self._do_matlab_eval(output_dir)
    if self.config['cleanup']:
      for cls in self._classes:
        if cls == '__background__':
          continue
        filename = self._get_voc_results_file_template().format(cls)
        os.remove(filename)

  def competition_mode(self, on):
    """Toggle competition mode: unsalted comp ids, result files kept."""
    if on:
      self.config['use_salt'] = False
      self.config['cleanup'] = False
    else:
      self.config['use_salt'] = True
      self.config['cleanup'] = True
if __name__ == '__main__':
  # Smoke-test: build the VOC 2007 trainval imdb, force roidb construction,
  # then drop into an interactive shell for inspection.
  from datasets.pascal_voc import pascal_voc
  dataset = pascal_voc('trainval', '2007')
  res = dataset.roidb
  from IPython import embed
  embed()
| |
#!/usr/bin/env python
# $Id$
# -*- coding: utf-8
'''
test libcint
'''
__author__ = "Qiming Sun <osirpt.sun@gmail.com>"
import sys
import os
import ctypes
import numpy
# Bind the libcint shared library (from the current directory) via ctypes.
_cint = numpy.ctypeslib.load_library('libcint', '.')

# Offsets of special values at the head of the env array (libcint convention).
PTR_LIGHT_SPEED = 0
PTR_COMMON_ORIG = 1
PTR_SHIELDING_ORIG = 4
PTR_RINV_ORIG = 4
PTR_RINV_ZETA = 7
PTR_ENV_START = 20
# Column slots of each row in the atm (atom) integer table.
CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
RAD_GRIDS = 4
ANG_GRIDS = 5
ATM_SLOTS = 6
# Column slots of each row in the bas (basis shell) integer table.
ATOM_OF = 0
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8
natm = 4
nbas = 0
# Over-allocated integer tables plus a flat double buffer, shared with C code.
atm = numpy.zeros((natm+1,ATM_SLOTS), dtype=numpy.int32)
bas = numpy.zeros((1000,BAS_SLOTS), dtype=numpy.int32)
env = numpy.zeros(10000)
off = PTR_ENV_START
# Arbitrary but distinct charge and coordinates for each atom; coordinates
# are stored in env and referenced by pointer from the atm table.
for i in range(natm):
    atm[i, CHARGE_OF] = (i+1)*2
    atm[i, PTR_COORD] = off
    env[off+0] = .2 * (i+1)
    env[off+1] = .3 + (i+1) * .5
    env[off+2] = .1 - (i+1) * .5
    off += 3
off0 = off
# basis with kappa > 0
nh = 0
# Shell 0: p shell, one primitive, one contraction, on atom 0.
bas[nh,ATOM_OF ] = 0
bas[nh,ANG_OF  ] = 1
bas[nh,KAPPA_OF] = 1
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
# Shell 1: d shell with two primitives and two contractions on atom 1;
# env holds exponents first, then the 2x2 coefficient block.
bas[nh,ATOM_OF ] = 1
bas[nh,ANG_OF  ] = 2
bas[nh,KAPPA_OF] = 2
bas[nh,NPRIM_OF] = 2
bas[nh,NCTR_OF ] = 2
bas[nh,PTR_EXP] = off
env[off+0] = 5
env[off+1] = 3
bas[nh,PTR_COEFF] = off + 2
env[off+2] = 1
env[off+3] = 2
env[off+4] = 4
env[off+5] = 1
off += 6
nh += 1
# Shell 2: f shell, single primitive, on atom 2.
bas[nh,ATOM_OF ] = 2
bas[nh,ANG_OF  ] = 3
bas[nh,KAPPA_OF] = 3
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
# Shell 3: g shell, single primitive, on atom 3.
bas[nh,ATOM_OF ] = 3
bas[nh,ANG_OF  ] = 4
bas[nh,KAPPA_OF] = 4
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = .5
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1.
off = off + 2
nh += 1
nbas = nh
# basis with kappa < 0
# Duplicate the env section for the mirrored shells, then build shells
# nh..2*nh-1 as copies with l-1 and negated kappa.
n = off - off0
for i in range(n):
    env[off+i] = env[off0+i]
for i in range(nh):
    bas[i+nh,ATOM_OF ] = bas[i,ATOM_OF ]
    bas[i+nh,ANG_OF  ] = bas[i,ANG_OF  ] - 1
    bas[i+nh,KAPPA_OF] =-bas[i,KAPPA_OF]
    bas[i+nh,NPRIM_OF] = bas[i,NPRIM_OF]
    bas[i+nh,NCTR_OF ] = bas[i,NCTR_OF ]
    bas[i+nh,PTR_EXP ] = bas[i,PTR_EXP ] + n
    bas[i+nh,PTR_COEFF]= bas[i,PTR_COEFF] + n
    # Scale the first coefficient by 1/(2*exponent) (kinetic-balance style).
    env[bas[i+nh,PTR_COEFF]] /= 2 * env[bas[i,PTR_EXP]]
# Shell 5 mirrors the 2-primitive/2-contraction shell 1; the loop above only
# rescaled its first coefficient, so redo all four coefficients element-wise.
env[bas[5,PTR_COEFF]+0] = env[bas[1,PTR_COEFF]+0] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+1] = env[bas[1,PTR_COEFF]+1] / (2 * env[bas[1,PTR_EXP]+1])
env[bas[5,PTR_COEFF]+2] = env[bas[1,PTR_COEFF]+2] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+3] = env[bas[1,PTR_COEFF]+3] / (2 * env[bas[1,PTR_EXP]+1])
# Auxiliary "unit" fitting shell (s function, zero exponent, normalized
# coefficient) appended after the real shells.
nfitid = nbas*2
off += n
bas[nfitid,ATOM_OF ] = 0
bas[nfitid,ANG_OF  ] = 0
bas[nfitid,KAPPA_OF] = 0
bas[nfitid,NPRIM_OF] = 1
bas[nfitid,NCTR_OF ] = 1
bas[nfitid,PTR_EXP ] = off
env[off+0] = 0
off += 1
bas[nfitid,PTR_COEFF] = off
env[off+0] = 2 * numpy.sqrt(numpy.pi)
# Second unit fitting shell, used by the 2-center tests.
nfitid1 = nbas*2 + 1
# NOTE(review): advancing by n (not 1) here looks like an oversight but is
# harmless since env is over-allocated — TODO confirm against libcint tests.
off += n
bas[nfitid1,ATOM_OF ] = 0
bas[nfitid1,ANG_OF  ] = 0
bas[nfitid1,KAPPA_OF] = 0
bas[nfitid1,NPRIM_OF] = 1
bas[nfitid1,NCTR_OF ] = 1
bas[nfitid1,PTR_EXP ] = off
env[off+0] = 0
off += 1
bas[nfitid1,PTR_COEFF] = off
env[off+0] = 2 * numpy.sqrt(numpy.pi)
# ctypes-compatible views of the tables, passed to every libcint call below.
natm = ctypes.c_int(natm)
nbas = ctypes.c_int(nbas)
c_atm = atm.ctypes.data_as(ctypes.c_void_p)
c_bas = bas.ctypes.data_as(ctypes.c_void_p)
c_env = env.ctypes.data_as(ctypes.c_void_p)
# NULL optimizer pointer: the integral drivers run unoptimized.
opt = ctypes.POINTER(ctypes.c_void_p)()
_cint.CINTlen_spinor.restype = ctypes.c_int
def close(v1, vref, count, place):
    """Return True when the mean absolute deviation of v1 from vref
    (normalized by count) rounds to zero at `place` decimal places."""
    mean_err = abs(v1 - vref) / count
    return round(mean_err, place) == 0
def test_int3c2e_sph(name, fnref, vref, dim, place):
    """Compare a 3-center spherical integral against a reference integral.

    name: libcint symbol of the function under test.
    fnref: libcint symbol of the reference function.
    vref: expected accumulated absolute sum over all shell triplets.
    dim: number of components per integral element.
    place: decimal place passed to close() for the final pass/fail check.
    """
    intor = getattr(_cint, name)
    intoref = getattr(_cint, fnref)
    intor.restype = ctypes.c_void_p
    # Scratch output buffers, sized to hold any shell-triplet block.
    op = numpy.empty(1000000*dim)
    pop = op.ctypes.data_as(ctypes.c_void_p)
    opref = numpy.empty(1000000*dim)
    pref = opref.ctypes.data_as(ctypes.c_void_p)
    v1 = 0
    cnt = 0
    for k in range(nbas.value):
        # Move the unit fitting shell onto the same atom as shell k.
        l = nfitid
        bas[l,ATOM_OF] = bas[k,ATOM_OF]
        for j in range(nbas.value):
            for i in range(nbas.value):
                # Block dimensions: (2l+1) spherical components per contraction.
                di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                nd = di*dj*dk*dim
                shls = (ctypes.c_int * 4)(i, j, k, l)
                intoref(pref, shls, c_atm, natm, c_bas, nbas, c_env, opt)
                intor(pop, shls, c_atm, natm, c_bas, nbas, c_env, opt)
                # Element-wise mismatch is reported but does not abort the run.
                if not numpy.allclose(opref[:nd], op[:nd]):
                    print 'Fail:', name, i,j,k
                v1 += abs(numpy.array(op[:nd])).sum()
                cnt += nd
    if close(v1, vref, cnt, place):
        print "pass: ", name
    else:
        print "* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref
def sf2spinor(mat, i, j, bas):
    """Transform a scalar (real spherical) integral block for shells i, j
    into the spinor basis, duplicating it over the alpha/beta components and
    contracting with the real-to-spinor coefficient matrices."""
    import pyscf.symm.cg
    import scipy.linalg
    assert mat.ndim == 3
    nctr_i = bas[i,NCTR_OF]
    nctr_j = bas[j,NCTR_OF]
    # Real-spherical -> spinor transformation blocks for each shell's l.
    ua_i, ub_i = pyscf.symm.cg.real2spinor(bas[i,ANG_OF])
    ua_j, ub_j = pyscf.symm.cg.real2spinor(bas[j,ANG_OF])
    # Repeat each block once per contraction along the diagonal.
    ua_i = scipy.linalg.block_diag(*((ua_i,)*nctr_i))
    ub_i = scipy.linalg.block_diag(*((ub_i,)*nctr_i))
    ua_j = scipy.linalg.block_diag(*((ua_j,)*nctr_j))
    ub_j = scipy.linalg.block_diag(*((ub_j,)*nctr_j))
    u1 = numpy.vstack((ua_i, ub_i))
    u2 = numpy.vstack((ua_j, ub_j))
    m, n, k = mat.shape
    # Spin-free operator: identical alpha-alpha and beta-beta blocks.
    matab = numpy.zeros((m*2, n*2, k))
    matab[:m, :n, :] = mat
    matab[m:, n:, :] = mat
    zmat = numpy.einsum('pjk,pi->ijk', matab, u1.conj())
    return numpy.einsum('ipk,pj->ijk', zmat, u2)
def test_int3c2e_spinor(name, fnref, vref, dim, place):
    """Check a spinor 3-center integral against the spherical reference
    transformed through sf2spinor().

    Uses a copy of the bas table with kappa zeroed for the C calls, but the
    original bas table for dimension bookkeeping and the transformation.
    """
    abas = bas.copy()
    abas[:,KAPPA_OF] = 0
    c_bas = abas.ctypes.data_as(ctypes.c_void_p)
    intor = getattr(_cint, name)
    intoref = getattr(_cint, fnref)
    intor.restype = ctypes.c_void_p
    v1 = 0
    cnt = 0
    for k in range(nbas.value):
        l = nfitid
        for j in range(nbas.value):
            for i in range(nbas.value):
                # Spherical block dimensions for the reference integral.
                di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                shls = (ctypes.c_int * 4)(i, j, k, l)
                opref = numpy.empty((di,dj,dk,dim), order='F')
                intoref(opref.ctypes.data_as(ctypes.c_void_p), shls,
                        c_atm, natm, c_bas, nbas, c_env, opt)
                zmat = sf2spinor(opref[:,:,:,0], i, j, bas)
                # Spinor dimensions are twice the spherical ones for i and j.
                di = (bas[i,ANG_OF] * 4 + 2) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 4 + 2) * bas[j,NCTR_OF]
                dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                # NOTE(review): numpy.complex was removed in NumPy 1.24; this
                # Python 2 script predates that — would need `complex` on
                # modern NumPy.
                op = numpy.empty((di,dj,dk,dim), order='F', dtype=numpy.complex)
                intor(op.ctypes.data_as(ctypes.c_void_p), shls,
                      c_atm, natm, c_bas, nbas, c_env, opt)
                if not numpy.allclose(zmat, op[:,:,:,0]):
                    print 'Fail:', name, i,j,k
                v1 += abs(numpy.array(op)).sum()
                cnt += op.size
    if close(v1, vref, cnt, place):
        print "pass: ", name
    else:
        print "* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref
def test_int2c_sph(name, fnref, vref, dim, place):
    """Check a 2-center spherical integral against a 3-center reference where
    the middle shell is the unit fitting function (nfitid1)."""
    intor = getattr(_cint, name)
    intoref = getattr(_cint, fnref)
    intor.restype = ctypes.c_void_p
    # Scratch output buffers, sized to hold any shell-pair block.
    op = numpy.empty(1000000*dim)
    pop = op.ctypes.data_as(ctypes.c_void_p)
    opref = numpy.empty(1000000*dim)
    pref = opref.ctypes.data_as(ctypes.c_void_p)
    v1 = 0
    cnt = 0
    for k in range(nbas.value):
        for i in range(nbas.value):
            # Place the unit fitting shell on the same atom as shell i.
            j = nfitid1
            bas[j,ATOM_OF] = bas[i,ATOM_OF]
            di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
            dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
            nd = di*dk*dim
            # Reference: 3-center integral (i, unit, k); test: 2-center (i, k).
            shls = (ctypes.c_int * 3)(i, j, k)
            intoref(pref, shls, c_atm, natm, c_bas, nbas, c_env, opt)
            shls = (ctypes.c_int * 2)(i, k)
            intor(pop, shls, c_atm, natm, c_bas, nbas, c_env, opt)
            if not numpy.allclose(opref[:nd], op[:nd]):
                print 'Fail:', name, i,k
            v1 += abs(numpy.array(op[:nd])).sum()
            cnt += nd
    if close(v1, vref, cnt, place):
        print "pass: ", name
    else:
        print "* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref
if __name__ == "__main__":
    # With --high-prec, replace close() by a stricter check on the absolute
    # (not per-element-averaged) error.
    if "--high-prec" in sys.argv:
        def close(v1, vref, count, place):
            return round(abs(v1-vref), place) == 0
    # 3-center 2-electron integrals vs their 2-electron references.
    for f in (('cint3c2e_sph', 'cint2e_sph', 1586.350797432699, 1, 10),
              ('cint3c2e_ip1_sph', 'cint2e_ip1_sph', 2242.052249267909, 3, 10),
              ('cint3c2e_ip2_sph', 'cint2e_ip2_sph', 1970.982483860059, 3, 10),
             ):
        test_int3c2e_sph(*f)
    # The spinor check is slow; skipped under --quick.
    if "--quick" not in sys.argv:
        for f in (('cint3c2e_spinor', 'cint3c2e_sph', 4412.363002831547, 1, 10),
                 ):
            test_int3c2e_spinor(*f)
#    for f in (('cint2c2e_sph', 'cint2e_sph', 782.3104849606677, 1, 10),
#              ('cint2c2e_ip1_sph', 'cint2e_ip1_sph', 394.6515972715189, 3, 10),
#              ('cint2c2e_ip2_sph', 'cint2e_ip2_sph', 394.6515972715189, 3, 10),
#             ):
#        test_int2c2e_sph(*f)
    # 2-center integrals vs 3-center references with a unit fitting shell.
    for f in (('cint2c2e_sph', 'cint3c2e_sph', 782.3104849606677, 1, 10),
              ('cint2c2e_ip1_sph', 'cint3c2e_ip1_sph', 394.6515972715189, 3, 10),
              ('cint2c2e_ip2_sph', 'cint3c2e_ip2_sph', 394.6515972715189, 3, 10),
              ('cint1e_ovlp_sph', 'cint3c1e_sph', 288.739411257669, 1, 10),
              #('cint1e_kin_sph'*2.0, 'cint3c1e_p2_sph', 1662.148571297274, 1, 10),
              ('cint1e_r2_origj_sph', 'cint3c1e_r2_origk_sph', 1467.040217557744, 1, 10),
             ):
        test_int2c_sph(*f)
| |
# Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import time
import urllib2
from tempest.common import commands
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
from tempest.services.network import resources as net_resources
from tempest import test
# NOTE: rebinding 'config' to the parsed CONF object shadows the imported
# tempest.config module for the rest of this file.
config = config.CONF
class TestLoadBalancerBasic(manager.NetworkScenarioTest):
    """
    This test checks basic load balancing.
    The following is the scenario outline:
    1. Create an instance
    2. SSH to the instance and start two servers
    3. Create a load balancer with two members and with ROUND_ROBIN algorithm
       associate the VIP with a floating ip
    4. Send NUM requests to the floating ip and check that they are shared
       between the two servers.
    """

    @classmethod
    def check_preconditions(cls):
        # Skip the whole class unless neutron's LBaaS extension is enabled
        # and the VIP will be reachable from the test runner (via tenant
        # networks or via a public network for floating IPs).
        super(TestLoadBalancerBasic, cls).check_preconditions()
        cfg = config.network
        if not test.is_extension_enabled('lbaas', 'network'):
            msg = 'LBaaS Extension is not enabled'
            raise cls.skipException(msg)
        if not (cfg.tenant_networks_reachable or cfg.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        # Class-level bookkeeping shared by the helpers below.
        super(TestLoadBalancerBasic, cls).resource_setup()
        cls.check_preconditions()
        cls.servers_keypairs = {}  # server id -> keypair used to SSH in
        cls.members = []           # created lbaas pool members
        cls.floating_ips = {}      # floating ip -> server, and vip id -> [fips]
        cls.server_ips = {}        # server id -> address used to reach it
        cls.port1 = 80             # port served by the first backend
        cls.port2 = 88             # port served by the second backend
        cls.num = 50               # number of requests sent by the final check

    def setUp(self):
        super(TestLoadBalancerBasic, self).setUp()
        self.server_ips = {}
        self.server_fixed_ips = {}
        self._create_security_group_for_test()
        self._set_net_and_subnet()

    def _set_net_and_subnet(self):
        """
        Query and set appropriate network and subnet attributes to be used
        for the test. Existing tenant networks are used if they are found.
        The configured private network and associated subnet is used as a
        fallback in absence of tenant networking.
        """
        try:
            tenant_net = self._list_networks(tenant_id=self.tenant_id)[0]
        except IndexError:
            tenant_net = None
        if tenant_net:
            tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
            self.subnet = net_resources.DeletableSubnet(
                client=self.network_client,
                **tenant_subnet)
            self.network = tenant_net
        else:
            self.network = self._get_network_by_name(
                config.compute.fixed_network_name)
            # TODO(adam_g): We are assuming that the first subnet associated
            # with the fixed network is the one we want. In the future, we
            # should instead pull a subnet id from config, which is set by
            # devstack/admin/etc.
            subnet = self._list_subnets(network_id=self.network['id'])[0]
            self.subnet = net_resources.AttributeDict(subnet)

    def _create_security_group_for_test(self):
        # One security group allowing ingress TCP to both backend ports.
        self.security_group = self._create_security_group(
            tenant_id=self.tenant_id)
        self._create_security_group_rules_for_port(self.port1)
        self._create_security_group_rules_for_port(self.port2)

    def _create_security_group_rules_for_port(self, port):
        # Allow ingress TCP traffic to a single port.
        rule = {
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': port,
            'port_range_max': port,
        }
        self._create_security_group_rule(
            secgroup=self.security_group,
            tenant_id=self.tenant_id,
            **rule)

    def _create_server(self, name):
        # Boot one server on the test network and record how to reach it:
        # via a floating IP when tenant networks are not reachable,
        # otherwise via its fixed address.
        keypair = self.create_keypair()
        security_groups = [{'name': self.security_group['name']}]
        create_kwargs = {
            'networks': [
                {'uuid': self.network['id']},
            ],
            'key_name': keypair['name'],
            'security_groups': security_groups,
        }
        net_name = self.network['name']
        server = self.create_server(name=name, create_kwargs=create_kwargs)
        self.servers_keypairs[server['id']] = keypair
        if (config.network.public_network_id and not
                config.network.tenant_networks_reachable):
            public_network_id = config.network.public_network_id
            floating_ip = self.create_floating_ip(
                server, public_network_id)
            self.floating_ips[floating_ip] = server
            self.server_ips[server['id']] = floating_ip.floating_ip_address
        else:
            self.server_ips[server['id']] =\
                server['addresses'][net_name][0]['addr']
        # The fixed address is always recorded: pool members are created
        # against fixed IPs regardless of how the test reaches the server.
        self.server_fixed_ips[server['id']] =\
            server['addresses'][net_name][0]['addr']
        self.assertTrue(self.servers_keypairs)
        return server

    def _create_servers(self):
        # Boot two servers, one per backend.
        for count in range(2):
            self._create_server(name=("server%s" % (count + 1)))
        self.assertEqual(len(self.servers_keypairs), 2)

    def _start_servers(self):
        """
        Start two backends
        1. SSH to the instance
        2. Start two http backends listening on ports 80 and 88 respectively
        """
        for server_id, ip in self.server_ips.iteritems():
            private_key = self.servers_keypairs[server_id]['private_key']
            server_name = self.servers_client.get_server(server_id)[1]['name']
            username = config.scenario.ssh_user
            ssh_client = self.get_remote_client(
                server_or_ip=ip,
                private_key=private_key)
            # Write a backend's response into a file
            resp = ('echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n'
                    'Connection: close\r\nContent-Type: text/html; '
                    'charset=UTF-8\r\n\r\n%s"; cat >/dev/null')
            with tempfile.NamedTemporaryFile() as script:
                script.write(resp % server_name)
                script.flush()
                with tempfile.NamedTemporaryFile() as key:
                    key.write(private_key)
                    key.flush()
                    commands.copy_file_to_host(script.name,
                                               "/tmp/script1",
                                               ip,
                                               username, key.name)
            # Start netcat
            start_server = ('while true; do '
                            'sudo nc -l -p %(port)s -e sh /tmp/%(script)s; '
                            'done &')
            cmd = start_server % {'port': self.port1,
                                  'script': 'script1'}
            ssh_client.exec_command(cmd)
            if len(self.server_ips) == 1:
                # Only one server was booted: serve port2 from the same host
                # so the pool still gets two distinct members.
                with tempfile.NamedTemporaryFile() as script:
                    script.write(resp % 'server2')
                    script.flush()
                    with tempfile.NamedTemporaryFile() as key:
                        key.write(private_key)
                        key.flush()
                        commands.copy_file_to_host(script.name,
                                                   "/tmp/script2", ip,
                                                   username, key.name)
                cmd = start_server % {'port': self.port2,
                                      'script': 'script2'}
                ssh_client.exec_command(cmd)

    def _check_connection(self, check_ip, port=80):
        # Poll the given ip:port until an HTTP 200 comes back or the
        # compute ping timeout elapses.
        def try_connect(ip, port):
            try:
                resp = urllib2.urlopen("http://{0}:{1}/".format(ip, port))
                if resp.getcode() == 200:
                    return True
                return False
            except IOError:
                return False
            except urllib2.HTTPError:
                return False
        timeout = config.compute.ping_timeout
        start = time.time()
        while not try_connect(check_ip, port):
            if (time.time() - start) > timeout:
                message = "Timed out trying to connect to %s" % check_ip
                raise exceptions.TimeoutException(message)

    def _create_pool(self):
        """Create a pool with ROUND_ROBIN algorithm."""
        self.pool = super(TestLoadBalancerBasic, self)._create_pool(
            lb_method='ROUND_ROBIN',
            protocol='HTTP',
            subnet_id=self.subnet.id)
        self.assertTrue(self.pool)

    def _create_members(self):
        """
        Create two members.
        In case there is only one server, create both members with the same ip
        but with different ports to listen on.
        """
        for server_id, ip in self.server_fixed_ips.iteritems():
            if len(self.server_fixed_ips) == 1:
                member1 = self._create_member(address=ip,
                                              protocol_port=self.port1,
                                              pool_id=self.pool.id)
                member2 = self._create_member(address=ip,
                                              protocol_port=self.port2,
                                              pool_id=self.pool.id)
                self.members.extend([member1, member2])
            else:
                member = self._create_member(address=ip,
                                             protocol_port=self.port1,
                                             pool_id=self.pool.id)
                self.members.append(member)
        self.assertTrue(self.members)

    def _assign_floating_ip_to_vip(self, vip):
        # Attach a floating ip to the VIP port, keyed by vip id so that
        # _create_load_balancer can look it up afterwards.
        public_network_id = config.network.public_network_id
        port_id = vip.port_id
        floating_ip = self.create_floating_ip(vip, public_network_id,
                                              port_id=port_id)
        self.floating_ips.setdefault(vip.id, [])
        self.floating_ips[vip.id].append(floating_ip)

    def _create_load_balancer(self):
        # Build pool + members + VIP, then pick the address clients will use.
        self._create_pool()
        self._create_members()
        self.vip = self._create_vip(protocol='HTTP',
                                    protocol_port=80,
                                    subnet_id=self.subnet.id,
                                    pool_id=self.pool.id)
        self.vip.wait_for_status('ACTIVE')
        if (config.network.public_network_id and not
                config.network.tenant_networks_reachable):
            self._assign_floating_ip_to_vip(self.vip)
            self.vip_ip = self.floating_ips[
                self.vip.id][0]['floating_ip_address']
        else:
            self.vip_ip = self.vip.address
        # Currently the ovs-agent is not enforcing security groups on the
        # vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
        # However the linuxbridge-agent does, and it is necessary to add a
        # security group with a rule that allows tcp port 80 to the vip port.
        self.network_client.update_port(
            self.vip.port_id, security_groups=[self.security_group.id])

    def _check_load_balancing(self):
        """
        1. Send NUM requests on the floating ip associated with the VIP
        2. Check that the requests are shared between the two servers
        """
        self._check_connection(self.vip_ip)
        self._send_requests(self.vip_ip, ["server1", "server2"])

    def _send_requests(self, vip_ip, servers):
        # Each backend answers with its own name, so counting distinct
        # response bodies shows how requests were distributed.
        counters = dict.fromkeys(servers, 0)
        for i in range(self.num):
            try:
                server = urllib2.urlopen("http://{0}/".format(vip_ip)).read()
                counters[server] += 1
            # HTTP exception means fail of server, so don't increase counter
            # of success and continue connection tries
            except urllib2.HTTPError:
                continue
        # Assert that each member of the pool gets balanced at least once
        for member, counter in counters.iteritems():
            self.assertGreater(counter, 0, 'Member %s never balanced' % member)

    @test.services('compute', 'network')
    def test_load_balancer_basic(self):
        # End-to-end scenario: one server hosting both members, pool/vip
        # creation, then verification of round-robin distribution.
        self._create_server('server1')
        self._start_servers()
        self._create_load_balancer()
        self._check_load_balancing()
| |
import re
import sys
import copy
import socket
from datetime import datetime
from decimal import Decimal
from collections import Mapping, Container
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring
_int_types = (int, long)
class SchemaError(ValueError):
    """
    Error raised for problems in the schema definition itself, as opposed
    to the data being validated (subclass of :class:`ValueError`).
    """
class ValidationError(ValueError):
    """
    Error raised when data fails to conform to a schema during validation
    (subclass of :class:`ValueError`).
    """
class FieldValidationError(ValidationError):
    """
    A :class:`ValidationError` tied to one specific field.

    The offending field name and value are exposed through the
    ``fieldname`` and ``value`` attributes.
    """

    def __init__(self, message, fieldname, value):
        super(FieldValidationError, self).__init__(message)
        self.value = value
        self.fieldname = fieldname
def _generate_datetime_validator(format_option, dateformat_string):
def validate_format_datetime(validator, fieldname, value, format_option):
try:
datetime.strptime(value, dateformat_string)
except ValueError:
raise FieldValidationError(
"Value %(value)r of field '%(fieldname)s' is not in "
"'%(format_option)s' format" % locals(), fieldname, value)
return validate_format_datetime
# Ready-made validators for the standard date/time "format" options.
validate_format_date_time = _generate_datetime_validator('date-time',
                                                         '%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Check that *value* is a positive number ('utc-millisec' format)."""
    params = {'value': value, 'fieldname': fieldname}
    numeric_types = _int_types + (float, Decimal)
    if not isinstance(value, numeric_types):
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' "
                                   "is not a number" % params, fieldname,
                                   value)
    # 'not value > 0' (rather than 'value <= 0') also rejects NaN.
    if not value > 0:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' "
                                   " is not a positive number" % params,
                                   fieldname, value)
def validate_format_ip_address(validator, fieldname, value, format_option):
    """Check that *value* is a dotted-quad IPv4 address ('ip-address' format).

    Raises FieldValidationError when the value does not parse as an
    "X.X.X.X" IPv4 address.
    """
    try:
        socket.inet_aton(value)
        # Make sure we expect "X.X.X.X" as socket.inet_aton() converts "1"
        # to "0.0.0.1"
        ip = len(value.split('.')) == 4
    # Was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the "anything invalid is not an
    # IP" intent while letting those propagate.
    except Exception:
        ip = False
    if not ip:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s'"
                                   "is not a ip-address" % locals(), fieldname,
                                   value)
# Default mapping of JSON-schema "format" names to validator callables;
# SchemaValidator copies this table unless custom validators are supplied.
DEFAULT_FORMAT_VALIDATORS = {
    'date-time': validate_format_date_time,
    'date': validate_format_date,
    'time': validate_format_time,
    'utc-millisec': validate_format_utc_millisec,
    'ip-address': validate_format_ip_address,
}
class SchemaValidator(object):
    '''
    Validator largely based upon the JSON Schema proposal but useful for
    validating arbitrary python data structures.

    :param format_validators: optional dictionary of custom format validators
    :param required_by_default: defaults to True, set to False to make
        ``required`` schema attribute False by default.
    :param blank_by_default: defaults to False, set to True to make ``blank``
        schema attribute True by default.
    :param disallow_unknown_properties: defaults to False, set to True to
        disallow properties not listed in the schema definition
    :param apply_default_to_data: defaults to False, set to True to modify the
        data in case the schema definition includes a "default" property
    '''

    def __init__(self, format_validators=None, required_by_default=True,
                 blank_by_default=False, disallow_unknown_properties=False,
                 apply_default_to_data=False):
        if format_validators is None:
            # Copy so register_format_validator() on one instance does not
            # mutate the shared module-level table.
            format_validators = DEFAULT_FORMAT_VALIDATORS.copy()
        self._format_validators = format_validators
        self.required_by_default = required_by_default
        self.blank_by_default = blank_by_default
        self.disallow_unknown_properties = disallow_unknown_properties
        self.apply_default_to_data = apply_default_to_data

    def register_format_validator(self, format_name, format_validator_fun):
        # Add or replace the callable used for a "format" schema attribute.
        self._format_validators[format_name] = format_validator_fun

    # Type predicates, dispatched by name from validate_type().

    def validate_type_string(self, val):
        return isinstance(val, _str_type)

    def validate_type_integer(self, val):
        # type() comparison (not isinstance) deliberately rejects bools.
        return type(val) in _int_types

    def validate_type_number(self, val):
        return type(val) in _int_types + (float, Decimal,)

    def validate_type_boolean(self, val):
        return type(val) == bool

    def validate_type_object(self, val):
        # Accept any Mapping, or anything duck-typed like one.
        return isinstance(val, Mapping) or (hasattr(val, 'keys')
                                            and hasattr(val, 'items'))

    def validate_type_array(self, val):
        return isinstance(val, (list, tuple))

    def validate_type_null(self, val):
        return val is None

    def validate_type_any(self, val):
        return True

    def _error(self, desc, value, fieldname, **params):
        # Format *desc* with the offending value/fieldname and raise.
        params['value'] = value
        params['fieldname'] = fieldname
        message = desc % params
        raise FieldValidationError(message, fieldname, value)

    def _validate_unknown_properties(self, schema, data, fieldname):
        # Reject any data keys the schema does not declare.
        schema_properties = set(schema)
        data_properties = set(data)
        delta = data_properties - schema_properties
        if delta:
            unknowns = ''
            for x in delta:
                unknowns += '"%s", ' % x
            unknowns = unknowns.rstrip(", ")
            raise SchemaError('Unknown properties for field '
                              '"%(fieldname)s": %(unknowns)s' %
                              locals())

    def validate_type(self, x, fieldname, schema, fieldtype=None):
        '''
        Validates that the fieldtype specified is correct for the given
        data
        '''
        # We need to know if the field exists or if it's just Null
        fieldexists = True
        try:
            value = x[fieldname]
        except KeyError:
            fieldexists = False
            value = None
        if fieldtype and fieldexists:
            if isinstance(fieldtype, (list, tuple)):
                # Match if type matches any one of the types in the list
                datavalid = False
                errorlist = []
                for eachtype in fieldtype:
                    try:
                        self.validate_type(x, fieldname, eachtype, eachtype)
                        datavalid = True
                        break
                    except ValidationError as err:
                        errorlist.append(err)
                if not datavalid:
                    self._error("Value %(value)r for field '%(fieldname)s' "
                                "doesn't match any of %(numsubtypes)d "
                                "subtypes in %(fieldtype)s; "
                                "errorlist = %(errorlist)r",
                                value, fieldname, numsubtypes=len(fieldtype),
                                fieldtype=fieldtype, errorlist=errorlist)
            elif isinstance(fieldtype, dict):
                # A nested schema: recurse.
                try:
                    self.__validate(fieldname, x, fieldtype)
                except ValueError as e:
                    raise e
            else:
                try:
                    type_checker = getattr(self, 'validate_type_%s' %
                                           fieldtype)
                except AttributeError:
                    raise SchemaError("Field type '%s' is not supported." %
                                      fieldtype)
                if not type_checker(value):
                    self._error("Value %(value)r for field '%(fieldname)s' "
                                "is not of type %(fieldtype)s",
                                value, fieldname, fieldtype=fieldtype)

    def validate_properties(self, x, fieldname, schema, properties=None):
        '''
        Validates properties of a JSON object by processing the object's
        schema recursively
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, dict):
                if isinstance(properties, dict):
                    if self.disallow_unknown_properties:
                        self._validate_unknown_properties(properties, value,
                                                          fieldname)
                    for eachProp in properties:
                        self.__validate(eachProp, value,
                                        properties.get(eachProp))
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not an object" % fieldname)

    def validate_items(self, x, fieldname, schema, items=None):
        '''
        Validates that all items in the list for the given field match the
        given schema
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, (list, tuple)):
                if isinstance(items, (list, tuple)):
                    # Positional (tuple) validation: one schema per item.
                    if (not 'additionalItems' in schema and
                            len(items) != len(value)):
                        self._error("Length of list %(value)r for field "
                                    "'%(fieldname)s' is not equal to length "
                                    "of schema list", value, fieldname)
                    else:
                        for itemIndex in range(len(items)):
                            try:
                                self.validate(value[itemIndex],
                                              items[itemIndex])
                            except FieldValidationError as e:
                                raise type(e)("Failed to validate field '%s' "
                                              "list schema: %s" %
                                              (fieldname, e), fieldname,
                                              e.value)
                elif isinstance(items, dict):
                    # Uniform validation: every item against the same schema.
                    for eachItem in value:
                        if (self.disallow_unknown_properties and
                                'properties' in items):
                            self._validate_unknown_properties(
                                items['properties'], eachItem, fieldname)
                        try:
                            self._validate(eachItem, items)
                        except FieldValidationError as e:
                            # a bit of a hack: replace reference to _data
                            # with 'list item' so error messages make sense
                            old_error = str(e).replace("field '_data'",
                                                       'list item')
                            raise type(e)("Failed to validate field '%s' list "
                                          "schema: %s" %
                                          (fieldname, old_error), fieldname,
                                          e.value)
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not a list or an object" % fieldname)

    def validate_required(self, x, fieldname, schema, required):
        '''
        Validates that the given field is present if required is True
        '''
        # Make sure the field is present
        if fieldname not in x and required:
            self._error("Required field '%(fieldname)s' is missing",
                        None, fieldname)

    def validate_blank(self, x, fieldname, schema, blank=False):
        '''
        Validates that the given field is not blank if blank=False
        '''
        value = x.get(fieldname)
        if isinstance(value, _str_type) and not blank and not value:
            self._error("Value %(value)r for field '%(fieldname)s' cannot be "
                        "blank'", value, fieldname)

    def validate_patternProperties(self, x, fieldname, schema,
                                   patternproperties=None):
        # Validate values whose keys match each regex pattern against the
        # pattern's schema.
        if patternproperties is None:
            patternproperties = {}
        value_obj = x.get(fieldname, {})
        for pattern, schema in patternproperties.items():
            for key, value in value_obj.items():
                if re.match(pattern, key):
                    self.validate(value, schema)

    def validate_additionalItems(self, x, fieldname, schema,
                                 additionalItems=False):
        # Validate list items beyond those covered by a positional 'items'
        # schema list.
        value = x.get(fieldname)
        if not isinstance(value, (list, tuple)):
            return
        if isinstance(additionalItems, bool):
            if additionalItems or 'items' not in schema:
                return
            elif len(value) != len(schema['items']):
                self._error("Length of list %(value)r for field "
                            "'%(fieldname)s' is not equal to length of schema "
                            "list", value, fieldname)
        remaining = value[len(schema['items']):]
        if len(remaining) > 0:
            self._validate(remaining, {'items': additionalItems})

    def validate_additionalProperties(self, x, fieldname, schema,
                                      additionalProperties=None):
        '''
        Validates additional properties of a JSON object that were not
        specifically defined by the properties property
        '''
        # Shouldn't be validating additionalProperties on non-dicts
        value = x.get(fieldname)
        if not isinstance(value, dict):
            return
        # If additionalProperties is the boolean value True then we accept
        # any additional properties.
        if isinstance(additionalProperties, bool) and additionalProperties:
            return
        value = x.get(fieldname)
        if isinstance(additionalProperties, (dict, bool)):
            properties = schema.get("properties")
            if properties is None:
                properties = {}
            if value is None:
                value = {}
            for eachProperty in value:
                if eachProperty not in properties:
                    # If additionalProperties is the boolean value False
                    # then we don't accept any additional properties.
                    if (isinstance(additionalProperties, bool) and not
                            additionalProperties):
                        self._error("additional property '%(prop)s' "
                                    "not defined by 'properties' are not "
                                    "allowed in field '%(fieldname)s'",
                                    None, fieldname, prop=eachProperty)
                    self.__validate(eachProperty, value,
                                    additionalProperties)
        else:
            raise SchemaError("additionalProperties schema definition for "
                              "field '%s' is not an object" % fieldname)

    def validate_dependencies(self, x, fieldname, schema, dependencies=None):
        # If the field is present, require its declared companions.
        if x.get(fieldname) is not None:
            # handle cases where dependencies is a string or list of strings
            if isinstance(dependencies, _str_type):
                dependencies = [dependencies]
            if isinstance(dependencies, (list, tuple)):
                for dependency in dependencies:
                    if dependency not in x:
                        self._error("Field '%(dependency)s' is required by "
                                    "field '%(fieldname)s'",
                                    None, fieldname, dependency=dependency)
            elif isinstance(dependencies, dict):
                # NOTE: the version 3 spec is really unclear on what this means
                # based on the meta-schema I'm assuming that it should check
                # that if a key exists, the appropriate value exists
                for k, v in dependencies.items():
                    if k in x and v not in x:
                        self._error("Field '%(v)s' is required by field "
                                    "'%(k)s'", None, fieldname, k=k, v=v)
            else:
                raise SchemaError("'dependencies' must be a string, "
                                  "list of strings, or dict")

    def validate_minimum(self, x, fieldname, schema, minimum=None):
        '''
        Validates that the field is longer than or equal to the minimum
        length if specified
        '''
        exclusive = schema.get('exclusiveMinimum', False)
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                # BUG FIX: the original 'A and B or C' lacked parentheses, so
                # the exclusive branch was evaluated even for non-numeric
                # values (TypeError on Python 3). Group the comparisons so
                # the type guard applies to both branches.
                if (type(value) in (int, float) and
                        ((not exclusive and value < minimum) or
                         (exclusive and value <= minimum))):
                    self._error("Value %(value)r for field '%(fieldname)s' is "
                                "less than minimum value: %(minimum)f",
                                value, fieldname, minimum=minimum)

    def validate_maximum(self, x, fieldname, schema, maximum=None):
        '''
        Validates that the field is shorter than or equal to the maximum
        length if specified.
        '''
        exclusive = schema.get('exclusiveMaximum', False)
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if value is not None:
                # Same precedence fix as validate_minimum.
                if (type(value) in (int, float) and
                        ((not exclusive and value > maximum) or
                         (exclusive and value >= maximum))):
                    self._error("Value %(value)r for field '%(fieldname)s' is "
                                "greater than maximum value: %(maximum)f",
                                value, fieldname, maximum=maximum)

    def validate_maxLength(self, x, fieldname, schema, length=None):
        '''
        Validates that the value of the given field is shorter than or equal
        to the specified length
        '''
        value = x.get(fieldname)
        if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
            self._error("Length of value %(value)r for field '%(fieldname)s' "
                        "must be less than or equal to %(length)d",
                        value, fieldname, length=length)

    def validate_minLength(self, x, fieldname, schema, length=None):
        '''
        Validates that the value of the given field is longer than or equal
        to the specified length
        '''
        value = x.get(fieldname)
        if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
            self._error("Length of value %(value)r for field '%(fieldname)s' "
                        "must be greater than or equal to %(length)d",
                        value, fieldname, length=length)

    # minItems/maxItems share the length-based implementations above.
    validate_minItems = validate_minLength
    validate_maxItems = validate_maxLength

    def validate_format(self, x, fieldname, schema, format_option=None):
        '''
        Validates the format of primitive data types
        '''
        value = x.get(fieldname)
        format_validator = self._format_validators.get(format_option, None)
        if format_validator and value:
            format_validator(self, fieldname, value, format_option)
        # TODO: warn about unsupported format ?

    def validate_pattern(self, x, fieldname, schema, pattern=None):
        '''
        Validates that the given field, if a string, matches the given
        regular expression.
        '''
        value = x.get(fieldname)
        if isinstance(value, _str_type):
            if not re.match(pattern, value):
                self._error("Value %(value)r for field '%(fieldname)s' does "
                            "not match regular expression '%(pattern)s'",
                            value, fieldname, pattern=pattern)

    def validate_uniqueItems(self, x, fieldname, schema, uniqueItems=False):
        '''
        Validates that all items in an array instance MUST be unique
        (contains no two identical values).
        '''
        # If additionalProperties is the boolean value True then we accept
        # any additional properties.
        if isinstance(uniqueItems, bool) and not uniqueItems:
            return
        values = x.get(fieldname)
        if not isinstance(values, (list, tuple)):
            return
        # Unhashable items (lists/dicts) fall back to a linear scan.
        hashables = set()
        unhashables = []
        for value in values:
            if isinstance(value, (list, dict)):
                container, add = unhashables, unhashables.append
            else:
                container, add = hashables, hashables.add
            if value in container:
                self._error(
                    "Value %(value)r for field '%(fieldname)s' is not unique",
                    value, fieldname)
            else:
                add(value)

    def validate_enum(self, x, fieldname, schema, options=None):
        '''
        Validates that the value of the field is equal to one of the
        specified option values
        '''
        value = x.get(fieldname)
        if value is not None:
            if not isinstance(options, Container):
                # BUG FIX: the original passed the tuple as a second
                # positional argument instead of %-formatting the message.
                raise SchemaError("Enumeration %r for field '%s' must be a "
                                  "container" % (options, fieldname))
            if value not in options:
                self._error("Value %(value)r for field '%(fieldname)s' is not "
                            "in the enumeration: %(options)r",
                            value, fieldname, options=options)

    def validate_title(self, x, fieldname, schema, title=None):
        # 'title' is annotation-only; just check that it is a string.
        if not isinstance(title, (_str_type, type(None))):
            raise SchemaError("The title for field '%s' must be a string" %
                              fieldname)

    def validate_description(self, x, fieldname, schema, description=None):
        # 'description' is annotation-only; just check that it is a string.
        if not isinstance(description, (_str_type, type(None))):
            raise SchemaError("The description for field '%s' must be a string"
                              % fieldname)

    def validate_divisibleBy(self, x, fieldname, schema, divisibleBy=None):
        # Numeric values must be an exact multiple of divisibleBy.
        value = x.get(fieldname)
        if not self.validate_type_number(value):
            return
        if divisibleBy == 0:
            raise SchemaError("'%r' <- divisibleBy can not be 0" % schema)
        if value % divisibleBy != 0:
            self._error("Value %(value)r field '%(fieldname)s' is not "
                        "divisible by '%(divisibleBy)s'.",
                        x.get(fieldname), fieldname, divisibleBy=divisibleBy)

    def validate_disallow(self, x, fieldname, schema, disallow=None):
        '''
        Validates that the value of the given field does not match the
        disallowed type.
        '''
        try:
            self.validate_type(x, fieldname, schema, disallow)
        except ValidationError:
            return
        self._error("Value %(value)r of type %(disallow)s is disallowed for "
                    "field '%(fieldname)s'",
                    x.get(fieldname), fieldname, disallow=disallow)

    def validate(self, data, schema):
        '''
        Validates a piece of json data against the provided json-schema.
        '''
        self._validate(data, schema)

    def _validate(self, data, schema):
        # Wrap the data in a synthetic '_data' field so every value is
        # validated through the same field-based machinery.
        self.__validate("_data", {"_data": data}, schema)

    def __validate(self, fieldname, data, schema):
        # Core dispatcher: every key in the (defaults-augmented) schema is
        # routed to the matching validate_<key> method, if one exists.
        if schema is not None:
            if not isinstance(schema, dict):
                raise SchemaError(
                    "Type for field '%s' must be 'dict', got: '%s'" %
                    (fieldname, type(schema).__name__))
            newschema = copy.copy(schema)
            if 'optional' in schema:
                raise SchemaError('The "optional" attribute has been replaced'
                                  ' by "required"')
            if 'requires' in schema:
                raise SchemaError('The "requires" attribute has been replaced'
                                  ' by "dependencies"')
            if 'required' not in schema:
                newschema['required'] = self.required_by_default
            if 'blank' not in schema:
                newschema['blank'] = self.blank_by_default
            for schemaprop in newschema:
                validatorname = "validate_" + schemaprop
                validator = getattr(self, validatorname, None)
                if validator:
                    validator(data, fieldname, schema,
                              newschema.get(schemaprop))
            if self.apply_default_to_data and 'default' in schema:
                try:
                    # Sanity-check the default itself against the declared
                    # type before injecting it into the data.
                    self.validate_type(
                        x={'_ds': schema['default']},
                        fieldname='_ds',
                        schema=schema,
                        fieldtype=schema['type'] if 'type' in schema else None
                    )
                except FieldValidationError as exc:
                    raise SchemaError(exc)
                if fieldname not in data:
                    data[fieldname] = schema['default']
        return data
# Public API of this module.
__all__ = ['SchemaValidator', 'FieldValidationError']
| |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the PointNet networks.
@inproceedings{qi2017pointnet,
title={Pointnet: Deep learning on point sets
for 3d classification and segmentation},
author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
booktitle={Proceedings of the IEEE conference on computer vision and pattern
recognition},
pages={652--660},
year={2017}}
NOTE: scheduling of batchnorm momentum currently not available in keras. However
experimentally, using the batch norm from Keras resulted in better test accuracy
(+1.5%) than the author's [custom batch norm
version](https://github.com/charlesq34/pointnet/blob/master/utils/tf_util.py)
even when coupled with batchnorm momentum decay. Further, note the author's
version is actually performing a "global normalization", as mentioned in the
[tf.nn.moments documentation]
(https://www.tensorflow.org/api_docs/python/tf/nn/moments).
This shorthand notation is used throughout this module:
`B`: Number of elements in a batch.
`N`: The number of points in the point set.
`D`: Number of dimensions (e.g. 2 for 2D, 3 for 3D).
`C`: The number of feature channels.
"""
from typing import Optional
import tensorflow as tf
from tensorflow_graphics.util import export_api
class PointNetConv2Layer(tf.keras.layers.Layer):
  """The 2D convolution layer used by the feature encoder in PointNet."""

  def __init__(self, channels, momentum):
    """Constructs a Conv2 layer.

    Note:
      Differently from the standard Keras Conv2 layer, the order of ops is:
      1. fully connected layer
      2. batch normalization layer
      3. ReLU activation unit

    Args:
      channels: the number of generated features.
      momentum: the momentum of the batch normalization layer.
    """
    super(PointNetConv2Layer, self).__init__()
    self.channels = channels
    self.momentum = momentum

  def build(self, input_shape: tf.Tensor):
    """Builds the layer with a specified input_shape."""
    # The (1, 1) kernel makes this an independent fully connected layer
    # applied to each point separately.
    self.conv = tf.keras.layers.Conv2D(
        self.channels, (1, 1), input_shape=input_shape)
    self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)

  def call(self,
           inputs: tf.Tensor,
           training: Optional[bool] = None) -> tf.Tensor:  # pylint: disable=arguments-differ
    """Executes the convolution.

    Args:
      inputs: a dense tensor of size `[B, N, 1, D]`.
      training: flag to control batch normalization update statistics.

    Returns:
      Tensor with shape `[B, N, 1, C]`.
    """
    return tf.nn.relu(self.bn(self.conv(inputs), training))
class PointNetDenseLayer(tf.keras.layers.Layer):
  """The fully connected layer used by the classification head in pointnet.

  Note:
    Differently from the standard Keras Conv2 layer, the order of ops is:
    1. fully connected layer
    2. batch normalization layer
    3. ReLU activation unit
  """

  def __init__(self, channels, momentum):
    """Constructs a dense (fully connected) layer.

    Args:
      channels: the number of generated features.
      momentum: the momentum of the batch normalization layer.
    """
    super(PointNetDenseLayer, self).__init__()
    self.momentum = momentum
    self.channels = channels

  def build(self, input_shape: tf.Tensor):
    """Builds the layer with a specified input_shape."""
    self.dense = tf.keras.layers.Dense(self.channels, input_shape=input_shape)
    self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)

  def call(self,
           inputs: tf.Tensor,
           training: Optional[bool] = None) -> tf.Tensor:  # pylint: disable=arguments-differ
    """Executes the fully connected layer (dense -> batch norm -> ReLU).

    Args:
      inputs: a dense tensor of size `[B, D]`.
      training: flag to control batch normalization update statistics.

    Returns:
      Tensor with shape `[B, C]`.
    """
    return tf.nn.relu(self.bn(self.dense(inputs), training))
class VanillaEncoder(tf.keras.layers.Layer):
  """The Vanilla PointNet feature encoder.

  Consists of five conv2 layers with (64,64,64,128,1024) output channels.

  Note:
    PointNetConv2Layer are used instead of tf.keras.layers.Conv2D.
    https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py
  """

  def __init__(self, momentum: float = .5):
    """Constructs a VanillaEncoder keras layer.

    Args:
      momentum: the momentum used for the batch normalization layer.
    """
    super(VanillaEncoder, self).__init__()
    self.conv1 = PointNetConv2Layer(64, momentum)
    self.conv2 = PointNetConv2Layer(64, momentum)
    self.conv3 = PointNetConv2Layer(64, momentum)
    self.conv4 = PointNetConv2Layer(128, momentum)
    self.conv5 = PointNetConv2Layer(1024, momentum)

  def call(self,
           inputs: tf.Tensor,
           training: Optional[bool] = None) -> tf.Tensor:  # pylint: disable=arguments-differ
    """Computes the PointNet features.

    Args:
      inputs: a dense tensor of size `[B, N, D]`.
      training: flag to control batch normalization update statistics.

    Returns:
      Tensor with shape `[B, C=1024]`.
    """
    x = tf.expand_dims(inputs, axis=2)  # [B,N,1,D]
    x = self.conv1(x, training)  # [B,N,1,64]
    x = self.conv2(x, training)  # [B,N,1,64]
    x = self.conv3(x, training)  # [B,N,1,64]
    x = self.conv4(x, training)  # [B,N,1,128]
    x = self.conv5(x, training)  # [B,N,1,1024]
    x = tf.math.reduce_max(input_tensor=x, axis=1)  # [B,1,1024]
    # BUG FIX: squeeze only the singleton point axis. A plain tf.squeeze(x)
    # removes *all* size-1 axes and therefore also dropped the batch axis
    # whenever B == 1, returning [1024] instead of [1, 1024].
    return tf.squeeze(x, axis=1)  # [B,1024]
class ClassificationHead(tf.keras.layers.Layer):
  """The PointNet classification head.

  The head stacks two PointNetDenseLayer layers (512 and 256 channels),
  a dropout layer (drop rate=30%), and a final linear dense layer that
  produces the logits of the num_classes classes.
  """

  def __init__(self,
               num_classes: int = 40,
               momentum: float = 0.5,
               dropout_rate: float = 0.3):
    """Constructor.

    Args:
      num_classes: the number of classes to classify.
      momentum: the momentum used for the batch normalization layer.
      dropout_rate: the dropout rate for fully connected layer
    """
    super(ClassificationHead, self).__init__()
    self.dense1 = PointNetDenseLayer(512, momentum)
    self.dense2 = PointNetDenseLayer(256, momentum)
    self.dropout = tf.keras.layers.Dropout(dropout_rate)
    self.dense3 = tf.keras.layers.Dense(num_classes, activation="linear")

  def call(self,
           inputs: tf.Tensor,
           training: Optional[bool] = None) -> tf.Tensor:  # pylint: disable=arguments-differ
    """Computes the classification logits given features (note: no softmax).

    Args:
      inputs: tensor of points with shape `[B, D]`.
      training: flag for batch normalization and dropout training.

    Returns:
      Tensor with shape `[B, num_classes]`.
    """
    hidden = self.dense1(inputs, training)  # [B, 512]
    hidden = self.dense2(hidden, training)  # [B, 256]
    hidden = self.dropout(hidden, training)  # [B, 256]
    return self.dense3(hidden)  # [B, num_classes]
class PointNetVanillaClassifier(tf.keras.layers.Layer):
  """The PointNet 'Vanilla' classifier (i.e. without spatial transformer)."""

  def __init__(self,
               num_classes: int = 40,
               momentum: float = .5,
               dropout_rate: float = .3):
    """Constructor.

    Args:
      num_classes: the number of classes to classify.
      momentum: the momentum used for the batch normalization layer.
      dropout_rate: the dropout rate for the classification head.
    """
    super(PointNetVanillaClassifier, self).__init__()
    self.encoder = VanillaEncoder(momentum)
    self.classifier = ClassificationHead(
        num_classes=num_classes, momentum=momentum, dropout_rate=dropout_rate)

  def call(self,
           points: tf.Tensor,
           training: Optional[bool] = None) -> tf.Tensor:  # pylint: disable=arguments-differ
    """Computes the classification logits of a point set.

    Args:
      points: a tensor of points with shape `[B, D]`
      training: for batch normalization and dropout training.

    Returns:
      Tensor with shape `[B, num_classes]`
    """
    features = self.encoder(points, training)  # (B, 1024)
    return self.classifier(features, training)  # (B, num_classes)

  @staticmethod
  def loss(labels: tf.Tensor,
           logits: tf.Tensor) -> tf.Tensor:
    """The classification model training loss.

    Note:
      see tf.nn.sparse_softmax_cross_entropy_with_logits

    Args:
      labels: a tensor with shape `[B,]`
      logits: a tensor with shape `[B, num_classes]`

    Returns:
      A scalar tensor holding the mean cross-entropy over the batch.
    """
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits
    per_example_loss = cross_entropy(labels, logits)
    return tf.reduce_mean(input_tensor=per_example_loss)
# API contains all public functions and classes.
# NOTE(review): `export_api` is bound earlier in the module (not visible in
# this chunk) — presumably tensorflow_graphics.util.export_api; confirm there
# that it enumerates this module's public symbols.
__all__ = export_api.get_functions_and_classes()
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'HostVirtualDNSDriver'
]
import sys
# Prefer simplejson (a faster drop-in replacement on some interpreters) and
# fall back to the stdlib json module when it is not installed.
try:
    import simplejson as json
except ImportError:
    # Bug fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt
    # and any unrelated error raised while importing simplejson.
    import json
from libcloud.utils.py3 import httplib
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.common.hostvirtual import HostVirtualResponse
from libcloud.common.hostvirtual import HostVirtualConnection
from libcloud.compute.drivers.hostvirtual import API_ROOT
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
# Extra-parameter keys accepted by create_record/update_record; anything else
# passed via `extra` is dropped by merge_valid_keys.
VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl']
class HostVirtualDNSResponse(HostVirtualResponse):
    def parse_error(self):
        """Translate HTTP 404 responses into the matching libcloud DNS
        exception (zone vs. record is decided by the connection context);
        defer every other error to the parent class and return the body.
        """
        context = self.connection.context
        if int(self.status) == httplib.NOT_FOUND:
            resource = context['resource']
            if resource == 'zone':
                raise ZoneDoesNotExistError(
                    value=self.parse_body()['error']['message'],
                    driver=self, zone_id=context['id'])
            if resource == 'record':
                raise RecordDoesNotExistError(
                    value=self.parse_body()['error']['message'],
                    driver=self, record_id=context['id'])
        super(HostVirtualDNSResponse, self).parse_error()
        return self.body
class HostVirtualDNSConnection(HostVirtualConnection):
    # Use the DNS-aware response class so 404s raise
    # ZoneDoesNotExistError / RecordDoesNotExistError instead of generic errors.
    responseCls = HostVirtualDNSResponse
class HostVirtualDNSDriver(DNSDriver):
    """libcloud DNS driver for the Host Virtual (vr.org) API.

    All calls go through endpoints rooted at ``API_ROOT`` and return
    :class:`Zone` / :class:`Record` objects.
    """
    type = Provider.HOSTVIRTUAL
    name = 'Host Virtual DNS'
    website = 'http://www.vr.org/'
    connectionCls = HostVirtualDNSConnection

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        # Bug fix: RecordType.NS was previously mapped to the string 'SPF',
        # so every NS record was created/updated as an SPF record on the API.
        RecordType.NS: 'NS',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def __init__(self, key, secure=True, host=None, port=None):
        """
        :param key: API key or username used to connect to the service.
        :param secure: whether to use HTTPS.
        :param host: override hostname used for connections.
        :param port: override port used for connections.
        """
        super(HostVirtualDNSDriver, self).__init__(key=key, secure=secure,
                                                   host=host, port=port)

    def _to_zones(self, items):
        """Convert a list of API zone dicts into :class:`Zone` objects."""
        return [self._to_zone(item) for item in items]

    def _to_zone(self, item):
        """Convert a single API zone dict into a :class:`Zone`."""
        extra = {}
        if 'records' in item:
            extra['records'] = item['records']
        # The API reports master zones as 'NATIVE'; libcloud calls them
        # 'master'.
        if item['type'] == 'NATIVE':
            item['type'] = 'master'
        zone = Zone(id=item['id'], domain=item['name'],
                    type=item['type'], ttl=item['ttl'],
                    driver=self, extra=extra)
        return zone

    def _to_records(self, items, zone=None):
        """Convert a list of API record dicts into :class:`Record` objects."""
        return [self._to_record(item=item, zone=zone) for item in items]

    def _to_record(self, item, zone=None):
        """Convert a single API record dict into a :class:`Record`."""
        extra = {'ttl': item['ttl']}
        record_type = self._string_to_record_type(item['type'])
        record = Record(id=item['id'], name=item['name'],
                        type=record_type, data=item['content'],
                        zone=zone, driver=self, extra=extra)
        return record

    def list_zones(self):
        """Return all zones for this account.

        :rtype: ``list`` of :class:`Zone`
        """
        result = self.connection.request(
            API_ROOT + '/dns/zones/').object
        return self._to_zones(result)

    def list_records(self, zone):
        """Return all records belonging to ``zone``.

        A zone without records makes the API answer 404; that case is
        returned as an empty list instead of an exception.

        :rtype: ``list`` of :class:`Record`
        """
        params = {'id': zone.id}
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        try:
            result = self.connection.request(
                API_ROOT + '/dns/records/', params=params).object
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            if 'Not Found: No Records Found' in e.value:
                return []
            raise e
        return self._to_records(items=result, zone=zone)

    def get_zone(self, zone_id):
        """Fetch a single zone by id.

        :raises: :class:`ZoneDoesNotExistError` when the id is unknown.
        :rtype: :class:`Zone`
        """
        params = {'id': zone_id}
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        result = self.connection.request(
            API_ROOT + '/dns/zone/', params=params).object
        if 'id' not in result:
            raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)
        return self._to_zone(result)

    def get_record(self, zone_id, record_id):
        """Fetch a single record by zone id and record id.

        :raises: :class:`RecordDoesNotExistError` when the id is unknown.
        :rtype: :class:`Record`
        """
        zone = self.get_zone(zone_id=zone_id)
        params = {'id': record_id}
        self.connection.set_context({'resource': 'record', 'id': record_id})
        result = self.connection.request(
            API_ROOT + '/dns/record/', params=params).object
        if 'id' not in result:
            raise RecordDoesNotExistError(value='',
                                          driver=self, record_id=record_id)
        return self._to_record(item=result, zone=zone)

    def delete_zone(self, zone):
        """Delete ``zone``; returns ``True`` on success."""
        params = {'zone_id': zone.id}
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        result = self.connection.request(
            API_ROOT + '/dns/zone/', params=params, method='DELETE').object
        return bool(result)

    def delete_record(self, record):
        """Delete ``record``; returns ``True`` on success."""
        params = {'id': record.id}
        self.connection.set_context({'resource': 'record', 'id': record.id})
        result = self.connection.request(
            API_ROOT + '/dns/record/', params=params, method='DELETE').object
        return bool(result)

    def create_zone(self, domain, type='NATIVE', ttl=None, extra=None):
        """Create a new zone.

        The libcloud 'master'/'slave' names are translated to the API's
        'NATIVE'/'SLAVE' before the request is issued.

        :rtype: :class:`Zone`
        """
        if type == 'master':
            type = 'NATIVE'
        elif type == 'slave':
            type = 'SLAVE'
        params = {'name': domain, 'type': type, 'ttl': ttl}
        result = self.connection.request(
            API_ROOT + '/dns/zone/',
            data=json.dumps(params), method='POST').object
        extra = {
            'soa': result['soa'],
            'ns': result['ns']
        }
        zone = Zone(id=result['id'], domain=domain,
                    type=type, ttl=ttl, extra=extra, driver=self)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """Update ``zone`` in place and return an updated copy of it.

        Only the attributes passed as non-``None`` are sent to the API.

        :rtype: :class:`Zone`
        """
        params = {'id': zone.id}
        if domain:
            params['name'] = domain
        if type:
            params['type'] = type
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        self.connection.request(API_ROOT + '/dns/zone/',
                                data=json.dumps(params), method='PUT').object
        updated_zone = get_new_obj(
            obj=zone, klass=Zone,
            attributes={
                'domain': domain,
                'type': type,
                'ttl': ttl,
                'extra': extra
            })
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """Create a record inside ``zone``.

        Valid ``extra`` keys (see VALID_RECORD_EXTRA_PARAMS) are merged into
        the request payload by merge_valid_keys, which also updates ``params``
        in place before it is serialized.

        :rtype: :class:`Record`
        """
        params = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'domain_id': zone.id,
            'content': data
        }
        merged = merge_valid_keys(
            params=params,
            valid_keys=VALID_RECORD_EXTRA_PARAMS,
            extra=extra
        )
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        result = self.connection.request(
            API_ROOT + '/dns/record/',
            data=json.dumps(params), method='POST').object
        record = Record(id=result['id'], name=name,
                        type=type, data=data,
                        extra=merged, zone=zone, driver=self)
        return record

    def update_record(self, record, name=None, type=None,
                      data=None, extra=None):
        """Update ``record`` and return an updated copy of it.

        Only the attributes passed as non-``None`` are sent to the API.

        :rtype: :class:`Record`
        """
        params = {
            'domain_id': record.zone.id,
            'record_id': record.id
        }
        if name:
            params['name'] = name
        if data:
            params['content'] = data
        if type is not None:
            params['type'] = self.RECORD_TYPE_MAP[type]
        merged = merge_valid_keys(
            params=params,
            valid_keys=VALID_RECORD_EXTRA_PARAMS,
            extra=extra
        )
        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.request(API_ROOT + '/dns/record/',
                                data=json.dumps(params), method='PUT').object
        updated_record = get_new_obj(
            obj=record, klass=Record, attributes={
                'name': name, 'data': data,
                'type': type,
                'extra': merged
            })
        return updated_record
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import SecurityCenterConfiguration
from .operations import ComplianceResultsOperations
from .operations import PricingsOperations
from .operations import AdvancedThreatProtectionOperations
from .operations import DeviceSecurityGroupsOperations
from .operations import IotSecuritySolutionOperations
from .operations import IotSecuritySolutionAnalyticsOperations
from .operations import IotSecuritySolutionsAnalyticsAggregatedAlertOperations
from .operations import IotSecuritySolutionsAnalyticsRecommendationOperations
from .operations import LocationsOperations
from .operations import Operations
from .operations import TasksOperations
from .operations import AutoProvisioningSettingsOperations
from .operations import CompliancesOperations
from .operations import InformationProtectionPoliciesOperations
from .operations import SecurityContactsOperations
from .operations import WorkspaceSettingsOperations
from .operations import RegulatoryComplianceStandardsOperations
from .operations import RegulatoryComplianceControlsOperations
from .operations import RegulatoryComplianceAssessmentsOperations
from .operations import SubAssessmentsOperations
from .operations import AutomationsOperations
from .operations import AlertsSuppressionRulesOperations
from .operations import ServerVulnerabilityAssessmentOperations
from .operations import AssessmentsMetadataOperations
from .operations import AssessmentsOperations
from .operations import AdaptiveApplicationControlsOperations
from .operations import AdaptiveNetworkHardeningsOperations
from .operations import AllowedConnectionsOperations
from .operations import TopologyOperations
from .operations import JitNetworkAccessPoliciesOperations
from .operations import DiscoveredSecuritySolutionsOperations
from .operations import SecuritySolutionsReferenceDataOperations
from .operations import ExternalSecuritySolutionsOperations
from .operations import SecureScoresOperations
from .operations import SecureScoreControlsOperations
from .operations import SecureScoreControlDefinitionsOperations
from .operations import SecuritySolutionsOperations
from .operations import ConnectorsOperations
from .operations import SqlVulnerabilityAssessmentScansOperations
from .operations import SqlVulnerabilityAssessmentScanResultsOperations
from .operations import SqlVulnerabilityAssessmentBaselineRulesOperations
from .operations import AlertsOperations
from .operations import SettingsOperations
from .operations import IngestionSettingsOperations
from .operations import SoftwareInventoriesOperations
from .. import models
class SecurityCenter(object):
    """API spec for Microsoft.Security (Azure Security Center) resource provider.

    One attribute is exposed per API operation group (``compliance_results``,
    ``pricings``, ``alerts``, ``settings``, ...); each is an instance of the
    matching class from ``.operations`` and shares this client's pipeline,
    configuration and (de)serializers.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Azure subscription ID.
    :type subscription_id: str
    :param asc_location: The location where ASC stores the data of the subscription. can be retrieved from Get locations.
    :type asc_location: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        asc_location: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public ARM endpoint when no base_url is supplied.
        base_url = base_url or 'https://management.azure.com'
        self._config = SecurityCenterConfiguration(credential, subscription_id, asc_location, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Every operation group is constructed with the same four pipeline
        # pieces, so build them once and fan out.
        op_args = (self._client, self._config, self._serialize, self._deserialize)
        self.compliance_results = ComplianceResultsOperations(*op_args)
        self.pricings = PricingsOperations(*op_args)
        self.advanced_threat_protection = AdvancedThreatProtectionOperations(*op_args)
        self.device_security_groups = DeviceSecurityGroupsOperations(*op_args)
        self.iot_security_solution = IotSecuritySolutionOperations(*op_args)
        self.iot_security_solution_analytics = IotSecuritySolutionAnalyticsOperations(*op_args)
        self.iot_security_solutions_analytics_aggregated_alert = IotSecuritySolutionsAnalyticsAggregatedAlertOperations(*op_args)
        self.iot_security_solutions_analytics_recommendation = IotSecuritySolutionsAnalyticsRecommendationOperations(*op_args)
        self.locations = LocationsOperations(*op_args)
        self.operations = Operations(*op_args)
        self.tasks = TasksOperations(*op_args)
        self.auto_provisioning_settings = AutoProvisioningSettingsOperations(*op_args)
        self.compliances = CompliancesOperations(*op_args)
        self.information_protection_policies = InformationProtectionPoliciesOperations(*op_args)
        self.security_contacts = SecurityContactsOperations(*op_args)
        self.workspace_settings = WorkspaceSettingsOperations(*op_args)
        self.regulatory_compliance_standards = RegulatoryComplianceStandardsOperations(*op_args)
        self.regulatory_compliance_controls = RegulatoryComplianceControlsOperations(*op_args)
        self.regulatory_compliance_assessments = RegulatoryComplianceAssessmentsOperations(*op_args)
        self.sub_assessments = SubAssessmentsOperations(*op_args)
        self.automations = AutomationsOperations(*op_args)
        self.alerts_suppression_rules = AlertsSuppressionRulesOperations(*op_args)
        self.server_vulnerability_assessment = ServerVulnerabilityAssessmentOperations(*op_args)
        self.assessments_metadata = AssessmentsMetadataOperations(*op_args)
        self.assessments = AssessmentsOperations(*op_args)
        self.adaptive_application_controls = AdaptiveApplicationControlsOperations(*op_args)
        self.adaptive_network_hardenings = AdaptiveNetworkHardeningsOperations(*op_args)
        self.allowed_connections = AllowedConnectionsOperations(*op_args)
        self.topology = TopologyOperations(*op_args)
        self.jit_network_access_policies = JitNetworkAccessPoliciesOperations(*op_args)
        self.discovered_security_solutions = DiscoveredSecuritySolutionsOperations(*op_args)
        self.security_solutions_reference_data = SecuritySolutionsReferenceDataOperations(*op_args)
        self.external_security_solutions = ExternalSecuritySolutionsOperations(*op_args)
        self.secure_scores = SecureScoresOperations(*op_args)
        self.secure_score_controls = SecureScoreControlsOperations(*op_args)
        self.secure_score_control_definitions = SecureScoreControlDefinitionsOperations(*op_args)
        self.security_solutions = SecuritySolutionsOperations(*op_args)
        self.connectors = ConnectorsOperations(*op_args)
        self.sql_vulnerability_assessment_scans = SqlVulnerabilityAssessmentScansOperations(*op_args)
        self.sql_vulnerability_assessment_scan_results = SqlVulnerabilityAssessmentScanResultsOperations(*op_args)
        self.sql_vulnerability_assessment_baseline_rules = SqlVulnerabilityAssessmentBaselineRulesOperations(*op_args)
        self.alerts = AlertsOperations(*op_args)
        self.settings = SettingsOperations(*op_args)
        self.ingestion_settings = IngestionSettingsOperations(*op_args)
        self.software_inventories = SoftwareInventoriesOperations(*op_args)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        url_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'ascLocation': self._serialize.url("self._config.asc_location", self._config.asc_location, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **url_arguments)
        should_stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=should_stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        await self._client.close()

    async def __aenter__(self) -> "SecurityCenter":
        """Enter the async context of the underlying pipeline client."""
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        """Exit the async context of the underlying pipeline client."""
        await self._client.__aexit__(*exc_details)
| |
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# CreateSamples
#
class CreateSamples(ScriptedLoadableModule):
  """Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    # Fill in the module metadata shown in Slicer's module panel.
    module = self.parent
    module.title = "CreateSamples"  # TODO: make human readable by adding spaces
    module.categories = ["Examples"]
    module.dependencies = []
    module.contributors = ["John Doe (AnyWare Corp.)"]  # TODO: replace with "Firstname Lastname (Organization)"
    module.helpText = """
This is an example of scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
    # TODO: replace with organization, grant and thanks.
    module.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
"""
#
# CreateSamplesWidget
#
class CreateSamplesWidget(ScriptedLoadableModuleWidget):
    """GUI: a volume-name field, three "from nothing" creation buttons, and a
    reference-volume selector with its own creation button.

    Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setup(self):
        """Build and wire all widgets; called once when the module is shown."""
        ScriptedLoadableModuleWidget.setup(self)
        # Instantiate and connect widgets ...
        generalParametersCollapsibleButton = ctk.ctkCollapsibleButton()
        generalParametersCollapsibleButton.text = "General parameters"
        self.layout.addWidget(generalParametersCollapsibleButton)
        # Layout within the dummy collapsible button
        hlayout = qt.QHBoxLayout(generalParametersCollapsibleButton)
        self.label=qt.QLabel("Volume Name:")
        hlayout.addWidget(self.label)
        # The name typed here is used for every created node; all buttons are
        # disabled while it is empty (see onLabelChanged).
        self.volumeNameLine=qt.QLineEdit()
        hlayout.addWidget(self.volumeNameLine)
        self.volumeNameLine.connect('textChanged(QString)', self.onLabelChanged)
        #
        # Parameters Area
        #
        parametersCollapsibleButton = ctk.ctkCollapsibleButton()
        parametersCollapsibleButton.text = "Sample From Nothing"
        self.layout.addWidget(parametersCollapsibleButton)
        # Layout within the dummy collapsible button
        parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
        #
        # Sample Label map Button
        #
        self.labelButton = qt.QPushButton("Create Sample Label Map")
        self.labelButton.toolTip = "Create sample label map."
        self.labelButton.enabled = True
        parametersFormLayout.addRow(self.labelButton)
        #
        # Sample Volume Button
        #
        self.volumeButton = qt.QPushButton("Create Sample Volume")
        self.volumeButton.toolTip = "Create sample volume."
        self.volumeButton.enabled = True
        parametersFormLayout.addRow(self.volumeButton)
        #
        # Sample model Button
        #
        self.modelButton = qt.QPushButton("Create Sample Model")
        self.modelButton.toolTip = "Create sample Model."
        self.modelButton.enabled = True
        parametersFormLayout.addRow(self.modelButton)
        # connections
        self.labelButton.connect('clicked(bool)', self.onLabelButton)
        self.volumeButton.connect('clicked(bool)', self.onVolumeButton)
        self.modelButton.connect('clicked(bool)', self.onModelButton)
        # Second section: create a volume reusing an existing one as geometry
        # reference.
        parametersCollapsibleButton2 = ctk.ctkCollapsibleButton()
        parametersCollapsibleButton2.text = "Sample From example"
        self.layout.addWidget(parametersCollapsibleButton2)
        # Layout within the dummy collapsible button
        parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton2)
        #
        # input volume selector
        #
        self.inputSelector = slicer.qMRMLNodeComboBox()
        self.inputSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
        # Keep the following line as an example
        #self.inputSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", 0 )
        self.inputSelector.selectNodeUponCreation = True
        self.inputSelector.addEnabled = False
        self.inputSelector.removeEnabled = False
        self.inputSelector.noneEnabled = True
        self.inputSelector.showHidden = False
        self.inputSelector.showChildNodeTypes = False
        self.inputSelector.setMRMLScene( slicer.mrmlScene )
        self.inputSelector.setToolTip( "reference image." )
        parametersFormLayout.addRow("Reference Volume: ", self.inputSelector)
        self.inputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSampleFromReferenceSelect)
        #
        # Sample From reference Button
        #
        self.referenceButton = qt.QPushButton("Create Sample Model from a reference")
        self.referenceButton.toolTip = "Create sample Model from a reference."
        parametersFormLayout.addRow(self.referenceButton)
        self.referenceButton.connect('clicked(bool)', self.onReferenceButton)
        # Add vertical spacer
        self.layout.addStretch(1)
        # Refresh Apply button state
        self.onLabelChanged(self.volumeNameLine.text)

    def ButtonsClickable(self, value):
        """Enable/disable the three creation buttons, then refresh the
        reference button's own enabled state."""
        self.labelButton.setEnabled(value)
        self.volumeButton.setEnabled(value)
        self.modelButton.setEnabled(value)
        self.onSampleFromReferenceSelect()

    def cleanup(self):
        pass

    def onLabelChanged(self,myString):
        # Buttons are usable only while a volume name is present.
        if not myString=='':
            self.ButtonsClickable(True)
        else:
            self.ButtonsClickable(False)

    def onSampleFromReferenceSelect(self):
        # Requires both a selected reference node and a non-empty name.
        self.referenceButton.enabled = self.inputSelector.currentNode() and self.volumeNameLine.text != ''

    def onLabelButton(self):
        logic = CreateSamplesLogic()
        logic.createVolume(self.volumeNameLine.text, labelmap=True)

    def onVolumeButton(self):
        logic = CreateSamplesLogic()
        logic.createVolume(self.volumeNameLine.text)

    def onModelButton(self):
        logic = CreateSamplesLogic()
        logic.createModel()

    def onReferenceButton(self):
        # NOTE(review): this creates a label map (labelmap=True) although the
        # button text says "Model" — confirm intended behavior.
        logic = CreateSamplesLogic()
        logic.createVolume(self.volumeNameLine.text, labelmap=True, reference=self.inputSelector.currentNode())
#
# CreateSamplesLogic
#
class CreateSamplesLogic(ScriptedLoadableModuleLogic):
    """Creates sample nodes (scalar volume, label map, model) for demos/tests.

    Implements the actual computation done by the module so other python code
    can import this class and use the functionality without the Widget.
    Uses ScriptedLoadableModuleLogic base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setVolumeAsBackgroundImage(self, node):
        """Show *node* as the background volume in every slice view.

        :param node: volume node to display
        :return: True (kept for backward compatibility with existing callers)
        """
        count = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLSliceCompositeNode')
        for n in range(count):
            compNode = slicer.mrmlScene.GetNthNodeByClass(n, 'vtkMRMLSliceCompositeNode')
            compNode.SetBackgroundVolumeID(node.GetID())
        return True

    # Create sample labelmap with same geometry as input volume
    def createVolume(self, volumeName, labelmap=False, reference=None):
        """Create a sample volume whose central region is filled with 1s.

        :param volumeName: name for the new node; must not be empty
        :param labelmap: if True the node is displayed as a label map
        :param reference: optional vtkMRMLScalarVolumeNode whose geometry
            (spacing/origin/image data) is copied; otherwise a small fixed
            geometry is used
        :raises Exception: when volumeName is empty
        :return: True on success
        """
        if volumeName == '':
            raise Exception('The name of the output volume cannot be empty')
        value = 1
        sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
        sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
        imageData = vtk.vtkImageData()
        if reference is None:  # fixed: identity test instead of '== None'
            mySpacing = (0.5, 0.6, 0.5)
            myOrigin = (20, 50, 50)
            # Do NOT set the spacing and the origin of imageData (vtkImageData).
            # The spacing and the origin should only be set on the
            # vtkMRMLScalarVolumeNode.
            imageData.SetDimensions(30, 5, 15)
            imageData.AllocateScalars(vtk.VTK_DOUBLE, 1)
            sampleVolumeNode.SetSpacing(mySpacing[0], mySpacing[1], mySpacing[2])
            sampleVolumeNode.SetOrigin(myOrigin[0], myOrigin[1], myOrigin[2])
        else:
            sampleVolumeNode.Copy(reference)
            imageData.DeepCopy(reference.GetImageData())
        sampleVolumeNode.SetName(volumeName)
        sampleVolumeNode.SetAndObserveImageData(imageData)
        # Fill the middle half of each axis with `value`, the rest with 0.
        # Bounds are hoisted out of the loops (they were recomputed per voxel);
        # '//' keeps the original integer-division semantics on Python 2 and 3.
        extent = imageData.GetExtent()
        xLo, xHi = extent[1] // 4, (extent[1] // 4) * 3
        yLo, yHi = extent[3] // 4, (extent[3] // 4) * 3
        zLo, zHi = extent[5] // 4, (extent[5] // 4) * 3
        for x in range(extent[0], extent[1] + 1):
            for y in range(extent[2], extent[3] + 1):
                for z in range(extent[4], extent[5] + 1):
                    inside = (xLo <= x <= xHi and
                              yLo <= y <= yHi and
                              zLo <= z <= zHi)
                    imageData.SetScalarComponentFromDouble(
                        x, y, z, 0, value if inside else 0)
        # Display the new node, either as a labelmap or a greyscale volume.
        if labelmap:
            sampleVolumeNode.SetLabelMap(1)
            labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
            slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
            colorNode = slicer.util.getNode('GenericAnatomyColors')
            labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
            labelmapVolumeDisplayNode.VisibilityOn()
            sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
        else:
            volumeDisplayNode = slicer.vtkMRMLScalarVolumeDisplayNode()
            slicer.mrmlScene.AddNode(volumeDisplayNode)
            colorNode = slicer.util.getNode('Grey')
            volumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
            volumeDisplayNode.VisibilityOn()
            sampleVolumeNode.SetAndObserveDisplayNodeID(volumeDisplayNode.GetID())
        self.setVolumeAsBackgroundImage(sampleVolumeNode)
        return True

    def createModel(self):
        """Placeholder — model creation is not implemented yet."""
        # print() call form works on both Python 2 and 3 (was a py2-only statement)
        print("model")
class CreateSamplesTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setUp(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        # NOTE(review): no test cases are invoked after setUp() — add them here.
        self.setUp()
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from future import standard_library
standard_library.install_aliases()
import logging
import re
import fnmatch
import configparser
import math
import os
from urllib.parse import urlparse
import warnings
import boto
from boto.s3.connection import S3Connection, NoHostProvided
from boto.sts import STSConnection
# Route boto's internal logging through the stdlib logging module, then cap
# its verbosity at INFO to silence the DEBUG chatter set_stream_logger enables.
boto.set_stream_logger('boto')
logging.getLogger("boto").setLevel(logging.INFO)
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
Config = configparser.ConfigParser()
if Config.read(config_file_name): # pragma: no cover
sections = Config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = Config.get(cred_section, key_id_option)
secret_key = Config.get(cred_section, secret_key_option)
calling_format = None
if Config.has_option(cred_section, 'calling_format'):
calling_format = Config.get(cred_section, 'calling_format')
except:
logging.warning("Option Error in parsing s3 config file")
raise
return (access_key, secret_key, calling_format)
class S3Hook(BaseHook):
    """
    Interact with S3. This class is a wrapper around the boto library.

    Credentials come from the Airflow connection's extras: either inline
    (``aws_access_key_id``/``aws_secret_access_key``), via an
    ``s3_config_file``, or by deferring to boto's own resolution. When
    ``aws_account_id``/``role_arn`` is configured, an IAM role is assumed
    through STS for cross-account access.
    """
    def __init__(
            self,
            s3_conn_id='s3_default'):
        self.s3_conn_id = s3_conn_id
        self.s3_conn = self.get_connection(s3_conn_id)
        self.extra_params = self.s3_conn.extra_dejson
        self.profile = self.extra_params.get('profile')
        self.calling_format = None
        self.s3_host = None
        self._creds_in_conn = 'aws_secret_access_key' in self.extra_params
        self._creds_in_config_file = 's3_config_file' in self.extra_params
        self._default_to_boto = False
        if 'host' in self.extra_params:
            self.s3_host = self.extra_params['host']
        if self._creds_in_conn:
            self._a_key = self.extra_params['aws_access_key_id']
            self._s_key = self.extra_params['aws_secret_access_key']
            if 'calling_format' in self.extra_params:
                self.calling_format = self.extra_params['calling_format']
        elif self._creds_in_config_file:
            self.s3_config_file = self.extra_params['s3_config_file']
            # The format can be None and will default to boto in the parser
            self.s3_config_format = self.extra_params.get('s3_config_format')
        else:
            # No explicit credentials: let boto resolve them itself.
            self._default_to_boto = True
        # STS support for cross account resource access
        self._sts_conn_required = ('aws_account_id' in self.extra_params or
                                   'role_arn' in self.extra_params)
        if self._sts_conn_required:
            self.role_arn = (self.extra_params.get('role_arn') or
                             "arn:aws:iam::" +
                             self.extra_params['aws_account_id'] +
                             ":role/" +
                             self.extra_params['aws_iam_role'])
        self.connection = self.get_conn()

    def __getstate__(self):
        # The live boto connection is not picklable; drop it and rebuild on
        # unpickling (see __setstate__).
        pickled_dict = dict(self.__dict__)
        del pickled_dict['connection']
        return pickled_dict

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.__dict__['connection'] = self.get_conn()

    def _parse_s3_url(self, s3url):
        """Deprecated alias kept for backward compatibility."""
        warnings.warn(
            'Please note: S3Hook._parse_s3_url() is now '
            'S3Hook.parse_s3_url() (no leading underscore).',
            DeprecationWarning)
        return self.parse_s3_url(s3url)

    @staticmethod
    def parse_s3_url(s3url):
        """
        Split an ``s3://bucket/key`` URL into a (bucket_name, key) tuple.

        :raises AirflowException: when the URL carries no bucket.
        """
        parsed_url = urlparse(s3url)
        if not parsed_url.netloc:
            raise AirflowException('Please provide a bucket_name')
        bucket_name = parsed_url.netloc
        key = parsed_url.path.strip('/')
        return (bucket_name, key)

    def get_conn(self):
        """
        Returns the boto S3Connection object.
        """
        if self._default_to_boto:
            return S3Connection(profile_name=self.profile)
        a_key = s_key = None
        if self._creds_in_config_file:
            a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
                                                            self.s3_config_format,
                                                            self.profile)
        elif self._creds_in_conn:
            a_key = self._a_key
            s_key = self._s_key
            calling_format = self.calling_format
        s3_host = self.s3_host
        if calling_format is None:
            calling_format = 'boto.s3.connection.SubdomainCallingFormat'
        if s3_host is None:
            s3_host = NoHostProvided
        if self._sts_conn_required:
            # Exchange the base credentials for temporary role credentials.
            sts_connection = STSConnection(aws_access_key_id=a_key,
                                           aws_secret_access_key=s_key,
                                           profile_name=self.profile)
            assumed_role_object = sts_connection.assume_role(
                role_arn=self.role_arn,
                role_session_name="Airflow_" + self.s3_conn_id
            )
            creds = assumed_role_object.credentials
            connection = S3Connection(
                aws_access_key_id=creds.access_key,
                aws_secret_access_key=creds.secret_key,
                calling_format=calling_format,
                security_token=creds.session_token
            )
        else:
            connection = S3Connection(aws_access_key_id=a_key,
                                      aws_secret_access_key=s_key,
                                      calling_format=calling_format,
                                      host=s3_host,
                                      profile_name=self.profile)
        return connection

    def get_credentials(self):
        """
        Return the (access_key, secret_key) pair, or (None, None) when the
        hook defaults to boto's own credential resolution.
        """
        # fixed: previously raised NameError when neither a config file nor
        # inline credentials were configured.
        a_key = s_key = None
        if self._creds_in_config_file:
            a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
                                                            self.s3_config_format,
                                                            self.profile)
        elif self._creds_in_conn:
            a_key = self._a_key
            s_key = self._s_key
        return a_key, s_key

    def check_for_bucket(self, bucket_name):
        """
        Check if bucket_name exists.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :return: True when the bucket exists
        """
        return self.connection.lookup(bucket_name) is not None

    def get_bucket(self, bucket_name):
        """
        Returns a boto.s3.bucket.Bucket object

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        return self.connection.get_bucket(bucket_name)

    def list_keys(self, bucket_name, prefix='', delimiter=''):
        """
        Lists keys in a bucket under prefix and not containing delimiter

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :param prefix: a key prefix
        :type prefix: str
        :param delimiter: the delimiter marks key hierarchy.
        :type delimiter: str
        :return: list of key names, or None when nothing matched
        """
        b = self.get_bucket(bucket_name)
        keylist = list(b.list(prefix=prefix, delimiter=delimiter))
        return [k.name for k in keylist] if keylist != [] else None

    def list_prefixes(self, bucket_name, prefix='', delimiter=''):
        """
        Lists prefixes in a bucket under prefix

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :param prefix: a key prefix
        :type prefix: str
        :param delimiter: the delimiter marks key hierarchy.
        :type delimiter: str
        :return: list of prefix names, or None when nothing matched
        """
        b = self.get_bucket(bucket_name)
        plist = b.list(prefix=prefix, delimiter=delimiter)
        prefix_names = [p.name for p in plist
                        if isinstance(p, boto.s3.prefix.Prefix)]
        return prefix_names if prefix_names != [] else None

    def check_for_key(self, key, bucket_name=None):
        """
        Checks that a key exists in a bucket. When bucket_name is omitted,
        ``key`` is parsed as a full ``s3://`` URL.
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        return bucket.get_key(key) is not None

    def get_key(self, key, bucket_name=None):
        """
        Returns a boto.s3.key.Key object

        :param key: the path to the key
        :type key: str
        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        return bucket.get_key(key)

    def check_for_wildcard_key(self,
                               wildcard_key, bucket_name=None, delimiter=''):
        """
        Checks that a key matching a wildcard expression exists in a bucket
        """
        return self.get_wildcard_key(wildcard_key=wildcard_key,
                                     bucket_name=bucket_name,
                                     delimiter=delimiter) is not None

    def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
        """
        Returns the first boto.s3.key.Key matching the wildcard expression.

        :param wildcard_key: the path to the key, possibly with ``*`` wildcards
        :type wildcard_key: str
        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
        bucket = self.get_bucket(bucket_name)
        # Everything before the first '*' can be used as an exact S3 prefix.
        prefix = re.split(r'[*]', wildcard_key, 1)[0]
        klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
        if not klist:
            return None
        key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
        return bucket.get_key(key_matches[0]) if key_matches else None

    def check_for_prefix(self, bucket_name, prefix, delimiter):
        """
        Checks that a prefix exists in a bucket
        """
        # fixed: guard the empty prefix (prefix[-1] raised IndexError)
        if prefix and prefix[-1] != delimiter:
            prefix += delimiter
        # Look the prefix up one hierarchy level above it.
        prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
        previous_level = prefix_split[0]
        plist = self.list_prefixes(bucket_name, previous_level, delimiter)
        return False if plist is None else prefix in plist

    def load_file(
            self,
            filename,
            key,
            bucket_name=None,
            replace=False,
            multipart_bytes=5 * (1024 ** 3),
            encrypt=False):
        """
        Loads a local file to S3

        :param filename: name of the file to load.
        :type filename: str
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists. If replace is False and the key exists, an
            error will be raised.
        :type replace: bool
        :param multipart_bytes: If provided, the file is uploaded in parts of
            this size (minimum 5242880). The default value is 5GB, since S3
            cannot accept non-multipart uploads for files larger than 5GB. If
            the file is smaller than the specified limit, the option will be
            ignored.
        :type multipart_bytes: int
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        key_obj = bucket.get_key(key)
        if not replace and key_obj:
            raise ValueError("The key {key} already exists.".format(
                **locals()))
        key_size = os.path.getsize(filename)
        if multipart_bytes and key_size >= multipart_bytes:
            # multipart upload
            from filechunkio import FileChunkIO
            mp = bucket.initiate_multipart_upload(key_name=key,
                                                  encrypt_key=encrypt)
            total_chunks = int(math.ceil(key_size / multipart_bytes))
            sent_bytes = 0
            try:
                for chunk in range(total_chunks):
                    offset = chunk * multipart_bytes
                    # fixed: local no longer shadows the builtin ``bytes``
                    chunk_size = min(multipart_bytes, key_size - offset)
                    with FileChunkIO(
                            filename, 'r', offset=offset, bytes=chunk_size) as fp:
                        logging.info('Sending chunk {c} of {tc}...'.format(
                            c=chunk + 1, tc=total_chunks))
                        mp.upload_part_from_file(fp, part_num=chunk + 1)
            except BaseException:
                # fixed: explicit instead of a bare ``except:``; cancel the
                # multipart upload on ANY failure (incl. KeyboardInterrupt)
                # so S3 does not keep charging for the orphaned parts.
                mp.cancel_upload()
                raise
            mp.complete_upload()
        else:
            # regular upload
            if not key_obj:
                key_obj = bucket.new_key(key_name=key)
            key_size = key_obj.set_contents_from_filename(filename,
                                                          replace=replace,
                                                          encrypt_key=encrypt)
        logging.info("The key {key} now contains"
                     " {key_size} bytes".format(**locals()))

    def load_string(self, string_data,
                    key, bucket_name=None,
                    replace=False,
                    encrypt=False):
        """
        Loads a string to S3

        This is provided as a convenience to drop a string in S3. It uses the
        boto infrastructure to ship a file to s3. It is currently using only
        a single part upload, and should not be used to move large files.

        :param string_data: string to set as content for the key.
        :type string_data: str
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists
        :type replace: bool
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        key_obj = bucket.get_key(key)
        if not replace and key_obj:
            raise ValueError("The key {key} already exists.".format(
                **locals()))
        if not key_obj:
            key_obj = bucket.new_key(key_name=key)
        key_size = key_obj.set_contents_from_string(string_data,
                                                    replace=replace,
                                                    encrypt_key=encrypt)
        logging.info("The key {key} now contains"
                     " {key_size} bytes".format(**locals()))
| |
from __future__ import with_statement, absolute_import
import re
import sys
from functools import wraps, partial
from math import ceil
from operator import itemgetter
from threading import Lock
from time import time
import sqlalchemy
from flask import _app_ctx_stack, abort, url_for
from flask.signals import Namespace
from sqlalchemy import orm
from sqlalchemy.engine.url import make_url
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.session import Session
# Extension version (not the SQLAlchemy version).
__version__ = '0.3.7'
# Sessions/engine lookups are scoped to the Flask application context.
connection_stack = _app_ctx_stack
# Splits CamelCase class names when deriving snake_case table names.
_camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')
# Blinker signals fired around session commit/rollback.
_signals = Namespace()
models_committed = _signals.signal('models-committed')
before_models_committed = _signals.signal('before-models-committed')
class _SQLAlchemyState(object):
    """Remembers configuration for the (db, app) tuple."""

    def __init__(self, db, app):
        self.db = db
        self.app = app
        # bind name -> _EngineConnector cache, filled lazily by get_engine().
        self.connectors = {}
def _include_sqlalchemy(obj):
    """Mirror the public sqlalchemy / sqlalchemy.orm namespaces onto *obj*,
    then install the Table factory and query-class-aware relationship
    wrappers."""
    for module in (sqlalchemy, sqlalchemy.orm):
        for name in module.__all__:
            if hasattr(obj, name):
                continue
            setattr(obj, name, getattr(module, name))
    # Note: obj.Table does not attempt to be a SQLAlchemy Table class.
    obj.Table = _make_table(obj)
    #obj.mapper = sqlalchemy.orm.mapper()#signalling_mapper \\ perhaps some addition to the mapper allowed
    for attr in ('relationship', 'relation', 'dynamic_loader'):
        setattr(obj, attr, _wrap_with_default_query_class(getattr(obj, attr)))
def _make_table(db):
    """Return a ``Table`` factory bound to *db* that fills in the shared
    metadata and a default ``bind_key`` info entry."""
    def _make_table(*args, **kwargs):
        # Support Table('name', Column(...), ...) without explicit metadata.
        if len(args) > 1 and isinstance(args[1], db.Column):
            args = args[:1] + (db.metadata,) + args[1:]
        info = kwargs.pop('info', None) or {}
        info.setdefault('bind_key', None)
        kwargs['info'] = info
        return sqlalchemy.Table(*args, **kwargs)
    return _make_table
def _set_default_query_class(d):
if 'query_class' not in d:
d['query_class'] = BaseQuery
def _wrap_with_default_query_class(fn):
    """Wrap *fn* so both its options and any backref options default to
    BaseQuery."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        _set_default_query_class(kwargs)
        if 'backref' in kwargs:
            backref = kwargs['backref']
            # A plain string backref gets an empty options dict appended.
            if isinstance(backref, basestring):
                backref = (backref, {})
            _set_default_query_class(backref[1])
        return fn(*args, **kwargs)
    return wrapper
class _SignallingSession(Session):
    """Session subclass that records model changes and honours __bind_key__."""

    def __init__(self, db, autocommit=False, autoflush=False, **options):
        self.app = db.get_app()
        # pk tuple -> (instance, operation); consumed by the commit signals.
        self._model_changes = {}
        Session.__init__(self, autocommit=autocommit, autoflush=autoflush,
                         bind=db.engine,
                         binds=db.get_binds(self.app), **options)

    def get_bind(self, mapper, clause=None):
        # mapper is None if someone tries to just get a connection
        if mapper is not None:
            info = getattr(mapper.mapped_table, 'info', {})
            bind_key = info.get('bind_key')
            if bind_key is not None:
                # Route this table to the engine registered for its bind key.
                state = get_state(self.app)
                return state.db.get_engine(self.app, bind=bind_key)
        return Session.get_bind(self, mapper, clause)
class _SessionSignalEvents(object):
    """Connects session commit/rollback events to the module-level signals."""

    def register(self):
        """Attach the squll_* listeners to the global Session class."""
        for event_name, handler in (
                ('before_commit', self.squll_before_commit),
                ('after_commit', self.squll_after_commit),
                ('after_rollback', self.squll_after_rollback)):
            listen(Session, event_name, handler)

    @staticmethod
    def squll_before_commit(session):
        changes = session._model_changes
        if changes:
            before_models_committed.send(session.app, changes=changes.values())

    @staticmethod
    def squll_after_commit(session):
        changes = session._model_changes
        if changes:
            models_committed.send(session.app, changes=changes.values())
            # Commit succeeded; start tracking afresh.
            changes.clear()

    @staticmethod
    def squll_after_rollback(session):
        # Rolled-back changes never happened as far as the signals care.
        session._model_changes.clear()
class _MapperSignalEvents(object):
    """Records insert/update/delete operations on mapped instances into the
    owning session's ``_model_changes`` dict."""

    def __init__(self, mapper):
        self.mapper = mapper

    def register(self):
        for event_name, handler in (
                ('after_delete', self.squll_after_delete),
                ('after_insert', self.squll_after_insert),
                ('after_update', self.squll_after_update)):
            listen(self.mapper, event_name, handler)

    def squll_after_delete(self, mapper, connection, target):
        self._record(mapper, target, 'delete')

    def squll_after_insert(self, mapper, connection, target):
        self._record(mapper, target, 'insert')

    def squll_after_update(self, mapper, connection, target):
        self._record(mapper, target, 'update')

    @staticmethod
    def _record(mapper, target, operation):
        # Key by primary key so repeated changes collapse to the last one.
        pk = tuple(mapper.primary_key_from_instance(target))
        orm.object_session(target)._model_changes[pk] = (target, operation)
class _BoundDeclarativeMeta(DeclarativeMeta):
    """Declarative metaclass that derives __tablename__ from the class name
    and applies __bind_key__ to the resulting table."""

    def __new__(cls, name, bases, d):
        tablename = d.get('__tablename__')
        # generate a table name automatically if it's missing and the
        # class dictionary declares a primary key. We cannot always
        # attach a primary key to support model inheritance that does
        # not use joins. We also don't want a table name if a whole
        # table is defined
        if not tablename and d.get('__table__') is None and \
                _defines_primary_key(d):
            def _join(match):
                # Turn each CamelCase run into a '_'-prefixed lowercase chunk.
                word = match.group()
                if len(word) > 1:
                    return ('_%s_%s' % (word[:-1], word[-1])).lower()
                return '_' + word.lower()
            d['__tablename__'] = _camelcase_re.sub(_join, name).lstrip('_')
        return DeclarativeMeta.__new__(cls, name, bases, d)

    def __init__(self, name, bases, d):
        # __bind_key__ on the model class routes its table to a named engine.
        bind_key = d.pop('__bind_key__', None)
        DeclarativeMeta.__init__(self, name, bases, d)
        if bind_key is not None:
            self.__table__.info['bind_key'] = bind_key
def get_state(app):
    """Return the _SQLAlchemyState stored on *app*, asserting init_app() ran."""
    assert 'sqlalchemy' in app.extensions, \
        'The sqlalchemy extension was not registered to the current ' \
        'application. Please make sure to call init_app() first.'
    return app.extensions['sqlalchemy']
class Pagination(object):
    """Describes one page of a paginated query result."""

    def __init__(self, query, page, endpoint, per_page, total, items):
        self.query = query          # originating query (may be None)
        self.page = page            # current page number, 1-based
        self.endpoint = endpoint    # optional endpoint used for URL building
        self.per_page = per_page    # items per page
        self.total = total          # total number of matching rows
        self.items = items          # the rows on this page

    def call_endpoint(self, which_page):
        """Build a URL for *which_page* via the configured endpoint, if any."""
        if not self.endpoint:
            return None
        return url_for(endpoint=self.endpoint, page=which_page)

    @property
    def pages(self):
        """Total number of pages."""
        return int(ceil(self.total / float(self.per_page)))

    def _jump(self, target_page, error_out):
        # prev()/next() both need the originating query to re-run paginate.
        assert self.query is not None, 'a query object is required ' \
            'for this method to work'
        return self.query.paginate(target_page, self.endpoint,
                                   self.per_page, error_out)

    def prev(self, error_out=False):
        """Pagination object for the previous page."""
        return self._jump(self.page - 1, error_out)

    @property
    def prev_num(self):
        return self.page - 1

    @property
    def has_prev(self):
        return self.page > 1

    def next(self, error_out=False):
        """Pagination object for the next page."""
        return self._jump(self.page + 1, error_out)

    @property
    def has_next(self):
        return self.page < self.pages

    @property
    def next_num(self):
        return self.page + 1

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a pagination widget, with None marking gaps
        between the left edge, the window around the current page, and the
        right edge."""
        last = 0
        for num in xrange(1, self.pages + 1):
            on_left_edge = num <= left_edge
            in_window = (self.page - left_current - 1 < num <
                         self.page + right_current)
            on_right_edge = num > self.pages - right_edge
            if on_left_edge or in_window or on_right_edge:
                if last + 1 != num:
                    yield None
                yield num
                last = num
class BaseQuery(orm.Query):
    """Default query class adding Flask-style 404 helpers and pagination."""

    def get_or_404(self, ident):
        """Like get(), but aborts with 404 when no row matches *ident*."""
        result = self.get(ident)
        if result is None:
            abort(404)
        return result

    def first_or_404(self):
        """Like first(), but aborts with 404 on an empty result."""
        result = self.first()
        if result is None:
            abort(404)
        return result

    def paginate(self, page, endpoint=None, per_page=20, error_out=True):
        """Return a Pagination for *page*; with error_out, 404s on invalid or
        empty pages beyond the first."""
        if error_out and page < 1:
            abort(404)
        offset = (page - 1) * per_page
        items = self.limit(per_page).offset(offset).all()
        if error_out and page != 1 and not items:
            abort(404)
        # Skip the extra COUNT query when page one already holds everything.
        if page == 1 and len(items) < per_page:
            total = len(items)
        else:
            total = self.order_by(None).count()
        return Pagination(self, page, endpoint, per_page, total, items)
class Model(object):
    """Baseclass for custom user models."""

    #: the query class used. The :attr:`query` attribute is an instance
    #: of this class. By default a :class:`BaseQuery` is used.
    query_class = BaseQuery

    #: an instance of :attr:`query_class`. Can be used to query the
    #: database for instances of this model. Replaced with a _QueryProperty
    #: descriptor by Squll.make_declarative_base().
    query = None
class _EngineConnector(object):
    """Lazily creates — and caches — the engine for one (app, bind) pair."""

    def __init__(self, sa, app, bind=None):
        self._sa = sa
        self._app = app
        self._engine = None
        # (uri, echo) pair the cached engine was built for; used to detect
        # config changes that require a rebuild.
        self._connected_for = None
        self._bind = bind
        self._lock = Lock()

    def get_uri(self):
        """Return the database URI for this connector's bind."""
        if self._bind is None:
            return self._app.config['SQLALCHEMY_DATABASE_URI']
        binds = self._app.config.get('SQLALCHEMY_BINDS') or ()
        assert self._bind in binds, \
            'Bind %r is not specified. Set it in the SQLALCHEMY_BINDS ' \
            'configuration variable' % self._bind
        return binds[self._bind]

    def get_engine(self):
        """Return the engine, rebuilding only when the URI/echo config changed."""
        with self._lock:
            uri = self.get_uri()
            echo = self._app.config['SQLALCHEMY_ECHO']
            if (uri, echo) == self._connected_for:
                return self._engine
            info = make_url(uri)
            options = {'convert_unicode': True}
            self._sa.apply_pool_defaults(self._app, options)
            #self._sa.apply_driver_hacks(self._app, info, options)
            # NOTE(review): _record_queries and _ConnectionDebugProxy are
            # defined elsewhere in this module (not visible in this chunk).
            if _record_queries(self._app):
                options['proxy'] = _ConnectionDebugProxy(self._app.import_name)
            if echo:
                options['echo'] = True
            self._engine = rv = sqlalchemy.create_engine(info, **options)
            self._connected_for = (uri, echo)
            return rv
def _defines_primary_key(d):
    """Figures out if the given dictionary defines a primary key column."""
    columns = (v for v in d.itervalues() if isinstance(v, sqlalchemy.Column))
    return any(column.primary_key for column in columns)
class _QueryProperty(object):
    """Descriptor serving Model.query as a session-bound query instance."""

    def __init__(self, sa):
        self.sa = sa

    def __get__(self, obj, type):
        try:
            mapper = orm.class_mapper(type)
            if mapper:
                return type.query_class(mapper, session=self.sa.session())
        except UnmappedClassError:
            # Accessed on a class that is not mapped (e.g. an abstract base).
            return None
class Squll(object):
def __init__(self, app=None,
             use_native_unicode=True,
             session_options=None):
    # NOTE(review): use_native_unicode is accepted but never used here.
    if session_options is None:
        session_options = {}
    # Scope sessions to the current application context by default.
    session_options.setdefault(
        'scopefunc', connection_stack.__ident_func__)
    self.session = self.create_scoped_session(session_options)
    self.Model = self.make_declarative_base()
    self._engine_lock = Lock()
    if app is not None:
        self.app = app
        self.init_app(app)
    else:
        self.app = None
    # Mirror the sqlalchemy/sqlalchemy.orm namespaces onto this object and
    # hook the change-tracking signal listeners up.
    _include_sqlalchemy(self)
    _MapperSignalEvents(self.mapper).register()
    _SessionSignalEvents().register()
    self.Query = BaseQuery
@property
def metadata(self):
    """Returns the MetaData shared by all declarative models."""
    return self.Model.metadata
def create_scoped_session(self, options=None):
    """Create a scoped _SignallingSession; 'scopefunc' in *options* controls
    the session scope, remaining options go to the Session constructor."""
    if options is None:
        options = {}
    scopefunc = options.pop('scopefunc', None)
    session_factory = partial(_SignallingSession, self, **options)
    return orm.scoped_session(session_factory, scopefunc=scopefunc)
def make_declarative_base(self):
    """Create the declarative Model base with the bound metaclass and a
    session-aware ``query`` descriptor."""
    base = declarative_base(cls=Model, name='Model',
                            metaclass=_BoundDeclarativeMeta)
    base.query = _QueryProperty(self)
    return base
def init_app(self, app):
    """Register this extension on *app* and install config defaults."""
    app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite://')
    app.config.setdefault('SQLALCHEMY_BINDS', None)
    app.config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)
    app.config.setdefault('SQLALCHEMY_ECHO', False)
    app.config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)
    app.config.setdefault('SQLALCHEMY_POOL_SIZE', None)
    app.config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)
    app.config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)
    app.config.setdefault('SQLALCHEMY_COMMIT_ON_TEARDOWN', False)
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    app.extensions['sqlalchemy'] = _SQLAlchemyState(self, app)
    teardown = app.teardown_appcontext

    @teardown
    def shutdown_session(response_or_exc):
        # Optionally commit (only on clean teardown), then always remove the
        # scoped session at the end of each application context.
        if app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']:
            if response_or_exc is None:
                self.session.commit()
        self.session.remove()
        return response_or_exc
def apply_pool_defaults(self, app, options):
def _setdefault(optionkey, configkey):
value = app.config[configkey]
if value is not None:
options[optionkey] = value
_setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')
_setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')
_setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')
@property
def engine(self):
    """The engine for the application bound to the current context."""
    app = self.get_app()
    return self.get_engine(app)
def make_connector(self, app, bind=None):
    """Create a connector that lazily builds the engine for ``bind``."""
    return _EngineConnector(self, app, bind)
def get_engine(self, app, bind=None):
    """Return the engine for ``bind``, creating and caching it if needed."""
    with self._engine_lock:
        state = get_state(app)
        connector = state.connectors.get(bind)
        if connector is None:
            # First request for this bind: build and cache a connector.
            state.connectors[bind] = connector = \
                self.make_connector(app, bind)
        return connector.get_engine()
def get_app(self, reference_app=None):
    """Resolve the Flask application to operate on.

    Preference order: the explicitly passed app, the app this extension
    was constructed with, then the app of the current application
    context.  Raises RuntimeError when none is available.
    """
    if reference_app is not None:
        return reference_app
    if self.app is not None:
        return self.app
    context = connection_stack.top
    if context is not None:
        return context.app
    raise RuntimeError('application not registered on db '
                       'instance and no application bound '
                       'to current context')
def get_tables_for_bind(self, bind=None):
    """Returns a list of all tables relevant for a bind."""
    return [
        table for table in self.Model.metadata.tables.itervalues()
        if table.info.get('bind_key') == bind
    ]
def get_binds(self, app=None):
    """Map every known table to the engine it should use.

    The result is suitable for ``sessionmaker(binds=db.get_binds(app))``.
    """
    app = self.get_app(app)
    bind_names = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
    mapping = {}
    for name in bind_names:
        engine = self.get_engine(app, name)
        for table in self.get_tables_for_bind(name):
            mapping[table] = engine
    return mapping
def _execute_for_all_tables(self, app, bind, operation):
    """Run a MetaData operation ('create_all' etc.) for the given binds.

    ``bind`` may be '__all__' (default bind plus every configured bind),
    a single bind name, None, or an iterable of bind names.
    """
    app = self.get_app(app)

    if bind == '__all__':
        bind_names = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
    elif isinstance(bind, basestring):
        bind_names = [bind]
    else:
        bind_names = bind

    # The bound method is the same for every bind, so look it up once.
    metadata_op = getattr(self.Model.metadata, operation)
    for name in bind_names:
        metadata_op(bind=self.get_engine(app, name),
                    tables=self.get_tables_for_bind(name))
def create_all(self, bind='__all__', app=None):
    """Create all tables, by default on every configured bind."""
    self._execute_for_all_tables(app, bind, 'create_all')
def drop_all(self, bind='__all__', app=None):
    """Drop all tables, by default on every configured bind."""
    self._execute_for_all_tables(app, bind, 'drop_all')
def reflect(self, bind='__all__', app=None):
    """Reflect tables from the database, by default on every bind."""
    self._execute_for_all_tables(app, bind, 'reflect')
def __repr__(self):
    """Debug representation showing the configured database URI."""
    app = self.app
    if app is None:
        # Fall back to the app of the current application context, if any.
        ctx = connection_stack.top
        if ctx is not None:
            app = ctx.app
    return '<%s engine=%r>' % (
        self.__class__.__name__,
        app and app.config['SQLALCHEMY_DATABASE_URI'] or None
    )
# Debugging/testing aid: indirection over the timer function used to
# timestamp recorded queries (see _ConnectionDebugProxy below).
_timer = time
def get_debug_queries():
    """Return the queries recorded on the current context, or [] if none."""
    ctx = connection_stack.top
    return getattr(ctx, 'sqlalchemy_queries', [])
def _record_queries(app):
    """Decide whether queries should be recorded for ``app``.

    Debug mode always records; otherwise an explicit
    SQLALCHEMY_RECORD_QUERIES setting wins, falling back to TESTING.
    """
    if app.debug:
        return True
    record = app.config['SQLALCHEMY_RECORD_QUERIES']
    if record is not None:
        return record
    return bool(app.config.get('TESTING'))
class _ConnectionDebugProxy(ConnectionProxy):
    """Helps debugging the database.

    Records each executed statement (with parameters, timing and the
    calling application context) on the current application context.
    """

    def __init__(self, import_name):
        self.app_package = import_name

    def cursor_execute(self, execute, cursor, statement, parameters,
                       context, executemany):
        started = _timer()
        try:
            return execute(cursor, statement, parameters, context)
        finally:
            # Record timing even when the statement raised.
            ctx = connection_stack.top
            if ctx is not None:
                recorded = getattr(ctx, 'sqlalchemy_queries', None)
                if recorded is None:
                    recorded = []
                    setattr(ctx, 'sqlalchemy_queries', recorded)
                recorded.append(_DebugQueryTuple((
                    statement, parameters, started, _timer(),
                    _calling_context(self.app_package))))
class _DebugQueryTuple(tuple):
    """A recorded query: (statement, parameters, start, end, context)."""

    statement = property(itemgetter(0))
    parameters = property(itemgetter(1))
    start_time = property(itemgetter(2))
    end_time = property(itemgetter(3))
    context = property(itemgetter(4))

    @property
    def duration(self):
        # Wall-clock time the statement took, in the units of _timer().
        return self.end_time - self.start_time

    def __repr__(self):
        return '<query statement="%s" parameters=%r duration=%.03f>' % (
            self.statement,
            self.parameters,
            self.duration,
        )
def _calling_context(app_path):
    """Walk the stack outwards and describe the innermost frame that lives
    in the package ``app_path``, as 'filename:lineno (funcname)'."""
    frame = sys._getframe(1)
    while frame.f_back is not None:
        module = frame.f_globals.get('__name__')
        if module and (module == app_path or
                       module.startswith(app_path + '.')):
            return '%s:%s (%s)' % (
                frame.f_code.co_filename,
                frame.f_lineno,
                frame.f_code.co_name,
            )
        frame = frame.f_back
    return '<unknown>'
| |
"""
Module containing MPG Ranch NFC coarse classifier, version 3.0.
An NFC coarse classifier classifies an unclassified clip as a `'Call'`
if it appears to be a nocturnal flight call, or as a `'Noise'` otherwise.
It does not classify a clip that has already been classified, whether
manually or automatically.
This classifier was trained on clips created by the Old Bird Tseep
and Thrush Redux 1.1 detectors from MPG Ranch recordings collected in
the fall of 2017.
"""
from collections import defaultdict
import logging
import numpy as np
import resampy
import tensorflow as tf
from vesper.command.annotator import Annotator
from vesper.django.app.models import AnnotationInfo
from vesper.singleton.clip_manager import clip_manager
from vesper.util.settings import Settings
import vesper.django.app.model_utils as model_utils
import vesper.mpg_ranch.nfc_coarse_classifier_3_0.classifier_utils as \
classifier_utils
import vesper.mpg_ranch.nfc_coarse_classifier_3_0.dataset_utils as \
dataset_utils
import vesper.util.open_mp_utils as open_mp_utils
import vesper.util.signal_utils as signal_utils
import vesper.util.yaml_utils as yaml_utils
_EVALUATION_MODE_ENABLED = False
'''
This classifier can run in one of two modes, *normal mode* and
*evaluation mode*. In normal mode, it annotates only unclassified clips,
assigning to each a "Classification" annotation value of either "Call"
or "Noise".
In evaluation mode, the classifier classifies every clip whose clip type
(e.g. "Tseep" or "Thrush") it recognizes and that already has a
classification that is "Noise" or starts with "Call" or "XCall".
The new classification is a function of both the existing classification
and the *normal classification* that the classifier would assign to the
clip in normal mode if it had no existing classification. The new
classifications are as follows (where the classification pairs are
(existing classification, normal classification)):
(Noise, Noise) -> Noise (i.e. no change)
(Noise, Call) -> FP
(Call*, Call) -> Call* (i.e. no change)
(Call*, Noise) -> FN* (i.e. only coarse part changes)
(XCall*, Call) -> XCallP* (i.e. only coarse part changes)
(XCall*, Noise) -> XCallN* (i.e. only coarse part changes)
This reclassifies clips for which the normal classification differs from
the existing classification in such a way that important sets of clips
(i.e. false positives, false negatives, excluded call positives, and
excluded call negatives) can subsequently be viewed in clip albums.
'''
class Classifier(Annotator):
    """Coarse classifier that labels clips as 'Call' or 'Noise'.

    Delegates to one `_Classifier` per recognized clip type ('Tseep' and
    'Thrush'); clips of other types are left untouched.  In evaluation
    mode (see `_EVALUATION_MODE_ENABLED`) already-classified clips are
    reclassified to expose classification errors.
    """

    extension_name = 'MPG Ranch NFC Coarse Classifier 3.0'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        open_mp_utils.work_around_multiple_copies_issue()
        # Suppress TensorFlow INFO and DEBUG log messages.
        tf.logging.set_verbosity(tf.logging.WARN)
        self._classifiers = dict(
            (t, _Classifier(t)) for t in ('Tseep', 'Thrush'))
        if _EVALUATION_MODE_ENABLED:
            self._score_annotation_info = \
                AnnotationInfo.objects.get(name='Score')

    def annotate_clips(self, clips):
        """Annotates the specified clips with the appropriate classifiers.

        Returns the number of clips that were classified.
        """
        clip_lists = self._get_clip_lists(clips)
        num_clips_classified = 0
        # Renamed loop variable so it does not shadow the `clips` parameter.
        for clip_type, type_clips in clip_lists.items():
            classifier = self._classifiers.get(clip_type)
            if classifier is not None:
                # have classifier for this clip type
                num_clips_classified += \
                    self._annotate_clips(type_clips, classifier)
        return num_clips_classified

    def _get_clip_lists(self, clips):
        """Gets a mapping from clip types to lists of clips to classify."""
        clip_lists = defaultdict(list)
        for clip in clips:
            if _EVALUATION_MODE_ENABLED or \
                    self._get_annotation_value(clip) is None:
                # clip should be classified
                clip_type = model_utils.get_clip_type(clip)
                clip_lists[clip_type].append(clip)
        return clip_lists

    def _annotate_clips(self, clips, classifier):
        """Annotates the specified clips with the specified classifier.

        Returns the number of clips actually annotated.
        """
        num_clips_classified = 0
        triples = classifier.classify_clips(clips)
        # if _EVALUATION_MODE_ENABLED and len(triples) > 0:
        #     self._show_classification_errors(triples)
        for clip, auto_classification, score in triples:
            if auto_classification is not None:
                if _EVALUATION_MODE_ENABLED:
                    old_classification = self._get_annotation_value(clip)
                    new_classification = self._get_new_classification(
                        old_classification, auto_classification)
                    if new_classification is not None:
                        self._annotate(clip, new_classification)
                        num_clips_classified += 1
                    self._set_clip_score(clip, score)
                else:
                    # normal mode
                    self._annotate(clip, auto_classification)
                    num_clips_classified += 1
        return num_clips_classified

    def _get_new_classification(self, old_classification, auto_classification):
        """Combines an existing classification with the automatic one,
        per the evaluation-mode table in the module comment.  Returns
        None when the classification should not change."""
        old = old_classification
        auto = auto_classification
        if old is None:
            return None
        elif old.startswith('Call') and auto == 'Noise':
            return 'FN' + old[len('Call'):]
        elif old == 'Noise' and auto == 'Call':
            return 'FP'
        # Use the `old` alias consistently (was `old_classification`).
        elif old.startswith('XCall') and auto == 'Noise':
            return 'XCallN' + old[len('XCall'):]
        elif old.startswith('XCall') and auto == 'Call':
            return 'XCallP' + old[len('XCall'):]
        else:
            return None

    def _set_clip_score(self, clip, score):
        """Records the classifier score as a 'Score' annotation."""
        value = '{:.3f}'.format(score)
        model_utils.annotate_clip(
            clip, self._score_annotation_info, value,
            creating_user=self._creating_user,
            creating_job=self._creating_job,
            creating_processor=self._creating_processor)

    def _show_classification_errors(self, triples):
        """Logs false positives/negatives and overall accuracy for the
        specified (clip, classification, score) triples."""
        num_positives = 0
        num_negatives = 0
        false_positives = []
        false_negatives = []
        for i, (clip, new_classification, score) in enumerate(triples):
            old_classification = self._get_annotation_value(clip)
            if old_classification.startswith('Call'):
                num_positives += 1
                if new_classification == 'Noise':
                    false_negatives.append(
                        (i, old_classification, new_classification, score))
            else:
                # old classification does not start with 'Call'
                num_negatives += 1
                if new_classification == 'Call':
                    false_positives.append(
                        (i, old_classification, new_classification, score))
        num_clips = len(triples)
        logging.info('Classified {} clips.'.format(num_clips))
        self._show_classification_errors_aux(
            'calls', false_negatives, num_positives)
        self._show_classification_errors_aux(
            'non-calls', false_positives, num_negatives)
        num_errors = len(false_positives) + len(false_negatives)
        accuracy = 100 * (1 - num_errors / num_clips)
        logging.info(
            'The overall accuracy was {:.1f} percent.'.format(accuracy))

    def _show_classification_errors_aux(self, category, errors, num_clips):
        num_errors = len(errors)
        # Guard against division by zero when the evaluation set contained
        # no clips of this category.
        percent = 100 * num_errors / num_clips if num_clips != 0 else 0
        # Fixed typo in the log message: 'where' -> 'were'.
        logging.info((
            '{} of {} {} ({:.1f} percent) were incorrectly '
            'classified:').format(num_errors, num_clips, category, percent))
        for i, old_classification, new_classification, score in errors:
            logging.info(
                '    {} {} -> {} {}'.format(
                    i, old_classification, new_classification, score))
class _Classifier:
    """Coarse classifier for clips of a single clip type.

    Wraps a TensorFlow estimator restored from a saved model, along with
    the waveform slicing and resampling needed to prepare clip samples
    for inference.  Used by `Classifier`, above, with one instance per
    clip type ('Tseep' or 'Thrush').
    """

    def __init__(self, clip_type):
        self.clip_type = clip_type
        self._estimator = self._create_estimator()
        self._settings = self._load_settings()
        # Configure waveform slicing.
        s = self._settings
        fs = s.waveform_sample_rate
        s2f = signal_utils.seconds_to_frames
        # Start time (seconds) of the slice taken from each clip, shifted
        # by the inference-specific offset from the settings.
        self._waveform_start_time = \
            s.waveform_start_time + s.inference_waveform_start_time_offset
        self._waveform_duration = s.waveform_duration
        # Slice length in sample frames at the classifier sample rate.
        self._waveform_length = s2f(self._waveform_duration, fs)
        # Scores at or above this threshold are classified as 'Call'.
        self._classification_threshold = \
            self._settings.classification_threshold

    def _create_estimator(self):
        """Creates a TensorFlow estimator from this clip type's saved
        model directory."""
        path = classifier_utils.get_tensorflow_model_dir_path(self.clip_type)
        logging.info((
            'Creating TensorFlow estimator from saved model in directory '
            '"{}"...').format(path))
        return tf.contrib.estimator.SavedModelEstimator(str(path))

    def _load_settings(self):
        """Loads classifier settings from this clip type's YAML file."""
        path = classifier_utils.get_settings_file_path(self.clip_type)
        logging.info('Loading classifier settings from "{}"...'.format(path))
        text = path.read_text()
        d = yaml_utils.load(text)
        return Settings.create_from_dict(d)

    def classify_clips(self, clips):
        """Classifies the specified clips.

        Returns a list of (clip, classification, score) triples, one for
        each clip whose samples could be obtained.
        """
        # logging.info('Collecting clip waveforms for scoring...')
        waveforms, indices = self._slice_clip_waveforms(clips)
        if len(waveforms) == 0:
            return []
        else:
            # have at least one waveform slice to classify
            # Stack waveform slices to make 2-D NumPy array.
            self._waveforms = np.stack(waveforms)
            # logging.info('Scoring clip waveforms...')
            scores = classifier_utils.score_dataset_examples(
                self._estimator, self._create_dataset)
            # logging.info('Classifying clips...')
            triples = [
                self._classify_clip(i, score, clips)
                for i, score in zip(indices, scores)]
            return triples

    def _slice_clip_waveforms(self, clips):
        """Gets a waveform slice for each clip whose samples are available.

        Returns a (waveforms, indices) pair, where indices[i] is the index
        in `clips` of the clip that produced waveforms[i].  Clips whose
        samples cannot be obtained are logged and skipped.
        """
        waveforms = []
        indices = []
        for i, clip in enumerate(clips):
            try:
                waveform = self._get_clip_samples(clip)
            except Exception as e:
                logging.warning((
                    'Could not classify clip "{}", since its '
                    'samples could not be obtained. Error message was: '
                    '{}').format(str(clip), str(e)))
            else:
                # got clip samples
                waveforms.append(waveform)
                indices.append(i)
        return waveforms, indices

    def _get_clip_samples(self, clip):
        """Gets this clip's waveform slice, resampled to the classifier
        sample rate if the clip's rate differs.

        Raises ValueError if resampling yields too few samples.
        """
        clip_sample_rate = clip.sample_rate
        classifier_sample_rate = self._settings.waveform_sample_rate
        s2f = signal_utils.seconds_to_frames
        start_offset = s2f(self._waveform_start_time, clip_sample_rate)
        if clip_sample_rate != classifier_sample_rate:
            # need to resample
            # Get clip samples, including a millisecond of padding at
            # the end. I don't know what if any guarantees the
            # `resampy.resample` function offers about the relationship
            # between its input and output lengths, so we add the padding
            # to try to ensure that we don't wind up with too few samples
            # after resampling.
            length = s2f(self._waveform_duration + .001, clip_sample_rate)
            samples = clip_manager.get_samples(
                clip, start_offset=start_offset, length=length)
            # Resample clip samples to classifier sample rate.
            samples = resampy.resample(
                samples, clip_sample_rate, classifier_sample_rate)
            # Discard any extra trailing samples we wound up with.
            samples = samples[:self._waveform_length]
            if len(samples) < self._waveform_length:
                raise ValueError('Resampling produced too few samples.')
        else:
            # don't need to resample
            samples = clip_manager.get_samples(
                clip, start_offset=start_offset, length=self._waveform_length)
        return samples

    def _create_dataset(self):
        """Creates a spectrogram inference dataset from the waveforms
        stacked by `classify_clips`."""
        return dataset_utils.create_spectrogram_dataset_from_waveforms_array(
            self._waveforms, dataset_utils.DATASET_MODE_INFERENCE,
            self._settings, batch_size=64,
            feature_name=self._settings.model_input_name)

    def _classify_clip(self, index, score, clips):
        """Maps a score to a (clip, classification, score) triple using
        the classification threshold."""
        if score >= self._classification_threshold:
            classification = 'Call'
        else:
            classification = 'Noise'
        return clips[index], classification, score
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for generating and updating index.yaml."""
import os
import logging
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_admin
from google.appengine.api import yaml_errors
from google.appengine.datastore import datastore_index
import yaml
AUTO_MARKER = '\n# AUTOGENERATED\n'
AUTO_COMMENT = '''
# This index.yaml is automatically updated whenever the dev_appserver
# detects that a new type of query is run. If you want to manage the
# index.yaml file manually, remove the above marker line (the line
# saying "# AUTOGENERATED"). If you want to manage some indexes
# manually, move them above the marker line. The index.yaml file is
# automatically uploaded to the admin console when you next deploy
# your application using appcfg.py.
'''
def GenerateIndexFromHistory(query_history,
                             all_indexes=None, manual_indexes=None):
  """Generate most of the text for index.yaml from the query history.

  Args:
    query_history: Query history, a dict mapping datastore queries to
      their execution counts (presumably as returned by the datastore
      stub's QueryHistory() method).
    all_indexes: Optional datastore_index.IndexDefinitions instance
      representing all the indexes found in the input file.  May be None.
    manual_indexes: Optional datastore_index.IndexDefinitions instance
      containing indexes for which we should not generate output.  May be
      None.

  Returns:
    A string representation that can safely be appended to an existing
    index.yaml file.  Returns the empty string if it would generate no
    output.
  """
  # Seed the counts with every automatically-managed index already in the
  # file, so it survives the rewrite even if it no longer appears in the
  # query history.
  all_keys = datastore_index.IndexDefinitionsToKeys(all_indexes)
  manual_keys = datastore_index.IndexDefinitionsToKeys(manual_indexes)
  index_counts = dict((key, 0) for key in all_keys - manual_keys)

  for query, count in query_history.iteritems():
    required, kind, ancestor, props, num_eq_filters = \
        datastore_index.CompositeIndexForQuery(query)
    if not required:
      continue
    key = (kind, ancestor, props)
    if key in manual_keys:
      # Manually-managed indexes are never emitted here.
      continue
    index_counts[key] = index_counts.get(key, 0) + count

  if not index_counts:
    return ''

  chunks = []
  for (kind, ancestor, props), count in sorted(index_counts.iteritems()):
    chunks.append('')
    chunks.append(datastore_index.IndexYamlForQuery(kind, ancestor, props))
    chunks.append('')
  return '\n'.join(chunks)
class IndexYamlUpdater(object):
  """Helper class for updating index.yaml.

  This class maintains some state about the query history and the
  index.yaml file in order to minimize the number of times index.yaml
  is actually overwritten.
  """

  # True once we have detected a hand-maintained index.yaml (one without
  # the AUTO_MARKER line); such files are never rewritten.
  index_yaml_is_manual = False
  # mtime of index.yaml the last time we looked, or None if it didn't exist.
  index_yaml_mtime = None
  # Number of entries in the query history the last time we updated (or
  # decided not to update) index.yaml.
  last_history_size = 0

  def __init__(self, root_path):
    """Constructor.

    Args:
      root_path: Path to the app's root directory.
    """
    self.root_path = root_path

  def UpdateIndexYaml(self, openfile=open):
    """Update index.yaml.

    Args:
      openfile: Used for dependency injection.

    We only ever write to index.yaml if either:
    - it doesn't exist yet; or
    - it contains an 'AUTOGENERATED' comment.

    All indexes *before* the AUTOGENERATED comment will be written
    back unchanged.  All indexes *after* the AUTOGENERATED comment
    will be updated with the latest query counts (query counts are
    reset by --clear_datastore).  Indexes that aren't yet in the file
    will be appended to the AUTOGENERATED section.

    We keep track of some data in order to avoid doing repetitive work:
    - if index.yaml is fully manual, we keep track of its mtime to
      avoid parsing it over and over;
    - we keep track of the number of keys in the history dict since
      the last time we updated index.yaml (or decided there was
      nothing to update).
    """
    index_yaml_file = os.path.join(self.root_path, 'index.yaml')
    try:
      index_yaml_mtime = os.path.getmtime(index_yaml_file)
    except os.error:
      index_yaml_mtime = None
    index_yaml_changed = (index_yaml_mtime != self.index_yaml_mtime)
    self.index_yaml_mtime = index_yaml_mtime
    datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
    query_history = datastore_stub.QueryHistory()
    history_changed = (len(query_history) != self.last_history_size)
    self.last_history_size = len(query_history)
    # Skip all work when neither the file nor the history changed.
    if not (index_yaml_changed or history_changed):
      logging.debug('No need to update index.yaml')
      return
    if self.index_yaml_is_manual and not index_yaml_changed:
      logging.debug('Will not update manual index.yaml')
      return
    # Read the current contents of index.yaml, if any.
    if index_yaml_mtime is None:
      index_yaml_data = None
    else:
      try:
        # NOTE(review): this read uses the builtin open() rather than the
        # injected `openfile` parameter, unlike the write below -- confirm
        # whether that is intentional.
        fh = open(index_yaml_file, 'r')
      except IOError:
        index_yaml_data = None
      else:
        try:
          index_yaml_data = fh.read()
        finally:
          fh.close()
    # A file without AUTO_MARKER is maintained by hand; leave it alone.
    self.index_yaml_is_manual = (index_yaml_data is not None and
                                 AUTO_MARKER not in index_yaml_data)
    if self.index_yaml_is_manual:
      logging.info('Detected manual index.yaml, will not update')
      return
    # Parse all existing index definitions from the file.
    if index_yaml_data is None:
      all_indexes = None
    else:
      try:
        all_indexes = datastore_index.ParseIndexDefinitions(index_yaml_data)
      except yaml_errors.EventListenerError, e:
        logging.error('Error parsing %s:\n%s', index_yaml_file, e)
        return
      except Exception, err:
        logging.error('Error parsing %s:\n%s.%s: %s', index_yaml_file,
                      err.__class__.__module__, err.__class__.__name__, err)
        return
    # Split the file at the marker and parse the manual (leading) part.
    if index_yaml_data is None:
      manual_part, automatic_part = 'indexes:\n', ''
      manual_indexes = None
    else:
      manual_part, automatic_part = index_yaml_data.split(AUTO_MARKER, 1)
      try:
        manual_indexes = datastore_index.ParseIndexDefinitions(manual_part)
      except Exception, err:
        logging.error('Error parsing manual part of %s: %s',
                      index_yaml_file, err)
        return
    # Regenerate the automatic part from the query history.
    automatic_part = GenerateIndexFromHistory(query_history,
                                              all_indexes, manual_indexes)
    # Don't create a new index.yaml that would contain nothing useful.
    if index_yaml_mtime is None and automatic_part == '':
      logging.debug('No need to update index.yaml')
      return
    try:
      fh = openfile(index_yaml_file, 'w')
    except IOError, err:
      logging.error('Can\'t write index.yaml: %s', err)
      return
    try:
      logging.info('Updating %s', index_yaml_file)
      fh.write(manual_part)
      fh.write(AUTO_MARKER)
      fh.write(AUTO_COMMENT)
      fh.write(automatic_part)
    finally:
      fh.close()
    # Remember the new mtime so the next call can detect outside edits.
    try:
      self.index_yaml_mtime = os.path.getmtime(index_yaml_file)
    except os.error, err:
      logging.error('Can\'t stat index.yaml we just wrote: %s', err)
      self.index_yaml_mtime = None
def SetupIndexes(app_id, root_path):
  """Ensure that the set of existing composite indexes matches index.yaml.

  Note: this is similar to the algorithm used by the admin console for
  the same purpose.

  Args:
    app_id: Application ID being served.
    root_path: Path to the root of the application.
  """
  # Read index.yaml, treating a missing file as no definitions.
  index_yaml_file = os.path.join(root_path, 'index.yaml')
  try:
    fh = open(index_yaml_file, 'r')
  except IOError:
    index_yaml_data = None
  else:
    try:
      index_yaml_data = fh.read()
    finally:
      fh.close()

  indexes = []
  if index_yaml_data is not None:
    index_defs = datastore_index.ParseIndexDefinitions(index_yaml_data)
    if index_defs is not None and index_defs.indexes is not None:
      indexes = index_defs.indexes

  # Key both the requested and the existing indexes by their encoded
  # definitions so they can be diffed.
  requested_protos = datastore_index.IndexDefinitionsToProtos(app_id, indexes)
  existing_protos = datastore_admin.GetIndices(app_id)
  requested = dict((p.definition().Encode(), p) for p in requested_protos)
  existing = dict((p.definition().Encode(), p) for p in existing_protos)

  # Create indexes that are requested but missing.
  created = 0
  for key, index in requested.iteritems():
    if key not in existing:
      datastore_admin.CreateIndex(index)
      created += 1

  # Delete indexes that exist but are no longer requested.
  deleted = 0
  for key, index in existing.iteritems():
    if key not in requested:
      datastore_admin.DeleteIndex(index)
      deleted += 1

  if created or deleted:
    logging.info("Created %d and deleted %d index(es); total %d",
                 created, deleted, len(requested))
| |
# -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import string
import re
import dis
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# These constants are from Include/code.h.
# They are bit flags that appear in a code object's co_flags attribute.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
# See Include/object.h
# Type flag bit set on abstract base classes; used by isabstract() below.
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Module objects provide these attributes:
        __doc__     documentation string
        __file__    filename (missing for built-in modules)"""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class (new-style or classic).

    Class objects provide these attributes:
        __doc__     documentation string
        __module__  name of module in which this class was defined"""
    class_types = (type, types.ClassType)
    return isinstance(object, class_types)
def ismethod(object):
    """Return true if the object is an instance method.

    Instance method objects provide these attributes:
        __doc__     documentation string
        __name__    name with which this method was defined
        im_class    class object in which this method belongs
        im_func     function object containing implementation of method
        im_self     instance to which this method is bound, or None"""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    im_func attribute (etc) when an object passes ismethod()."""
    # Mutual exclusion: the more specific predicates win.
    if ismethod(object) or isfunction(object) or isclass(object):
        return False
    # A method descriptor has __get__ but no __set__ (with a __set__ it
    # would be a data descriptor instead).
    return hasattr(object, "__get__") and not hasattr(object, "__set__")
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.  Examples
    are properties (defined in Python) and getsets and members (defined in
    C).  Typically, data descriptors will also have __name__ and __doc__
    attributes (properties, getsets, and members have both of these
    attributes), but this is not guaranteed."""
    return hasattr(object, "__set__") and hasattr(object, "__get__")
if not hasattr(types, 'MemberDescriptorType'):
    # Implementations without member descriptors (non-CPython).
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False
else:
    # CPython and equivalent.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
if not hasattr(types, 'GetSetDescriptorType'):
    # Implementations without getset descriptors (non-CPython).
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False
else:
    # CPython and equivalent.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        func_code       code object containing compiled function bytecode
        func_defaults   tuple of any default values for arguments
        func_doc        (same as __doc__)
        func_globals    global namespace in which this function was defined
        func_name       (same as __name__)"""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as functions;
    see help(isfunction) for the listing."""
    if not (isfunction(object) or ismethod(object)):
        return False
    return bool(object.func_code.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator.

    Generator objects provide these attributes:
        __iter__    defined to support iteration over container
        close       raises a new GeneratorExit exception inside the
                    generator to terminate the iteration
        gi_code     code object
        gi_frame    frame object or possibly None once the generator has
                    been exhausted
        gi_running  set to 1 when generator is executing, 0 otherwise
        next        return the next item from the container
        send        resumes the generator and "sends" a value that becomes
                    the result of the current yield-expression
        throw       used to raise an exception inside the generator"""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return true if the object is a traceback.

    Traceback objects provide these attributes:
        tb_frame    frame object at this level
        tb_lasti    index of last attempted instruction in bytecode
        tb_lineno   current line number in Python source code
        tb_next     next inner traceback object (called by this level)"""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frame objects provide these attributes:
        f_back          next outer frame object (this frame's caller)
        f_builtins      built-in namespace seen by this frame
        f_code          code object being executed in this frame
        f_exc_traceback traceback if raised in this frame, or None
        f_exc_type      exception type if raised in this frame, or None
        f_exc_value     exception value if raised in this frame, or None
        f_globals       global namespace seen by this frame
        f_lasti         index of last attempted instruction in bytecode
        f_lineno        current line number in Python source code
        f_locals        local namespace seen by this frame
        f_restricted    0 or 1 if frame is in restricted execution mode
        f_trace         tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables"""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-in functions and methods provide these attributes:
        __doc__     documentation string
        __name__    original name of this function or method
        __self__    instance to which a method is bound, or None"""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Same order as the individual predicates would be tried explicitly.
    return any(check(object) for check in
               (isbuiltin, isfunction, ismethod, ismethoddescriptor))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    members = []
    for name in dir(object):
        try:
            value = getattr(object, name)
        except AttributeError:
            # dir() can list names that aren't actually retrievable
            # (e.g. broken descriptors); skip them rather than fail.
            continue
        if not predicate or predicate(value):
            members.append((name, value))
    return sorted(members)
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.
    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:
    0. The name (a string).
    1. The kind of attribute this is, one of these strings:
           'class method'    created via classmethod()
           'static method'   created via staticmethod()
           'property'        created via property()
           'method'          any other flavor of method
           'data'            not a method
    2. The class which defined this attribute (a class).
    3. The object as obtained directly from the defining class's
       __dict__, not via getattr.  This is especially important for
       data attributes:  C.data is just a data object, but
       C.__dict__['data'] may be a data descriptor with additional
       info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        if name in cls.__dict__:
            obj = cls.__dict__[name]
        else:
            obj = getattr(cls, name)
        # Figure out where it was defined.
        homecls = getattr(obj, "__objclass__", None)
        if homecls is None:
            # search the dicts.
            for base in mro:
                if name in base.__dict__:
                    homecls = base
                    break
        # Get the object again, in order to get it from the defining
        # __dict__ instead of via getattr (if possible).
        if homecls is not None and name in homecls.__dict__:
            obj = homecls.__dict__[name]
        # Also get the object via getattr, since descriptors such as
        # staticmethod/classmethod look different raw vs. retrieved.
        obj_via_getattr = getattr(cls, name)
        # Classify the object: the raw __dict__ entry decides the wrapper
        # kinds; the getattr view decides plain methods vs. data.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif (ismethod(obj_via_getattr) or
              ismethoddescriptor(obj_via_getattr)):
            kind = "method"
        else:
            kind = "data"
        result.append(Attribute(name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    # New-style classes publish their MRO directly; classic classes need
    # the depth-first simulation in _searchbases().
    if not hasattr(cls, "__mro__"):
        found = []
        _searchbases(cls, found)
        return tuple(found)
    return cls.__mro__
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # Expand tabs first so the count is in screen columns, then measure
    # how much lstrip() removes.
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object.
    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    # __doc__ may be any object (e.g. a property); only real strings count.
    if not isinstance(doc, types.StringTypes):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.
    Any whitespace that can be uniformly removed from the second line
    onwards is removed."""
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        # Mixed str/unicode that cannot be expanded; give up gracefully.
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        margin = sys.maxint
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation: the first line keeps only its text; later
        # lines lose the common margin.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxint:
            for i in range(1, len(lines)): lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return string.join(lines, '\n')
def getfile(object):
    """Work out which source or compiled file an object was defined in.

    Raises TypeError for built-in modules/classes and for objects that are
    not a module, class, method, function, traceback, frame, or code."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        # A class reports the file of its defining module.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Unwrap step by step: method -> function -> code; traceback/frame -> code.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# Record type returned by getmoduleinfo(): describes one importable file.
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns a ModuleInfo namedtuple, or None if the file name does not end
    in any suffix known to the import machinery."""
    filename = os.path.basename(path)
    # Decorate suffixes with -len so that sorting puts the longest first.
    suffixes = map(lambda info:
                   (-len(info[0]), info[0], info[1], info[2]),
                   imp.get_suffixes())
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
    """Return the module name for a given file, or None."""
    # getmoduleinfo() returns None for unrecognized suffixes; the module
    # name is the first field of the ModuleInfo tuple.
    info = getmoduleinfo(path)
    return info[0] if info else None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    # Map compiled files back to their .py counterpart.
    if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            # Looks like a binary file.  We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    filename = _filename
    if filename is None:
        # Prefer the source file; fall back to the compiled file.
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Cache maintained by getmodule(): absolute file name -> module name.
modulesbyfile = {}
# Bookkeeping for the cache above: module name -> raw __file__ last seen.
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    *_filename* is an internal optimization: a pre-computed file name for
    the object, used to seed the file->module cache lookups."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in sys.modules.items():
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins; falls through returning None when nothing matches.
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def findsource(object):
    """Return the entire source file and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getsourcefile(object)
    if not file:
        raise IOError('source code not available')
    # Passing the module globals lets linecache resolve PEP 302 loaders.
    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    # Unwrap to a code object, then scan backwards from co_firstlineno for
    # the def/lambda/decorator line that introduces it.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.
    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            # Walk upwards collecting contiguous comment lines that share
            # the definition's indentation.
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Trim bare '#' lines from both ends of the block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
# Raised by BlockFinder.tokeneater() to signal that the current block ended.
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0       # current INDENT/DEDENT nesting depth
        self.islambda = False # block started with "lambda" (ends at NEWLINE)
        self.started = False  # seen the introducing def/class/lambda yet?
        self.passline = False # skipping tokens until end of current line
        self.last = 1         # last physical line that belongs to the block
    def tokeneater(self, type, token, srow_scol, erow_ecol, line):
        # Callback for tokenize.tokenize(); raises EndOfBlock when the
        # block introduced by the first def/class/lambda is complete.
        srow, scol = srow_scol
        erow, ecol = erow_ecol
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srow
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    blockfinder = BlockFinder()
    try:
        # Py2 tokenize API: pass a readline-like callable; tokeneater is
        # invoked per token and raises EndOfBlock when the block is done.
        tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
    except (EndOfBlock, IndentationError):
        # IndentationError can occur when the block is the last thing in a
        # file with trailing garbage; the lines seen so far are still good.
        pass
    return lines[:blockfinder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The lines corresponding to the object are returned
    together with the 1-based line number of the first one in the original
    file (0 for a whole module).  IOError is raised when the source cannot
    be retrieved."""
    lines, lnum = findsource(object)
    # A module is its whole file; anything else is the block at lnum.
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source is returned as one string; IOError is raised
    when it cannot be retrieved."""
    lines, lnum = getsourcelines(object)
    return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree()."""
    # Sort in place so siblings appear in (module, name) order.
    classes.sort(key=attrgetter('__module__', '__name__'))
    tree = []
    for cls in classes:
        tree.append((cls, cls.__bases__))
        if cls in children:
            # Subclasses are nested one list deeper, right after the parent.
            tree.append(walktree(children[cls], children, cls))
    return tree
def getclasstree(classes, unique=0):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for c in classes:
        if not c.__bases__:
            # Base-less classes are roots of the forest.
            if c not in roots:
                roots.append(c)
            continue
        for parent in c.__bases__:
            children.setdefault(parent, []).append(c)
            if unique and parent in classes:
                # Record the class under its first listed parent only.
                break
    # Parents that were not in the input list become additional roots.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Record type returned by getargs(): names of positional, * and ** args.
Arguments = namedtuple('Arguments', 'args varargs keywords')
def getargs(co):
    """Get information about the arguments accepted by a code object.
    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])
    step = 0
    # The following acrobatics are for anonymous (tuple) arguments.
    for i in range(nargs):
        if args[i][:1] in ('', '.'):
            # Py2 names anonymous tuple args '' or '.N'; reconstruct their
            # structure by scanning the function's unpacking bytecode.
            stack, remain, count = [], [], []
            while step < len(co.co_code):
                op = ord(co.co_code[step])
                step = step + 1
                if op >= dis.HAVE_ARGUMENT:
                    opname = dis.opname[op]
                    # Little-endian 16-bit operand (Py2 bytecode layout).
                    value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
                    step = step + 2
                    if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
                        remain.append(value)
                        count.append(value)
                    elif opname == 'STORE_FAST':
                        stack.append(names[value])
                        # Special case for sublists of length 1: def foo((bar))
                        # doesn't generate the UNPACK_TUPLE bytecode, so if
                        # `remain` is empty here, we have such a sublist.
                        if not remain:
                            stack[0] = [stack[0]]
                            break
                        else:
                            remain[-1] = remain[-1] - 1
                            while remain[-1] == 0:
                                remain.pop()
                                size = count.pop()
                                stack[-size:] = [stack[-size:]]
                                if not remain: break
                                remain[-1] = remain[-1] - 1
                            if not remain: break
            args[i] = stack[0]
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return Arguments(args, varargs, varkw)
# Record type returned by getargspec(): getargs() fields plus defaults.
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.
    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    Raises TypeError if *func* is not a Python function or bound method."""
    if ismethod(func):
        # Inspect the underlying function of a (bound or unbound) method.
        func = func.im_func
    if not isfunction(func):
        raise TypeError('{!r} is not a Python function'.format(func))
    args, varargs, varkw = getargs(func.func_code)
    return ArgSpec(args, varargs, varkw, func.func_defaults)
# Record type returned by getargvalues(): getargs() fields plus f_locals.
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    names = getargs(frame.f_code)
    return ArgInfo(names.args, names.varargs, names.keywords, frame.f_locals)
def joinseq(seq):
    # Render a sequence of strings as a tuple display; a single element
    # keeps the trailing comma, as in "(a,)".
    if len(seq) == 1:
        return '(%s,)' % seq[0]
    return '(%s)' % ', '.join(seq)
def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element."""
    # Leaves are converted directly; lists/tuples recurse and are then
    # joined into a single string by *join*.
    if type(object) not in (list, tuple):
        return convert(object)
    return join([strseq(item, convert, join) for item in object])
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.
    The first four arguments are (args, varargs, varkw, defaults).  The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments."""
    specs = []
    if defaults:
        # Defaults align to the *last* len(defaults) positional arguments.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = strseq(arg, formatarg, join)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(varargs))
    if varkw is not None:
        specs.append(formatvarkw(varkw))
    return '(' + string.join(specs, ', ') + ')'
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.
    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments."""
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        # Render one "name=value" pair from the frame's locals.
        return formatarg(name) + formatvalue(locals[name])
    specs = []
    for i in range(len(args)):
        specs.append(strseq(args[i], convert, join))
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + string.join(specs, ', ') + ')'
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.
    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'."""
    args, varargs, varkw, defaults = getargspec(func)
    f_name = func.__name__
    arg2value = {}
    # The following closures are basically because of tuple parameter unpacking.
    assigned_tuple_params = []
    def assign(arg, value):
        # Bind *value* to *arg*, recursing into nested tuple parameters.
        if isinstance(arg, str):
            arg2value[arg] = value
        else:
            assigned_tuple_params.append(arg)
            value = iter(value)
            for i, subarg in enumerate(arg):
                try:
                    subvalue = next(value)
                except StopIteration:
                    raise ValueError('need more than %d %s to unpack' %
                                     (i, 'values' if i > 1 else 'value'))
                assign(subarg,subvalue)
            try:
                next(value)
            except StopIteration:
                pass
            else:
                raise ValueError('too many values to unpack')
    def is_assigned(arg):
        # True when *arg* (a name or a nested tuple) already has a value.
        if isinstance(arg,str):
            return arg in arg2value
        return arg in assigned_tuple_params
    if ismethod(func) and func.im_self is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.im_self,) + positional
    num_pos = len(positional)
    num_total = num_pos + len(named)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    for arg, value in zip(args, positional):
        assign(arg, value)
    if varargs:
        if num_pos > num_args:
            # Excess positional values go into the * argument.
            assign(varargs, positional[-(num_pos-num_args):])
        else:
            assign(varargs, ())
    elif 0 < num_args < num_pos:
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at most' if defaults else 'exactly', num_args,
            'arguments' if num_args > 1 else 'argument', num_total))
    elif num_args == 0 and num_total:
        if varkw:
            if num_pos:
                # XXX: We should use num_pos, but Python also uses num_total:
                raise TypeError('%s() takes exactly 0 arguments '
                                '(%d given)' % (f_name, num_total))
        else:
            raise TypeError('%s() takes no arguments (%d given)' %
                            (f_name, num_total))
    for arg in args:
        if isinstance(arg, str) and arg in named:
            if is_assigned(arg):
                raise TypeError("%s() got multiple values for keyword "
                                "argument '%s'" % (f_name, arg))
            else:
                assign(arg, named.pop(arg))
    if defaults:    # fill in any missing values with the defaults
        for arg, value in zip(args[-num_defaults:], defaults):
            if not is_assigned(arg):
                assign(arg, value)
    if varkw:
        # Whatever keyword arguments remain go into the ** argument.
        assign(varkw, named)
    elif named:
        unexpected = next(iter(named))
        if isinstance(unexpected, unicode):
            unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (f_name, unexpected))
    unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
    if unassigned:
        num_required = num_args - num_defaults
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at least' if defaults else 'exactly', num_required,
            'arguments' if num_required > 1 else 'argument', num_total))
    return arg2value
# -------------------------------------------------- stack frame extraction
# Record type returned by getframeinfo(): location plus source context.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # A traceback wraps a frame; remember its own line number first.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line, clamped to the file.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            lines = index = None
        else:
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # f_lineno is a descriptor that decodes co_lnotab on demand, so a plain
    # attribute read is always current and correct.
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    # Walk the f_back chain from the given frame to the outermost caller.
    while frame is not None:
        records.append((frame,) + getframeinfo(frame, context))
        frame = frame.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    # Follow the tb_next chain from the raise point down the call stack.
    while tb is not None:
        records.append((tb.tb_frame,) + getframeinfo(tb, context))
        tb = tb.tb_next
    return records
# sys._getframe is CPython-specific; on implementations without it, fall
# back to a stub that reports "no frame available".
if hasattr(sys, '_getframe'):
    currentframe = sys._getframe
else:
    currentframe = lambda _=None: None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    # _getframe(1) skips this function's own frame.
    return getouterframes(sys._getframe(1), context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    # sys.exc_info()[2] is the traceback of the exception being handled.
    return getinnerframes(sys.exc_info()[2], context)
# ---- end of inspect.py excerpt; sphinx/writers/text.py follows ----
"""
sphinx.writers.text
~~~~~~~~~~~~~~~~~~~
Custom docutils writer for plain text.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import math
import os
import re
import textwrap
import warnings
from itertools import groupby, chain
from typing import Iterable, cast
from docutils import nodes, writers
from docutils.utils import column_width
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.locale import admonitionlabels, _
from sphinx.util.docutils import SphinxTranslator
if False:
# For type annotation
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union # NOQA
from sphinx.builders.text import TextBuilder # NOQA
class Cell:
    """One table cell, possibly spanning several rows and/or columns."""
    def __init__(self, text="", rowspan=1, colspan=1):
        self.text = text
        self.rowspan = rowspan
        self.colspan = colspan
        # Wrapped text lines, filled in by wrap().
        self.wrapped = []  # type: List[str]
        # Grid position; assigned when the cell is placed in a Table.
        self.row = None
        self.col = None
    def __repr__(self):
        fields = (self.text, self.row, self.rowspan, self.col, self.colspan)
        return "<Cell {!r} {}v{}/{}>{}>".format(*fields)
    def __hash__(self):
        # Hash by grid position so a spanning cell hashes the same from
        # every (row, col) slot it occupies.
        return hash((self.col, self.row))
    def wrap(self, width):
        self.wrapped = my_wrap(self.text, width)
class Table:
    """Represents a table, handling cells that can span on multiple lines
    or rows, like::
        +-----------+-----+
        | AAA       | BBB |
        +-----+-----+     |
        |     | XXX |     |
        |     +-----+-----+
        | DDD | CCC       |
        +-----+-----------+
    This class can be used in two ways:
    - Either with absolute positions: call ``table[line, col] = Cell(...)``,
      this overwrite an existing cell if any.
    - Either with relative positions: call the ``add_row()`` and
      ``add_cell(Cell(...))`` as needed.
    Cell spanning on multiple rows or multiple columns (having a
    colspan or rowspan greater than one) are automatically referenced
    by all the table cells they covers. This is a usefull
    representation as we can simply check ``if self[x, y] is self[x,
    y+1]`` to recognize a rowspan.
    Colwidth is not automatically computed, it has to be given, either
    at construction time, either during the table construction.
    Example usage::
        table = Table([6, 6])
        table.add_cell(Cell("foo"))
        table.add_cell(Cell("bar"))
        table.set_separator()
        table.add_row()
        table.add_cell(Cell("FOO"))
        table.add_cell(Cell("BAR"))
        print(table)
        +--------+--------+
        | foo    | bar    |
        |========|========|
        | FOO    | BAR    |
        +--------+--------+
    """
    def __init__(self, colwidth=None):
        self.lines = []  # type: List[List[Cell]]
        # Index of the line above which the "=" header separator is drawn.
        self.separator = 0
        self.colwidth = (colwidth if colwidth is not None
                         else [])  # type: List[int]
        # Cursor used by the relative-position API (add_row/add_cell).
        self.current_line = 0
        self.current_col = 0
    def add_row(self):
        """Add a row to the table, to use with ``add_cell()``.  It is not needed
        to call ``add_row()`` before the first ``add_cell()``.
        """
        self.current_line += 1
        self.current_col = 0
    def set_separator(self):
        """Sets the separator below the current line.
        """
        self.separator = len(self.lines)
    def add_cell(self, cell):
        """Add a cell to the current line, to use with ``add_row()``.  To add
        a cell spanning on multiple lines or rows, simply set the
        ``cell.colspan`` or ``cell.rowspan`` BEFORE inserting it to
        the table.
        """
        # Skip over slots already claimed by spanning cells.
        while self[self.current_line, self.current_col]:
            self.current_col += 1
        self[self.current_line, self.current_col] = cell
        self.current_col += cell.colspan
    def __getitem__(self, pos):
        line, col = pos
        self._ensure_has_line(line + 1)
        self._ensure_has_column(col + 1)
        return self.lines[line][col]
    def __setitem__(self, pos, cell):
        line, col = pos
        self._ensure_has_line(line + cell.rowspan)
        self._ensure_has_column(col + cell.colspan)
        # Reference the cell from every grid slot its spans cover.
        for dline in range(cell.rowspan):
            for dcol in range(cell.colspan):
                self.lines[line + dline][col + dcol] = cell
                cell.row = line
                cell.col = col
    def _ensure_has_line(self, line):
        # Grow the grid vertically until it has at least *line* rows.
        while len(self.lines) < line:
            self.lines.append([])
    def _ensure_has_column(self, col):
        # Grow every row horizontally until it has at least *col* slots.
        for line in self.lines:
            while len(line) < col:
                line.append(None)
    def __repr__(self):
        return "\n".join(repr(line) for line in self.lines)
    def cell_width(self, cell, source):
        """Give the cell width, according to the given source (either
        ``self.colwidth`` or ``self.measured_widths``).
        This take into account cells spanning on multiple columns.
        """
        width = 0
        for i in range(self[cell.row, cell.col].colspan):
            width += source[cell.col + i]
        # Spanning cells also absorb the " | " between the merged columns.
        return width + (cell.colspan - 1) * 3
    @property
    def cells(self):
        # Yield each distinct cell once, even when it spans several slots.
        seen = set()  # type: Set[Cell]
        for lineno, line in enumerate(self.lines):
            for colno, cell in enumerate(line):
                if cell and cell not in seen:
                    yield cell
                    seen.add(cell)
    def rewrap(self):
        """Call ``cell.wrap()`` on all cells, and measure each column width
        after wrapping (result written in ``self.measured_widths``).
        """
        self.measured_widths = self.colwidth[:]
        for cell in self.cells:
            cell.wrap(width=self.cell_width(cell, self.colwidth))
            if not cell.wrapped:
                continue
            # Spread a spanning cell's width evenly over its columns.
            width = math.ceil(max(column_width(x) for x in cell.wrapped) / cell.colspan)
            for col in range(cell.col, cell.col + cell.colspan):
                self.measured_widths[col] = max(self.measured_widths[col], width)
    def physical_lines_for_line(self, line):
        """From a given line, compute the number of physical lines it spans
        due to text wrapping.
        """
        physical_lines = 1
        for cell in line:
            physical_lines = max(physical_lines, len(cell.wrapped))
        return physical_lines
    def __str__(self):
        out = []
        self.rewrap()
        def writesep(char="-", lineno=None):
            # type: (str, Optional[int]) -> str
            """Called on the line *before* lineno.
            Called with no *lineno* for the last sep.
            """
            out = []  # type: List[str]
            for colno, width in enumerate(self.measured_widths):
                if (
                    lineno is not None and
                    lineno > 0 and
                    self[lineno, colno] is self[lineno - 1, colno]
                ):
                    # Same cell above and below: the rowspan continues, so
                    # draw blank space instead of a separator segment.
                    out.append(" " * (width + 2))
                else:
                    out.append(char * (width + 2))
            head = "+" if out[0][0] == "-" else "|"
            tail = "+" if out[-1][0] == "-" else "|"
            glue = [
                "+" if left[0] == "-" or right[0] == "-" else "|"
                for left, right in zip(out, out[1:])
            ]
            glue.append(tail)
            return head + "".join(chain(*zip(out, glue)))
        for lineno, line in enumerate(self.lines):
            if self.separator and lineno == self.separator:
                out.append(writesep("=", lineno))
            else:
                out.append(writesep("-", lineno))
            for physical_line in range(self.physical_lines_for_line(line)):
                linestr = ["|"]
                for colno, cell in enumerate(line):
                    if cell.col != colno:
                        # Continuation slot of a colspan; already rendered.
                        continue
                    if lineno != cell.row:
                        # Continuation row of a rowspan; draw empty space.
                        physical_text = ""
                    elif physical_line >= len(cell.wrapped):
                        physical_text = ""
                    else:
                        physical_text = cell.wrapped[physical_line]
                    # Compensate for East-Asian wide characters so the column
                    # lines up visually.
                    adjust_len = len(physical_text) - column_width(physical_text)
                    linestr.append(
                        " " +
                        physical_text.ljust(
                            self.cell_width(cell, self.measured_widths) + 1 + adjust_len
                        ) + "|"
                    )
                out.append("".join(linestr))
        out.append(writesep("-"))
        return "\n".join(out)
class TextWrapper(textwrap.TextWrapper):
    """Custom subclass that uses a different word separator regex.

    All width arithmetic uses ``column_width()`` (display columns) instead of
    ``len()`` (characters), so wide/fullwidth characters wrap correctly.
    """
    wordsep_re = re.compile(
        r'(\s+|' # any whitespace
        r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start
        r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
    def _wrap_chunks(self, chunks):
        # type: (List[str]) -> List[str]
        """_wrap_chunks(chunks : [string]) -> [string]
        The original _wrap_chunks uses len() to calculate width.
        This method respects wide/fullwidth characters for width adjustment.
        """
        lines = [] # type: List[str]
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        chunks.reverse()
        while chunks:
            cur_line = []
            cur_len = 0
            # The first output line gets the initial indent; the rest get the
            # subsequent indent.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            width = self.width - column_width(indent)
            # Drop leading whitespace on continuation lines.
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                chunk_len = column_width(chunks[-1])
                if cur_len + chunk_len <= width:
                    cur_line.append(chunks.pop())
                    cur_len += chunk_len
                else:
                    break
            # A single chunk wider than the line: break it mid-word.
            if chunks and column_width(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            # Drop trailing whitespace at the end of the line.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
    def _break_word(self, word, space_left):
        # type: (str, int) -> Tuple[str, str]
        """_break_word(word : string, space_left : int) -> (string, string)
        Break line by unicode width instead of len(word).
        """
        total = 0
        for i, c in enumerate(word):
            total += column_width(c)
            if total > space_left:
                # Characters [0, i) fit in *space_left*.  Split at i - 1 as
                # before, but always keep at least one character in the head:
                # previously i == 0 returned an over-wide word[:-1] head, and
                # i == 1 returned ('', word), leaving the chunk unconsumed so
                # _wrap_chunks() looped forever emitting blank lines.
                cut = max(i - 1, 1)
                return word[:cut], word[cut:]
        return word, ''
    def _split(self, text):
        # type: (str) -> List[str]
        """_split(text : string) -> [string]
        Override original method that only split by 'wordsep_re'.
        This '_split' split wide-characters into chunk by one character.
        """
        def split(t):
            # type: (str) -> List[str]
            return super(TextWrapper, self)._split(t)
        chunks = [] # type: List[str]
        for chunk in split(text):
            # Runs of narrow (width-1) characters are re-split by the regex;
            # each wide character becomes its own chunk so a break may occur
            # at any position inside CJK text.
            for w, g in groupby(chunk, column_width):
                if w == 1:
                    chunks.extend(split(''.join(g)))
                else:
                    chunks.extend(list(g))
        return chunks
    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        # type: (List[str], List[str], int, int) -> None
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)
        Override original method for using self._break_word() instead of slice.
        """
        # Always leave at least one column to place something on this line.
        space_left = max(width - cur_len, 1)
        if self.break_long_words:
            l, r = self._break_word(reversed_chunks[-1], space_left)
            cur_line.append(l)
            reversed_chunks[-1] = r
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())
# Default output width and indentation for the text writer.
MAXWIDTH = 70
STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
    # type: (str, int, Any) -> List[str]
    """Wrap *text* to *width* display columns with the width-aware wrapper."""
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.wrap(text)
class TextWriter(writers.Writer):
    """Docutils writer that renders a document tree as plain text."""
    supported = ('text',)
    settings_spec = ('No options here.', '', ())
    settings_defaults = {} # type: Dict
    output = None
    def __init__(self, builder):
        # type: (TextBuilder) -> None
        """Remember the owning *builder*; it supplies config and translator."""
        super().__init__()
        self.builder = builder
    def translate(self):
        # type: () -> None
        """Walk the document with the builder's translator and collect output."""
        translator = self.builder.create_translator(self.document, self.builder)
        self.document.walkabout(translator)
        self.output = cast(TextTranslator, translator).body
class TextTranslator(SphinxTranslator):
    """Translate a docutils document tree into plain text.

    Output is accumulated on a stack of *states*: every nested construct
    pushes a new state (a list of ``(indent, text)`` entries) with
    :meth:`new_state` and pops it with :meth:`end_state`, which wraps and
    indents the collected text into the parent state.
    """
    builder = None # type: TextBuilder
    def __init__(self, document, builder):
        # type: (nodes.document, TextBuilder) -> None
        super().__init__(document, builder)
        # Line-ending style comes from the ``text_newlines`` config value:
        # 'windows', 'native', or anything else for plain '\n'.
        newlines = self.config.text_newlines
        if newlines == 'windows':
            self.nl = '\r\n'
        elif newlines == 'native':
            self.nl = os.linesep
        else:
            self.nl = '\n'
        self.sectionchars = self.config.text_sectionchars
        self.add_secnumbers = self.config.text_add_secnumbers
        self.secnumber_suffix = self.config.text_secnumber_suffix
        # Stack of output states; each state is a list of (indent, text).
        self.states = [[]] # type: List[List[Tuple[int, Union[str, List[str]]]]]
        self.stateindent = [0]
        self.list_counter = [] # type: List[int]
        self.sectionlevel = 0
        self.lineblocklevel = 0
        self.table = None # type: Table
    def add_text(self, text):
        # type: (str) -> None
        """Append raw *text* to the current state (indent -1 = unformatted)."""
        self.states[-1].append((-1, text))
    def new_state(self, indent=STDINDENT):
        # type: (int) -> None
        """Push a fresh state that will be indented by *indent* columns."""
        self.states.append([])
        self.stateindent.append(indent)
    def end_state(self, wrap=True, end=[''], first=None):
        # type: (bool, List[str], str) -> None
        """Pop the current state, wrap/indent its text into the parent.

        *end* is appended after each formatted run; *first* is a prefix for
        the first line (used for list bullets, footnote labels etc.).
        NOTE(review): ``end=['']`` is a mutable default, but it is only ever
        read (``res += end`` rebinds ``res``), so it is safe here.
        """
        content = self.states.pop()
        maxindent = sum(self.stateindent)
        indent = self.stateindent.pop()
        result = [] # type: List[Tuple[int, List[str]]]
        toformat = [] # type: List[str]
        def do_format():
            # type: () -> None
            # Flush accumulated raw text as one wrapped (or split) run.
            if not toformat:
                return
            if wrap:
                res = my_wrap(''.join(toformat), width=MAXWIDTH - maxindent)
            else:
                res = ''.join(toformat).splitlines()
            if end:
                res += end
            result.append((indent, res))
        for itemindent, item in content:
            if itemindent == -1:
                toformat.append(item) # type: ignore
            else:
                do_format()
                result.append((indent + itemindent, item)) # type: ignore
                toformat = []
        do_format()
        if first is not None and result:
            # Prepend *first* to the first line and out-dent it so the
            # continuation lines stay aligned under the text.
            itemindent, item = result[0]
            result_rest, result = result[1:], []
            if item:
                toformat = [first + ' '.join(item)]
                do_format() # re-create `result` from `toformat`
                _dummy, new_item = result[0]
                result.insert(0, (itemindent - indent, [new_item[0]]))
                result[1] = (itemindent, new_item[1:])
                result.extend(result_rest)
        self.states[-1].extend(result)
    def visit_document(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_document(self, node):
        # type: (nodes.Element) -> None
        # Join the root state into the final body string.
        self.end_state()
        self.body = self.nl.join(line and (' ' * indent + line)
                                 for indent, lines in self.states[0]
                                 for line in lines)
        # XXX header/footer?
    def visit_section(self, node):
        # type: (nodes.Element) -> None
        # Pick the underline character for this nesting depth.
        self._title_char = self.sectionchars[self.sectionlevel]
        self.sectionlevel += 1
    def depart_section(self, node):
        # type: (nodes.Element) -> None
        self.sectionlevel -= 1
    def visit_topic(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_topic(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    visit_sidebar = visit_topic
    depart_sidebar = depart_topic
    def visit_rubric(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
        self.add_text('-[ ')
    def depart_rubric(self, node):
        # type: (nodes.Element) -> None
        self.add_text(' ]-')
        self.end_state()
    def visit_compound(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_compound(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_glossary(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_glossary(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_title(self, node):
        # type: (nodes.Element) -> None
        # Admonition titles render inline ("Label: "); section titles get an
        # underline in depart_title().
        if isinstance(node.parent, nodes.Admonition):
            self.add_text(node.astext() + ': ')
            raise nodes.SkipNode
        self.new_state(0)
    def get_section_number_string(self, node):
        # type: (nodes.Element) -> str
        """Return the dotted section number prefix for *node*, or ''."""
        if isinstance(node.parent, nodes.section):
            anchorname = '#' + node.parent['ids'][0]
            numbers = self.builder.secnumbers.get(anchorname)
            if numbers is None:
                numbers = self.builder.secnumbers.get('')
            if numbers is not None:
                return '.'.join(map(str, numbers)) + self.secnumber_suffix
        return ''
    def depart_title(self, node):
        # type: (nodes.Element) -> None
        if isinstance(node.parent, nodes.section):
            char = self._title_char
        else:
            char = '^'
        text = ''
        text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) # type: ignore
        if self.add_secnumbers:
            text = self.get_section_number_string(node) + text
        self.stateindent.pop()
        # Underline with the section char, sized by display width.
        title = ['', text, '%s' % (char * column_width(text)), '']
        if len(self.states) == 2 and len(self.states[-1]) == 0:
            # remove an empty line before title if it is first section title in the document
            title.pop(0)
        self.states[-1].append((0, title))
    def visit_subtitle(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_subtitle(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_attribution(self, node):
        # type: (nodes.Element) -> None
        self.add_text('-- ')
    def depart_attribution(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_desc(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc_signature(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_desc_signature(self, node):
        # type: (nodes.Element) -> None
        # XXX: wrap signatures in a way that makes sense
        self.end_state(wrap=False, end=None)
    def visit_desc_signature_line(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_desc_signature_line(self, node):
        # type: (nodes.Element) -> None
        self.add_text('\n')
    def visit_desc_name(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_desc_name(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc_addname(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_desc_addname(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc_type(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_desc_type(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc_returns(self, node):
        # type: (nodes.Element) -> None
        self.add_text(' -> ')
    def depart_desc_returns(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc_parameterlist(self, node):
        # type: (nodes.Element) -> None
        self.add_text('(')
        self.first_param = 1
    def depart_desc_parameterlist(self, node):
        # type: (nodes.Element) -> None
        self.add_text(')')
    def visit_desc_parameter(self, node):
        # type: (nodes.Element) -> None
        if not self.first_param:
            self.add_text(', ')
        else:
            self.first_param = 0
        self.add_text(node.astext())
        raise nodes.SkipNode
    def visit_desc_optional(self, node):
        # type: (nodes.Element) -> None
        self.add_text('[')
    def depart_desc_optional(self, node):
        # type: (nodes.Element) -> None
        self.add_text(']')
    def visit_desc_annotation(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_desc_annotation(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_desc_content(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
        self.add_text(self.nl)
    def depart_desc_content(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_figure(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
    def depart_figure(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_caption(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_caption(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_productionlist(self, node):
        # type: (nodes.Element) -> None
        # Render the whole grammar production list here; SkipNode prevents
        # the children from being visited again.
        self.new_state()
        names = []
        productionlist = cast(Iterable[addnodes.production], node)
        for production in productionlist:
            names.append(production['tokenname'])
        maxlen = max(len(name) for name in names)
        lastname = None
        for production in productionlist:
            if production['tokenname']:
                self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
                lastname = production['tokenname']
            elif lastname is not None:
                # Continuation line: align under the previous token name.
                self.add_text('%s ' % (' ' * len(lastname)))
            self.add_text(production.astext() + self.nl)
        self.end_state(wrap=False)
        raise nodes.SkipNode
    def visit_footnote(self, node):
        # type: (nodes.Element) -> None
        label = cast(nodes.label, node[0])
        self._footnote = label.astext().strip()
        # Indent body so it aligns after the "[label] " prefix.
        self.new_state(len(self._footnote) + 3)
    def depart_footnote(self, node):
        # type: (nodes.Element) -> None
        self.end_state(first='[%s] ' % self._footnote)
    def visit_citation(self, node):
        # type: (nodes.Element) -> None
        if len(node) and isinstance(node[0], nodes.label):
            self._citlabel = node[0].astext()
        else:
            self._citlabel = ''
        self.new_state(len(self._citlabel) + 3)
    def depart_citation(self, node):
        # type: (nodes.Element) -> None
        self.end_state(first='[%s] ' % self._citlabel)
    def visit_label(self, node):
        # type: (nodes.Element) -> None
        raise nodes.SkipNode
    def visit_legend(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_legend(self, node):
        # type: (nodes.Element) -> None
        pass
    # XXX: option list could use some better styling
    def visit_option_list(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_option_list(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_option_list_item(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_option_list_item(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_option_group(self, node):
        # type: (nodes.Element) -> None
        self._firstoption = True
    def depart_option_group(self, node):
        # type: (nodes.Element) -> None
        self.add_text('     ')
    def visit_option(self, node):
        # type: (nodes.Element) -> None
        if self._firstoption:
            self._firstoption = False
        else:
            self.add_text(', ')
    def depart_option(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_option_string(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_option_string(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_option_argument(self, node):
        # type: (nodes.Element) -> None
        self.add_text(node['delimiter'])
    def depart_option_argument(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_description(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_description(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_tabular_col_spec(self, node):
        # type: (nodes.Element) -> None
        raise nodes.SkipNode
    def visit_colspec(self, node):
        # type: (nodes.Element) -> None
        self.table.colwidth.append(node["colwidth"])
        raise nodes.SkipNode
    def visit_tgroup(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_tgroup(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_thead(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_thead(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_tbody(self, node):
        # type: (nodes.Element) -> None
        # Mark the header/body boundary so __str__ draws "=" there.
        self.table.set_separator()
    def depart_tbody(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_row(self, node):
        # type: (nodes.Element) -> None
        if self.table.lines:
            self.table.add_row()
    def depart_row(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_entry(self, node):
        # type: (nodes.Element) -> None
        self.entry = Cell(
            rowspan=node.get("morerows", 0) + 1, colspan=node.get("morecols", 0) + 1
        )
        self.new_state(0)
    def depart_entry(self, node):
        # type: (nodes.Element) -> None
        # Collapse the entry's state into the cell's text and file the cell
        # into the current table.
        text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
        self.stateindent.pop()
        self.entry.text = text
        self.table.add_cell(self.entry)
        self.entry = None
    def visit_table(self, node):
        # type: (nodes.Element) -> None
        if self.table:
            raise NotImplementedError('Nested tables are not supported.')
        self.new_state(0)
        self.table = Table()
    def depart_table(self, node):
        # type: (nodes.Element) -> None
        self.add_text(str(self.table))
        self.table = None
        self.end_state(wrap=False)
    def visit_acks(self, node):
        # type: (nodes.Element) -> None
        # Render the acknowledgments bullet list as one comma-joined line.
        bullet_list = cast(nodes.bullet_list, node[0])
        list_items = cast(Iterable[nodes.list_item], bullet_list)
        self.new_state(0)
        self.add_text(', '.join(n.astext() for n in list_items) + '.')
        self.end_state()
        raise nodes.SkipNode
    def visit_image(self, node):
        # type: (nodes.Element) -> None
        # NOTE(review): when 'alt' is present this emits BOTH
        # '[image: alt]' and '[image]' — confirm that is intended.
        if 'alt' in node.attributes:
            self.add_text(_('[image: %s]') % node['alt'])
        self.add_text(_('[image]'))
        raise nodes.SkipNode
    def visit_transition(self, node):
        # type: (nodes.Element) -> None
        indent = sum(self.stateindent)
        self.new_state(0)
        self.add_text('=' * (MAXWIDTH - indent))
        self.end_state()
        raise nodes.SkipNode
    def visit_bullet_list(self, node):
        # type: (nodes.Element) -> None
        # list_counter encoding: -1 = bullet list, -2 = definition list,
        # >= 0 = current enumerated-list counter.
        self.list_counter.append(-1)
    def depart_bullet_list(self, node):
        # type: (nodes.Element) -> None
        self.list_counter.pop()
    def visit_enumerated_list(self, node):
        # type: (nodes.Element) -> None
        self.list_counter.append(node.get('start', 1) - 1)
    def depart_enumerated_list(self, node):
        # type: (nodes.Element) -> None
        self.list_counter.pop()
    def visit_definition_list(self, node):
        # type: (nodes.Element) -> None
        self.list_counter.append(-2)
    def depart_definition_list(self, node):
        # type: (nodes.Element) -> None
        self.list_counter.pop()
    def visit_list_item(self, node):
        # type: (nodes.Element) -> None
        if self.list_counter[-1] == -1:
            # bullet list
            self.new_state(2)
        elif self.list_counter[-1] == -2:
            # definition list
            pass
        else:
            # enumerated list
            self.list_counter[-1] += 1
            self.new_state(len(str(self.list_counter[-1])) + 2)
    def depart_list_item(self, node):
        # type: (nodes.Element) -> None
        if self.list_counter[-1] == -1:
            self.end_state(first='* ')
        elif self.list_counter[-1] == -2:
            pass
        else:
            self.end_state(first='%s. ' % self.list_counter[-1])
    def visit_definition_list_item(self, node):
        # type: (nodes.Element) -> None
        self._classifier_count_in_li = len(node.traverse(nodes.classifier))
    def depart_definition_list_item(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_term(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_term(self, node):
        # type: (nodes.Element) -> None
        # If classifiers follow, the state is closed in depart_classifier.
        if not self._classifier_count_in_li:
            self.end_state(end=None)
    def visit_classifier(self, node):
        # type: (nodes.Element) -> None
        self.add_text(' : ')
    def depart_classifier(self, node):
        # type: (nodes.Element) -> None
        self._classifier_count_in_li -= 1
        if not self._classifier_count_in_li:
            self.end_state(end=None)
    def visit_definition(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
    def depart_definition(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_field_list(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_field_list(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_field(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_field(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_field_name(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_field_name(self, node):
        # type: (nodes.Element) -> None
        self.add_text(':')
        self.end_state(end=None)
    def visit_field_body(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
    def depart_field_body(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_centered(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_centered(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_hlist(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_hlist(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_hlistcol(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_hlistcol(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_admonition(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_admonition(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def _visit_admonition(self, node):
        # type: (nodes.Element) -> None
        # Shared handler for all labeled admonitions (note, warning, ...).
        self.new_state(2)
        if isinstance(node.children[0], nodes.Sequential):
            self.add_text(self.nl)
    def _depart_admonition(self, node):
        # type: (nodes.Element) -> None
        label = admonitionlabels[node.tagname]
        self.end_state(first=label + ': ')
    visit_attention = _visit_admonition
    depart_attention = _depart_admonition
    visit_caution = _visit_admonition
    depart_caution = _depart_admonition
    visit_danger = _visit_admonition
    depart_danger = _depart_admonition
    visit_error = _visit_admonition
    depart_error = _depart_admonition
    visit_hint = _visit_admonition
    depart_hint = _depart_admonition
    visit_important = _visit_admonition
    depart_important = _depart_admonition
    visit_note = _visit_admonition
    depart_note = _depart_admonition
    visit_tip = _visit_admonition
    depart_tip = _depart_admonition
    visit_warning = _visit_admonition
    depart_warning = _depart_admonition
    visit_seealso = _visit_admonition
    depart_seealso = _depart_admonition
    def visit_versionmodified(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_versionmodified(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_literal_block(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
    def depart_literal_block(self, node):
        # type: (nodes.Element) -> None
        self.end_state(wrap=False)
    def visit_doctest_block(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
    def depart_doctest_block(self, node):
        # type: (nodes.Element) -> None
        self.end_state(wrap=False)
    def visit_line_block(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
        self.lineblocklevel += 1
    def depart_line_block(self, node):
        # type: (nodes.Element) -> None
        self.lineblocklevel -= 1
        self.end_state(wrap=False, end=None)
        if not self.lineblocklevel:
            self.add_text('\n')
    def visit_line(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_line(self, node):
        # type: (nodes.Element) -> None
        self.add_text('\n')
    def visit_block_quote(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
    def depart_block_quote(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def visit_compact_paragraph(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_compact_paragraph(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_paragraph(self, node):
        # type: (nodes.Element) -> None
        # Admonition paragraphs (except seealso) share the admonition's state.
        if not isinstance(node.parent, nodes.Admonition) or \
           isinstance(node.parent, addnodes.seealso):
            self.new_state(0)
    def depart_paragraph(self, node):
        # type: (nodes.Element) -> None
        if not isinstance(node.parent, nodes.Admonition) or \
           isinstance(node.parent, addnodes.seealso):
            self.end_state()
    def visit_target(self, node):
        # type: (nodes.Element) -> None
        raise nodes.SkipNode
    def visit_index(self, node):
        # type: (nodes.Element) -> None
        raise nodes.SkipNode
    def visit_toctree(self, node):
        # type: (nodes.Element) -> None
        raise nodes.SkipNode
    def visit_pending_xref(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_pending_xref(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_reference(self, node):
        # type: (nodes.Element) -> None
        if self.add_secnumbers:
            numbers = node.get("secnumber")
            if numbers is not None:
                self.add_text('.'.join(map(str, numbers)) + self.secnumber_suffix)
    def depart_reference(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_number_reference(self, node):
        # type: (nodes.Element) -> None
        text = nodes.Text(node.get('title', '#'))
        self.visit_Text(text)
        raise nodes.SkipNode
    def visit_download_reference(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_download_reference(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_emphasis(self, node):
        # type: (nodes.Element) -> None
        self.add_text('*')
    def depart_emphasis(self, node):
        # type: (nodes.Element) -> None
        self.add_text('*')
    def visit_literal_emphasis(self, node):
        # type: (nodes.Element) -> None
        self.add_text('*')
    def depart_literal_emphasis(self, node):
        # type: (nodes.Element) -> None
        self.add_text('*')
    def visit_strong(self, node):
        # type: (nodes.Element) -> None
        self.add_text('**')
    def depart_strong(self, node):
        # type: (nodes.Element) -> None
        self.add_text('**')
    def visit_literal_strong(self, node):
        # type: (nodes.Element) -> None
        self.add_text('**')
    def depart_literal_strong(self, node):
        # type: (nodes.Element) -> None
        self.add_text('**')
    def visit_abbreviation(self, node):
        # type: (nodes.Element) -> None
        self.add_text('')
    def depart_abbreviation(self, node):
        # type: (nodes.Element) -> None
        if node.hasattr('explanation'):
            self.add_text(' (%s)' % node['explanation'])
    def visit_manpage(self, node):
        # type: (nodes.Element) -> None
        return self.visit_literal_emphasis(node)
    def depart_manpage(self, node):
        # type: (nodes.Element) -> None
        return self.depart_literal_emphasis(node)
    def visit_title_reference(self, node):
        # type: (nodes.Element) -> None
        self.add_text('*')
    def depart_title_reference(self, node):
        # type: (nodes.Element) -> None
        self.add_text('*')
    def visit_literal(self, node):
        # type: (nodes.Element) -> None
        self.add_text('"')
    def depart_literal(self, node):
        # type: (nodes.Element) -> None
        self.add_text('"')
    def visit_subscript(self, node):
        # type: (nodes.Element) -> None
        self.add_text('_')
    def depart_subscript(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_superscript(self, node):
        # type: (nodes.Element) -> None
        self.add_text('^')
    def depart_superscript(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_footnote_reference(self, node):
        # type: (nodes.Element) -> None
        self.add_text('[%s]' % node.astext())
        raise nodes.SkipNode
    def visit_citation_reference(self, node):
        # type: (nodes.Element) -> None
        self.add_text('[%s]' % node.astext())
        raise nodes.SkipNode
    def visit_Text(self, node):
        # type: (nodes.Text) -> None
        self.add_text(node.astext())
    def depart_Text(self, node):
        # type: (nodes.Text) -> None
        pass
    def visit_generated(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_generated(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_inline(self, node):
        # type: (nodes.Element) -> None
        if 'xref' in node['classes'] or 'term' in node['classes']:
            self.add_text('*')
    def depart_inline(self, node):
        # type: (nodes.Element) -> None
        if 'xref' in node['classes'] or 'term' in node['classes']:
            self.add_text('*')
    def visit_container(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_container(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_problematic(self, node):
        # type: (nodes.Element) -> None
        self.add_text('>>')
    def depart_problematic(self, node):
        # type: (nodes.Element) -> None
        self.add_text('<<')
    def visit_system_message(self, node):
        # type: (nodes.Element) -> None
        self.new_state(0)
        self.add_text('<SYSTEM MESSAGE: %s>' % node.astext())
        self.end_state()
        raise nodes.SkipNode
    def visit_comment(self, node):
        # type: (nodes.Element) -> None
        raise nodes.SkipNode
    def visit_meta(self, node):
        # type: (nodes.Element) -> None
        # only valid for HTML
        raise nodes.SkipNode
    def visit_raw(self, node):
        # type: (nodes.Element) -> None
        if 'text' in node.get('format', '').split():
            self.new_state(0)
            self.add_text(node.astext())
            self.end_state(wrap = False)
        raise nodes.SkipNode
    def visit_math(self, node):
        # type: (nodes.Element) -> None
        pass
    def depart_math(self, node):
        # type: (nodes.Element) -> None
        pass
    def visit_math_block(self, node):
        # type: (nodes.Element) -> None
        self.new_state()
    def depart_math_block(self, node):
        # type: (nodes.Element) -> None
        self.end_state()
    def unknown_visit(self, node):
        # type: (nodes.Node) -> None
        raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
    def _make_depart_admonition(name): # type: ignore
        # type: (str) -> Callable[[TextTranslator, nodes.Element], None]
        # Deprecated factory kept for backward compatibility only.
        warnings.warn('TextTranslator._make_depart_admonition() is deprecated.',
                      RemovedInSphinx30Warning)
        def depart_admonition(self, node):
            # type: (nodes.Element) -> None
            self.end_state(first=admonitionlabels[name] + ': ')
        return depart_admonition
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the ``challenges`` app tables."""
    # Must run after the initial migrations of the apps we FK into.
    depends_on = (
        ("apps", "0001_initial"),
        ("organizations", "0001_initial"),
    )
    def forwards(self, orm):
        """Apply the migration: create Challenge, Question, Entry and
        EntryAnswer tables plus their unique constraints."""
        # Adding model 'Challenge'
        db.create_table(u'challenges_challenge', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, unique=True, populate_from='name', overwrite=False)),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
            ('start_datetime', self.gf('django.db.models.fields.DateTimeField')()),
            ('end_datetime', self.gf('django.db.models.fields.DateTimeField')()),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('is_external', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('summary', self.gf('django.db.models.fields.TextField')()),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('event_dates', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('acknowledgments', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organizations.Organization'], null=True, blank=True)),
            ('hide_entries', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ))
        db.send_create_signal(u'challenges', ['Challenge'])
        # Adding model 'Question'
        db.create_table(u'challenges_question', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('challenge', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenges.Challenge'])),
            ('question', self.gf('django.db.models.fields.TextField')()),
            ('is_required', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('order', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ))
        db.send_create_signal(u'challenges', ['Question'])
        # Adding model 'Entry'
        db.create_table(u'challenges_entry', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('challenge', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenges.Challenge'])),
            ('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['apps.Application'])),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ))
        db.send_create_signal(u'challenges', ['Entry'])
        # Adding unique constraint on 'Entry', fields ['challenge', 'application']
        db.create_unique(u'challenges_entry', ['challenge_id', 'application_id'])
        # Adding model 'EntryAnswer'
        db.create_table(u'challenges_entryanswer', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('entry', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenges.Entry'])),
            ('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenges.Question'])),
            ('answer', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ))
        db.send_create_signal(u'challenges', ['EntryAnswer'])
        # Adding unique constraint on 'EntryAnswer', fields ['entry', 'question']
        db.create_unique(u'challenges_entryanswer', ['entry_id', 'question_id'])
def backwards(self, orm):
    """Reverse this migration: drop the unique constraints first, then drop the tables."""
    # Removing unique constraint on 'EntryAnswer', fields ['entry', 'question']
    db.delete_unique(u'challenges_entryanswer', ['entry_id', 'question_id'])
    # Removing unique constraint on 'Entry', fields ['challenge', 'application']
    db.delete_unique(u'challenges_entry', ['challenge_id', 'application_id'])
    # Deleting model 'Challenge'
    db.delete_table(u'challenges_challenge')
    # Deleting model 'Question'
    db.delete_table(u'challenges_question')
    # Deleting model 'Entry'
    db.delete_table(u'challenges_entry')
    # Deleting model 'EntryAnswer'
    db.delete_table(u'challenges_entryanswer')
# Frozen ORM snapshot used by South to build the `orm` object passed to
# forwards()/backwards(). Auto-generated — do not edit by hand.
models = {
    u'apps.application': {
        'Meta': {'ordering': "('-is_featured', 'created')", 'object_name': 'Application'},
        'acknowledgments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'assistance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Domain']", 'null': 'True', 'blank': 'True'}),
        'features': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Feature']", 'symmetrical': 'False', 'blank': 'True'}),
        'features_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
        'impact_statement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'membership_set'", 'symmetrical': 'False', 'through': u"orm['apps.ApplicationMembership']", 'to': u"orm['auth.User']"}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ownership_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
        'slug': ('us_ignite.common.fields.AutoUUIDField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
        'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
        'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
        'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'team_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'team_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
        'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
    },
    u'apps.applicationmembership': {
        'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApplicationMembership'},
        'application': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Application']"}),
        'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
    },
    u'apps.domain': {
        'Meta': {'object_name': 'Domain'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
    },
    u'apps.feature': {
        'Meta': {'object_name': 'Feature'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
    },
    u'auth.group': {
        'Meta': {'object_name': 'Group'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    u'auth.permission': {
        'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    u'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    u'challenges.challenge': {
        'Meta': {'ordering': "('-created',)", 'object_name': 'Challenge'},
        'acknowledgments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {}),
        'end_datetime': ('django.db.models.fields.DateTimeField', [], {}),
        'event_dates': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'hide_entries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
        'is_external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']", 'null': 'True', 'blank': 'True'}),
        'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
        'start_datetime': ('django.db.models.fields.DateTimeField', [], {}),
        'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
        'summary': ('django.db.models.fields.TextField', [], {}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
    },
    u'challenges.entry': {
        'Meta': {'unique_together': "(('challenge', 'application'),)", 'object_name': 'Entry'},
        'application': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Application']"}),
        'challenge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['challenges.Challenge']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'status': ('django.db.models.fields.IntegerField', [], {'default': '2'})
    },
    u'challenges.entryanswer': {
        'Meta': {'ordering': "('question__order',)", 'unique_together': "(('entry', 'question'),)", 'object_name': 'EntryAnswer'},
        'answer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['challenges.Entry']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['challenges.Question']"})
    },
    u'challenges.question': {
        'Meta': {'ordering': "('order',)", 'object_name': 'Question'},
        'challenge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['challenges.Challenge']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
        'question': ('django.db.models.fields.TextField', [], {})
    },
    u'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'organizations.organization': {
        'Meta': {'object_name': 'Organization'},
        'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
        'interest_ignite': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'interests': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Interest']", 'symmetrical': 'False', 'blank': 'True'}),
        'interests_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
        'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['organizations.OrganizationMember']", 'blank': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
        'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
        'resources_available': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
        'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
        'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
    },
    u'organizations.organizationmember': {
        'Meta': {'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationMember'},
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']"}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
    },
    u'profiles.interest': {
        'Meta': {'ordering': "('name',)", 'object_name': 'Interest'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
    }
}

# Only the 'challenges' app is frozen completely by this migration.
complete_apps = ['challenges']
| |
import PlayerClass
import ActionMethods
import Log
import Items
import copy
import random
class Sym:
    """Day-by-day survival simulation over a list of teams.

    Each day every player accumulates simulated hours (``simTime``) by
    performing group-decided actions until everyone reaches ``dayDuration``.
    All progress is written to a log file.
    """

    # logName - string
    def __init__(self,logName):
        self.listOfTeams = [] # Team - a list of players with some additional stuff.
        self.dayDuration = 12 # Amount of hours in a day
        self.hourlyCalorieCost = 50 # calorie cost per hour - no matter the action
        self.theLog = Log.LogObject(logName) # log where stuff is going to be written. For now only for testing purposes.

    # runs simulation for one day
    def runForADay(self):
        # Reset everyone's per-day clock.
        for t in self.listOfTeams:
            t.resetPlayersTime()
        # Dump the start-of-day status of every player to the log.
        stringToWrite = "---Status:\n"
        for team in self.listOfTeams:
            for p in team.playerList:
                stringToWrite+=p.getStringStatus(1)+ "\tinventory\n"+ p.getStringItemList(2)
        self.theLog.writeToLog(stringToWrite)
        allFinished = False
        while not allFinished:
            random.shuffle(self.listOfTeams) # this shuffle is so that one team doesn't always go first
            result = self.findEarlyiestPlayer()
            # To implement - teams make actions as a group. Sometimes only part of the team does the action.
            if result[0] is None:
                # No player is below dayDuration any more - the day is over.
                allFinished = self.areAllFinished()
                break
            # All teammates at the same point in time act together.
            playersPerformingAction = result[1].listOfPlayersWithGivenTime(result[0].simTime)
            resultOfDecision = self.makeGroupDecisionNoCathegories(playersPerformingAction)
            action = resultOfDecision[0]
            target = resultOfDecision[1]
            ideaOriginators = resultOfDecision[2]
            # Players who disagree (and did not originate the idea) rebel and drop out.
            resultOfRebelDetection = self.detectRebelsAndRemoveThemFromParticipants(playersPerformingAction,ideaOriginators)
            listOfRebels = resultOfRebelDetection[0]
            playersPerformingAction = resultOfRebelDetection[1]
            # now we have to implement bumps. For now takeCareOfBump doesn't work, since I need to implement relationships first
            bumpDidntFailAction = self.takeCareOfBump(playersPerformingAction,target)
            if bumpDidntFailAction:
                actionResult = action.applyAction(playersPerformingAction,target)
            else:
                actionResult = action.failAction(playersPerformingAction,target)
            # This listOfPerformers is only for log to look nice. Players that are target and performer are removed from performers lists by action methods anyway to be extra safe.
            if not target is None:
                listOfPerformers = [x for x in playersPerformingAction if x not in target]
            else:
                listOfPerformers = playersPerformingAction
            self.writeActionInformationToLog(action,actionResult,listOfPerformers,target,listOfRebels,ideaOriginators)
            # Charge time and calories to every participant, then update afflictions.
            for p in playersPerformingAction:
                p.removeCalories(self.hourlyCalorieCost*action.time)
                p.simTime+=action.time
                p.passTimeOnAfflictions(action.time)
                afflictionChanges = p.checkForAfflictionsToPutAndRemove()
                #print(afflictionChanges)
                self.writeAfflictionInformationToLog(p,afflictionChanges)
                stringToWrite = p.getStringStatus(1) + "\tinventory\n"+ p.getStringItemList(2) + "\ttime passed so far:{}\n".format(p.simTime)
                self.theLog.writeToLog(stringToWrite) # Writes player status to the log
                isDead = p.isDead()
                if isDead[0]:
                    # Dead players are removed from their team; isDead[1] is the cause message.
                    result[1].removePlayer(p.id)
                    stringToWrite=p.Name+" "+isDead[1]+'\n'
                    self.theLog.writeToLog(stringToWrite)
            allFinished = self.areAllFinished()

    # finds a player that has the least of time passed
    # returns None if everyone is above dayDuration time
    def findEarlyiestPlayer(self):
        playerToReturn = None
        teamToReturn = None
        # Players already at/over dayDuration are never selected.
        playerTime = self.dayDuration
        for team in self.listOfTeams:
            for player in team.playerList:
                if player.simTime<playerTime:
                    playerTime = player.simTime
                    playerToReturn = player
                    teamToReturn = team
        return playerToReturn,teamToReturn # player with the least time passed - and his team

    # makes decision of one player
    # player - PlayerClass.Player
    # group - list of players, where decision making takes place. Player should be inside it.
    def makeDecisionNoCathegories(self,player,group):
        # NOTE: mutates `group` in place - the deciding player is moved to the front.
        group.remove(player)
        group.insert(0,player)
        actionToReturn = None # action to perform
        actionPlayers = None # performers of the action
        actionTarget = None # target of the action
        rebelReason = player.isRebel()
        if rebelReason == PlayerClass.Player.rebelReasonDict["hunger"]:
            # Hungry player: eat own food, else ask the group, else gather.
            result = ActionMethods.EatFoodCheck([player])
            if result:
                actionToReturn = PlayerClass.getActionByName("eat food")
                actionPlayers = [player]
            elif ActionMethods.AskForFoodCheck(group,[player]):
                actionToReturn = PlayerClass.getActionByName("ask for food")
                actionPlayers = group
                actionTarget = [player]
            else:
                actionToReturn = PlayerClass.getActionByName("gather food")
                actionPlayers = group
        else:
            # random action
            actionToReturn = PlayerClass.getPurelyRandomAction()
            actionPlayers = group
            if actionToReturn.name == "ask for food":
                actionTarget = [player]
        return actionToReturn, actionPlayers ,actionTarget

    # makes decision of group of players.
    # listOfPlayers - list with Player as elements
    def makeGroupDecisionNoCathegories(self,listOfPlayers):
        # Each element: [action, accumulated persuasion, target, originators].
        actionsAndPersuasionList = []
        action = None
        actionTarget = None
        for p in listOfPlayers:
            result = self.makeDecisionNoCathegories(p,listOfPlayers) # [0] - action, [1] - players performing action,[2] - players to perform action on
            action = result[0]
            actionPlayers = result[1]
            actionTarget = result[2]
            # Persuasion weight of this player's vote.
            persuasion = p.Charisma + 0.4 * p.Intelligence + 0.1*random.randint(0,20)
            appendIsNeeded = True
            for AaP in actionsAndPersuasionList:
                if AaP[0].name == action.name:
                    # Same action already proposed - pool the persuasion.
                    appendIsNeeded = False
                    AaP[1]+=persuasion
                    AaP[3].append(p)
                    break
            if appendIsNeeded:
                actionsAndPersuasionList.append([action,persuasion,actionTarget,[p]])
        # find action with the biggest persuasion
        action = None
        actionTarget = None
        persuasion = -1
        for AaP in actionsAndPersuasionList:
            if AaP[1]> persuasion:
                persuasion = AaP[1]
                action = AaP[0]
                actionTarget = AaP[2]
                originators =AaP[3]
        # NOTE(review): `originators` is unbound if listOfPlayers is empty - callers
        # appear to always pass a non-empty group; confirm before reuse.
        return action,actionTarget,originators

    # detects if all players in all teams have finished actions for today
    def areAllFinished(self):
        for team in self.listOfTeams:
            if not team.checkAllPlayersFinished(self.dayDuration):
                return False
        return True

    # Writes information about performed action into the log
    def writeActionInformationToLog(self,action,actionResult,playerList,targetList = None,listOfRebels = None,originators = None ):
        if actionResult:
            textToPrint = "success"
        else:
            textToPrint = "failure"
        # Comma-separated performer names; trailing ', ' is trimmed with [:-2].
        playerNames = ""
        for p in playerList:
            playerNames+=p.Name+', '
        playerNames = playerNames[:-2]
        targetNames =""
        if (not targetList is None):
            if targetList:
                if not targetList[0] is None:
                    targetNames = " targets: "
                    for t in targetList:
                        targetNames+=t.Name+', '
                    targetNames = targetNames[:-2]
        rebelString = ""
        if (not listOfRebels is None):
            if listOfRebels:
                rebelString = "\n== People who rebelled: "
                for r in listOfRebels:
                    # Reverse lookup of the rebel-reason name from its value.
                    # The dictionary is not meant to be used backwards, hence this monstrosity.
                    tmpString = list(PlayerClass.Player.rebelReasonDict.keys())[list(PlayerClass.Player.rebelReasonDict.values()).index(r["cause"])]
                    rebelString+="{0} for {1}, ".format(r["rebel"].Name,tmpString)
                rebelString = rebelString[:-2]
        originatorsText = ""
        if (not originators is None):
            if originators:
                originatorsText = " originators:"
                for o in originators:
                    originatorsText+=o.Name+', '
                originatorsText = originatorsText[:-2]
        stringToWriteToLog = "\n==== Action Performed: {0}, result:{1}\n== Action performed by:{2}".format(action.name,textToPrint,playerNames)
        stringToWriteToLog+=targetNames+originatorsText+rebelString+'\n'
        self.theLog.writeToLog(stringToWriteToLog)

    # Writes information about gained and lost afflictions
    # afflictionChanges - pair: ([gained afflictions], [lost afflictions])
    def writeAfflictionInformationToLog(self,player,afflictionChanges):
        afflictionStringToWrite = ""
        if afflictionChanges[0]: # added afflictions
            if len(afflictionChanges[0]) == 1:
                afflictionStringToWrite+="\t{} gained affliction: ".format(player.Name)
            else:
                afflictionStringToWrite+="\t{} gained afflictions: ".format(player.Name)
            for a in afflictionChanges[0]:
                afflictionStringToWrite+="{}, ".format(a.name)
            afflictionStringToWrite = afflictionStringToWrite[:-2]
            afflictionStringToWrite+='\n'
        if afflictionChanges[1]: # removed afflictions
            if len(afflictionChanges[1]) == 1:
                afflictionStringToWrite+="\t{} lost affliction: ".format(player.Name)
            else:
                afflictionStringToWrite+="\t{} lost afflictions: ".format(player.Name)
            for a in afflictionChanges[1]:
                afflictionStringToWrite+="{}, ".format(a.name)
            afflictionStringToWrite = afflictionStringToWrite[:-2]
            afflictionStringToWrite+='\n'
        # Only write when something actually changed.
        if not afflictionStringToWrite == "":
            self.theLog.writeToLog(afflictionStringToWrite)

    # gives all players a unique id - because things go bad if they are not unique.
    def giveAllPlayersUniqueIDs(self):
        listOfUsedIDs = []
        id = 0
        for team in self.listOfTeams:
            for player in team.playerList:
                # Re-roll until an unused id is found (first player keeps id 0).
                while id in listOfUsedIDs:
                    id = random.randint(1,100000)
                player.id = id
                listOfUsedIDs.append(id)

    # of course, targets have no say in if they want to be a part of an action or not.
    # returns a list of rebels
    def detectRebelsAndRemoveThemFromParticipants(self, listOfPlayers, ideaOriginators):
        listOfRebels = []
        listOfParticipants = []
        rebelReason = None
        for player in listOfPlayers:
            rebelReason = player.isRebel()
            # Idea originators never rebel against their own idea.
            if rebelReason != PlayerClass.Player.rebelReasonDict["no rebel"] and not player in ideaOriginators:
                listOfRebels.append({"rebel":player,"cause":copy.deepcopy(rebelReason)})
            else:
                listOfParticipants.append(player)
        return listOfRebels,listOfParticipants

    # Well. This method takes care of bumps
    # If there was no bump, or bump didn't interrupt the current action, returns True.
    # Stub: relationships are not implemented yet, so a bump never fails an action.
    def takeCareOfBump(self, listOfPlayers,target):
        listOfBumpablePlayers = []
        return True
class Team:
    """A group of players that act together in the simulation."""

    # listOfPlayers - list with players as elements
    def __init__(self, listOfPlayers):
        self.playerList = listOfPlayers
        # Cached result of the last checkAllPlayersFinished() call.
        self.allPlayersFinished = False

    # returns a player from list. None, if player is not in team.
    # name - either string (player Name) or int (player id)
    def getPlayer(self, name):
        """Return the matching player or None; raise for unsupported types."""
        if isinstance(name, int):
            for p in self.playerList:
                if p.id == name:
                    return p
        elif isinstance(name, str):
            for p in self.playerList:
                if p.Name == name:
                    return p
        else:
            raise Exception("Wrong parameter type")
        return None

    # checks if all players have finished the day.
    # dayTimeLimit - int
    def checkAllPlayersFinished(self, dayTimeLimit):
        """Return True when every player's simTime has reached dayTimeLimit.

        Bug fix: the original version returned None (always falsy to callers)
        and unconditionally ended by setting the flag to True. It now returns
        the computed boolean and keeps self.allPlayersFinished consistent.
        """
        for p in self.playerList:
            if p.simTime < dayTimeLimit:
                self.allPlayersFinished = False
                return False
        self.allPlayersFinished = True
        return True

    # sets all players time in the team to zero
    def resetPlayersTime(self):
        for p in self.playerList:
            p.simTime = 0

    # checks, if all players have the same time.
    # players with different times can't perform an action together,
    # because they are at different points in time
    def areAllPlayersTheSameTime(self):
        # Robustness: an empty team is trivially synchronized (the original
        # raised IndexError on playerList[0]).
        if not self.playerList:
            return True
        time = self.playerList[0].simTime
        for p in self.playerList:
            if not p.simTime == time:
                return False
        return True

    # Returns list of players with the same time
    def listOfPlayersWithGivenTime(self, time):
        listToReturn = []
        for p in self.playerList:
            if p.simTime == time:
                listToReturn.append(p)
        return listToReturn

    # removes player from team. Returns true if player was removed.
    # name - str (player Name) or int (player id)
    def removePlayer(self, name):
        for p in self.playerList:
            if p.Name == name or p.id == name:
                self.playerList.remove(p)
                return True
        return False
# --- test ---
def _build_player(base_wisdom, name):
    """Create a Player with the given wisdom and name, stats initialized."""
    player = PlayerClass.Player(0)
    player.BaseWisdom = base_wisdom
    player.Name = name
    player.calculateStartStatistics()
    return player

pl = _build_player(5, "Jan Kowalski")
pl2 = _build_player(0, "Adam Nowak")
team = Team([pl, pl2])
theLog = Log.LogObject('log1')
s = Sym('log1')
s.listOfTeams.append(team)
s.giveAllPlayersUniqueIDs()
# Simulate 99 days, marking each one in the log.
for day in range(1, 100):
    theLog.writeToLog("\n~~ DAY {}~~\n\n".format(day))
    s.runForADay()
| |
#!/usr/bin/env python
__author__ = 'nash.xiejun'
import sys
sys.path.append('/usr/bin/install_tool')
import os
import traceback
from oslo.config import cfg
from constants import FileName
import socket
import shutil
import errno
import log
import utils
from utils import CommonCMD, SSHConnection
from services import RefServices, RefCPSService, RefCPSServiceExtent, RefFsUtils, RefFsSystemUtils, CPSServiceBusiness
from constants import CfgFilePath, SysUserInfo, SysPath, ScriptFilePath
from dispatch import DispatchPatchTool
# Both logger handles currently alias the shared `log` module.
module_logger = log
print_logger = log

CONF = cfg.CONF

# Options registered on the global (top-level) configuration group.
global_opts = [
    cfg.StrOpt('file_hosts',
               default='/etc/hosts'),
    cfg.StrOpt('file_hostname',
               default='/etc/hostname'),
    # Path of this tool's own config file, resolved next to this source file.
    cfg.StrOpt('self_config_file', default= os.path.sep.join([os.path.split(os.path.realpath(__file__))[0],
                                                              FileName.PATCHES_TOOL_CONFIG_FILE])),
]
CONF.register_opts(global_opts)

# [DEFAULT] section: proxy/region topology and route tables for the cascaded nodes.
default_group = cfg.OptGroup(name='DEFAULT',
                             title='default config')
default_opts = [
    cfg.DictOpt('proxy_match_host', default=None),
    cfg.DictOpt('proxy_match_region', default=None),
    cfg.StrOpt('current_node', default='proxy001'),
    cfg.DictOpt('cascaded_add_route', default=None),
    cfg.DictOpt('cascaded_add_table_external_api', default=None),
    cfg.StrOpt('cascading_region', default='cascading.hybrid.huawei.com'),
    cfg.StrOpt('openstack_bak_path', default='/home/openstack_bak')
]
CONF.register_group(default_group)
CONF.register_opts(default_opts, default_group)

# [ENV] section: OpenStack client environment variables for the cascading layer.
env_group = cfg.OptGroup(name='ENV',
                         title='environment for openstack')
env_opts = [
    cfg.StrOpt('OS_AUTH_URL', default="https://identity.cascading.hybrid.huawei.com:443/identity/v2.0"),
    cfg.StrOpt('OS_USERNAME', default="cloud_admin"),
    cfg.StrOpt('OS_TENANT_NAME', default="admin"),
    cfg.StrOpt('NOVA_ENDPOINT_TYPE', default="publicURL"),
    cfg.StrOpt('CINDER_ENDPOINT_TYPE', default="publicURL"),
    cfg.StrOpt('OS_ENDPOINT_TYPE', default="publicURL"),
    cfg.StrOpt('OS_VOLUME_API_VERSION', default="2"),
    cfg.StrOpt('OS_PASSWORD', default=""),
]
CONF.register_group(env_group)
CONF.register_opts(env_opts, env_group)

# Parse the patches-tool config file located next to the tool itself.
absolute_config_file = os.path.join(utils.get_patches_tool_path(), FileName.PATCHES_TOOL_CONFIG_FILE)
CONF(['--config-file=%s' % absolute_config_file])
class ConfigCascading(object):
def __init__(self):
    """Load proxy/region mappings from config and resolve the local cascading region."""
    self.proxy_match_host = CONF.DEFAULT.proxy_match_host
    self.proxy_match_region = CONF.DEFAULT.proxy_match_region
    # NOTE(review): assumes proxy_match_host is configured (non-None); .keys() fails otherwise.
    self.proxies = self.proxy_match_host.keys()
    self.current_proxy = CONF.DEFAULT.current_node
    # e.g. cascading.hybrid.huawei.com
    self.cascading_region = RefCPSService.get_local_domain()
    log.info('cascading region is: %s' % self.cascading_region)
    local_dc, local_az = RefFsUtils.get_local_dc_az()
    # e.g. cascading.hybrid (os region name = '<az>.<dc>')
    self.cascading_os_region_name = '.'.join([local_az, local_dc])
def check_service_status(self):
    """Report the service-template status of every configured proxy node."""
    print('****Start to check service status...')
    business = CPSServiceBusiness()
    for proxy_node in self.proxies:
        business.check_all_service_template_status(proxy_node)
    print('****End to check service status.')
def restart_services(self):
    """Restart services on all proxies: full stop phase first, then start phase."""
    print('****Start to restart services...')
    business = CPSServiceBusiness()
    # Two-phase order preserved: everything is stopped before anything restarts.
    for proxy_node in self.proxies:
        business.stop_all(proxy_node)
    for proxy_node in self.proxies:
        business.start_all(proxy_node)
    print('****Finish to restart services.')
def add_role_for_proxies(self):
    """Assign the nova/neutron/cinder proxy roles to each proxy's mapped host."""
    for proxy_number in self.proxies:
        print('**** Start to add role for Proxy:<<%s>> ****' % proxy_number)
        log.info('**** Start to add role for Proxy:<<%s>> ****' % proxy_number)
        # Role names derive from the proxy number, e.g. 'compute-proxy001'.
        role_nova_proxy = self._get_nova_role_name(proxy_number)
        role_neutron_proxy = self._get_neutron_role_name(proxy_number)
        role_cinder_proxy = self._get_cinder_role_name(proxy_number)
        # Host this proxy's roles should land on, from the configured mapping.
        host_proxy_in = self.proxy_match_host[proxy_number]
        self._add_proxy_role(host_proxy_in, role_nova_proxy)
        # self._check_service_proxy_status('nova', self._get_nova_template_name(proxy_number), 'fault')
        self._add_proxy_role(host_proxy_in, role_neutron_proxy)
        # self._check_service_proxy_status('neutron', self._get_neutron_l2_template_name(proxy_number), 'fault')
        self._add_proxy_role(host_proxy_in, role_cinder_proxy)
        # self._check_service_proxy_status('cinder', self._get_cinder_template_name(proxy_number), 'fault')
        log.info('**** End to add role for Proxy:<<%s>> ****' % proxy_number)
        print('**** End to add role for Proxy:<<%s>> ****' % proxy_number)
def _add_proxy_role(self, host, role_name):
    """Assign ``role_name`` to ``host`` via CPS and commit the change.

    Commands used to add role for host:
        cps role-host-add --host ** nova-proxy001
        cps commit
    Commands used to check if add successful:
        cps template-instance-list --service nova nova-proxy001
    If proxy info is returned, the add succeeded, no matter whether the
    proxy's status is still 'fault'.

    :param host: host id/name the role is assigned to.
    :param role_name: CPS role name, e.g. 'compute-proxy001'.
    :return: None
    """
    log.info('Start to add proxy role in host: %s, for role: %s' % (host, role_name))
    # The return value of role_host_add is not used (the original bound it to
    # an unused local); cps_commit is what applies the pending change.
    RefCPSService.role_host_add(role_name, [host])
    RefCPSService.cps_commit()
    log.info('Finish to add proxy role in host: %s, for role: %s' % (host, role_name))
def _get_nova_role_name(self, proxy_number):
service_nova = 'compute'
return '-'.join([service_nova, proxy_number])
def _get_neutron_role_name(self, proxy_number):
return '-'.join(['network', proxy_number])
def _get_cinder_role_name(self, proxy_number):
return '-'.join(['blockstorage', proxy_number])
def _get_nova_template_name(self, proxy_name):
return '-'.join(['nova', proxy_name])
def _get_neutron_l2_template_name(self, proxy_name):
return '-'.join(['neutron', 'l2', proxy_name])
def _get_neutron_l3_template_name(self, proxy_name):
return '-'.join(['neutron', 'l3', proxy_name])
def _get_cinder_template_name(self, proxy_name):
return '-'.join(['cinder', proxy_name])
def config_nova_scheduler(self):
    """Restrict nova-scheduler to the AvailabilityZoneFilter and commit the change."""
    self._update_template_params_for_proxy(
        'nova',
        'nova-scheduler',
        {'scheduler_default_filters': 'AvailabilityZoneFilter'})
    self._commit_config()
def config_neutron_server(self):
    """Enable LBaaS VIP port creation on neutron-server and commit the change."""
    self._update_template_params_for_proxy(
        'neutron',
        'neutron-server',
        {'lbaas_vip_create_port': 'True'})
    self._commit_config()
def config_proxy_to_connect_with_cascaded(self):
    """Wire nova, neutron (l2/l3) and cinder proxies on every proxy node to their cascaded OpenStack."""
    for proxy_node in self.proxies:
        log.info('Start to config cascading connection for proxy: %s' % proxy_node)
        print('Start to config cascading connection for proxy: %s' % proxy_node)
        self._config_service_for_nova_proxy(proxy_node)
        self._config_for_neutron_l2_proxy(proxy_node)
        self._config_for_neutron_l3_proxy(proxy_node)
        self._config_cinder_proxy(proxy_node)
        print('Finish to config cascading connection for proxy: %s' % proxy_node)
        log.info('Finish to config cascading connection for proxy: %s' % proxy_node)
def config_big_l2_layer_in_proxy_node(self):
    """Enable the big L2 layer on the current proxy, then push the neutron l2-proxy JSON to all hosts."""
    self._config_big_l2_layer_in_proxy(self.current_proxy)
    host_list = RefCPSService.host_list()
    # Guard clause: nothing to replace when the host list is unavailable.
    if host_list is None:
        return
    self._replace_neutron_l2_proxy_json(host_list)
    def config_big_l2_layer_in_cascaded_node(self):
        # Switch the cascaded neutron-server drivers, then bounce the OVS
        # agent so the new mechanism drivers take effect.
        self._config_big_l2_layer_in_cascaded_node()
        self._restart_neutron_openvswitch_agent()
    def create_aggregate_in_cascading_node(self):
        """Placeholder: create the AZ aggregate on the cascading node (not implemented).

        Intended manual equivalent:
            nova aggregate-create az31.singapore--aws az31.singapore--aws
            nova aggregate-add-host az31.singapore--aws az31.singapore--aws
        Check status of proxy in az31:
            nova service-list | grep az31
        :return: None
        """
        pass
def create_aggregate_in_cascaded_node(self):
"""
nova aggregate-create az31.singapore--aws az31.singapore--aws
nova host-list
nova aggregate-add-host az31.singapore--aws 42114FD9-D446-9248-3A05-23CF474E3C68
:return:
"""
ref_service = RefServices()
host_id = socket.gethostname()
region = RefCPSService.get_local_domain()
os_region_name = '.'.join([RefFsSystemUtils.get_az_by_domain(region), RefFsSystemUtils.get_dc_by_domain(region)])
if not ref_service.nova_aggregate_exist(os_region_name, os_region_name):
create_result = ref_service.nova_aggregate_create(os_region_name, os_region_name)
if create_result is not None:
ref_service.nova_aggregate_add_host(create_result, host_id)
print('Success to create region<%s> for host<%s>' % (os_region_name, host_id))
def create_route_table_in_cascaded_node(self):
"""
ip route add 172.29.0.0/24 via 162.3.120.247
ip route add 172.29.1.0/24 via 172.28.48.1
ip route add table external_api 172.29.0.0/24 via 162.3.120.247
:return:
"""
cascaded_add_route = CONF.DEFAULT.cascaded_add_route
cascaded_add_router_table_external_api = CONF.DEFAULT.cascaded_add_table_external_api
for net, ip in cascaded_add_route.items():
CommonCMD.create_route(net, ip)
for net, ip in cascaded_add_router_table_external_api.items():
table = 'external_api'
CommonCMD.create_route_for_table(table, net, ip)
def _config_big_l2_layer_in_proxy(self, proxy_name):
print('Start to config big l2 layer.')
self._update_neutron_machanism_drivers_for_cascading()
for proxy in self.proxies:
self._enable_for_l2_remote_port(proxy)
self._commit_config()
print('End to config big l2 layer.')
def _update_neutron_machanism_drivers_for_cascading(self):
updated_params = {
'mechanism_drivers': 'openvswitch,l2populationcascading,basecascading,evs,sriovnicswitch,netmapnicswitch'
}
service = 'neutron'
template = 'neutron-server'
self._update_template_params_for_proxy(service, template,updated_params)
def _enable_for_l2_remote_port(self, proxy_name):
service='neutron'
# e.g. 'neutron-l2-proxy003'
template = '-'.join(['neutron-l2', proxy_name])
updated_params = {
'remote_port_enabled': 'True'
}
self._update_template_params_for_proxy(service, template,updated_params)
def _config_big_l2_layer_in_cascaded_node(self):
updated_params = {
'mechanism_drivers': 'openvswitch,l2populationcascaded,evs,sriovnicswitch,netmapnicswitch'
}
service = 'neutron'
template = 'neutron-server'
self._update_template_params_for_proxy(service, template,updated_params)
self._commit_config()
def _restart_neutron_openvswitch_agent(self):
"""
cps host-template-instance-operate --action STOP --service neutron neutron-openvswitch-agent
cps host-template-instance-operate --action START --service neutron neutron-openvswitch-agent
:return:
"""
self._stop_neutron_openvswitch_agent()
self._start_neutron_openvswitch_agent()
def _stop_neutron_openvswitch_agent(self):
service = 'neutron'
template = 'neutron-openvswitch-agent'
action_stop = 'STOP'
RefCPSServiceExtent.host_template_instance_operate(service, template, action_stop)
def _start_neutron_openvswitch_agent(self):
service = 'neutron'
template = 'neutron-openvswitch-agent'
action_stop = 'START'
RefCPSServiceExtent.host_template_instance_operate(service, template, action_stop)
def _replace_neutron_l2_proxy_json(self, host_info):
"""
TODO: to get host ip of proxies, and scp config file of json to these proxies.
:return:
"""
log.info('Start to replace neutron l2 proxy json in proxy nodes..')
print('Start to replace neutron l2 proxy json..')
for proxy in self.proxies:
neutron_network_role = self._get_neutron_role_name(proxy)
for host in host_info['hosts']:
roles_list = host['roles']
local_path_of_neutron_l2_proxy = os.path.join(utils.get_patches_tool_path(), CfgFilePath.NEUTRON_L2_PROXY_PATH_TEMPLATE)
if neutron_network_role in roles_list:
proxy_host_ip = host['manageip']
log.info('Start remote copy neutron l2 proxy json to host: %s' % proxy_host_ip)
try:
utils.remote_open_root_permit_for_host(proxy_host_ip)
ssh = SSHConnection(proxy_host_ip, SysUserInfo.ROOT , SysUserInfo.ROOT_PWD)
ssh.put(local_path_of_neutron_l2_proxy, CfgFilePath.NEUTRON_L2_PROXY_PATH)
ssh.close()
except Exception, e:
log.error('Exception when remote copy neutron l2 proxy json to host: %s' % proxy_host_ip)
log.error('Exception: %s' % traceback.format_exc())
log.info('Finish remote copy neutron l2 proxy json to host: %s' % proxy_host_ip)
print('Finish to replace neutron l2 proxy json..')
log.info('Finish to replace neutron l2 proxy json in proxy nodes..')
def _get_proxy_region_and_host_region_name(self, proxy_matched_region):
return '.'.join([RefFsSystemUtils.get_az_by_domain(proxy_matched_region),
RefFsSystemUtils.get_dc_by_domain(proxy_matched_region)])
def _config_service_for_nova_proxy(self, proxy_name):
proxy_matched_region = self.proxy_match_region.get(proxy_name)
proxy_matched_host_region_name = self._get_proxy_region_and_host_region_name(proxy_matched_region)
updated_params = {'cascaded_cinder_url': 'https://volume.%s:443/v2' % proxy_matched_region,
'cascaded_neutron_url': 'https://network.%s:443' % proxy_matched_region,
'cascaded_nova_url': 'https://compute.%s:443/v2' % proxy_matched_region,
'cascaded_glance_url': 'https://image.%s' % self.cascading_region,
'glance_host': 'https://image.%s' % self.cascading_region,
'cascading_nova_url': 'https://compute.%s:443/v2' % self.cascading_region,
'cinder_endpoint_template': "".join(['https://volume.%s:443'% self.cascading_region, '/v2/%(project_id)s']),
'neutron_admin_auth_url': 'https://identity.%s:443/identity/v2.0' % self.cascading_region,
'keystone_auth_url':'https://identity.%s:443/identity/v2.0' % self.cascading_region,
'os_region_name':self.cascading_os_region_name,
'host': proxy_matched_host_region_name,
'proxy_region_name': proxy_matched_host_region_name,
'default_availability_zone': proxy_matched_host_region_name,
'default_schedule_zone': proxy_matched_host_region_name
}
service = 'nova'
template = '-'.join([service, proxy_name])
self._update_template_params_for_proxy('nova', template, updated_params)
self._commit_config()
# self._check_service_proxy_status(service, template, 'active')
def _update_template_params_for_proxy(self, service, template_name, dict_params):
result = RefCPSService.update_template_params(service, template_name, dict_params)
return result
    def _commit_config(self):
        # Commit all pending CPS template changes in one transaction.
        RefCPSService.cps_commit()
def _config_for_neutron_l2_proxy(self, proxy_name):
neutron_proxy_type = 'l2'
self._config_for_neutron_proxy(proxy_name, neutron_proxy_type)
def _config_for_neutron_l3_proxy(self, proxy_name):
neutron_proxy_type = 'l3'
self._config_for_neutron_proxy(proxy_name, neutron_proxy_type)
    def _config_for_neutron_proxy(self, proxy_name, neutron_proxy_type):
        """
        Point one neutron proxy (l2 or l3) at its cascaded region and commit.

        :param proxy_name: str, 'proxy001', 'proxy002', ...
        :param neutron_proxy_type: str, 'l2' or 'l3'
        :return: None
        """
        proxy_matched_region = self.proxy_match_region.get(proxy_name)
        proxy_matched_host_region_name = self._get_proxy_region_and_host_region_name(proxy_matched_region)
        updated_params = {
            'host':proxy_matched_host_region_name,
            'neutron_region_name': proxy_matched_host_region_name,
            'region_name': self.cascading_os_region_name,
            'neutron_admin_auth_url': 'https://identity.%s:443/identity-admin/v2.0' % self.cascading_region
        }
        service = 'neutron'
        # e.g. 'neutron-l2-proxy001'
        template = '-'.join([service, neutron_proxy_type, proxy_name])
        self._update_template_params_for_proxy(service, template, updated_params)
        self._commit_config()
        # self._check_service_proxy_status(service, template, 'active')
def _config_cinder_proxy(self, proxy_name):
try:
service = 'cinder'
template = '-'.join([service, proxy_name])
proxy_matched_region = self.proxy_match_region.get(proxy_name)
proxy_matched_host_region_name = self._get_proxy_region_and_host_region_name(proxy_matched_region)
cinder_tenant_id = RefServices().get_tenant_id_for_admin()
log.info('cinder_tenant_id: %s' % cinder_tenant_id)
updated_params = {'cascaded_cinder_url': ''.join(['https://volume.%s:443'%proxy_matched_region,'/v2/%(project_id)s']),
'cascaded_neutron_url': 'https://network.%s:443' % proxy_matched_region,
'cascaded_region_name': proxy_matched_host_region_name,
'cinder_tenant_id': cinder_tenant_id,
'host': proxy_matched_host_region_name,
'keystone_auth_url': 'https://identity.%s:443/identity/v2.0' % self.cascading_region,
'storage_availability_zone': proxy_matched_host_region_name
}
self._update_template_params_for_proxy(service, template, updated_params)
self._commit_config()
#self._check_service_proxy_status(service, template, 'active')
template_cinder_api = 'cinder-api'
template_cinder_scheduler = 'cinder-scheduler'
template_cinder_cinder_volume = 'cinder-scheduler'
#self._check_service_proxy_status(service, template_cinder_api, 'active')
#self._check_service_proxy_status(service, template_cinder_scheduler, 'active')
#self._check_service_proxy_status(service, template_cinder_cinder_volume, 'active')
except:
print 'Exception when cinder proxy config. e: %s' % traceback.format_exc()
log.error('e: %s' % traceback.format_exc())
def _check_service_proxy_status(self, service, template, aim_status):
template_instance_info = RefCPSServiceExtent.list_template_instance(service, template)
print template_instance_info
if template_instance_info is None:
print('Template instance info of Service<%s> Template<%s> is None.' % (service, template))
return False
status = template_instance_info.get('instances')[0].get('hastatus')
if status == aim_status:
print_logger.info('SUCCESS to update template for service<%s>, template<%s>' % (service, template))
return True
else:
print_logger.error('FAILED to update template for service<%s>, template<%s>' % (service, template))
return False
def add_role_for_cascading_node(self):
start_info = 'Start to add role for cascading node. Include: nova-api, nova-scheduler, neutron-server, loadbalancer'
print(start_info)
log.info(start_info)
host_name_of_cascading_node = socket.gethostname()
RefCPSService.role_host_add('nova-api', [host_name_of_cascading_node])
RefCPSService.role_host_add('nova-scheduler', [host_name_of_cascading_node])
RefCPSService.role_host_add('neutron-server', [host_name_of_cascading_node])
RefCPSService.role_host_add('loadbalancer', [host_name_of_cascading_node])
RefCPSService.cps_commit()
finish_info = 'Finish to add role for cascading node. Include: nova-api, nova-scheduler, neutron-server, loadbalancer'
print(finish_info)
log.info(finish_info)
    def config_cascading_nodes(self):
        """Full cascading-side setup: roles, scheduler/server params, proxies, big L2 layer."""
        # Order matters: roles must exist before their templates are
        # configured, and proxies must be wired to their cascaded regions
        # before the big L2 layer is enabled.
        self.add_role_for_cascading_node()
        self.config_nova_scheduler()
        self.config_neutron_server()
        self.add_role_for_proxies()
        self.config_proxy_to_connect_with_cascaded()
        self.config_big_l2_layer_in_proxy_node()
def config_cascaded_nodes(self):
print('****Start to config big l2 layer...')
config_cascading.config_big_l2_layer_in_cascaded_node()
print('****End to config big l2 layer...')
print('****Start to create aggregate...')
config_cascading.create_aggregate_in_cascaded_node()
print('****End to create aggregate...')
print('****Start to create route table...')
config_cascading.create_route_table_in_cascaded_node()
print('****End to create route table...')
class BackupRecoverFS(object):
    """Backup and restore the FS OpenStack service code (nova/neutron/cinder).

    Fixes the py2-only ``except Exception, e`` syntax (→ ``except Exception:``,
    valid on Python 2.6+ and 3).
    """

    def __init__(self):
        self._init_paths_need_to_backup()
        # Destination directory that holds the code backups.
        self.path_fs_code_backup = SysPath.PATH_FS_CODE_BACKUP

    def _init_paths_need_to_backup(self):
        """Collect the site-packages paths of the services to back up."""
        self.openstack_installed_path = utils.get_openstack_installed_path()
        self.paths_need_to_backup = [
            os.path.join(self.openstack_installed_path, service)
            for service in ('nova', 'neutron', 'cinder')
        ]
        log.info('paths_need_to_backup: %s' % str(self.paths_need_to_backup))

    def back_fs_code(self):
        """Copy each service tree into the backup dir, replacing older backups."""
        log.info('Start to backup fs code.')
        if os.path.isdir(self.path_fs_code_backup):
            shutil.rmtree(self.path_fs_code_backup)
        for source_path in self.paths_need_to_backup:
            try:
                dist_path = os.path.join(self.path_fs_code_backup, os.path.basename(source_path))
                if os.path.isdir(dist_path):
                    # clear backup code from an earlier run.
                    shutil.rmtree(dist_path)
                    log.info('remove old backup code.')
                self.copy_anything(source_path, dist_path)
            except Exception:
                # Best-effort per service: keep backing up the remaining trees.
                log.error('Exception occur when backup service<%s>' % os.path.basename(source_path))
                log.error('Exception: %s' % traceback.format_exc())
            log.info('Finish to backup for code<%s>' % os.path.basename(source_path))
        log.info('Finish to backup fs code.')

    def recover_fs_code(self):
        """Restore each backed-up service tree into site-packages."""
        log.info('Start to recover fs code.')
        # services_dirs, e.g. ['nova', 'cinder', 'neutron']
        services_dirs = os.listdir(self.path_fs_code_backup)
        for service_dir in services_dirs:
            path_backup_service_code = os.path.join(self.path_fs_code_backup, service_dir)
            if os.path.isdir(path_backup_service_code):
                # remove the installed service code first so copytree can recreate it.
                service_dir_to_be_remove = os.path.join(self.openstack_installed_path, service_dir)
                if os.path.isdir(service_dir_to_be_remove):
                    shutil.rmtree(service_dir_to_be_remove)
                    log.info('remove code from site-package of service<%s>' % service_dir)
                dest_path = os.path.join(self.openstack_installed_path, service_dir)
                # copy the backup back into the install path (e.g. /usr/lib64/python2.6/site-packages/).
                self.copy_anything(path_backup_service_code, dest_path)
                log.info('Finish to recover code of service<%s>' % service_dir)
            else:
                log.error('No backup code for service:<%s> in path: %s' % (service_dir, path_backup_service_code))
        log.info('Finish to recover fs code')

    def copy_anything(self, src, dst):
        """Recursively copy *src* to *dst*; fall back to a file copy for non-dirs."""
        try:
            shutil.copytree(src, dst)
        except OSError as exc:
            if exc.errno == errno.ENOTDIR:
                shutil.copy(src, dst)
            else:
                raise
def get_all_hosts():
    """Return cascaded (openstack/aws/vcloud) hosts plus every proxy host."""
    business = CPSServiceBusiness()
    hosts = business.get_openstack_hosts()
    hosts = hosts + business.get_aws_node_hosts()
    hosts = hosts + business.get_vcloud_node_hosts()
    return hosts + business.get_all_proxy_nodes(proxy_match_region=CONF.DEFAULT.proxy_match_region)
def get_all_cascaded_hosts():
    """Return every cascaded host: openstack, aws and vcloud AZ nodes."""
    business = CPSServiceBusiness()
    return (business.get_openstack_hosts()
            + business.get_aws_node_hosts()
            + business.get_vcloud_node_hosts())
def get_all_proxy_hosts():
    """Return the hosts of every configured proxy node."""
    return CPSServiceBusiness().get_all_proxy_nodes(
        proxy_match_region=CONF.DEFAULT.proxy_match_region)
def get_os_region_name():
    """Return the OS region name reported by CPS."""
    return CPSServiceBusiness().get_os_region_name()
def export_region():
    """Publish the detected region through the OS_REGION_NAME env var."""
    os.environ['OS_REGION_NAME'] = get_os_region_name()
def export_env():
    """Export the OpenStack CLI environment variables from CONF.ENV."""
    env_keys = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_TENANT_NAME',
                'NOVA_ENDPOINT_TYPE', 'CINDER_ENDPOINT_TYPE',
                'OS_ENDPOINT_TYPE', 'OS_VOLUME_API_VERSION', 'OS_PASSWORD')
    for key in env_keys:
        os.environ[key] = getattr(CONF.ENV, key)
export_env()
if __name__ == '__main__':
    # CLI entry point: sys.argv[1] selects the mode (see usage text below).
    if len(sys.argv) <= 1:
        print('Please select mode, options is: 1.prepare; 2. cascading; 3. check; 4. restart; 5.remote-backup; 6.remote-recover')
        print('Option <prepare> is use to copy patches_tool to proxy nodes and az nodes.')
        print('Option <cascading> is use to config cascading node and proxy node. Only need to execute once in cascading node.')
        print('Option <check> is use to check status of services in cascading and proxy node.')
        print('Option <restart> is use to restart services in cascading and proxy node.')
        print('Option <remote-backup> is use to backup fs code(nova, cinder, neutron) in /home/fsp/fs_code_backup dir.')
        print('Option <remote-recover> is use to ')
        exit(0)
    print('Start to config cascading....')
    log.init('patches_tool_config')
    log.info('Start to config cascading....')
    mode = sys.argv[1]
    export_region()
    config_cascading = ConfigCascading()
    dispatch_patch_tool = DispatchPatchTool(proxy_match_region=CONF.DEFAULT.proxy_match_region)
    #first to dispatch patch_tool to all cascaded node.
    if mode == 'prepare':
        all_cascaded_hosts = get_all_cascaded_hosts()
        all_proxy_hosts = get_all_proxy_hosts()
        utils.remote_open_root_permit_for_hosts(all_cascaded_hosts + all_proxy_hosts)
        utils.add_auto_route_for_fs(all_cascaded_hosts + all_proxy_hosts)
        dispatch_patch_tool.dispatch_patches_tool_to_remote_nodes()
    #Second to config cascading node to add proxy roles and config proxy nodes connect with cascaded nodes.
    elif mode == 'cascading':
        config_cascading.config_cascading_nodes()
        #Thrid to Config cascaded node to connect with cascading node.
        dispatch_patch_tool.remote_config_cascaded_for_all_type_node()
    elif mode == 'remote-backup':
        # Run the local-backup mode of this script on every proxy/AZ node.
        executed_cmd = 'python %s %s' % (ScriptFilePath.PATCH_REMOTE_HYBRID_CONFIG_PY, 'local-backup')
        dispatch_patch_tool.dispatch_cmd_to_all_proxy_nodes(executed_cmd)
        dispatch_patch_tool.dispatch_cmd_to_all_az_nodes(executed_cmd)
    elif mode == 'remote-recover':
        # Run the local-recover mode of this script on every proxy/AZ node.
        executed_cmd = 'python %s %s' % (ScriptFilePath.PATCH_REMOTE_HYBRID_CONFIG_PY, 'local-recover')
        dispatch_patch_tool.dispatch_cmd_to_all_proxy_nodes(executed_cmd)
        dispatch_patch_tool.dispatch_cmd_to_all_az_nodes(executed_cmd)
    #this mode cascaded is use to be called in cascaded node remotely in cascading node.
    elif mode == 'cascaded':
        config_cascading.config_cascaded_nodes()
    elif mode == 'check':
        config_cascading.check_service_status()
    elif mode == 'restart':
        config_cascading.restart_services()
    elif mode == 'local-backup':
        backup_recover_service = BackupRecoverFS()
        backup_recover_service.back_fs_code()
    elif mode == 'local-recover':
        backup_recover_service = BackupRecoverFS()
        backup_recover_service.recover_fs_code()
    print('End to config')
    log.info('End to config')
| |
"""
color_scheme_matcher.
Licensed under MIT.
Copyright (C) 2012 Andrew Gibson <agibsonsw@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---------------------
Original code has been heavily modified by Isaac Muse <isaacmuse@gmail.com> for the ExportHtml project.
Algorithm has been split out into a separate library and been enhanced with a number of features.
"""
from __future__ import absolute_import
import sublime
import re
from .rgba import RGBA, clamp, round_int
from . import x11colors
from os import path
from collections import namedtuple
from plistlib import readPlistFromBytes
import decimal
FONT_STYLE = "font_style" if int(sublime.version()) >= 3151 else "fontStyle"
# For new Sublime format
FLOAT_TRIM_RE = re.compile(r'^(?P<keep>\d+)(?P<trash>\.0+|(?P<keep2>\.\d*[1-9])0+)$')
COLOR_PARTS = {
"percent": r"[+\-]?(?:(?:\d*\.\d+)|\d+)%",
"float": r"[+\-]?(?:(?:\d*\.\d+)|\d+)"
}
RGB_COLORS = r"""(?x)
(?P<hexa>\#(?P<hexa_content>[\dA-Fa-f]{8}))\b |
(?P<hex>\#(?P<hex_content>[\dA-Fa-f]{6}))\b |
(?P<hexa_compressed>\#(?P<hexa_compressed_content>[\dA-Fa-f]{4}))\b |
(?P<hex_compressed>\#(?P<hex_compressed_content>[\dA-Fa-f]{3}))\b |
\b(?P<rgb>rgb\(\s*(?P<rgb_content>(?:%(float)s\s*,\s*){2}%(float)s | (?:%(percent)s\s*,\s*){2}%(percent)s)\s*\)) |
\b(?P<rgba>rgba\(\s*(?P<rgba_content>
(?:%(float)s\s*,\s*){3}(?:%(percent)s|%(float)s) | (?:%(percent)s\s*,\s*){3}(?:%(percent)s|%(float)s)
)\s*\))
""" % COLOR_PARTS
HSL_COLORS = r"""(?x)
\b(?P<hsl>hsl\(\s*(?P<hsl_content>%(float)s\s*,\s*%(percent)s\s*,\s*%(percent)s)\s*\)) |
\b(?P<hsla>hsla\(\s*(?P<hsla_content>%(float)s\s*,\s*(?:%(percent)s\s*,\s*){2}(?:%(percent)s|%(float)s))\s*\))
""" % COLOR_PARTS
VARIABLES = r"""(?x)
\b(?P<var>var\(\s*(?P<var_content>\w[\w\d]*)\s*\))
"""
COLOR_MOD = r"""(?x)
\b(?P<color>color\((?P<color_content>.*)\))
"""
COLOR_NAMES = r'\b(?P<x11colors>%s)\b(?!\()' % '|'.join([name for name in x11colors.name2hex_map.keys()])
COLOR_RE = re.compile(
r'(?x)(?i)(?:%s|%s|%s|%s|%s)' % (
RGB_COLORS,
HSL_COLORS,
VARIABLES,
COLOR_MOD,
COLOR_NAMES
)
)
COLOR_RGB_SPACE_RE = re.compile(
r'(?x)(?i)(?:%s|%s|%s)' % (
RGB_COLORS,
VARIABLES,
COLOR_NAMES
)
)
COLOR_MOD_RE = re.compile(
r'''(?x)
color\(\s*
(?P<base>\#[\dA-Fa-f]{8}|\#[\dA-Fa-f]{6})
\s+(?P<type>blenda?)\(
(?P<color>\#[\dA-Fa-f]{8}|\#[\dA-Fa-f]{6})
\s+(?P<percent>%(percent)s)
\)
(?P<other>
(?:\s+blenda?\((?:\#[\dA-Fa-f]{8}|\#[\dA-Fa-f]{6})\s+%(percent)s\))+
)?
\s*\)
''' % COLOR_PARTS
)
def fmt_float(f, p=0):
    """Format *f* with *p* decimal places (ROUND_HALF_UP), trimming precision zeros."""
    quantum = decimal.Decimal('0.' + ('0' * p) if p > 0 else '0')
    string = str(decimal.Decimal(f).quantize(quantum, decimal.ROUND_HALF_UP))
    m = FLOAT_TRIM_RE.match(string)
    if m:
        # Keep the integer part plus any significant fractional tail.
        string = m.group('keep') + (m.group('keep2') or '')
    return string
def alpha_dec_normalize(dec):
    """Normalize a decimal alpha string (0.0-1.0) to a 2-digit hex string ('00'-'ff')."""
    temp = float(dec)
    if temp < 0.0 or temp > 1.0:
        # Out-of-range values are clamped into [0, 1] before conversion.
        dec = fmt_float(clamp(float(temp), 0.0, 1.0), 3)
    alpha = "%02x" % round_int(float(dec) * 255.0)
    return alpha
def alpha_percent_normalize(perc):
    """Normalize a percent alpha string (e.g. '50%') to a 2-digit hex string."""
    alpha_float = clamp(float(perc.strip('%')), 0.0, 100.0) / 100.0
    alpha = "%02x" % round_int(alpha_float * 255.0)
    return alpha
def blend(m):
    """Fold one matched ``blend``/``blenda`` expression into a single color string."""
    base = m.group('base')
    color = m.group('color')
    blend_type = m.group('type')
    percent = m.group('percent')
    if percent.endswith('%'):
        ratio = float(percent.strip('%'))
    else:
        # Decimal alpha: convert to the equivalent percentage.
        ratio = int(alpha_dec_normalize(percent), 16) * (100.0 / 255.0)
    rgba = RGBA(base)
    rgba.blend(color, ratio, alpha=(blend_type == 'blenda'))
    blended = rgba.get_rgb() if rgba.a == 255 else rgba.get_rgba()
    trailing = m.group('other')
    if trailing:
        # Re-wrap so the remaining blend() calls get folded on the next pass.
        blended = "color(%s %s)" % (blended, trailing)
    return blended
def translate_color(m, var, var_src):
    """Translate a COLOR_RE match object to a '#rrggbb[aa]' color string.

    :param m: match from COLOR_RE (or a compatible pattern with named groups).
    :param var: dict of already-resolved scheme variables (used as a cache).
    :param var_src: dict of raw variable definitions used to resolve misses.
    :return: normalized color string, or None when unresolvable.
    """
    color = None
    alpha = None
    groups = m.groupdict()
    if groups.get('hex_compressed'):
        # Compressed #rgb: duplicate each nibble ('a' -> 'aa').
        content = m.group('hex_compressed_content')
        color = "#%02x%02x%02x" % (
            int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
        )
    elif groups.get('hexa_compressed'):
        content = m.group('hexa_compressed_content')
        color = "#%02x%02x%02x" % (
            int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
        )
        # BUG FIX: the alpha nibble must be duplicated too ('d' -> 'dd'); the
        # original kept a single hex digit, yielding a malformed 7-char color.
        alpha = content[3:] * 2
    elif groups.get('hex'):
        content = m.group('hex_content')
        if len(content) == 6:
            color = "#%02x%02x%02x" % (
                int(content[0:2], 16), int(content[2:4], 16), int(content[4:6], 16)
            )
        else:
            color = "#%02x%02x%02x" % (
                int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
            )
    elif groups.get('hexa'):
        content = m.group('hexa_content')
        if len(content) == 8:
            color = "#%02x%02x%02x" % (
                int(content[0:2], 16), int(content[2:4], 16), int(content[4:6], 16)
            )
            alpha = content[6:]
        else:
            color = "#%02x%02x%02x" % (
                int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
            )
            # BUG FIX: duplicate the compressed alpha nibble as well.
            alpha = content[3:] * 2
    elif groups.get('rgb'):
        content = [x.strip() for x in m.group('rgb_content').split(',')]
        if content[0].endswith('%'):
            # Percent channels scale 0-100% onto 0-255.
            r = round_int(clamp(float(content[0].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
            g = round_int(clamp(float(content[1].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
            b = round_int(clamp(float(content[2].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
            color = "#%02x%02x%02x" % (r, g, b)
        else:
            color = "#%02x%02x%02x" % (
                clamp(round_int(float(content[0])), 0, 255),
                clamp(round_int(float(content[1])), 0, 255),
                clamp(round_int(float(content[2])), 0, 255)
            )
    elif groups.get('rgba'):
        content = [x.strip() for x in m.group('rgba_content').split(',')]
        if content[0].endswith('%'):
            r = round_int(clamp(float(content[0].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
            g = round_int(clamp(float(content[1].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
            b = round_int(clamp(float(content[2].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
            color = "#%02x%02x%02x" % (r, g, b)
        else:
            color = "#%02x%02x%02x" % (
                clamp(round_int(float(content[0])), 0, 255),
                clamp(round_int(float(content[1])), 0, 255),
                clamp(round_int(float(content[2])), 0, 255)
            )
        if content[3].endswith('%'):
            alpha = alpha_percent_normalize(content[3])
        else:
            alpha = alpha_dec_normalize(content[3])
    elif groups.get('hsl'):
        content = [x.strip() for x in m.group('hsl_content').split(',')]
        rgba = RGBA()
        hue = float(content[0])
        if hue < 0.0 or hue > 360.0:
            hue = hue % 360.0
        h = hue / 360.0
        s = clamp(float(content[1].strip('%')), 0.0, 100.0) / 100.0
        l = clamp(float(content[2].strip('%')), 0.0, 100.0) / 100.0
        rgba.fromhls(h, l, s)
        color = rgba.get_rgb()
    elif groups.get('hsla'):
        content = [x.strip() for x in m.group('hsla_content').split(',')]
        rgba = RGBA()
        hue = float(content[0])
        if hue < 0.0 or hue > 360.0:
            hue = hue % 360.0
        h = hue / 360.0
        s = clamp(float(content[1].strip('%')), 0.0, 100.0) / 100.0
        l = clamp(float(content[2].strip('%')), 0.0, 100.0) / 100.0
        rgba.fromhls(h, l, s)
        color = rgba.get_rgb()
        if content[3].endswith('%'):
            alpha = alpha_percent_normalize(content[3])
        else:
            alpha = alpha_dec_normalize(content[3])
    elif groups.get('var'):
        content = m.group('var_content')
        if content in var:
            # Cached variable hit.
            color = var[content]
        else:
            # Resolve the variable recursively from its raw source definition.
            v = var_src[content]
            m = COLOR_RE.match(v.strip())
            color = translate_color(m, var, var_src)
    elif groups.get('x11colors'):
        try:
            color = x11colors.name2hex(m.group('x11colors')).lower()
        except Exception:
            # Narrowed from a bare ``except:``; unknown names leave color None.
            pass
    elif groups.get('color'):
        # color() mod syntax: resolve inner colors, then fold blend() calls
        # until no substitutions remain.
        content = m.group('color')
        content = COLOR_RGB_SPACE_RE.sub((lambda match, v=var, vs=var_src: translate_color(match, v, vs)), content)
        n = -1
        while n:
            content, n = COLOR_MOD_RE.subn(blend, content)
        color = content
    if color is not None and alpha is not None:
        color += alpha
    return color
def sublime_format_path(pth):
    """Normalize *pth* into Sublime's internal, forward-slash resource path form."""
    drive = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
    if sublime.platform() == "windows" and drive is not None:
        # Drop the drive colon, e.g. 'C:/x' -> 'C/x'.
        pth = "%s/%s" % (drive.group(1), drive.group(2))
    return pth.replace("\\", "/")
class SchemeColors(
    namedtuple(
        'SchemeColors',
        ['fg', 'fg_simulated', 'bg', "bg_simulated", "style", "fg_selector", "bg_selector", "style_selectors"]
    )
):
    """Resolved foreground/background/style info for a matched scope.

    NOTE: the ``verbose`` keyword was dropped; it defaulted to False and was
    removed from ``collections.namedtuple`` in Python 3.7, where passing it
    raises TypeError.
    """
class SchemeSelectors(namedtuple('SchemeSelectors', ['name', 'scope'])):
    """Name/scope pair identifying the scheme rule that matched.

    NOTE: the ``verbose`` keyword was dropped; it defaulted to False and was
    removed from ``collections.namedtuple`` in Python 3.7.
    """
class ColorSchemeMatcher(object):
"""Determine color scheme colors and style for text in a Sublime view buffer."""
def __init__(self, scheme_file, color_filter=None):
"""Initialize."""
if color_filter is None:
color_filter = self.filter
self.legacy = not scheme_file.lower().endswith('.sublime-color-scheme')
self.color_scheme = path.normpath(scheme_file)
self.scheme_file = path.basename(self.color_scheme)
if self.legacy:
self.scheme_obj = color_filter(
readPlistFromBytes(
re.sub(
br"^[\r\n\s]*<!--[\s\S]*?-->[\s\r\n]*|<!--[\s\S]*?-->", b'',
sublime.load_binary_resource(sublime_format_path(self.color_scheme))
)
)
)
else:
sublime.decode_value(
sublime.load_resource(sublime_format_path(self.color_scheme)),
preserve_lines=True
)
self.scheme_file = scheme_file
self.matched = {}
self.variables = {}
self.parse_scheme()
    def filter(self, plist):
        """Dummy filter call that does nothing (identity; the default color_filter)."""
        return plist
    def parse_scheme(self):
        """Parse the color scheme.

        Resolves variables (new format only), extracts the global defaults
        into ``self.special_colors`` and records every scope rule through
        ``add_entry``.
        """
        if self.legacy:
            # Legacy tmTheme: the globals are the settings entry that has
            # neither a 'scope' nor a 'name'.
            color_settings = {}
            for item in self.scheme_obj["settings"]:
                if item.get('scope', None) is None and item.get('name', None) is None:
                    color_settings = item["settings"]
                    break
        else:
            # New format: resolve 'variables' first so defaults and rules can
            # reference them via var().
            for k, v in self.scheme_obj.get('variables', {}).items():
                m = COLOR_RE.match(v.strip())
                self.variables[k] = translate_color(m, self.variables, self.scheme_obj.get('variables'))
            color_settings = {}
            for k, v in self.scheme_obj["defaults"].items():
                m = COLOR_RE.match(v.strip())
                if m is not None:
                    color_settings[k] = translate_color(m, self.variables, {})
        # Get general theme colors from color scheme file
        bground, bground_sim = self.process_color(
            color_settings.get("background", '#FFFFFF'), simple_strip=True
        )
        # Need to set background so other colors can simulate their transparency.
        self.special_colors = {
            "background": {'color': bground, 'color_simulated': bground_sim}
        }
        fground, fground_sim = self.process_color(color_settings.get("foreground", '#000000'))
        sbground = self.process_color(color_settings.get("selection", fground))[0]
        sbground_sim = self.process_color(color_settings.get("selection", fground_sim))[1]
        sfground, sfground_sim = self.process_color(color_settings.get("selectionForeground", None))
        gbground = self.process_color(color_settings.get("gutter", bground))[0]
        gbground_sim = self.process_color(color_settings.get("gutter", bground_sim))[1]
        gfground = self.process_color(color_settings.get("gutterForeground", fground))[0]
        gfground_sim = self.process_color(color_settings.get("gutterForeground", fground_sim))[1]
        self.special_colors["foreground"] = {'color': fground, 'color_simulated': fground_sim}
        self.special_colors["background"] = {'color': bground, 'color_simulated': bground_sim}
        self.special_colors["selectionForeground"] = {'color': sfground, 'color_simulated': sfground_sim}
        self.special_colors["selection"] = {'color': sbground, 'color_simulated': sbground_sim}
        self.special_colors["gutter"] = {'color': gbground, 'color_simulated': gbground_sim}
        self.special_colors["gutterForeground"] = {'color': gfground, 'color_simulated': gfground_sim}
        self.colors = {}
        if self.legacy:
            # Create scope colors mapping from color scheme file
            for item in self.scheme_obj["settings"]:
                name = item.get('name', '')
                scope = item.get('scope', None)
                color = None
                bgcolor = None
                style = []
                if 'settings' in item and scope is not None:
                    color = item['settings'].get('foreground', None)
                    bgcolor = item['settings'].get('background', None)
                    if 'fontStyle' in item['settings']:
                        for s in item['settings']['fontStyle'].split(' '):
                            if s == "bold" or s == "italic":  # or s == "underline":
                                style.append(s)
                if scope is not None:
                    self.add_entry(name, scope, color, bgcolor, style)
        else:
            # Create scope colors mapping from color scheme file
            for item in self.scheme_obj["rules"]:
                name = item.get('name', '')
                scope = item.get('scope', None)
                color = None
                bgcolor = None
                style = []
                if scope is not None:
                    color = item.get('foreground', None)
                    if color is not None:
                        color = translate_color(COLOR_RE.match(color.strip()), self.variables, {})
                    bgcolor = item.get('background', None)
                    if bgcolor is not None:
                        bgcolor = translate_color(COLOR_RE.match(bgcolor.strip()), self.variables, {})
                    if FONT_STYLE in item:
                        for s in item[FONT_STYLE].split(' '):
                            if s == "bold" or s == "italic":  # or s == "underline":
                                style.append(s)
                if scope is not None:
                    self.add_entry(name, scope, color, bgcolor, style)
def add_entry(self, name, scope, color, bgcolor, style):
"""Add color entry."""
if color is not None:
fg, fg_sim = self.process_color(color)
else:
fg, fg_sim = None, None
if bgcolor is not None:
bg, bg_sim = self.process_color(bgcolor)
else:
bg, bg_sim = None, None
self.colors[scope] = {
"name": name,
"scope": scope,
"color": fg,
"color_simulated": fg_sim,
"bgcolor": bg,
"bgcolor_simulated": bg_sim,
"style": style
}
def process_color(self, color, simple_strip=False):
"""
Strip transparency from the color value.
Transparency can be stripped in one of two ways:
- Simply mask off the alpha channel.
- Apply the alpha channel to the color essential getting the color seen by the eye.
"""
if color is None or color.strip() == "":
return None, None
if not color.startswith('#'):
if self.legacy:
color = x11colors.name2hex(color)
if color is None:
return None, None
else:
return None, None
rgba = RGBA(color.replace(" ", ""))
if not simple_strip:
bground = self.special_colors['background']['color_simulated']
rgba.apply_alpha(bground if bground != "" else "#FFFFFF")
return color, rgba.get_rgb()
def get_special_color(self, name, simulate_transparency=False):
"""
Get the core colors (background, foreground) for the view and gutter.
Get the visible look of the color by simulated transparency if requrested.
"""
return self.special_colors.get(name, {}).get('color_simulated' if simulate_transparency else 'color')
def get_scheme_obj(self):
"""Get the plist file used during the process."""
return self.scheme_obj
def get_scheme_file(self):
"""Get the scheme file used during the process."""
return self.scheme_file
def guess_color(self, scope_key, selected=False, explicit_background=False):
"""
Guess the colors and style of the text for the given Sublime scope.
By default, we always fall back to the schemes default background,
but if desired, we can show that no background was explicitly
specified by returning None. This is done by enabling explicit_background.
This will only show backgrounds that were explicitly specified.
This was orginially introduced for mdpopups so that it would
know when a background was not needed. This allowed mdpopups
to generate syntax highlighted code that could be overlayed on
block elements with different background colors and allow that
background would show through.
"""
color = self.special_colors['foreground']['color']
color_sim = self.special_colors['foreground']['color_simulated']
bgcolor = self.special_colors['background']['color'] if not explicit_background else None
bgcolor_sim = self.special_colors['background']['color_simulated'] if not explicit_background else None
style = set([])
color_selector = SchemeSelectors("foreground", "foreground")
bg_selector = SchemeSelectors("background", "background")
style_selectors = {"bold": SchemeSelectors("", ""), "italic": SchemeSelectors("", "")}
if scope_key in self.matched:
color = self.matched[scope_key]["color"]
color_sim = self.matched[scope_key]["color_simulated"]
style = self.matched[scope_key]["style"]
bgcolor = self.matched[scope_key]["bgcolor"]
bgcolor_sim = self.matched[scope_key]["bgcolor_simulated"]
selectors = self.matched[scope_key]["selectors"]
color_selector = selectors["color"]
bg_selector = selectors["background"]
style_selectors = selectors["style"]
else:
best_match_bg = 0
best_match_fg = 0
best_match_style = 0
for key in self.colors:
match = sublime.score_selector(scope_key, key)
if self.colors[key]["color"] is not None and match > best_match_fg:
best_match_fg = match
color = self.colors[key]["color"]
color_sim = self.colors[key]["color_simulated"]
color_selector = SchemeSelectors(self.colors[key]["name"], self.colors[key]["scope"])
if self.colors[key]["style"] is not None and match > best_match_style:
best_match_style = match
for s in self.colors[key]["style"]:
style.add(s)
if s == "bold":
style_selectors["bold"] = SchemeSelectors(
self.colors[key]["name"], self.colors[key]["scope"]
)
elif s == "italic":
style_selectors["italic"] = SchemeSelectors(
self.colors[key]["name"], self.colors[key]["scope"]
)
if self.colors[key]["bgcolor"] is not None and match > best_match_bg:
best_match_bg = match
bgcolor = self.colors[key]["bgcolor"]
bgcolor_sim = self.colors[key]["bgcolor_simulated"]
bg_selector = SchemeSelectors(self.colors[key]["name"], self.colors[key]["scope"])
if len(style) == 0:
style = ""
else:
style = ' '.join(style)
self.matched[scope_key] = {
"color": color,
"bgcolor": bgcolor,
"color_simulated": color_sim,
"bgcolor_simulated": bgcolor_sim,
"style": style,
"selectors": {
"color": color_selector,
"background": bg_selector,
"style": style_selectors
}
}
if selected:
if self.special_colors['selectionForeground']['color']:
color = self.special_colors['selectionForeground']['color']
color_sim = color = self.special_colors['selectionForeground']['color_simulated']
style = ''
if self.special_colors['selection']['color']:
bgcolor = self.special_colors['selection']['color']
bgcolor_sim = color = self.special_colors['selection']['color_simulated']
return SchemeColors(
color, color_sim, bgcolor, bgcolor_sim, style,
color_selector, bg_selector, style_selectors
)
| |
import calendar
import hashlib
import sys
import time
import urlparse
import uuid
from decimal import Decimal
from urllib import urlencode
from django import http
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
import bleach
import commonware.log
from tower import ugettext as _
from addons.decorators import addon_view_factory, can_be_purchased
import amo
from amo.decorators import json_view, login_required, post_required, write
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from lib.cef_loggers import app_pay_cef
from lib.crypto.webpay import (InvalidSender, parse_from_webpay,
sign_webpay_jwt)
from mkt.api.exceptions import AlreadyPurchased
from mkt.webapps.models import Webapp
from stats.models import ClientData, Contribution
from . import webpay_tasks as tasks
log = commonware.log.getLogger('z.purchase')
addon_view = addon_view_factory(qs=Webapp.objects.valid)
def start_purchase(request, addon):
    """Collect the amount, currency, uuid and description for a new purchase."""
    log.debug('Starting purchase of app: %s by user: %s'
              % (addon.pk, request.amo_user.pk))
    # A fresh opaque identifier ties the pending contribution to the JWT.
    uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest()
    amount = addon.get_price(region=request.REGION.id)
    currency = request.REGION.default_currency
    # L10n: {0} is the addon name.
    contrib_for = (_(u'Firefox Marketplace purchase of {0}')
                   .format(addon.name))
    return amount, currency, uuid_, contrib_for
def make_ext_id(addon_pk):
    """
    Generates a webpay/solitude external ID for this addon's primary key.
    """
    # This namespace is currently necessary because app products
    # are mixed into an application's own in-app products.
    # Maybe we can fix that.
    # Also, we may use various dev/stage servers with the same
    # Bango test API.
    domain = getattr(settings, 'DOMAIN', None) or 'marketplace-dev'
    namespace = domain.split('.')[0]
    return '%s:%s' % (namespace, addon_pk)
@login_required
@addon_view
@write
@post_required
@json_view
def prepare_pay(request, addon):
    """JSON endpoint wrapping :func:`_prepare_pay` with auth/method/addon guards."""
    return _prepare_pay(request, addon)
@can_be_purchased
def _prepare_pay(request, addon):
    """Prepare a JWT to pass into navigator.pay()

    Creates a pending Contribution row (matched up later by the webpay
    postback via its uuid), then builds and signs the webpay request JWT.
    Returns a dict with the signed JWT and a status-polling URL.
    Raises AlreadyPurchased when the user already owns the premium app.
    """
    if addon.is_premium() and addon.has_purchased(request.amo_user):
        log.info('Already purchased: %d' % addon.pk)
        raise AlreadyPurchased
    amount, currency, uuid_, contrib_for = start_purchase(request, addon)
    log.debug('Storing contrib for uuid: %s' % uuid_)
    # Pending row; the postback flips it to CONTRIB_PURCHASE on payment.
    Contribution.objects.create(addon_id=addon.id, amount=amount,
                                source=request.REQUEST.get('src', ''),
                                source_locale=request.LANG,
                                uuid=str(uuid_), type=amo.CONTRIB_PENDING,
                                paykey=None, user=request.amo_user,
                                price_tier=addon.premium.price,
                                client_data=ClientData.get_or_create(request))
    # Until atob() supports encoded HTML we are stripping all tags.
    # See bug 831524
    app_description = bleach.clean(unicode(addon.description), strip=True,
                                   tags=[])
    acct = addon.app_payment_account.payment_account
    seller_uuid = acct.solitude_seller.uuid
    application_size = addon.current_version.all_files[0].size
    issued_at = calendar.timegm(time.gmtime())
    icons = {}
    for size in amo.ADDON_ICON_SIZES:
        icons[str(size)] = absolutify(addon.get_icon_url(size))
    # Payload consumed by webpay / navigator.pay().
    req = {
        'iss': settings.APP_PURCHASE_KEY,
        'typ': settings.APP_PURCHASE_TYP,
        'aud': settings.APP_PURCHASE_AUD,
        'iat': issued_at,
        'exp': issued_at + 3600,  # expires in 1 hour
        'request': {
            'name': unicode(addon.name),
            'description': app_description,
            'pricePoint': addon.premium.price.name,
            'id': make_ext_id(addon.pk),
            'postbackURL': absolutify(reverse('webpay.postback')),
            'chargebackURL': absolutify(reverse('webpay.chargeback')),
            'productData': urlencode({'contrib_uuid': uuid_,
                                      'seller_uuid': seller_uuid,
                                      'addon_id': addon.pk,
                                      'application_size': application_size}),
            'icons': icons,
        }
    }
    jwt_ = sign_webpay_jwt(req)
    log.debug('Preparing webpay JWT for addon %s: %s' % (addon, jwt_))
    app_pay_cef.log(request, 'Preparing JWT', 'preparing_jwt',
                    'Preparing JWT for: %s' % (addon.pk), severity=3)
    # API clients poll a REST resource; web clients poll the pay_status view.
    if request.API:
        url = reverse('api_dispatch_detail', kwargs={
            'resource_name': 'status', 'api_name': 'webpay',
            'uuid': uuid_})
    else:
        url = reverse('webpay.pay_status', args=[addon.app_slug, uuid_])
    return {'webpayJWT': jwt_, 'contribStatusURL': url}
@login_required
@addon_view
@write
@json_view
def pay_status(request, addon, contrib_uuid):
    """
    Return JSON dict of {status: complete|incomplete}.
    The status of the payment is only complete when it exists by uuid,
    was purchased by the logged in user, and has been marked paid by the
    JWT postback. After that the UI is free to call app/purchase/record
    to generate a receipt.
    """
    au = request.amo_user
    # Only contributions already flagged as purchases for this user count.
    qs = Contribution.objects.filter(uuid=contrib_uuid,
                                     addon__addonpurchase__user=au,
                                     type=amo.CONTRIB_PURCHASE)
    return {'status': 'complete' if qs.exists() else 'incomplete'}
@csrf_exempt
@write
@post_required
def postback(request):
"""Verify signature and set contribution to paid."""
signed_jwt = request.POST.get('notice', '')
try:
data = parse_from_webpay(signed_jwt, request.META.get('REMOTE_ADDR'))
except InvalidSender, exc:
app_pay_cef.log(request, 'Unknown app', 'invalid_postback',
'Ignoring invalid JWT %r: %s' % (signed_jwt, exc),
severity=4)
return http.HttpResponseBadRequest()
pd = urlparse.parse_qs(data['request']['productData'])
contrib_uuid = pd['contrib_uuid'][0]
try:
contrib = Contribution.objects.get(uuid=contrib_uuid)
except Contribution.DoesNotExist:
etype, val, tb = sys.exc_info()
raise LookupError('JWT (iss:%s, aud:%s) for trans_id %s '
'links to contrib %s which doesn\'t exist'
% (data['iss'], data['aud'],
data['response']['transactionID'],
contrib_uuid)), None, tb
trans_id = data['response']['transactionID']
if contrib.transaction_id is not None:
if contrib.transaction_id == trans_id:
app_pay_cef.log(request, 'Repeat postback', 'repeat_postback',
'Postback sent again for: %s' % (contrib.addon.pk),
severity=4)
return http.HttpResponse(trans_id)
else:
app_pay_cef.log(request, 'Repeat postback with new trans_id',
'repeat_postback_new_trans_id',
'Postback sent again for: %s, but with new '
'trans_id: %s' % (contrib.addon.pk, trans_id),
severity=7)
raise LookupError('JWT (iss:%s, aud:%s) for trans_id %s is for '
'contrib %s that is already paid and has '
'existing differnet trans_id: %s'
% (data['iss'], data['aud'],
data['response']['transactionID'],
contrib_uuid, contrib.transaction_id))
log.info('webpay postback: fulfilling purchase for contrib %s with '
'transaction %s' % (contrib, trans_id))
app_pay_cef.log(request, 'Purchase complete', 'purchase_complete',
'Purchase complete for: %s' % (contrib.addon.pk),
severity=3)
contrib.update(transaction_id=trans_id, type=amo.CONTRIB_PURCHASE,
amount=Decimal(data['response']['price']['amount']),
currency=data['response']['price']['currency'])
tasks.send_purchase_receipt.delay(contrib.pk)
return http.HttpResponse(trans_id)
@csrf_exempt
@write
@post_required
def chargeback(request):
    """
    Verify signature from and create a refund contribution tied
    to the original transaction.
    """
    # Placeholder endpoint: webpay chargebacks are not handled yet.
    raise NotImplementedError
| |
# coding: utf-8
# PYTHON IMPORTS
import os
import ntpath
import posixpath
import shutil
# DJANGO IMPORTS
from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils.encoding import filepath_to_uri
from django.template import Context, Template, TemplateSyntaxError
# FILEBROWSER IMPORTS
import filebrowser
from filebrowser.settings import DEFAULT_PERMISSIONS
from filebrowser.base import FileObject, FileListing
from filebrowser.templatetags.fb_versions import version, version_object, version_setting
from filebrowser.sites import site
TESTS_PATH = os.path.dirname(os.path.abspath(__file__))
FILEBROWSER_PATH = os.path.split(TESTS_PATH)[0]
class VersionTemplateTagsTests(TestCase):
    """
    Tests for the ``fb_versions`` template tags (``version``,
    ``version_object`` and ``version_setting``).

    setUp builds a sandbox under the storage location (test directory,
    versions directory, a temp folder with a test image, and a second temp
    folder used as the placeholder image); tearDown restores every patched
    module attribute and removes the sandbox.
    """
    def setUp(self):
        """
        Save original values/functions so they can be restored in tearDown
        """
        self.original_path = filebrowser.base.os.path
        self.original_directory = site.directory
        self.original_versions_basedir = filebrowser.base.VERSIONS_BASEDIR
        self.original_versions = filebrowser.base.VERSIONS
        self.original_admin_versions = filebrowser.base.ADMIN_VERSIONS
        self.original_placeholder = filebrowser.templatetags.fb_versions.PLACEHOLDER
        self.original_show_placeholder = filebrowser.templatetags.fb_versions.SHOW_PLACEHOLDER
        self.original_force_placeholder = filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER
        # DIRECTORY
        # custom directory because this could be set with sites
        # and we cannot rely on filebrowser.settings
        # FIXME: find better directory name
        self.directory = "fb_test_directory/"
        self.directory_path = os.path.join(site.storage.location, self.directory)
        if os.path.exists(self.directory_path):
            self.fail("Test directory already exists.")
        else:
            os.makedirs(self.directory_path)
        # set site directory
        site.directory = self.directory
        # VERSIONS
        self.versions = "_versionstestdirectory"
        self.versions_path = os.path.join(site.storage.location, self.versions)
        if os.path.exists(self.versions_path):
            self.fail("Versions directory already exists.")
        else:
            os.makedirs(self.versions_path)
        # create temporary test folder and move testimage
        # FIXME: find better path names
        self.tmpdir_name = os.path.join("fb_tmp_dir", "fb_tmp_dir_sub")
        self.tmpdir_path = os.path.join(site.storage.location, self.directory, self.tmpdir_name)
        if os.path.exists(self.tmpdir_path):
            self.fail("Temporary testfolder already exists.")
        else:
            os.makedirs(self.tmpdir_path)
        # copy test image to temporary test folder
        self.image_path = os.path.join(FILEBROWSER_PATH, "static", "filebrowser", "img", "testimage.jpg")
        if not os.path.exists(self.image_path):
            self.fail("Testimage not found.")
        shutil.copy(self.image_path, self.tmpdir_path)
        # create temporary test folder (placeholder) and move testimage
        # FIXME: find better path names
        self.tmpdir_name_ph = os.path.join("fb_tmp_dir", "fb_tmp_placeholder")
        self.tmpdir_path_ph = os.path.join(site.storage.location, self.directory, self.tmpdir_name_ph)
        if os.path.exists(self.tmpdir_path_ph):
            self.fail("Temporary testfolder (placeholder) already exists.")
        else:
            os.makedirs(self.tmpdir_path_ph)
        # copy test image to temporary test folder (placeholder)
        shutil.copy(self.image_path, self.tmpdir_path_ph)
        # set posixpath
        filebrowser.base.os.path = posixpath
        # fileobjects
        self.f_image = FileObject(os.path.join(self.directory, self.tmpdir_name, "testimage.jpg"), site=site)
        self.f_image_not_exists = FileObject(os.path.join(self.directory, self.tmpdir_name, "testimage_does_not_exist.jpg"), site=site)
        self.f_folder = FileObject(os.path.join(self.directory, self.tmpdir_name), site=site)
        self.f_placeholder = FileObject(os.path.join(self.directory, self.tmpdir_name_ph, "testimage.jpg"), site=site)
    def test_version(self):
        """
        Templatetag version
        """
        # new settings
        filebrowser.base.VERSIONS_BASEDIR = "fb_test_directory/_versions"
        filebrowser.base.VERSIONS = {
            'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
            'large': {'verbose_name': 'Large', 'width': 600, 'height': '', 'opts': ''},
        }
        filebrowser.base.ADMIN_VERSIONS = ['large']
        filebrowser.settings.VERSIONS = filebrowser.base.VERSIONS
        filebrowser.templatetags.fb_versions.VERSIONS = filebrowser.base.VERSIONS
        # templatetag version with wrong token
        self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version obj.path %}'))
        self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version %}'))
        # templatetag version without path
        t = Template('{% load fb_versions %}{% version obj "medium" %}')
        c = Context({"obj": self.f_image})
        r = t.render(c)
        self.assertEqual(r, "")  # FIXME: should this throw an error?
        # templatetag version with hardcoded path
        t = Template('{% load fb_versions %}{% version path "large" %}')
        c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimage.jpg"})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # templatetag version with obj
        t = Template('{% load fb_versions %}{% version obj "large" %}')
        c = Context({"obj": self.f_image})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # templatetag version with obj.path
        t = Template('{% load fb_versions %}{% version obj.path "large" %}')
        c = Context({"obj": self.f_image})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # templatetag version with suffix as variable
        t = Template('{% load fb_versions %}{% version obj.path suffix %}')
        c = Context({"obj": self.f_image, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # # FIXME: templatetag version with non-existing path
        # t = Template('{% load fb_versions %}{% version path "large" %}')
        # c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimagexxx.jpg"})
        # r = t.render(c)
        # self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # test placeholder with existing image
        filebrowser.templatetags.fb_versions.PLACEHOLDER = "fb_test_directory/fb_tmp_dir/fb_tmp_placeholder/testimage.jpg"
        filebrowser.templatetags.fb_versions.SHOW_PLACEHOLDER = True
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = True
        t = Template('{% load fb_versions %}{% version obj.path suffix %}')
        c = Context({"obj": self.f_image, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = False
        t = Template('{% load fb_versions %}{% version obj.path suffix %}')
        c = Context({"obj": self.f_image, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # test placeholder with non-existing image
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = True
        t = Template('{% load fb_versions %}{% version obj.path suffix %}')
        c = Context({"obj": self.f_image_not_exists, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = False
        t = Template('{% load fb_versions %}{% version obj.path suffix %}')
        c = Context({"obj": self.f_image_not_exists, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        # Check permissions
        if DEFAULT_PERMISSIONS is not None:
            permissions_default = oct(DEFAULT_PERMISSIONS)
            permissions_file = oct(os.stat(os.path.join(settings.MEDIA_ROOT, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg")).st_mode & 0o777)
            self.assertEqual(permissions_default, permissions_file)
    def test_version_object(self):
        """
        Templatetag version_object
        """
        # new settings
        filebrowser.base.VERSIONS_BASEDIR = "fb_test_directory/_versions"
        filebrowser.base.VERSIONS = {
            'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
            'large': {'verbose_name': 'Large', 'width': 600, 'height': '', 'opts': ''},
        }
        filebrowser.base.ADMIN_VERSIONS = ['large']
        filebrowser.settings.VERSIONS = filebrowser.base.VERSIONS
        filebrowser.templatetags.fb_versions.VERSIONS = filebrowser.base.VERSIONS
        # templatetag with wrong token
        self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version_object obj.path %}'))
        self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version_object %}'))
        self.assertRaises(TemplateSyntaxError, lambda: Template('{% load fb_versions %}{% version_object obj.path "medium" %}'))
        # templatetag version_object with hardcoded path
        t = Template('{% load fb_versions %}{% version_object path "large" as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimage.jpg"})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # templatetag version_object with obj.path
        t = Template('{% load fb_versions %}{% version_object obj.path "large" as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # templatetag version_object with obj
        t = Template('{% load fb_versions %}{% version_object obj "large" as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # templatetag version_object with suffix as variable
        t = Template('{% load fb_versions %}{% version_object obj suffix as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # # FIXME: templatetag version with non-existing path
        # t = Template('{% load fb_versions %}{% version_object path "large" as version_large %}{{ version_large.url }}')
        # c = Context({"obj": self.f_image, "path": "fb_test_directory/fb_tmp_dir/fb_tmp_dir_sub/testimagexxx.jpg"})
        # r = t.render(c)
        # self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # test placeholder with existing image
        filebrowser.templatetags.fb_versions.PLACEHOLDER = "fb_test_directory/fb_tmp_dir/fb_tmp_placeholder/testimage.jpg"
        filebrowser.templatetags.fb_versions.SHOW_PLACEHOLDER = True
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = True
        t = Template('{% load fb_versions %}{% version_object obj suffix as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = False
        t = Template('{% load fb_versions %}{% version_object obj suffix as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_dir_sub/testimage_large.jpg"))
        # test placeholder with non-existing image
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = True
        t = Template('{% load fb_versions %}{% version_object obj suffix as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image_not_exists, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = False
        t = Template('{% load fb_versions %}{% version_object obj suffix as version_large %}{{ version_large.url }}')
        c = Context({"obj": self.f_image_not_exists, "suffix": "large"})
        r = t.render(c)
        self.assertEqual(c["version_large"].url, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
        self.assertEqual(r, os.path.join(settings.MEDIA_URL, "fb_test_directory/_versions/fb_tmp_dir/fb_tmp_placeholder/testimage_large.jpg"))
    def test_version_setting(self):
        """Placeholder: the ``version_setting`` tag is not covered yet."""
        pass
    def tearDown(self):
        """
        Restore original values/functions
        """
        filebrowser.base.os.path = self.original_path
        site.directory = self.original_directory
        filebrowser.base.VERSIONS_BASEDIR = self.original_versions_basedir
        filebrowser.base.VERSIONS = self.original_versions
        filebrowser.settings.VERSIONS = self.original_versions
        filebrowser.templatetags.fb_versions.VERSIONS = self.original_versions
        filebrowser.base.ADMIN_VERSIONS = self.original_admin_versions
        filebrowser.templatetags.fb_versions.PLACEHOLDER = self.original_placeholder
        filebrowser.templatetags.fb_versions.SHOW_PLACEHOLDER = self.original_show_placeholder
        filebrowser.templatetags.fb_versions.FORCE_PLACEHOLDER = self.original_force_placeholder
        # remove temporary directory and test folder
        shutil.rmtree(self.directory_path)
        shutil.rmtree(self.versions_path)
| |
#!/usr/bin/env python
"""
Name : qconcurrency.widgets._progressbar_.py
Created : Apr 17, 2017
Author : Will Pittman
Contact : willjpittman@gmail.com
________________________________________________________________________________
Description : ProgressBar, that can be updated from a separate thread.
________________________________________________________________________________
"""
#builtin
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Iterable
import functools
import uuid
import copy
import logging
import threading
#external
from Qt import QtWidgets
#internal
from qconcurrency.threading_ import ThreadedTask, SoloThreadedTask
#!TODO: http://stackoverflow.com/questions/7108715/how-to-show-hide-a-child-qwidget-with-a-motion-animation
logger = logging.getLogger(__name__)
class _ProgressSoloThreadedTask( SoloThreadedTask ):
    """
    customized :py:obj:`qconcurrency.threading_.SoloThreadedTask` that
    assigns a new jobid for each thread it starts, and each thread's progress
    is measured entirely independently.
    This way, there is no race-condition where cancelling one thread can knock out
    progress on *all* threads.
    """
    def __init__(self, progressbar, callback, signals=None, connections=None, mutex_expiry=5000 ):
        """
        Args:
            progressbar: the ProgressBar widget whose progress this task updates.
            callback/signals/connections/mutex_expiry:
                forwarded unchanged to :py:meth:`SoloThreadedTask.__init__`.
        """
        self._progressbar = progressbar
        SoloThreadedTask.__init__(self,
            callback     = callback,
            signals      = signals,
            connections  = connections,
            mutex_expiry = mutex_expiry,
        )
    def start(self, expiryTimeout=-1, threadpool=None, wait=False, *args, **kwds ):
        """
        Wraps :py:meth:`qconcurrency.threading_.SoloThreadedTask.start` ,
        adding signal-connections so that they update a progressbar.
        See Also:
            * :py:meth:`qconcurrency.threading_.SoloThreadedTask.start`
        """
        # every start() gets its own jobid so each run's progress is independent
        jobid = uuid.uuid4().hex
        # Robustness fix: the original called `self._connections.copy()` before
        # checking for a falsy value, raising AttributeError when
        # `_connections` was None.
        connections = dict(self._connections) if self._connections else {}
        progbar_connections = {
            'incr_progress' : functools.partial( self._progressbar.incr_progress, jobid=jobid ),
            'add_progress'  : functools.partial( self._progressbar.add_progress, jobid=jobid ),
            'returned'      : functools.partial( self._progressbar._handle_return_or_abort, jobid=jobid ),
            'exception'     : functools.partial( self._progressbar._handle_return_or_abort, jobid=jobid ),
        }
        # merge the progressbar's connections with any user-supplied ones
        # NOTE(review): assumes existing values in `_connections` are lists of
        # callables (hence `.append`) -- confirm against SoloThreadedTask.
        for signal in progbar_connections:
            if signal in connections:
                connections[ signal ].append( progbar_connections[signal] )
            else:
                connections[ signal ] = [ progbar_connections[signal] ]
        SoloThreadedTask.start(self,
            expiryTimeout = expiryTimeout,
            threadpool    = threadpool,
            wait          = wait,
            _connections  = connections,
            *args,**kwds
        )
class ProgressBar( QtWidgets.QWidget ):
"""
A ProgressBar designed to track progress of several threads,
that is hidden automatically whenever all threads have exited
(by unhandled exception, or return).
Each thread is assigned it's own `jobid`, so that it's progress
is tracked entirely independently of all other pending tasks.
(my hope is that if errors appear in total-calculated progress in the
codebase, this will lessen the appearance of an error for the user - each
thread's progress being set to 100% once it exits).
"""
    def __init__(self):
        """Start hidden; the bar shows itself once progress is added."""
        QtWidgets.QWidget.__init__(self)
        self._progress = {} # { jobid: {'total':10, 'current':3} }
        self._cancelled_jobids = [] # rolling log of cancelled jobids (so later unhandled progress is ignored)
        self._progressbar = None # the ProgressBar Widget
        self.setHidden(True)
        self._initui()
def _initui(self):
# Create Widgets
layout = QtWidgets.QHBoxLayout()
self._progressbar = QtWidgets.QProgressBar()
self._reset_btn = QtWidgets.QPushButton('reset progress')
# Position Widgets
self.setLayout( layout )
layout.addWidget( self._progressbar )
layout.addWidget( self._reset_btn )
# Connections
self._reset_btn.clicked.connect( self.reset )
    def add_progress(self, amount, jobid=None):
        """
        Adds to the total number of required steps
        required to complete.

        `amount` steps are added to `jobid`'s total; the widget is unhidden.
        """
        # NOTE(review): `threading.Lock()` builds a brand-new lock on every
        # call, so this `with` provides no mutual exclusion between threads --
        # presumably a shared instance lock was intended; confirm.
        with threading.Lock():
            # progress reported for a cancelled job is silently ignored
            if jobid in self._cancelled_jobids:
                return
            if jobid not in self._progress:
                self._progress[ jobid ] = {'total':amount, 'current':0}
            else:
                self._progress[ jobid ]['total'] += amount
            self.refresh_progress()
            # adding work makes the bar visible again
            self.setHidden(False)
    def refresh_progress(self):
        """
        ReCalculates current/total progress, and updates the progressbar

        Cancelled and fully-completed jobs are dropped from tracking; the
        widget hides itself once no outstanding work remains.
        """
        # NOTE(review): `threading.Lock()` creates a new lock per call, so
        # this provides no real synchronization -- confirm intent.
        with threading.Lock():
            total_progress = 0
            current_progress = 0
            # iterate a snapshot of the keys, since entries are popped below
            for jobid in list(self._progress.keys()):
                job_progress = self._progress[ jobid ]
                if jobid in self._cancelled_jobids:
                    self._progress.pop(jobid)
                elif job_progress['current'] == job_progress['total']:
                    # finished job -- drop it so its totals no longer count
                    self._progress.pop(jobid)
                else:
                    total_progress += self._progress[ jobid ]['total']
                    current_progress += self._progress[ jobid ]['current']
            self._progressbar.setMaximum( total_progress )
            self._progressbar.setValue( current_progress )
            # hide when nothing is pending (0 <= 0 once the dict is empty)
            if total_progress <= current_progress:
                self.setHidden(True)
def incr_progress(self, amount, jobid):
"""
Completes a particular number of steps for
a particular jobid.
"""
with threading.Lock():
if jobid in self._cancelled_jobids:
return
if amount == None:
amount = 1
if jobid not in self._progress:
return
self._progress[ jobid ]['current'] += amount
self.refresh_progress()
def reset(self, jobid=None ):
"""
If not `jobid`, sets all required progress-steps back to 0.
Otherwise, removes all required progress associated with that
jobid.
"""
with threading.Lock():
if not jobid:
self._progress = {}
self._progressbar.reset()
elif jobid in self._progress:
self._progress.pop( jobid )
self.refresh_progress()
def new_task(self, callback, signals=None, *args, **kwds ):
    """
    Creates a new :py:obj:`ThreadedTask` object, adding
    signals to it so that it can update this :py:obj:`ProgressBar`.

    :py:obj:`ThreadedTask` objects are most suitable for
    producer/consumer patterns (multiple threads running at once),
    and no thread depends on another.

    See also:

       * :py:obj:`ThreadedTask`

       * :py:obj:`SoloThreadedTask`

       * :py:meth:`ProgressBar.new_solotask`
    """
    # Unique job-id so this task's progress can be tracked/cancelled
    # independently of any other task feeding the same progressbar.
    jobid = uuid.uuid4().hex

    # assign signals
    # Progress signals are always present; caller-supplied signals are
    # merged on top and may override the defaults.
    default_signals = {
        'incr_progress': int,
        'add_progress': int,
    }
    if signals:
        default_signals.update( signals )

    # create task
    task = ThreadedTask(
        callback = callback,
        signals = default_signals,
        *args, **kwds
    )

    # Connections
    # Route the task's progress signals to this widget with the job-id
    # pre-bound; both normal return and exception clear the job's
    # remaining progress.
    task.signal('incr_progress').connect(
        functools.partial( self.incr_progress, jobid=jobid )
    )
    task.signal('add_progress').connect(
        functools.partial( self.add_progress, jobid=jobid )
    )
    task.signal('returned').connect(
        functools.partial( self._handle_return_or_abort, jobid=jobid )
    )
    task.signal('exception').connect(
        functools.partial( self._handle_return_or_abort, jobid=jobid )
    )

    return task
def new_solotask(self, callback, signals=None, connections=None, mutex_expiry=5000):
    """
    Creates a new :py:obj:`SoloThreadedTask` object, adding
    signals to it so that it can update this :py:obj:`ProgressBar`.

    Args:
        callback: the function the task runs in its thread.
        signals: optional mapping of extra signal-name to type, merged
            over the default progress signals (and may override them).
        connections: optional mapping of signal-name to callable(s) to
            connect to the task's signals.
        mutex_expiry: lock expiry (ms) forwarded to the task.
    """
    # assign signals
    # Progress signals are always present; caller-supplied signals are
    # merged on top.
    default_signals = {
        'incr_progress': int,
        'add_progress': int,
    }
    if signals:
        default_signals.update(signals)

    # assign connections
    # There are no built-in default connections, so the caller's mapping
    # is used as-is.  (The previous merge loop's "append to existing
    # entry" branch was dead code: it tested membership in an empty
    # dict.)  Copy so the caller's dict is never shared/mutated.
    default_connections = dict(connections) if connections else {}

    # create task
    solotask = _ProgressSoloThreadedTask(
        progressbar = self,
        callback = callback,
        signals = default_signals,
        connections = default_connections,
        mutex_expiry = mutex_expiry,
    )
    return solotask
def _handle_return_or_abort(self, *args,**kwds):
    # Called when a task returns or raises: drop the job's remaining
    # progress and remember the jobid so late-arriving signals from the
    # same job are ignored.
    # NOTE(review): the fresh `threading.Lock()` provides no real locking
    # (a new lock is created on each call) -- presumably a shared
    # instance lock was intended. TODO confirm.
    with threading.Lock():
        if 'jobid' in kwds:
            if kwds['jobid']:
                self.reset( jobid=kwds['jobid'] )
                self._cancelled_jobids.append( kwds['jobid'] )

                # when there are more than 50x entries,
                # prune the tracked jobids
                if len(self._cancelled_jobids) > 50:
                    self._cancelled_jobids.pop(0)

        self.refresh_progress()
if __name__ == '__main__':
    # Manual demo: show a window with a ProgressBar and run a few
    # SoloThreadedTask jobs against it.
    from qconcurrency import QApplication, Fake
    from Qt import QtWidgets, QtCore, QtGui
    import supercli.logging
    import time
    supercli.logging.SetLog(lv=20)

    def update_progbar( start_wait=0, signalmgr=None ):
        # Demo job: announce 5 required steps, then complete one every
        # 0.2s, honouring abort requests between steps.
        if not signalmgr:
            signalmgr = Fake()

        signalmgr.add_progress.emit(5)
        time.sleep( start_wait )

        for i in range(5):
            signalmgr.handle_if_abort()
            time.sleep(0.2)
            signalmgr.incr_progress.emit(1)
        print('done')

    with QApplication():
        win = QtWidgets.QWidget()
        lyt = QtWidgets.QVBoxLayout()
        bar = ProgressBar()

        win.setLayout(lyt)
        lyt.addWidget(bar)
        win.show()

        solotask = bar.new_solotask(
            callback = update_progbar,
        )
        solotask.start( start_wait=0 )

        # wait before starting next job,
        # keeping eventloop alive so progress can be seen
        for i in range(2):
            time.sleep(0.2)
            QtCore.QCoreApplication.instance().processEvents()

        # Later starts abort the still-running job (solo semantics).
        solotask.start( start_wait=0.5 )
        solotask.start()
        solotask.start()
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Setup Model
@copyright: 2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module (names re-exported to the models loader).
__all__ = ("S3DeployModel",
           "setup_create_yaml_file",
           "setup_create_playbook",
           "setup_get_templates",
           "setup_get_prepop_options",
           "setup_log",
           "setup_rheader",
           "setup_management_exists",
           "setup_UpgradeMethod",
           "setup_refresh",
           "setup_getupgrades",
           "setup_host_validator",
           "setup_upgrade_status",
           )
from ..s3 import *
from gluon import *

import json
import os
import shutil
import socket
import sys
import time

try:
    import ansible.playbook
    import ansible.inventory
    from ansible import callbacks
except ImportError:
    current.log.warning("ansible module needed for Setup")
try:
    import yaml
except ImportError:
    current.log.warning("PyYAML module needed for Setup")
# Timestamp and line formats used by setup_log() for the ansible log files.
TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"
class S3DeployModel(S3Model):
    """
    Data model for remote deployments of Sahana Eden:
    deployments, their servers and instances, plus the package and
    upgrade tracking tables.
    """

    names = ("setup_deployment",
             "setup_server",
             "setup_instance",
             "setup_host",
             "setup_packages",
             "setup_upgrade"
             )

    def model(self):

        T = current.T
        s3 = current.response.s3

        define_table = self.define_table
        configure = self.configure
        add_components = self.add_components
        set_method = self.set_method

        # -------------------------------------------------------------
        # Deployments
        tablename = "setup_deployment"
        define_table(tablename,
                     Field("name",
                           label = T("Name"),
                           required = True,
                           ),
                     Field("distro",
                           label = T("Linux Distribution"),
                           required = True,
                           requires = IS_IN_SET(
                               [
                                   ("wheezy", "Debian Wheezy"),
                                   ("precise", "Ubuntu 14.04 LTS Precise"),
                               ])
                           ),
                     Field("remote_user",
                           label = T("Remote User"),
                           required = True,
                           ),
                     Field("secret_key",
                           label = T("AWS Secret Key"),
                           required = True,
                           ),
                     Field("access_key",
                           label = T("AWS Access Key"),
                           required = True,
                           ),
                     # SSH key stored under uploads/ via the custom
                     # store/retrieve handlers below
                     Field("private_key", "upload",
                           custom_retrieve = retrieve_file,
                           custom_store = store_file,
                           label = T("Private Key"),
                           required = True,
                           ),
                     Field("webserver_type", "integer",
                           label = T("Web Server"),
                           required = True,
                           requires = IS_IN_SET({1:"apache", 2:"cherokee"}),
                           ),
                     Field("db_type", "integer",
                           label = T("Database"),
                           required = True,
                           requires = IS_IN_SET({1:"mysql", 2: "postgresql"}),
                           ),
                     Field("db_password", "password",
                           label = T("Database Password"),
                           required = True,
                           readable = False,
                           ),
                     Field("repo_url",
                           # @ToDo: Add more advanced options
                           default = "https://github.com/flavour/eden",
                           label = T("Eden Repo git URL"),
                           ),
                     Field("template",
                           label = T("Template"),
                           required = True,
                           requires = IS_IN_SET(setup_get_templates(), zero=None),
                           ),
                     # Set to non-zero while a package refresh is running
                     # (see setup_refresh / setup_host_validator)
                     Field("refresh_lock", "integer",
                           default = 0,
                           readable = False,
                           writable = False,
                           ),
                     Field("last_refreshed", "datetime",
                           readable = False,
                           writable = False,
                           ),
                     *s3_meta_fields()
                     )

        # CRUD Strings
        s3.crud_strings[tablename] = Storage(
            label_create_button = T("Add Deployment"),
            label_list_button = T("View Deployments"),
            label_delete_button = T("Delete Deployment"),
            msg_record_created = T("Deployment Created"),
            msg_record_modified = T("Deployment updated"),
            msg_record_deleted = T("Deployment deleted"),
            msg_list_empty = T("No Deployment Saved yet"),
            subtitle_create = T("Add Deployment"),
            title_create = T("Add Deployment"),
            title_list = T("View Deployments"),
            title_update = T("Edit Deployment"),
        )

        configure(tablename,
                  editable = False,
                  deletable = False,
                  insertable = True,
                  listadd = True
                  )

        # -------------------------------------------------------------
        # Servers belonging to a deployment
        tablename = "setup_server"
        define_table(tablename,
                     Field("deployment_id", "reference setup_deployment"),
                     Field("role", "integer",
                           requires = IS_IN_SET({1: "all",
                                                 2: "db",
                                                 3: "webserver",
                                                 4: "eden",
                                                 }),
                           ),
                     Field("host_ip",
                           required = True,
                           ),
                     Field("hostname",
                           label = "Hostname",
                           required = True,
                           ),
                     )

        configure(tablename,
                  # Reject duplicate host IPs across deployments
                  onvalidation = server_validation
                  )

        # -------------------------------------------------------------
        # Instances (prod/test/demo/dev) of a deployment
        tablename = "setup_instance"
        define_table(tablename,
                     Field("deployment_id", "reference setup_deployment"),
                     Field("type", "integer",
                           requires = IS_IN_SET({1: "prod", 2: "test", 3: "demo", 4: "dev"})
                           ),
                     Field("url",
                           requires = IS_URL(),
                           ),
                     Field("prepop_options",
                           label = "Prepop Options",
                           required = True,
                           # Populated dynamically from the chosen template
                           requires = IS_IN_SET([], multiple=True),
                           ),
                     # The scheduler task that deploys this instance
                     Field("scheduler_id", "reference scheduler_task",
                           readable = False,
                           writable = False,
                           ),
                     )

        configure(tablename,
                  deletable = False,
                  editable = False,
                  # Builds + schedules the ansible playbook
                  onaccept = instance_onaccept,
                  )

        add_components("setup_deployment",
                       setup_instance = "deployment_id",
                       setup_server = "deployment_id",
                       )

        # -------------------------------------------------------------
        # Packages (current vs available versions) per deployment
        tablename = "setup_packages"
        define_table(tablename,
                     Field("name",
                           label = T("Package Name"),
                           ),
                     Field("cv",
                           label = T("Current Version"),
                           ),
                     Field("av",
                           label = T("Available Version"),
                           ),
                     Field("type",
                           label = T("Type of Package"),
                           requires = IS_IN_SET(["os", "pip", "git"]),
                           ),
                     Field("deployment",
                           "reference setup_deployment",
                           ),
                     )

        # -------------------------------------------------------------
        # Upgrades scheduled for a deployment
        tablename = "setup_upgrade"
        define_table(tablename,
                     Field("deployment",
                           "reference setup_deployment"
                           ),
                     Field("scheduler",
                           "reference scheduler_task"
                           ),
                     )

        set_method("setup", "deploy",
                   method = "upgrade",
                   action = setup_UpgradeMethod,
                   )

        return dict()

    def defaults(self):
        """
        Safe defaults for model-global names in case module is disabled
        """
        return dict()
# -----------------------------------------------------------------------------
def server_validation(form):
    """
    Onvalidation handler for setup_server:
    reject a host IP that is already assigned to a server.
    """
    ip = form.vars.host_ip
    table = current.s3db.setup_server
    db = current.db
    # Existence check only: fetch at most one id instead of all
    # matching rows with all columns.
    duplicate = db(table.host_ip == ip).select(table.id,
                                               limitby = (0, 1)
                                               ).first()
    if duplicate:
        form.errors["host_ip"] = "Server already in use"
# -----------------------------------------------------------------------------
def instance_onaccept(form):
    """
    Onaccept of setup_instance: build the ansible playbook describing
    the new instance, schedule the deployment task, and store the
    resulting scheduler reference back on the instance record.
    """
    db = current.db
    s3db = current.s3db
    form_vars = form.vars

    # Get deployment id
    itable = s3db.setup_instance
    instance = db(itable.id == form_vars.id).select(itable.deployment_id,
                                                    limitby = (0, 1)
                                                    ).first()
    deployment_id = instance.deployment_id

    # All servers of this deployment, ordered by role
    # (1=all, 2=db, 3=webserver, 4=eden)
    stable = s3db.setup_server
    query = (stable.deployment_id == deployment_id)
    rows = db(query).select(stable.role,
                            stable.host_ip,
                            stable.hostname,
                            orderby = stable.role
                            )
    hosts = []
    for row in rows:
        hosts.append((row.role, row.host_ip))
        # The all-in-one/eden host's name becomes the site hostname.
        # NOTE(review): `hostname` stays unbound if no server has role 1
        # or 4 -- presumably server validation guarantees one exists;
        # TODO confirm.
        if row.role == 1 or row.role == 4:
            hostname = row.hostname

    dtable = s3db.setup_deployment
    deployment = db(dtable.id == deployment_id).select(dtable.db_password,
                                                       dtable.webserver_type,
                                                       dtable.db_type,
                                                       dtable.distro,
                                                       dtable.template,
                                                       dtable.private_key,
                                                       dtable.remote_user,
                                                       limitby=(0, 1)
                                                       ).first()

    prepop_options = str(",".join(form_vars.prepop_options))

    # Decide whether demo data is loaded before or after prod
    # NOTE(review): `demo_type` stays unbound for type 4 (dev) -- verify
    # dev instances are handled elsewhere.
    instance_type = int(form_vars.type)
    if instance_type == 2:
        demo_type = "na"
    elif instance_type == 1 or instance_type == 3:
        # find dtype
        sctable = s3db.scheduler_task
        query = (itable.deployment_id == deployment_id) & \
                (sctable.status == "COMPLETED")
        existing_instances = db(query).select(itable.type,
                                              join = sctable.on(itable.scheduler_id == sctable.id)
                                              )
        if existing_instances:
            demo_type = "afterprod"
        else:
            demo_type = "beforeprod"

    # Map the stored integer choices back to their labels
    webservers = ("apache", "cherokee")
    dbs = ("mysql", "postgresql")
    prepop = ("prod", "test", "demo")

    scheduler_id = setup_create_yaml_file(hosts,
                                          deployment.db_password,
                                          webservers[deployment.webserver_type - 1],
                                          dbs[deployment.db_type - 1],
                                          prepop[instance_type - 1],
                                          prepop_options,
                                          deployment.distro,
                                          False,
                                          hostname,
                                          deployment.template,
                                          form_vars.url,
                                          deployment.private_key,
                                          deployment.remote_user,
                                          demo_type,
                                          )

    # add scheduler fk in current record
    record = db(itable.id == form_vars.id).select().first()
    record.update_record(scheduler_id=scheduler_id)
# -----------------------------------------------------------------------------
def setup_create_yaml_file(hosts, password, web_server, database_type,
                           prepop, prepop_options, distro, local=False,
                           hostname=None, template="default", sitename=None,
                           private_key=None, remote_user=None, demo_type=None):
    """
    Write an ansible playbook (YAML) describing the deployment into
    <app>/yaml/ and schedule a "deploy" task to execute it.

    @param hosts: list of (role, host_ip) tuples, ordered by role;
                  one entry = all-in-one machine, otherwise
                  hosts[0]=db, hosts[1]=webserver, hosts[2]=eden
    @param password: database password
    @param web_server: "apache" or "cherokee"
    @param database_type: "mysql" or "postgresql"
    @param prepop: instance type ("prod"/"test"/"demo")
    @param prepop_options: comma-joined prepopulate options
    @param distro: Linux distribution code ("wheezy"/"precise")
    @param local: unused here -- presumably kept for API compatibility;
                  TODO confirm
    @param private_key: filename of the uploaded SSH key (under uploads/)
    @param remote_user: SSH user ansible connects as
    @param demo_type: "na"/"beforeprod"/"afterprod"

    @return: the scheduler task record from s3task.schedule_task()
    """
    roles_path = "../private/playbook/roles/"

    if len(hosts) == 1:
        # Single machine: one play applying every role to it
        deployment = [
            {
                "hosts": hosts[0][1],
                "sudo": True,
                "remote_user": remote_user,
                "vars": {
                    "password": password,
                    "template": template,
                    "web_server": web_server,
                    "type": prepop,
                    "distro": distro,
                    "prepop_options": prepop_options,
                    "sitename": sitename,
                    "hostname": hostname,
                    "dtype": demo_type,
                    "eden_ip": hosts[0][1],
                    "db_ip": hosts[0][1],
                    "db_type": database_type
                },
                "roles": [
                    "%s%s" % (roles_path, database_type),
                    "%scommon" % roles_path,
                    "%suwsgi" % roles_path,
                    "%sconfigure" % roles_path,
                    "%s%s" % (roles_path, web_server),
                ]
            }
        ]
    else:
        # Three machines: db play, eden play, then webserver play
        deployment = [
            {
                "hosts": hosts[0][1],
                "sudo": True,
                "remote_user": remote_user,
                "vars": {
                    "distro": distro,
                    "dtype": demo_type,
                    "password": password,
                    "type": prepop
                },
                "roles": [
                    "%s%s" % (roles_path, database_type),
                ]
            },
            {
                "hosts": hosts[2][1],
                "sudo": True,
                "remote_user": remote_user,
                "vars": {
                    "dtype": demo_type,
                    "db_ip": hosts[0][1],
                    "db_type": database_type,
                    "hostname": hostname,
                    "password": password,
                    "prepop_options": prepop_options,
                    "sitename": sitename,
                    "template": template,
                    "type": prepop,
                    "web_server": web_server,
                },
                "roles": [
                    "%scommon" % roles_path,
                    "%suwsgi" % roles_path,
                    "%sconfigure" % roles_path,
                ]
            },
            {
                "hosts": hosts[1][1],
                "sudo": True,
                "remote_user": remote_user,
                "vars": {
                    "eden_ip": hosts[2][1],
                    "type": prepop
                },
                "roles": [
                    "%s%s" % (roles_path, web_server),
                ]
            }
        ]

    # Restrict which tagged tasks run for this scenario
    if demo_type == "afterprod":
        only_tags = ["demo"]
    elif prepop == "test":
        only_tags = ["test",]
    else:
        only_tags = ["all"]

    # Write the playbook into <app>/yaml/, creating the dir on first use
    directory = os.path.join(current.request.folder, "yaml")
    name = "deployment_%d" % int(time.time())
    file_path = os.path.join(directory, "%s.yml" % name)
    if not os.path.isdir(directory):
        os.mkdir(directory)
    with open(file_path, "w") as yaml_file:
        yaml_file.write(yaml.dump(deployment, default_flow_style=False))

    # Queue the actual run in the scheduler
    row = current.s3task.schedule_task(
        name,
        vars = {
            "playbook": file_path,
            "private_key": os.path.join(current.request.folder, "uploads", private_key),
            "host": [host[1] for host in hosts],
            "only_tags": only_tags,
        },
        function_name = "deploy",
        repeats = 1,
        timeout = 3600,
        sync_output = 300
    )

    return row
# -----------------------------------------------------------------------------
def setup_create_playbook(playbook, hosts, private_key, only_tags):
    """
    Construct an ansible ``PlayBook`` for the given playbook file,
    wired so that every event is logged into
    eden/yaml/<deployment_name>.log via :class:`CallbackModule`.
    """
    # The log file is named after the playbook file (text before the
    # first dot of its basename).
    deployment_name = os.path.split(playbook)[1].rsplit(".")[0]
    logger = CallbackModule(deployment_name)

    pb = ansible.playbook.PlayBook(
        playbook = playbook,
        inventory = ansible.inventory.Inventory(hosts),
        callbacks = logger,
        runner_callbacks = logger,
        stats = callbacks.AggregateStats(),
        private_key_file = private_key,
        only_tags = only_tags
    )
    return pb
# -----------------------------------------------------------------------------
def setup_get_prepop_options(template):
    """
    Return the prepopulate options offered by a template's config.

    Reads ``settings.base.prepopulate_options`` from the template's
    config module and returns the option names; "mandatory" is excluded
    because it is always applied implicitly.
    """
    module_name = "applications.eden_deployment.modules.templates.%s.config" % template
    __import__(module_name)
    # `sys` comes from the module imports (it was previously missing,
    # making this function raise NameError)
    config = sys.modules[module_name]
    prepopulate_options = config.settings.base.get("prepopulate_options")
    if isinstance(prepopulate_options, dict):
        # Work on a copy: deleting from the original dict would mutate
        # the template's live settings for the rest of the process
        prepopulate_options = dict(prepopulate_options)
        prepopulate_options.pop("mandatory", None)
        return prepopulate_options.keys()
    else:
        return ["mandatory"]
# -----------------------------------------------------------------------------
def setup_log(filename, category, data):
    """
    Append a timestamped message to eden/yaml/<filename>.log.

    @param filename: log file basename (the deployment name)
    @param category: message category (e.g. "OK", "FAILED", "DEBUG")
    @param data: payload; dicts are JSON-encoded (with any ansible
                 "invocation" entry prefixed), except fact-gathering
                 results carrying "verbose_override", which are elided
    """
    if isinstance(data, dict):
        if 'verbose_override' in data:
            # avoid logging extraneous data from facts
            data = 'omitted'
        else:
            # Copy first so the caller's dict is not mutated by pop()
            data = data.copy()
            invocation = data.pop('invocation', None)
            data = json.dumps(data)
            if invocation is not None:
                data = json.dumps(invocation) + " => %s " % data

    path = os.path.join(current.request.folder, "yaml", "%s.log" % filename)
    now = time.strftime(TIME_FORMAT, time.localtime())
    # Context manager guarantees the handle is closed even if write fails
    with open(path, "a") as fd:
        fd.write(MSG_FORMAT % dict(now=now, category=category, data=data))
# -----------------------------------------------------------------------------
def setup_get_templates():
    """
    Return the names of all available Eden templates, i.e. every
    directory under modules/templates that contains a config.py.
    """
    templates_dir = os.path.join(current.request.folder, "modules", "templates")
    found = set()
    for folder, _subfolders, files in os.walk(templates_dir):
        if 'config.py' in files:
            found.add(os.path.basename(folder))
    return found
# -----------------------------------------------------------------------------
def store_file(file, filename=None, path=None):
    """
    Custom store handler for upload fields: copy the uploaded stream
    into the application's uploads/ directory.

    NOTE: the parameter name `file` shadows the builtin but is part of
    the web2py custom_store interface, so it is kept.

    @param file: open file-like object with the uploaded content
    @param filename: name to store the file under
    @param path: ignored -- uploads/ is always used
    @return: the stored filename
    """
    path = os.path.join(current.request.folder, "uploads")
    if not os.path.exists(path):
        os.makedirs(path)
    pathfilename = os.path.join(path, filename)
    # `with` closes the destination even if the copy raises
    with open(pathfilename, 'wb') as dest_file:
        shutil.copyfileobj(file, dest_file)
    # Private keys etc. must be owner-accessible only
    # (0o600 is the portable octal literal)
    os.chmod(pathfilename, 0o600)
    return filename
# -----------------------------------------------------------------------------
def retrieve_file(filename, path=None):
    """
    Custom retrieve handler for upload fields: open a previously stored
    upload. The `path` argument is ignored -- uploads/ is always used.

    @return: (filename, open binary file handle)
    """
    uploads = os.path.join(current.request.folder, "uploads")
    handle = open(os.path.join(uploads, filename), 'rb')
    return (filename, handle)
# -----------------------------------------------------------------------------
class CallbackModule(object):
    """
    logs playbook results, per deployment in eden/yaml

    Every ansible event is forwarded to setup_log(), which appends a
    timestamped line to eden/yaml/<filename>.log.
    """
    def __init__(self, filename):
        # Basename of the log file (the deployment/playbook name)
        self.filename = filename

    def on_any(self, *args, **kwargs):
        pass

    def on_failed(self, host, res, ignore_errors=False):
        setup_log(self.filename, 'FAILED', res)

    def on_ok(self, host, res):
        setup_log(self.filename, 'OK', res)

    def on_error(self, host, msg):
        setup_log(self.filename, 'ERROR', msg)

    def on_skipped(self, host, item=None):
        setup_log(self.filename, 'SKIPPED', '...')

    def on_unreachable(self, host, res):
        setup_log(self.filename, 'UNREACHABLE', res)

    def on_no_hosts(self):
        pass

    def on_async_poll(self, host, res, jid, clock):
        # Fixed: setup_log() takes a single data argument -- passing
        # host/res/jid/clock as separate positional arguments (as
        # before) raised a TypeError
        setup_log(self.filename, 'DEBUG', (host, res, jid, clock))

    def on_async_ok(self, host, res, jid):
        # Fixed: collapse the extra arguments into one payload
        setup_log(self.filename, 'DEBUG', (host, res, jid))

    def on_async_failed(self, host, res, jid):
        setup_log(self.filename, 'ASYNC_FAILED', res)

    def on_start(self):
        setup_log(self.filename, 'DEBUG', 'on_start')

    def on_notify(self, host, handler):
        setup_log(self.filename, 'DEBUG', host)

    def on_no_hosts_matched(self):
        setup_log(self.filename, 'DEBUG', 'no_hosts_matched')

    def on_no_hosts_remaining(self):
        setup_log(self.filename, 'DEBUG', 'no_hosts_remaining')

    def on_task_start(self, name, is_conditional):
        setup_log(self.filename, 'DEBUG', 'Starting %s' % name)

    def on_vars_prompt(self, varname, private=True, prompt=None,
                       encrypt=None, confirm=False, salt_size=None,
                       salt=None, default=None):
        pass

    def on_setup(self):
        setup_log(self.filename, 'DEBUG', 'on_setup')

    def on_import_for_host(self, host, imported_file):
        setup_log(self.filename, 'IMPORTED', imported_file)

    def on_not_import_for_host(self, host, missing_file):
        setup_log(self.filename, 'NOTIMPORTED', missing_file)

    def on_play_start(self, pattern):
        setup_log(self.filename, 'play_start', pattern)

    def on_stats(self, stats):
        setup_log(self.filename, 'DEBUG', stats)
# -----------------------------------------------------------------------------
def setup_rheader(r, tabs=None):
    """
    Resource component page header

    The `tabs` default was a mutable list ([]); it is now None --
    the value is rebuilt below in any case, so callers see no change.
    """
    if r.representation == "html":
        T = current.T
        tabs = [(T("Deployment Details"), None),
                (T("Servers"), "server"),
                (T("Instances"), "instance"),
                ]
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(rheader_tabs)
        return rheader
# -----------------------------------------------------------------------------
def setup_management_exists(_type, _id, deployment_id):
    """ Returns True/False depending on whether a management task
        is already pending or running for an instance
        (scheduler status RUNNING/QUEUED/ASSIGNED)
    """
    ttable = current.s3db.scheduler_task
    # scheduler_task.args stores the task arguments JSON-encoded
    args = '["%s", "%s", "%s"]' % (_type, _id, deployment_id)
    query = ((ttable.function_name == "setup_management") & \
             (ttable.args == args) & \
             (ttable.status.belongs(["RUNNING", "QUEUED", "ASSIGNED"])))
    exists = current.db(query).select(ttable.id,
                                      limitby = (0, 1)).first()
    # .first() is None when no matching task exists
    return exists is not None
# -----------------------------------------------------------------------------
class setup_UpgradeMethod(S3Method):
    """
    REST method handler for upgrading a deployment's packages:
    GET shows the packages with available upgrades, POST schedules
    an ansible run to upgrade the selected ones.
    """

    def apply_method(self, r, **attr):
        """
        @param r: the S3Request
        @param attr: controller attributes
        """
        s3db = current.s3db
        db = current.db
        T = current.T
        response = current.response

        record = r.record
        dtable = s3db.setup_deploy
        stable = s3db.scheduler_task

        # All successfully deployed machines sharing this record's host
        query = (dtable.host == record.host) & \
                (stable.status == "COMPLETED")
        machines = db(query).select(
            dtable.id.with_alias("deployment"),
            dtable.type.with_alias("type"),
            join = [
                stable.on(dtable.scheduler_id == stable.id)
            ],
            distinct = True
        )
        machine_ids = [machine.deployment for machine in machines]
        # None when it is safe to proceed, else an error message
        validate = s3db.setup_host_validator(machine_ids)

        if r.http == "GET":
            if record.last_refreshed is None:
                # Package list never fetched: refresh first
                redirect(URL(c="setup", f="refresh", args=record.id))

            # Data table of packages that have an available upgrade
            resource = s3db.resource("setup_packages")
            totalrows = resource.count()
            list_fields = ["id",
                           "name",
                           "cv",
                           "av",
                           ]
            package_filter = (s3db.setup_packages.deployment == record.id) & \
                             (s3db.setup_packages.cv != s3db.setup_packages.av)
            resource.add_filter(package_filter)
            data = resource.select(list_fields,
                                   limit = totalrows,
                                   )
            dt = S3DataTable(data["rfields"], data["rows"])
            if validate is not None:
                # Upgrades currently blocked: no bulk action; the page
                # script polls until the host becomes available
                dt_bulk_actions = None
                appname = current.request.application
                current.response.s3.scripts.append("/%s/static/scripts/S3/s3.setup.js" % appname)
            else:
                dt_bulk_actions = [(T("Upgrade"), "upgrade")]
            items = dt.html(totalrows,
                            totalrows,
                            dt_pagination = "false",
                            dt_bulk_actions = dt_bulk_actions,
                            )
            output = dict(items=items)
            response.view = "list.html"

        elif r.http == "POST":
            if validate is not None:
                current.session.error = validate
                redirect(URL(c="setup", f="%s_deploy" % record.type, args=[record.id, "upgrade"]))

            post_vars = r.post_vars
            selected = post_vars.selected
            if selected:
                selected = selected.split(",")
            else:
                selected = []

            query = FS("id").belongs(selected)
            presource = s3db.resource("setup_packages", filter=query)
            packages = presource.select(["name", "type"], as_rows=True)

            # Partition the selection by package manager
            system_packages = []
            pip_packages = []
            git_packages = []
            for package in packages:
                if package.type == "os":
                    system_packages.append(package.name)
                elif package.type == "pip":
                    pip_packages.append(package.name)
                elif package.type == "git":
                    if package.name == "web2py":
                        # Fixed: the dict keys must be string literals --
                        # bare `name`/`chdir` raised a NameError here
                        git_packages.append({"name": package.name,
                                             "chdir": "/home/%s" % record.type,
                                             })

            # Write the upgrade playbook into <app>/yaml/
            directory = os.path.join(current.request.folder, "yaml")
            name = "upgrade_%d" % int(time.time())
            file_path = os.path.join(directory, "%s.yml" % name)
            roles_path = "../private/playbook/roles/"
            upgrade = [
                {
                    "hosts": record.host,
                    "sudo": True,
                    "vars": {
                        "system_packages": system_packages,
                        "pip_packages": pip_packages,
                        "git_packages": git_packages,
                    },
                    "roles": [
                        "%supgrades" % roles_path,
                    ]
                }
            ]
            if record.type == "remote":
                upgrade[0]["remote_user"] = record.remote_user
            else:
                upgrade[0]["connection"] = "local"

            if not os.path.isdir(directory):
                os.mkdir(directory)
            with open(file_path, "w") as yaml_file:
                yaml_file.write(yaml.dump(upgrade, default_flow_style=False))

            if record.private_key:
                private_key = os.path.join(current.request.folder, "uploads", record.private_key)
            else:
                private_key = None

            # Queue the upgrade run and link it to this deployment
            only_tags = ['all']
            row = current.s3task.schedule_task(
                name,
                vars = {
                    "playbook": file_path,
                    "private_key": private_key,
                    "host": [record.host],
                    "only_tags": only_tags,
                },
                function_name = "deploy",
                repeats = 1,
                timeout = 3600,
                sync_output = 300
            )
            # Add record to setup_upgrade
            utable = s3db.setup_upgrade
            utable.insert(deployment=record.id, scheduler=row.id)
            current.session.flash = T("Upgrade Queued. Please wait while it is completed")
            redirect(URL(c="setup", f="%s_deploy" % record.type, args=[record.id, "upgrade"]))

        return output
# -----------------------------------------------------------------------------
def setup_refresh(id):
    """
    Refresh the package lists (current vs available versions) of the
    deployment with the given id and of all machines sharing its host.

    Returns a dict with keys: success (bool), msg (user message),
    f (controller function to redirect to) and args.
    """
    T = current.T
    db = current.db
    s3db = current.s3db

    dtable = s3db.setup_deploy
    query = (dtable.id == id)
    record = db(query).select(dtable.id,
                              dtable.host,
                              dtable.type,
                              dtable.prepop,
                              dtable.remote_user,
                              dtable.private_key,
                              ).first()
    if not record:
        return {"success": False,
                "msg": T("Record Not Found"),
                "f": "index",
                "args": None
                }

    # Get machines with the same host as record
    ptable = s3db.setup_packages
    stable = s3db.scheduler_task
    utable = s3db.setup_upgrade
    query = (dtable.host == record.host) & \
            (stable.status == "COMPLETED")
    machines = db(query).select(
        dtable.id.with_alias("deployment"),
        dtable.type.with_alias("type"),
        join = [
            stable.on(dtable.scheduler_id == stable.id)
        ],
        distinct = True
    )

    # Check if machines have a refresh running
    machine_ids = [machine.deployment for machine in machines]
    validate = s3db.setup_host_validator(machine_ids)
    if validate is not None:
        return {"success": False,
                "msg": validate,
                "f": str("%s_deploy" % record.type),
                "args": [record.id, "read"]
                }

    # set the refresh lock
    for machine in machines:
        db(dtable.id == machine.deployment).update(refresh_lock=1)

    # find new packages
    if record.type == "local":
        response = s3db.setup_getupgrades(record.host, record.prepop)
    else:
        response = s3db.setup_getupgrades(record.host,
                                          record.prepop,
                                          record.remote_user,
                                          record.private_key,
                                          )
    # "dark" hosts could not be contacted by ansible
    # NOTE(review): the refresh lock is NOT released on this early
    # return -- verify whether that is intended.
    if response["dark"]:
        return {"success": False,
                "msg": T("Error contacting the server"),
                "f": str("%s_deploy" % record.type),
                "args": [record.id, "upgrade"]
                }

    # Call ansible runner
    # get a list of current packages
    packages = db(ptable.deployment == record.id).select(ptable.name)
    old_set = set()
    for package in packages:
        old_set.add(package.name)

    new_set = set()
    fetched_packages = response["contacted"][record.host]["packages"]
    for package in fetched_packages:
        new_set.add(package["name"])

    # Partition into never-seen, known-with-new-version and up-to-date
    new_packages = new_set.difference(old_set)
    upgrade_packages = new_set.intersection(old_set)
    uptodate_packages = old_set.difference(new_set)

    for package in fetched_packages:
        if package["name"] in new_packages:
            for machine in machines:
                # web2py versions are tracked per-machine, not shared
                if package["name"] == "web2py" and machine.deployment != record.id:
                    continue
                ptable.insert(name = package["name"],
                              cv = package["cv"],
                              av = package["av"],
                              type = package["type"],
                              deployment = machine.deployment,
                              )
        elif package["name"] in upgrade_packages:
            for machine in machines:
                if package["name"] == "web2py" and machine.deployment != record.id:
                    continue
                query = (ptable.name == package["name"]) & \
                        (ptable.deployment == machine.deployment)
                db(query).update(av=package["av"])

    # Packages no longer reported: mark available == current
    for package in uptodate_packages:
        for machine in machines:
            if package == "web2py" and machine.deployment != record.id:
                continue
            query = (ptable.name == package) & \
                    (ptable.deployment == machine.deployment)
            row = db(query).select().first()
            row.av = row.cv
            row.update_record()

    # release the refresh lock
    for machine in machines:
        db(dtable.id == machine.deployment).update(refresh_lock=0)

    # update last refreshed
    import datetime
    record.update_record(last_refreshed=datetime.datetime.now())

    return {"success": True,
            "msg": T("Refreshed Packages"),
            "f": str("%s_deploy" % record.type),
            "args": [record.id, "upgrade"]
            }
# -----------------------------------------------------------------------------
def setup_host_validator(machine_ids):
    """ Helper Function that checks whether it's safe to allow
        upgrade/deployments/refresh packages on given instances

        @return: an error message if something is in progress,
                 otherwise None (safe to proceed)
    """
    s3db = current.s3db
    db = current.db
    T = current.T
    dtable = s3db.setup_deploy
    stable = s3db.scheduler_task
    utable = s3db.setup_upgrade

    # A package refresh in progress?
    if len(machine_ids) > 1:
        query = (dtable.id.belongs(machine_ids)) & \
                (dtable.refresh_lock != 0)
    else:
        query = (dtable.id == machine_ids[0]) & \
                (dtable.refresh_lock != 0)
    rows = db(query).select(dtable.id)
    if rows:
        return T("A refresh is in progress. Please wait for it to finish")

    # or an upgrade in process
    if len(machine_ids) > 1:
        query = (utable.deployment.belongs(machine_ids)) & \
                ((stable.status != "COMPLETED") & (stable.status != "FAILED"))
    else:
        query = (utable.deployment == machine_ids[0]) & \
                ((stable.status != "COMPLETED") & (stable.status != "FAILED"))
    rows = db(query).select(utable.deployment,
                            join=stable.on(utable.scheduler == stable.id)
                            )
    if rows:
        return T("An upgrade is in progress. Please wait for it to finish")

    # or even a deployment in process
    if len(machine_ids) > 1:
        query = (dtable.id.belongs(machine_ids)) & \
                ((stable.status != "COMPLETED") & (stable.status != "FAILED"))
    else:
        query = (dtable.id == machine_ids[0]) & \
                ((stable.status != "COMPLETED") & (stable.status != "FAILED"))
    # Fixed: deployments join the scheduler via their own scheduler_id;
    # the previous join through utable.scheduler was copy-pasted from
    # the upgrade check above
    rows = db(query).select(dtable.id,
                            join = stable.on(dtable.scheduler_id == stable.id)
                            )
    if rows:
        return T("A deployment is in progress. Please wait for it to finish")
# -----------------------------------------------------------------------------
def setup_getupgrades(host, web2py_path, remote_user=None, private_key=None):
    """
    Query a machine for upgradable packages via the custom ansible
    "upgrade" module shipped in private/playbook/library.

    @param host: machine address / inventory pattern
    @param web2py_path: instance type -- web2py lives in /home/<type>
    @param remote_user: SSH user (remote machines only)
    @param private_key: filename of the uploaded SSH key (remote only)
    @return: the ansible runner result dict ("contacted"/"dark")
    """
    import ansible.runner
    module_path = os.path.join(current.request.folder, "private", "playbook", "library")
    if private_key:
        private_key = os.path.join(current.request.folder, "uploads", private_key)
    inventory = ansible.inventory.Inventory([host])

    # Common runner options; SSH credentials only when provided
    # (previously the whole constructor call was duplicated per branch)
    options = dict(module_name = "upgrade",
                   module_path = module_path,
                   module_args = "web2py_path=/home/%s" % web2py_path,
                   pattern = host,
                   inventory = inventory,
                   sudo = True,
                   )
    if private_key and remote_user:
        options["remote_user"] = remote_user
        options["private_key_file"] = private_key

    runner = ansible.runner.Runner(**options)
    response = runner.run()
    return response
def setup_upgrade_status(_id):
    """
    Return a user message if the most recent upgrade of the given
    deployment has completed, else None.
    """
    s3db = current.s3db
    db = current.db
    T = current.T
    utable = s3db.setup_upgrade
    stable = s3db.scheduler_task
    query = (utable.deployment == _id)
    row = db(query).select(stable.status,
                           join = utable.on(stable.id == utable.scheduler)
                           ).last()
    # Guard: .last() is None when no upgrade was ever scheduled --
    # previously this raised AttributeError
    if row and row.status == "COMPLETED":
        return T("Upgrade Completed! Refreshing the page in 5 seconds")
| |
"""
fs.contrib.dropboxfs
====================
A FS object that integrates with Dropbox.
"""
import time
import shutil
import optparse
import datetime
import tempfile
import calendar
from UserDict import UserDict
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.filelike import StringIO
from dropbox import rest
from dropbox import client
from dropbox import session
# Items in cache are considered expired after 5 minutes.
CACHE_TTL = 300
# The format Dropbox uses for times.
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S +0000'
# Max size for spooling to memory before using disk (5M).
MAX_BUFFER = 1024 ** 2 * 5
class ContextManagerStream(object):
    """
    Wrap a file-like object so it supports iteration (in 16K chunks)
    and the context-manager protocol; every other attribute access is
    delegated to the wrapped stream.
    """
    def __init__(self, temp, name):
        self.temp = temp  # the wrapped file-like object
        self.name = name  # the path/name this stream corresponds to

    def __iter__(self):
        # Yield successive 16K chunks; stop on EOF (falsy read result).
        chunk = self.read(16384)
        while chunk:
            yield chunk
            chunk = self.read(16384)

    def __getattr__(self, name):
        # Delegate anything we don't define to the underlying stream.
        return getattr(self.temp, name)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()
# TODO: these classes can probably be replaced with
# tempfile.SpooledTemporaryFile, however I am unsure at this moment if doing
# so would be bad since it is only available in Python 2.6+.
class SpooledWriter(ContextManagerStream):
    """Buffers written bytes in a StringIO until max_buffer is reached,
    then transparently spills everything to an on-disk temporary file.
    On close() the accumulated data is uploaded via client.put_file()."""

    def __init__(self, client, name, max_buffer=MAX_BUFFER):
        self.client = client
        self.max_buffer = max_buffer
        self.bytes = 0  # total number of bytes written so far
        super(SpooledWriter, self).__init__(StringIO(), name)

    def __len__(self):
        return self.bytes

    def write(self, data):
        if self.temp.tell() + len(data) >= self.max_buffer:
            # Past the in-memory limit: migrate everything written so
            # far into a temporary file and continue writing there.
            spill = tempfile.TemporaryFile()
            self.temp.seek(0)
            shutil.copyfileobj(self.temp, spill)
            self.temp = spill
        self.temp.write(data)
        self.bytes += len(data)

    def close(self):
        # Flush the buffer if it supports it (StringIO does not).
        flush = getattr(self.temp, 'flush', None)
        if flush is not None:
            flush()
        # Upload from the start of the buffer, then discard it.
        self.temp.seek(0)
        self.client.put_file(self.name, self, overwrite=True)
        self.temp.close()
class SpooledReader(ContextManagerStream):
    """
    Reads the entire file from the remote server into a buffer or temporary
    file. It can then satisfy read(), seek() and other calls using the local
    file.
    """
    def __init__(self, client, name, max_buffer=MAX_BUFFER):
        self.client = client
        r = self.client.get_file(name)
        self.bytes = int(r.getheader('Content-Length'))
        # Spool to disk only for payloads larger than max_buffer.
        # (Fixed: the size check previously compared the HTTP response
        # object itself -- `r > max_buffer` -- instead of the byte count,
        # so the in-memory buffer was effectively never used.)
        if self.bytes > max_buffer:
            temp = tempfile.TemporaryFile()
        else:
            temp = StringIO()
        shutil.copyfileobj(r, temp)
        temp.seek(0)
        super(SpooledReader, self).__init__(temp, name)

    def __len__(self):
        return self.bytes
class ChunkedReader(ContextManagerStream):
""" A file-like that provides access to a file with dropbox API"""
"""Reads the file from the remote server as requested.
It can then satisfy read()."""
def __init__(self, client, name):
self.client = client
try:
self.r = self.client.get_file(name)
except rest.ErrorResponse, e:
raise RemoteConnectionError(opname='get_file', path=name,
details=e)
self.bytes = int(self.r.getheader('Content-Length'))
self.name = name
self.closed = False
self.pos = 0
self.seek_pos = 0
def __len__(self):
return self.bytes
def __iter__(self):
return self
def seek(self, offset, whence=0):
"""
Change the stream position to the given byte offset in the file-like
object.
"""
if (whence == 0):
self.seek_pos = offset
elif (whence == 1):
self.seek_pos += offset
elif (whence == 2):
self.seek_pos = self.size + offset
def tell(self):
""" Return the current stream position. """
return self.seek_pos
def next(self):
"""
Read the data until all data is read.
data is empty string when there is no more data to read.
"""
data = self.read()
if data is None:
raise StopIteration()
return data
def read(self, amt=None):
""" Read a piece of the file from dropbox. """
if not self.r.isclosed():
# Do some fake seeking
if self.seek_pos < self.pos:
self.r.close()
self.r = self.client.get_file(self.name)
self.r.read(self.seek_pos)
elif self.seek_pos > self.pos:
# Read ahead enough to reconcile pos and seek_pos
self.r.read(self.pos - self.seek_pos)
self.pos = self.seek_pos
# Update position pointers
if amt:
self.pos += amt
self.seek_pos += amt
else:
self.pos = self.bytes
self.seek_pos = self.bytes
return self.r.read(amt)
else:
self.close()
def readline(self, size=-1):
""" Not implemented. Read and return one line from the stream. """
raise NotImplementedError()
def readlines(self, hint=-1):
"""
Not implemented. Read and return a list of lines from the stream.
"""
raise NotImplementedError()
def writable(self):
""" The stream does not support writing. """
return False
def writelines(self, lines):
""" Not implemented. Write a list of lines to the stream. """
raise NotImplementedError()
def close(self):
"""
Flush and close this stream. This method has no effect if the file
is already closed. As a convenience, it is allowed to call this method
more than once; only the first call, however, will have an effect.
"""
# It's a memory leak if self.r not closed.
if not self.r.isclosed():
self.r.close()
if not self.closed:
self.closed = True
class CacheItem(object):
    """Represents a path in the cache. There are two components to a path:
    its individual metadata, and the children contained within it."""

    def __init__(self, metadata=None, children=None, timestamp=None):
        self.metadata = metadata    # Raw Dropbox metadata dict (or None).
        self.children = children    # List of child basenames (or None).
        if timestamp is None:
            timestamp = time.time()
        self.timestamp = timestamp  # Creation/renewal time for TTL checks.

    def add_child(self, name):
        """Record ``name`` as a child of this path."""
        if self.children is None:
            self.children = [name]
        else:
            self.children.append(name)

    def del_child(self, name):
        """Forget ``name`` as a child of this path; no-op if unknown."""
        if self.children is None:
            return
        try:
            i = self.children.index(name)
        except ValueError:
            return
        self.children.pop(i)

    def _get_expired(self):
        """Whether this item is older than CACHE_TTL seconds.

        BUG FIX: the original returned True or (implicitly) None; return
        an explicit bool in both cases. None and False are equally falsy,
        so callers are unaffected.
        """
        return self.timestamp <= time.time() - CACHE_TTL
    expired = property(_get_expired)

    def renew(self):
        """Reset the TTL clock to now."""
        self.timestamp = time.time()
class DropboxCache(UserDict):
    """Path-keyed cache of CacheItems that keeps parent child-lists in sync."""

    def set(self, path, metadata):
        """Cache ``metadata`` for ``path`` and register it with its parent."""
        self[path] = CacheItem(metadata)
        parent, base = pathsplit(path)
        parent_item = self.get(parent)
        if parent_item:
            parent_item.add_child(base)

    def pop(self, path, default=None):
        """Evict ``path`` from the cache, unlinking it from its parent."""
        removed = UserDict.pop(self, path, default)
        parent, base = pathsplit(path)
        parent_item = self.get(parent)
        if parent_item:
            parent_item.del_child(base)
        return removed
class DropboxClient(client.DropboxClient):
"""A wrapper around the official DropboxClient. This wrapper performs
caching as well as converting errors to fs exceptions."""
def __init__(self, *args, **kwargs):
super(DropboxClient, self).__init__(*args, **kwargs)
self.cache = DropboxCache()
# Below we split the DropboxClient metadata() method into two methods
# metadata() and children(). This allows for more fine-grained fetches
# and caching.
def metadata(self, path, cache_read=True):
"Gets metadata for a given path."
item = self.cache.get(path) if cache_read else None
if not item or item.metadata is None or item.expired:
try:
metadata = super(DropboxClient, self).metadata(
path, include_deleted=False, list=False)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(path)
raise RemoteConnectionError(opname='metadata', path=path,
details=e)
if metadata.get('is_deleted', False):
raise ResourceNotFoundError(path)
item = self.cache[path] = CacheItem(metadata)
# Copy the info so the caller cannot affect our cache.
return dict(item.metadata.items())
def children(self, path):
"Gets children of a given path."
update, hash = False, None
item = self.cache.get(path)
if item:
if item.expired:
update = True
if item.metadata and item.children:
hash = item.metadata['hash']
else:
if not item.metadata.get('is_dir'):
raise ResourceInvalidError(path)
if not item.children:
update = True
else:
update = True
if update:
try:
metadata = super(DropboxClient, self).metadata(
path, hash=hash, include_deleted=False, list=True)
children = []
contents = metadata.pop('contents')
for child in contents:
if child.get('is_deleted', False):
continue
children.append(basename(child['path']))
self.cache[child['path']] = CacheItem(child)
item = self.cache[path] = CacheItem(metadata, children)
except rest.ErrorResponse, e:
if not item or e.status != 304:
raise RemoteConnectionError(opname='metadata', path=path,
details=e)
# We have an item from cache (perhaps expired), but it's
# hash is still valid (as far as Dropbox is concerned),
# so just renew it and keep using it.
item.renew()
return item.children
def file_create_folder(self, path):
"Add newly created directory to cache."
try:
metadata = super(DropboxClient, self).file_create_folder(path)
except rest.ErrorResponse, e:
if e.status == 404:
raise ParentDirectoryMissingError(path)
if e.status == 403:
raise DestinationExistsError(path)
raise RemoteConnectionError(opname='file_create_folder', path=path,
details=e)
self.cache.set(path, metadata)
def file_copy(self, src, dst):
try:
metadata = super(DropboxClient, self).file_copy(src, dst)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(src)
if e.status == 403:
raise DestinationExistsError(dst)
raise RemoteConnectionError(opname='file_copy', path=path,
details=e)
self.cache.set(dst, metadata)
def file_move(self, src, dst):
try:
metadata = super(DropboxClient, self).file_move(src, dst)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(src)
if e.status == 403:
raise DestinationExistsError(dst)
raise RemoteConnectionError(opname='file_move', path=path,
details=e)
self.cache.pop(src, None)
self.cache.set(dst, metadata)
def file_delete(self, path):
try:
super(DropboxClient, self).file_delete(path)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(path)
if e.status == 400 and 'must not be empty' in str(e):
raise DirectoryNotEmptyError(path)
raise
self.cache.pop(path, None)
def put_file(self, path, f, overwrite=False):
try:
super(DropboxClient, self).put_file(path, f, overwrite=overwrite)
except rest.ErrorResponse, e:
raise RemoteConnectionError(opname='put_file', path=path,
details=e)
self.cache.pop(dirname(path), None)
def create_client(app_key, app_secret, access_type, token_key, token_secret):
    """Uses token from create_token() to gain access to the API."""
    dropbox_session = session.DropboxSession(app_key, app_secret, access_type)
    dropbox_session.set_token(token_key, token_secret)
    return DropboxClient(dropbox_session)
def metadata_to_info(metadata, localtime=False):
    """Convert a raw Dropbox metadata dict into an fs-style info dict.

    :param metadata: Metadata dict as returned by the Dropbox API. The
        'is_dir' and 'bytes' keys are popped from it.
    :param localtime: If True, treat the parsed timestamp as UTC and
        convert to the local timezone; otherwise interpret it as-is.
    :returns: Dict with 'size', 'isdir', 'isfile' and, when a parsable
        modification time is present, 'modified_time' (a datetime).
    """
    isdir = metadata.pop('is_dir', False)
    info = {
        'size': metadata.pop('bytes', 0),
        'isdir': isdir,
        'isfile': not isdir,
    }
    try:
        # Prefer the client-supplied mtime when present.
        if 'client_mtime' in metadata:
            mtime = metadata.get('client_mtime')
        else:
            mtime = metadata.get('modified')
        if mtime:
            # Parse date/time from Dropbox as struct_time.
            mtime = time.strptime(mtime, TIME_FORMAT)
            if localtime:
                # Convert time to local timezone in seconds.
                mtime = calendar.timegm(mtime)
            else:
                mtime = time.mktime(mtime)
            # Convert to datetime object, store in modified_time.
            info['modified_time'] = datetime.datetime.fromtimestamp(mtime)
    except (KeyError, ValueError):
        # BUG FIX: only KeyError was caught, but dict.get() never raises
        # it -- an unparsable date string raised ValueError and escaped.
        # A malformed timestamp now just means "no mtime available".
        pass
    return info
class DropboxFS(FS):
    """A FileSystem that stores data in Dropbox."""

    _meta = {'thread_safe': True,
             'virtual': False,
             'read_only': False,
             'unicode_paths': True,
             'case_insensitive_paths': True,
             'network': True,
             'atomic.setcontents': False,
             'atomic.makedir': True,
             'atomic.rename': True,
             'mime_type': 'virtual/dropbox', }

    def __init__(self, app_key, app_secret, access_type, token_key,
                 token_secret, localtime=False, thread_synchronize=True):
        """Create an fs that interacts with Dropbox.

        :param app_key: Your app key assigned by Dropbox.
        :param app_secret: Your app secret assigned by Dropbox.
        :param access_type: Type of access requested, 'dropbox' or 'app_folder'.
        :param token_key: The oAuth key you received after authorization.
        :param token_secret: The oAuth secret you received after authorization.
        :param localtime: Convert remote file times to the local timezone.
        :param thread_synchronize: set to True (default) to enable thread-safety
        """
        super(DropboxFS, self).__init__(thread_synchronize=thread_synchronize)
        self.client = create_client(app_key, app_secret, access_type,
                                    token_key, token_secret)
        self.localtime = localtime

    def __str__(self):
        return "<DropboxFS: >"

    def __unicode__(self):
        return u"<DropboxFS: >"

    def getmeta(self, meta_name, default=NoDefaultMeta):
        # NOTE(review): relies on a `read_only` attribute that is not
        # defined in this class -- presumably provided by the FS base
        # class; confirm.
        if meta_name == 'read_only':
            return self.read_only
        return super(DropboxFS, self).getmeta(meta_name, default)

    @synchronize
    def open(self, path, mode="rb", **kwargs):
        """Open a remote file: chunked reads, or a spooled writer."""
        if 'r' in mode:
            return ChunkedReader(self.client, path)
        else:
            return SpooledWriter(self.client, path)

    @synchronize
    def getcontents(self, path, mode="rb"):
        path = abspath(normpath(path))
        # BUG FIX: was `self.open(self, path, mode)` -- `self` was passed
        # again as the first positional argument, shifting `path` into the
        # mode parameter and `mode` into kwargs.
        return self.open(path, mode).read()

    def setcontents(self, path, data, *args, **kwargs):
        path = abspath(normpath(path))
        self.client.put_file(path, data, overwrite=True)

    def desc(self, path):
        return "%s in Dropbox" % path

    def getsyspath(self, path, allow_none=False):
        "Returns a path as the Dropbox API specifies."
        if allow_none:
            return None
        return client.format_path(abspath(normpath(path)))

    def isdir(self, path):
        try:
            info = self.getinfo(path)
            return info.get('isdir', False)
        except ResourceNotFoundError:
            return False

    def isfile(self, path):
        try:
            info = self.getinfo(path)
            return not info.get('isdir', False)
        except ResourceNotFoundError:
            return False

    def exists(self, path):
        try:
            self.getinfo(path)
            return True
        except ResourceNotFoundError:
            return False

    def listdir(self, path="/", wildcard=None, full=False, absolute=False,
                dirs_only=False, files_only=False):
        path = abspath(normpath(path))
        children = self.client.children(path)
        return self._listdir_helper(path, children, wildcard, full, absolute,
                                    dirs_only, files_only)

    @synchronize
    def getinfo(self, path, cache_read=True):
        path = abspath(normpath(path))
        metadata = self.client.metadata(path, cache_read=cache_read)
        return metadata_to_info(metadata, localtime=self.localtime)

    def copy(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_copy(src, dst)

    def copydir(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_copy(src, dst)

    def move(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_move(src, dst)

    def movedir(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_move(src, dst)

    def rename(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_move(src, dst)

    def makedir(self, path, recursive=False, allow_recreate=False):
        path = abspath(normpath(path))
        self.client.file_create_folder(path)

    # This does not work, httplib refuses to send a Content-Length: 0 header
    # even though the header is required. We can't make a 0-length file.
    #def createfile(self, path, wipe=False):
    #    self.client.put_file(path, '', overwrite=False)

    def remove(self, path):
        path = abspath(normpath(path))
        self.client.file_delete(path)

    def removedir(self, path, *args, **kwargs):
        path = abspath(normpath(path))
        self.client.file_delete(path)
def main():
    """Interactive CLI harness for DropboxFS.

    Walks the user through oAuth authorization (when no token is supplied)
    and then runs a short smoke test of filesystem operations against the
    live account. Python 2 only (print statements, raw_input).
    """
    parser = optparse.OptionParser(prog="dropboxfs",
                                   description="CLI harness for DropboxFS.")
    parser.add_option(
        "-k",
        "--app-key",
        help="Your Dropbox app key.")
    parser.add_option(
        "-s",
        "--app-secret",
        help="Your Dropbox app secret.")
    parser.add_option(
        "-t",
        "--type",
        default='dropbox',
        choices=('dropbox', 'app_folder'),
        help="Your Dropbox app access type.")
    parser.add_option(
        "-a",
        "--token-key",
        help="Your access token key (if you previously obtained one.")
    parser.add_option(
        "-b",
        "--token-secret",
        help="Your access token secret (if you previously obtained one.")
    (options, args) = parser.parse_args()
    # Can't operate without these parameters.
    if not options.app_key or not options.app_secret:
        parser.error('You must obtain an app key and secret from Dropbox at the following URL.\n\nhttps://www.dropbox.com/developers/apps')
    # Instantiate a client one way or another.
    if not options.token_key and not options.token_secret:
        s = session.DropboxSession(options.app_key, options.app_secret,
                                   options.type)
        # Get a temporary token, so we can make oAuth calls.
        t = s.obtain_request_token()
        print "Please visit the following URL and authorize this application.\n"
        print s.build_authorize_url(t)
        print "\nWhen you are done, please press <enter>."
        raw_input()
        # Trade up to permanent access token.
        a = s.obtain_access_token(t)
        token_key, token_secret = a.key, a.secret
        print 'Your access token will be printed below, store it for later use.'
        print 'For future accesses, you can pass the --token-key and --token-secret'
        print ' arguments.\n'
        print 'Access token:', a.key
        print 'Access token secret:', a.secret
        print "\nWhen you are done, please press <enter>."
        raw_input()
    elif not options.token_key or not options.token_secret:
        parser.error('You must provide both the access token and the '
                     'access token secret.')
    else:
        token_key, token_secret = options.token_key, options.token_secret
    # Smoke-test basic filesystem operations against the live account.
    fs = DropboxFS(options.app_key, options.app_secret, options.type,
                   token_key, token_secret)
    print fs.getinfo('/')
    print fs.getinfo('/Public')
    if fs.exists('/Bar'):
        fs.removedir('/Bar')
    print fs.listdir('/')
    fs.makedir('/Bar')
    print fs.listdir('/')
    print fs.listdir('/Foo')
    # Exercise ChunkedReader: repeated seeks to the same offset must
    # return identical data.
    filelike = fs.open('/big-file.pdf')
    print filelike.read(100)
    filelike.seek(100)
    chunk2 = filelike.read(100)
    print chunk2
    filelike.seek(200)
    print filelike.read(100)
    filelike.seek(100)
    chunk2a = filelike.read(100)
    print chunk2a
    assert chunk2 == chunk2a

if __name__ == '__main__':
    main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TestTrainingWithDatasetIterators(test.TestCase, parameterized.TestCase):
  """Tests Keras fit/evaluate/predict when fed tf.data dataset iterators."""

  @parameterized.parameters(
      {'model': 'functional'},
      {'model': 'subclass'},
  )
  @tf_test_util.run_in_graph_and_eager_modes
  def test_training_and_eval_methods_on_iterators_single_io(self, model):
    """Single-input/single-output model: iterators work for fit/eval/predict;
    unsupported kwargs (validation_split, sample_weight, target, missing
    steps) raise ValueError."""
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(optimizer, loss, metrics=metrics)
    inputs = np.zeros((10, 3))
    targets = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(iterator, steps=2, verbose=1)
    model.predict(iterator, steps=2)
    # Test with validation data
    model.fit(iterator,
              epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=iterator, validation_steps=2)
    # Test with validation split
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(iterator,
                epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)
    # Test with sample weight.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, '`sample_weight` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(
          iterator,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          sample_weight=sample_weight)
    # Test invalid usage
    with self.assertRaisesRegexp(ValueError,
                                 'you should not specify a target'):
      model.fit(iterator, iterator,
                epochs=1, steps_per_epoch=2, verbose=0)
    with self.assertRaisesRegexp(
        ValueError, 'you should specify the `steps_per_epoch` argument'):
      model.fit(iterator, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.evaluate(iterator, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.predict(iterator, verbose=0)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_get_next_op_created_once(self):
    """Refitting the same iterator must not add another get_next op."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics)
    inputs = np.zeros((10, 3))
    targets = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    # Finalize graph to make sure we are not appending another iterator
    # get_next op in the graph.
    ops.get_default_graph().finalize()
    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_iterators_running_out_of_data(self):
    """Exhausting the iterator mid-fit logs a warning instead of failing."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics)
    inputs = np.zeros((10, 3))
    targets = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(2)  # Only 2 batches total, but 3 steps requested.
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    with test.mock.patch.object(logging, 'warning') as mock_log:
      model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
      self.assertRegexpMatches(
          str(mock_log.call_args),
          'dataset iterator ran out of data')
class TestTrainingWithDataset(test.TestCase, parameterized.TestCase):
  """Tests feeding tf.data Datasets directly to Keras model methods."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_calling_model_on_same_dataset(self):
    """Refitting on the same dataset must not add new ops to the graph."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics)
    inputs = np.zeros((10, 3))
    targets = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    # Call fit with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    # Finalize the graph to make sure new ops aren't added when calling on the
    # same dataset
    ops.get_default_graph().finalize()
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_training_and_eval_methods_on_dataset(self):
    """Datasets work for fit/evaluate/predict; unsupported kwargs
    (validation_split, sample_weight, target, missing steps) raise."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(optimizer, loss, metrics=metrics)
    inputs = np.zeros((10, 3))
    targets = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)
    # Test with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    # Test with validation split
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(dataset,
                epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)
    # Test with sample weight.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, '`sample_weight` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(
          dataset,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          sample_weight=sample_weight)
    # Test invalid usage
    with self.assertRaisesRegexp(ValueError,
                                 'you should not specify a target'):
      model.fit(dataset, dataset,
                epochs=1, steps_per_epoch=2, verbose=0)
    with self.assertRaisesRegexp(
        ValueError, 'you should specify the `steps_per_epoch` argument'):
      model.fit(dataset, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.evaluate(dataset, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.predict(dataset, verbose=0)

  @tf_test_util.run_in_graph_and_eager_modes
  def test_dataset_with_sample_weights(self):
    """A 3-tuple dataset (inputs, targets, sample_weights) is accepted."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(optimizer, loss, metrics=metrics)
    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
                                                      sample_weights))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)

  @parameterized.parameters(
      {'model': 'functional'},
      {'model': 'subclass'},
  )
  @tf_test_util.run_in_graph_and_eager_modes
  def test_dataset_with_sparse_labels(self, model):
    """Integer (sparse) labels work with sparse categorical losses."""
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)
    for loss in ['sparse_categorical_crossentropy',
                 losses_impl.sparse_softmax_cross_entropy]:
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      model.compile(optimizer, loss)
      inputs = np.zeros((10, 3), dtype=np.float32)
      targets = np.random.randint(0, 4, size=10, dtype=np.int32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  def test_dataset_input_shape_validation(self):
    """Unbatched datasets and wrong feature widths raise shape errors."""
    with self.cached_session():
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
      model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
      # User forgets to batch the dataset
      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      with self.assertRaisesRegexp(
          ValueError,
          r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
      ):
        model.train_on_batch(dataset)
      # Wrong input shape
      inputs = np.zeros((10, 5))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)
      with self.assertRaisesRegexp(ValueError,
                                   r'expected (.*?) to have shape \(3,\)'):
        model.train_on_batch(dataset)
class TestMetricsWithDatasetIterators(test.TestCase):
  """Checks metric values computed while evaluating from an iterator."""

  @tf_test_util.run_in_graph_and_eager_modes
  def test_metrics_correctness_with_iterator(self):
    """With all-ones kernels and random 0/1 labels, accuracy should be ~0.5;
    with all-zero labels it should be exactly 0."""
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(
            8, activation='relu', input_dim=4, kernel_initializer='ones'))
    model.add(
        keras.layers.Dense(
            1, activation='sigmoid', kernel_initializer='ones'))
    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy', metrics_module.BinaryAccuracy()],
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(123)  # Deterministic labels so accuracy is stable.
    x = np.random.randint(10, size=(100, 4)).astype(np.float32)
    y = np.random.randint(2, size=(100, 1)).astype(np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
    self.assertEqual(np.around(outs[2], decimals=1), 0.5)
    y = np.zeros((100, 1), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(outs[1], 0.)
    self.assertEqual(outs[2], 0.)
if __name__ == '__main__':
test.main()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop the MaterialShapeNicenessLabel table and the
        nice/nice_score columns from MaterialShape."""
        # Deleting model 'MaterialShapeNicenessLabel'
        db.delete_table(u'shapes_materialshapenicenesslabel')
        # Deleting field 'MaterialShape.nice'
        db.delete_column(u'shapes_materialshape', 'nice')
        # Deleting field 'MaterialShape.nice_score'
        db.delete_column(u'shapes_materialshape', 'nice_score')
def backwards(self, orm):
# Adding model 'MaterialShapeNicenessLabel'
db.create_table(u'shapes_materialshapenicenesslabel', (
('quality_method', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('admin_score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('time_active_ms', self.gf('django.db.models.fields.IntegerField')(blank=True, null=True, db_index=True)),
('canttell', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('shape', self.gf('django.db.models.fields.related.ForeignKey')(related_name='nicenesses', to=orm['shapes.MaterialShape'])),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserProfile'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('mturk_assignment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, to=orm['mturk.MtAssignment'], on_delete=models.SET_NULL, blank=True)),
('invalid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('sandbox', self.gf('django.db.models.fields.BooleanField')(default=False)),
('time_ms', self.gf('django.db.models.fields.IntegerField')(blank=True, null=True, db_index=True)),
('reward', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=4, blank=True)),
('nice', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'shapes', ['MaterialShapeNicenessLabel'])
# Adding field 'MaterialShape.nice'
db.add_column(u'shapes_materialshape', 'nice',
self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True),
keep_default=False)
# Adding field 'MaterialShape.nice_score'
db.add_column(u'shapes_materialshape', 'nice_score',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
models = {
u'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'always_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocked_reason': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exclude_from_aggregation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_worker_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bsdfs.environmentmap': {
'Meta': {'ordering': "['-id']", 'object_name': 'EnvironmentMap'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'tonemap_scale': ('django.db.models.fields.FloatField', [], {}),
'tonemap_white': ('django.db.models.fields.FloatField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'bsdfs.shapebsdflabel_wd': {
'Meta': {'ordering': "['-edit_nnz', '-time_ms']", 'object_name': 'ShapeBsdfLabel_wd'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'color_L': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_a': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_b': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'color_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'contrast': ('django.db.models.fields.FloatField', [], {}),
'doi': ('django.db.models.fields.IntegerField', [], {}),
'edit_dict': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edit_nnz': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'edit_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'envmap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bsdfs.EnvironmentMap']", 'null': 'True', 'blank': 'True'}),
'give_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'give_up_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gloss_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gloss_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_blob': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'init_method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metallic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bsdfs_wd'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'licenses.license': {
'Meta': {'object_name': 'License'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'cc_attribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_no_deriv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_noncommercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_share_alike': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creative_commons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
},
u'mturk.experiment': {
'Meta': {'ordering': "['slug', 'variant']", 'unique_together': "(('slug', 'variant'),)", 'object_name': 'Experiment'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'completed_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'cubam_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'examples_group_attr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'has_tutorial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'new_hit_settings': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['mturk.ExperimentSettings']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'template_dir': ('django.db.models.fields.CharField', [], {'default': "'mturk/experiments'", 'max_length': '255'}),
'test_contents_per_assignment': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'mturk.experimentsettings': {
'Meta': {'object_name': 'ExperimentSettings'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'auto_add_hits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
'content_filter': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'experiment_settings_in'", 'to': u"orm['contenttypes.ContentType']"}),
'contents_per_hit': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '1800'}),
'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'default': '2678400'}),
'max_active_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'max_total_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'min_output_consensus': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'num_outputs_max': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'out_content_attr': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'out_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_settings_out'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'qualifications': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'requirements': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'reward': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'mturk.experimenttestcontent': {
'Meta': {'ordering': "['-id']", 'object_name': 'ExperimentTestContent'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_contents'", 'to': u"orm['mturk.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'priority': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'})
},
u'mturk.mtassignment': {
'Meta': {'object_name': 'MtAssignment'},
'accept_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'action_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'approve_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auto_approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'bonus_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'feedback': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feedback_bonus_given': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['mturk.MtHit']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'manually_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_test_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_test_correct': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_test_incorrect': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'partially_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'post_meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reject_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rejection_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'screen_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'screen_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'submission_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'submit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'test_contents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assignments'", 'symmetrical': 'False', 'to': u"orm['mturk.ExperimentTestContent']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_load_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'wage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'worker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']", 'null': 'True', 'blank': 'True'})
},
u'mturk.mthit': {
'Meta': {'object_name': 'MtHit'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'all_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'any_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'compatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hit_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hit_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hits'", 'to': u"orm['mturk.MtHitType']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'incompatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_assignments': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'num_assignments_available': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_assignments_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_assignments_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'review_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'mturk.mthittype': {
'Meta': {'object_name': 'MtHitType'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '3600'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.Experiment']"}),
'experiment_settings': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.ExperimentSettings']"}),
'external_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'default': "'0.01'", 'max_digits': '8', 'decimal_places': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'normals.shaperectifiednormallabel': {
'Meta': {'ordering': "['-admin_score']", 'object_name': 'ShapeRectifiedNormalLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'automatic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canvas_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'canvas_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'correct_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'focal_pixels': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_rectified': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'num_vanishing_lines': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pos_x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pos_y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rectified_normals'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'uvnb': ('django.db.models.fields.TextField', [], {})
},
u'photos.flickruser': {
'Meta': {'ordering': "['-id']", 'object_name': 'FlickrUser'},
'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '127'})
},
u'photos.photo': {
'Meta': {'ordering': "['aspect_ratio', '-id']", 'object_name': 'Photo'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exif': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'flickr_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.FlickrUser']"}),
'focal_y': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'fov': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_orig': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'inappropriate': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['licenses.License']"}),
'light_stack': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoLightStack']"}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'median_intrinsic_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nonperspective': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'num_intrinsic_comparisons': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_intrinsic_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_shapes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rotated': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'scene_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoSceneCategory']"}),
'scene_category_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'scene_category_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stylized': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vanishing_length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'vanishing_lines': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'vanishing_points': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'whitebalanced': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'whitebalanced_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'photos.photolightstack': {
'Meta': {'ordering': "['-id']", 'object_name': 'PhotoLightStack'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'photos.photoscenecategory': {
'Meta': {'ordering': "['name']", 'object_name': 'PhotoSceneCategory'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photos.PhotoSceneCategory']", 'null': 'True', 'blank': 'True'})
},
u'shapes.materialshape': {
'Meta': {'ordering': "['-num_vertices', '-time_ms']", 'object_name': 'MaterialShape'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'area': ('django.db.models.fields.FloatField', [], {}),
'bsdf_wd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bsdfs.ShapeBsdfLabel_wd']", 'null': 'True', 'blank': 'True'}),
'correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'correct_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'dominant_b': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_delta': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_g': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_r': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_rgb0': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
'dominant_rgb1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
'dominant_rgb2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
'dominant_rgb3': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_bbox': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'image_crop': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'image_pbox': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label_pos_x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'label_pos_y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeName']", 'null': 'True', 'blank': 'True'}),
'name_entropy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'num_segments': ('django.db.models.fields.IntegerField', [], {}),
'num_triangles': ('django.db.models.fields.IntegerField', [], {}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'pbox': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pbox_aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'material_shapes'", 'to': u"orm['photos.Photo']"}),
'pixel_area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'planar': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'planar_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'planar_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'rectified_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rectified_normal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['normals.ShapeRectifiedNormalLabel']", 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'segments': ('django.db.models.fields.TextField', [], {}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'special_slug': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'submitted_shapes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'material_shapes'", 'symmetrical': 'False', 'to': u"orm['shapes.SubmittedShape']"}),
'substance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeSubstance']", 'null': 'True', 'blank': 'True'}),
'substance_entropy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'substance_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'triangles': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vertices': ('django.db.models.fields.TextField', [], {})
},
u'shapes.materialshapenamelabel': {
'Meta': {'ordering': "['-name', '-time_ms']", 'object_name': 'MaterialShapeNameLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeName']", 'null': 'True', 'blank': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'shapes.materialshapequality': {
'Meta': {'object_name': 'MaterialShapeQuality'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'canttell': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'correct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'qualities'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'shapes.shapename': {
'Meta': {'ordering': "['-fail', 'name']", 'object_name': 'ShapeName'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeName']", 'null': 'True', 'blank': 'True'}),
'representative_shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.MaterialShape']", 'null': 'True', 'blank': 'True'})
},
u'shapes.shapeplanaritylabel': {
'Meta': {'object_name': 'ShapePlanarityLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'canttell': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'planar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planarities'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'shapes.shapesubstance': {
'Meta': {'ordering': "['-fail', 'name']", 'object_name': 'ShapeSubstance'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'substances'", 'null': 'True', 'to': u"orm['shapes.ShapeSubstanceGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeSubstance']", 'null': 'True', 'blank': 'True'}),
'representative_shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.MaterialShape']", 'null': 'True', 'blank': 'True'})
},
u'shapes.shapesubstancegroup': {
'Meta': {'ordering': "['-id']", 'object_name': 'ShapeSubstanceGroup'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'names': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'substance_groups'", 'symmetrical': 'False', 'to': u"orm['shapes.ShapeName']"})
},
u'shapes.shapesubstancelabel': {
'Meta': {'ordering': "['-substance', '-time_ms']", 'object_name': 'ShapeSubstanceLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'substances'", 'to': u"orm['shapes.MaterialShape']"}),
'substance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeSubstance']", 'null': 'True', 'blank': 'True'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'shapes.submittedshape': {
'Meta': {'ordering': "['-time_ms']", 'object_name': 'SubmittedShape'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submitted_shapes'", 'to': u"orm['photos.Photo']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vertices': ('django.db.models.fields.TextField', [], {'null': 'True'})
}
}
complete_apps = ['shapes']
| |
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType
from .validators import boolean, integer
from .validators.waf import validate_waf_action_type
class FieldToMatch(AWSProperty):
    """
    `FieldToMatch <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-xssmatchset-xssmatchtuple-fieldtomatch.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Data": (str, False),
        "Type": (str, True),
    }
class ByteMatchTuples(AWSProperty):
    """
    `ByteMatchTuples <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-bytematchset-bytematchtuples.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "FieldToMatch": (FieldToMatch, True),
        "PositionalConstraint": (str, True),
        "TargetString": (str, False),
        "TargetStringBase64": (str, False),
        "TextTransformation": (str, True),
    }
class ByteMatchSet(AWSObject):
    """
    `ByteMatchSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-bytematchset.html>`__
    """

    resource_type = "AWS::WAF::ByteMatchSet"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "ByteMatchTuples": ([ByteMatchTuples], False),
        "Name": (str, True),
    }
class IPSetDescriptors(AWSProperty):
    """
    `IPSetDescriptors <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-ipset-ipsetdescriptors.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Type": (str, True),
        "Value": (str, True),
    }
class IPSet(AWSObject):
    """
    `IPSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-ipset.html>`__
    """

    resource_type = "AWS::WAF::IPSet"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "IPSetDescriptors": ([IPSetDescriptors], False),
        "Name": (str, True),
    }
class Predicates(AWSProperty):
    """
    `Predicates <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-rule-predicates.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "DataId": (str, True),
        "Negated": (boolean, True),
        "Type": (str, True),
    }
class Rule(AWSObject):
    """
    `Rule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-rule.html>`__
    """

    resource_type = "AWS::WAF::Rule"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "MetricName": (str, True),
        "Name": (str, True),
        "Predicates": ([Predicates], False),
    }
class SizeConstraint(AWSProperty):
    """
    `SizeConstraint <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-sizeconstraintset-sizeconstraint.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "ComparisonOperator": (str, True),
        "FieldToMatch": (FieldToMatch, True),
        "Size": (integer, True),
        "TextTransformation": (str, True),
    }
class SizeConstraintSet(AWSObject):
    """
    `SizeConstraintSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-sizeconstraintset.html>`__
    """

    resource_type = "AWS::WAF::SizeConstraintSet"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Name": (str, True),
        "SizeConstraints": ([SizeConstraint], True),
    }
class SqlInjectionMatchTuples(AWSProperty):
    """
    `SqlInjectionMatchTuples <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-sqlinjectionmatchset-sqlinjectionmatchtuples.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "FieldToMatch": (FieldToMatch, True),
        "TextTransformation": (str, True),
    }
class SqlInjectionMatchSet(AWSObject):
    """
    `SqlInjectionMatchSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-sqlinjectionmatchset.html>`__
    """

    resource_type = "AWS::WAF::SqlInjectionMatchSet"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Name": (str, True),
        "SqlInjectionMatchTuples": ([SqlInjectionMatchTuples], False),
    }
class Action(AWSProperty):
    """
    `Action <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-webacl-action.html>`__
    """

    # "Type" is validated against the allowed WAF action values
    # (validator imported at the top of this module).
    props: PropsDictType = {
        "Type": (validate_waf_action_type, True),
    }
class Rules(AWSProperty):
    """
    `Rules <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-webacl-rules.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Action": (Action, False),
        "Priority": (integer, True),
        "RuleId": (str, True),
    }
class WebACL(AWSObject):
    """
    `WebACL <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-webacl.html>`__
    """

    resource_type = "AWS::WAF::WebACL"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "DefaultAction": (Action, True),
        "MetricName": (str, True),
        "Name": (str, True),
        "Rules": ([Rules], False),
    }
class XssMatchTuple(AWSProperty):
    """
    `XssMatchTuple <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waf-xssmatchset-xssmatchtuple.html>`__
    """

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "FieldToMatch": (FieldToMatch, True),
        "TextTransformation": (str, True),
    }
class XssMatchSet(AWSObject):
    """
    `XssMatchSet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-waf-xssmatchset.html>`__
    """

    resource_type = "AWS::WAF::XssMatchSet"

    # Maps CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Name": (str, True),
        "XssMatchTuples": ([XssMatchTuple], True),
    }
| |
# Copyright (C) 2014 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""VisTrails runner for reprounzip.
This file provides the reprounzip plugin that builds a VisTrails pipeline
alongside an unpacked experiment. Although you don't need VisTrails to generate
the .vt file, you will need it if you want to run it.
See http://www.vistrails.org/
"""
if __name__ == '__main__':  # noqa
    # Run directly as a script (this is how the VisTrails package invokes the
    # plugin): re-import through the installed package so module-level state
    # is initialized exactly once, then delegate to the CLI entry point.
    from reprounzip_vistrails import run_from_vistrails
    run_from_vistrails()
    # Safety net: run_from_vistrails() always terminates via sys.exit(), so
    # this line should be unreachable.  NOTE(review): `assert` is stripped
    # under `python -O` -- presumably acceptable since the exit above is
    # unconditional; confirm.
    assert False
import argparse
from datetime import datetime
import itertools
import logging
import os
from pathlib import Path
import shutil
import subprocess
import sys
import tempfile
import zipfile
from reprozip_core.common import load_config, setup_logging, record_usage
from reprounzip import signals
from reprounzip.unpackers.common import shell_escape
__version__ = '1.0.16'
logger = logging.getLogger('reprounzip.vistrails')
def escape_xml(s):
    """Stringify *s* and escape it for use in an XML attribute value.

    Only '&' and '"' need replacing because every generated attribute value
    is enclosed in double quotes.
    """
    # Bug fix: the replacements previously mapped each character to itself
    # (a no-op), so values containing '&' or '"' produced malformed XML.
    return ("%s" % s).replace('&', '&amp;').replace('"', '&quot;')
class IdScope(object):
    """Hands out monotonically increasing ids, one counter per object kind.

    Each accessor (``add()``, ``module()``, ...) returns the current value of
    its counter and then increments it, so the first call yields 0.
    """

    _KINDS = ('add', 'module', 'location', 'annotation', 'function',
              'parameter', 'connection', 'port', 'portspec', 'portspecitem')

    def __init__(self):
        # One independent counter per kind, all starting at zero.
        self._ids = dict.fromkeys(self._KINDS, 0)

    def _counter(kind):
        """Build a post-incrementing accessor bound to one counter."""
        def next_id(self):
            current = self._ids[kind]
            self._ids[kind] = current + 1
            return current
        return next_id

    add = _counter('add')
    module = _counter('module')
    location = _counter('location')
    annotation = _counter('annotation')
    function = _counter('function')
    parameter = _counter('parameter')
    connection = _counter('connection')
    port = _counter('port')
    portspec = _counter('portspec')
    portspecitem = _counter('portspecitem')

    del _counter
def split_sig(sig):
    """Split a module signature ``'package:name'`` into ``(name, package)``.

    Only the last colon separates the two parts, so dotted package names
    containing no colon are kept intact.
    """
    package, name = sig.rsplit(':', 1)
    return name, package
class Workflow(object):
    """Serializes a VisTrails pipeline as a ``vistrail`` XML document.

    The document consists of a single <action> holding <add> operations that
    incrementally build the pipeline: modules (with canvas locations and
    optional description annotations), functions with parameters, connections
    and port specs.  Object ids are allocated from the shared id scope.
    """

    def __init__(self, file_, ids):
        self._file = file_
        self._ids = ids
        # Y coordinate for the next module; decremented per module so the
        # pipeline is laid out top-to-bottom on the VisTrails canvas.
        self._mod_y = 0
        file_.write('<vistrail id="" name="" version="1.0.4" xmlns:xsi="http:'
                    '//www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation'
                    '="http://www.vistrails.org/vistrail.xsd">\n'
                    '<!-- Generated by reprounzip-vistrails {version} -->\n'
                    '  <action date="{date}" id="1" prevId="0" session="0" '
                    'user="ReproUnzip">\n'.format(
                        date=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        version=__version__))

    def close(self):
        """Closes the <action> and <vistrail> elements; call exactly once."""
        self._file.write(
            '  </action>\n'
            '</vistrail>\n')

    def add_module(self, sig, version, desc=None):
        """Adds a module to the pipeline and returns its id.

        :param sig: ``'package:name'`` signature of the module.
        :param version: Version of the package providing the module.
        :param desc: Optional description annotation (the module's label).
        """
        mod_id = self._ids.module()
        name, pkg = split_sig(sig)
        self._file.write(
            '    <add id="{add_id}" objectId="{mod_id}" parentObjId="" '
            'parentObjType="" what="module">\n'
            '      <module cache="1" id="{mod_id}" name="{mod_name}" namespace'
            '="" package="{mod_pkg}" version="{version}" />\n'
            '    </add>\n'.format(
                add_id=self._ids.add(), mod_id=mod_id,
                mod_name=name, mod_pkg=pkg, version=version))
        self._file.write(
            '    <add id="{add_id}" objectId="{loc_id}" parentObjId="{mod_id}'
            '" parentObjType="module" what="location">\n'
            '      <location id="{loc_id}" x="0.0" y="{y}" />\n'
            '    </add>\n'.format(
                add_id=self._ids.add(), mod_id=mod_id,
                loc_id=self._ids.location(), y=self._mod_y))
        if desc is not None:
            self._file.write(
                '    <add id="{add_id}" objectId="{ann_id}" parentObjId="'
                '{mod_id}" parentObjType="module" what="annotation">\n'
                '      <annotation id="{ann_id}" key="__desc__" value="{text}"'
                ' />\n'
                '    </add>\n'.format(
                    add_id=self._ids.add(), mod_id=mod_id,
                    ann_id=self._ids.annotation(), text=escape_xml(desc)))
        self._mod_y -= 100
        return mod_id

    def add_function(self, mod_id, name, param_values):
        """Adds a function (with its parameters) to module `mod_id`.

        :param param_values: Iterable of ``(signature, value)`` pairs.
        """
        func_id = self._ids.function()
        self._file.write(
            '    <add id="{add_id}" objectId="{func_id}" parentObjId="'
            '{mod_id}" parentObjType="module" what="function">\n'
            '      <function id="{func_id}" name="{name}" pos="0" />\n'
            '    </add>\n'.format(
                add_id=self._ids.add(), mod_id=mod_id, func_id=func_id,
                name=name))
        for i, (sig, val) in enumerate(param_values):
            self._file.write(
                '    <add id="{add_id}" objectId="{param_id}" parentObjId="'
                '{func_id}" parentObjType="function" what="parameter">\n'
                # Bug fix: the name placeholder was previously written with
                # literal '<'/'>' characters ("<no description>"), which is
                # not well-formed XML inside an attribute value; use the
                # predefined entities instead.
                '      <parameter alias="" id="{param_id}" name="&lt;no '
                'description&gt;" pos="{pos}" type="{type}" val="{val}" />\n'
                '    </add>\n'.format(
                    add_id=self._ids.add(), param_id=self._ids.parameter(),
                    func_id=func_id, pos=i, type=sig, val=escape_xml(val)))

    def connect(self, from_id, from_sig, from_port, to_id, to_sig, to_port):
        """Adds a connection between a source port and a destination port."""
        self._file.write(
            '    <add id="{add1_id}" objectId="{conn_id}" parentObjId="" '
            'parentObjType="" what="connection">\n'
            '      <connection id="{conn_id}" />\n'
            '    </add>\n'
            '    <add id="{add2_id}" objectId="{port1_id}" parentObjId="'
            '{conn_id}" parentObjType="connection" what="port">\n'
            '      <port id="{port1_id}" moduleId="{from_id}" moduleName="'
            '{from_mod}" name="{from_port}" signature="({from_sig})" type="'
            'source" />\n'
            '    </add>\n'
            '    <add id="{add3_id}" objectId="{port2_id}" parentObjId="'
            '{conn_id}" parentObjType="connection" what="port">\n'
            '      <port id="{port2_id}" moduleId="{to_id}" moduleName="'
            '{to_mod}" name="{to_port}" signature="({to_sig})" type="'
            'destination" />\n'
            '    </add>\n'.format(
                add1_id=self._ids.add(), add2_id=self._ids.add(),
                add3_id=self._ids.add(), conn_id=self._ids.connection(),
                port1_id=self._ids.port(), from_id=from_id, from_sig=from_sig,
                from_mod=split_sig(from_sig)[0], from_port=from_port,
                port2_id=self._ids.port(), to_id=to_id, to_sig=to_sig,
                to_mod=split_sig(to_sig)[0], to_port=to_port
            ))

    def add_port_spec(self, mod_id, name, type_, sigs, optional=True):
        """Declares an input or output port on module `mod_id`.

        :param type_: 'input' or 'output'.
        :param sigs: Iterable of ``(package, module)`` pairs giving the
            types accepted/produced by the port.
        """
        self._file.write(
            '    <add id="{add_id}" objectId="{ps_id}" parentObjId="{mod_id}" '
            'parentObjType="module" what="portSpec">\n'
            '      <portSpec depth="0" id="{ps_id}" maxConns="1" minConns="0" '
            'name="{name}" optional="{opt}" sortKey="0" type="{type}'
            '">\n'.format(
                add_id=self._ids.add(), ps_id=self._ids.portspec(),
                mod_id=mod_id, name=escape_xml(name), type=type_,
                opt='1' if optional else '0'))
        for i, (pkg, mod) in enumerate(sigs):
            self._file.write(
                '        <portSpecItem default="" entryType="" id="{psi_id}" '
                'label="" module="{mod}" namespace="" package="{pkg}" pos="'
                '{pos}" values="" />\n'.format(
                    psi_id=self._ids.portspecitem(), mod=mod, pkg=pkg,
                    pos=i))
        self._file.write(
            '      </portSpec>\n'
            '    </add>\n')
# VisTrails type signatures used when building the pipeline.
directory_sig = 'org.vistrails.vistrails.basic:Directory'
file_pkg_mod = 'org.vistrails.vistrails.basic', 'File'  # (package, module)
integer_sig = 'org.vistrails.vistrails.basic:Integer'
string_sig = 'org.vistrails.vistrails.basic:String'
# Identifier and version of the VisTrails package that provides the
# ReproUnzip Directory/Run modules.
rpz_id = 'io.github.vida-nyu.reprozip.reprounzip'
rpz_version = '0.1'
experiment_sig = '%s:Directory' % rpz_id
def do_vistrails(target, pack=None, **kwargs):
    """Create a VisTrails workflow that runs the experiment.

    This is called from signals after an experiment has been setup by any
    unpacker.

    :param target: Directory (a Path) the experiment was unpacked to; the
        ``vistrails.vt`` bundle is written there as well.
    :param pack: Unused here; accepted for signal-handler compatibility.
    """
    record_usage(do_vistrails=True)

    config = load_config(target / 'config.yml', canonical=True)

    # Writes VisTrails workflow
    bundle = target / 'vistrails.vt'
    logger.info("Writing VisTrails workflow %s...", bundle)
    vtdir = Path(tempfile.mkdtemp(prefix='reprounzip_vistrails_'))
    ids = IdScope()
    try:
        with (vtdir / 'vistrail').open('w',
                                       encoding='utf-8', newline='\n') as fp:
            wf = Workflow(fp, ids)

            # Directory module, referring to this directory
            d = wf.add_module('%s:Directory' % rpz_id, rpz_version)
            wf.add_function(d, 'directory',
                            [(directory_sig, str(target.resolve()))])

            connect_from = d

            for i, run in enumerate(config.runs):
                inputs = sorted(n for n, f in config.inputs_outputs.items()
                                if i in f.read_runs)
                outputs = sorted(n for n, f in config.inputs_outputs.items()
                                 if i in f.write_runs)
                ports = itertools.chain((('input', p) for p in inputs),
                                        (('output', p) for p in outputs))

                # Run module
                r = wf.add_module('%s:Run' % rpz_id, rpz_version,
                                  desc=run.get('id', 'run%d' % i))
                wf.add_function(r, 'cmdline', [
                    (string_sig,
                     ' '.join(shell_escape(arg)
                              for arg in run['argv']))])
                wf.add_function(r, 'run_number', [(integer_sig, i)])

                # Port specs for input/output files
                for type_, name in ports:
                    wf.add_port_spec(r, name, type_, [file_pkg_mod])

                # Draw connection
                wf.connect(connect_from, experiment_sig, 'experiment',
                           r, experiment_sig, 'experiment')
                connect_from = r

            wf.close()

        # Bug fix: entries must be archived relative to vtdir.  Previously
        # the absolute glob result was passed as the arcname, which embedded
        # the whole temporary-directory hierarchy inside the bundle instead
        # of putting 'vistrail' at the archive root.  Also close the zip via
        # a context manager so it is finalized even on error.
        with bundle.open('wb') as fp:
            with zipfile.ZipFile(fp, 'w') as z:
                for path in vtdir.glob('**/*'):
                    if not path.is_dir():
                        z.write(path, path.relative_to(vtdir))
    finally:
        shutil.rmtree(vtdir)
def setup_vistrails():
    """Setup the plugin.

    Registers do_vistrails() on the post-setup signal so a VisTrails
    workflow is generated after any unpacker finishes setting up an
    experiment.
    """
    signals.post_setup.subscribe(do_vistrails)
def run_from_vistrails():
    """CLI entry point used when VisTrails executes this module as a script.

    Parses the command line built by the VisTrails package (interface
    version, unpacker, directory, run number, input/output file mappings,
    optional replacement command line) and drives reprounzip's
    upload/run/download commands through subprocesses.  Always terminates
    via sys.exit().
    """
    setup_logging('REPROUNZIP-VISTRAILS', logging.INFO)

    # sys.argv[1] is an interface version number; default to 1 for old
    # VisTrails packages that don't pass one.
    cli_version = 1
    if len(sys.argv) > 1:
        try:
            cli_version = int(sys.argv[1])
        except ValueError:
            logger.info("Compatibility mode: reprounzip-vistrails didn't get "
                        "a version number")
    if cli_version != 1:
        logger.critical("Unknown interface version %d; you are probably "
                        "using a version of reprounzip-vistrails too old for "
                        "your VisTrails package. Consider upgrading.",
                        cli_version)
        sys.exit(1)
    parser = argparse.ArgumentParser()
    parser.add_argument('unpacker')
    parser.add_argument('directory')
    parser.add_argument('run')
    parser.add_argument('--input-file', action='append', default=[])
    parser.add_argument('--output-file', action='append', default=[])
    parser.add_argument('--cmdline', action='store')
    args = parser.parse_args(sys.argv[2:])
    config = load_config(Path(args.directory) / 'config.yml', canonical=True)
    python = sys.executable

    # Re-invoke reprounzip through the current interpreter so the same
    # environment/installation is used.
    rpuz = [python, '-c', 'from reprounzip.main import main; main()',
            args.unpacker]

    os.environ['REPROUNZIP_NON_INTERACTIVE'] = 'y'

    def cmd(lst, add=None):
        # `add` is appended verbatim (it is already a shell string coming
        # from VisTrails), so in that case the command must run through the
        # shell with everything else explicitly escaped.
        if add:
            logger.info("cmd: %s %s", ' '.join(rpuz + lst), add)
            string = ' '.join(shell_escape(a) for a in (rpuz + lst))
            string += ' ' + add
            subprocess.check_call(string, shell=True,
                                  cwd=args.directory)
        else:
            logger.info("cmd: %s", ' '.join(rpuz + lst))
            subprocess.check_call(rpuz + lst,
                                  cwd=args.directory)

    logger.info("reprounzip-vistrails calling reprounzip; dir=%s",
                args.directory)

    # Parses input files from the command-line
    upload_command = []
    seen_input_names = set()
    for input_file in args.input_file:
        input_name, filename = input_file.split(':', 1)
        upload_command.append('%s:%s' % (filename, input_name))
        seen_input_names.add(input_name)

    # Resets the input files that are used by this run and were not given
    for name, f in config.inputs_outputs.items():
        if name not in seen_input_names and int(args.run) in f.read_runs:
            upload_command.append(':%s' % name)

    # Runs the command
    cmd(['upload', '.'] + upload_command)

    # Runs the experiment
    if args.cmdline:
        cmd(['run', '.', args.run, '--cmdline'], add=args.cmdline)
    else:
        cmd(['run', '.', args.run])

    # Gets output files
    for output_file in args.output_file:
        output_name, filename = output_file.split(':', 1)
        cmd(['download', '.',
             '%s:%s' % (output_name, filename)])

    sys.exit(0)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.losses_utils import compute_weighted_loss
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.util.tf_export import keras_export
class Loss(object):
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.

  Example subclass implementation:
  ```
  class MeanSquaredError(Loss):
    def call(self, y_true, y_pred):
      y_pred = ops.convert_to_tensor(y_pred)
      y_true = math_ops.cast(y_true, y_pred.dtype)
      return K.mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  Args:
    reduction: Type of `tf.losses.Reduction` to apply to loss. Default value is
      `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.
  """

  def __init__(self,
               reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
               name=None):
    self.reduction = reduction
    self.name = name

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
        as `y_true`, or is broadcastable to `y_true`. `sample_weight` acts as a
        coefficient for the loss. If a scalar is provided, then the loss is
        simply scaled by the given value. If `sample_weight` is a tensor of size
        `[batch_size]`, then the total loss for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` matches the shape of `y_pred`, then the
        loss of each measurable element of `y_pred` is scaled by the
        corresponding value of `sample_weight`.

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
      shape as `y_true`; otherwise, it is scalar.

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # `format(name)` is equivalent to `str(name)` for the class-name scope.
    with ops.name_scope(self.name, format(self.__class__.__name__),
                        (y_pred, y_true, sample_weight)):
      losses = self.call(y_true, y_pred)
      # Sample weighting and the configured reduction are applied here, so
      # subclasses' `call` only computes the per-element losses.
      return compute_weighted_loss(
          losses, sample_weight, reduction=self.reduction)

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
      config: Output of `get_config()`.

    Returns:
      A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the serializable config of this loss (see `from_config`)."""
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values, with the same shape as 'y_pred'.
      y_pred: The predicted values.
    """
    # Bug fix: the exception was previously constructed but never raised, so
    # a subclass that forgot to override `call` silently returned None from
    # __call__.  NOTE: this class does not use ABCMeta, so `abstractmethod`
    # alone does not block instantiation; raising here is the actual guard.
    raise NotImplementedError('Must be implemented in subclasses.')
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(Loss):
  """Computes the mean of squares of errors between labels and predictions.

  For example, if `y_true` is [0., 0., 1., 1.] and `y_pred` is [1., 1., 1., 0.]
  then the mean squared error value is 3/4 (0.75).

  Usage:

  ```python
  mse = tf.keras.losses.MeanSquaredError()
  loss = mse([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.75
  ```

  Usage with tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def call(self, y_true, y_pred):
    """Invokes the `MeanSquaredError` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Mean squared error losses.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    # Cast labels to the (possibly inferred) dtype of the predictions so the
    # element-wise arithmetic in the functional form is well-typed.
    y_true = math_ops.cast(y_true, y_pred.dtype)
    # Delegates to the functional `mean_squared_error` defined in this
    # module; weighting/reduction are applied by `Loss.__call__`.
    return mean_squared_error(y_true, y_pred)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(Loss):
  """Computes the mean of absolute difference between labels and predictions.

  For example, if `y_true` is [0., 0., 1., 1.] and `y_pred` is [1., 1., 1., 0.]
  then the mean absolute error value is 3/4 (0.75).

  Usage:

  ```python
  mae = tf.keras.losses.MeanAbsoluteError()
  loss = mae([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.75
  ```

  Usage with tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def call(self, y_true, y_pred):
    """Invokes the `MeanAbsoluteError` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Mean absolute error losses.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    # Cast labels to the (possibly inferred) dtype of the predictions so the
    # element-wise arithmetic in the functional form is well-typed.
    y_true = math_ops.cast(y_true, y_pred.dtype)
    # Delegates to the functional `mean_absolute_error` defined in this
    # module; weighting/reduction are applied by `Loss.__call__`.
    return mean_absolute_error(y_true, y_pred)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(Loss):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  For example, with `y_true = [0., 0., 1., 1.]` and
  `y_pred = [1., 1., 1., 0.]` the mean absolute percentage error is 5e+08.

  Standalone usage:

  ```python
  mape = tf.keras.losses.MeanAbsolutePercentageError()
  loss = mape([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 5e+08
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def call(self, y_true, y_pred):
    """Returns the element-wise mean absolute percentage error losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return mean_absolute_percentage_error(
        math_ops.cast(y_true, y_pred.dtype), y_pred)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(Loss):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  For example, with `y_true = [0., 0., 1., 1.]` and
  `y_pred = [1., 1., 1., 0.]` the mean squared logarithmic error is 0.36034.

  Standalone usage:

  ```python
  msle = tf.keras.losses.MeanSquaredLogarithmicError()
  loss = msle([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.36034
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```
  """

  def call(self, y_true, y_pred):
    """Returns the element-wise mean squared logarithmic error losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return mean_squared_logarithmic_error(
        math_ops.cast(y_true, y_pred.dtype), y_pred)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(Loss):
  """Computes the binary cross entropy loss between the labels and predictions.

  Standalone usage:

  ```python
  bce = tf.keras.losses.BinaryCrossentropy()
  loss = bce([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 12.007
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
  ```

  Args:
    from_logits: Whether `output` is expected to be a logits tensor. By default,
      we consider that `output` encodes a probability distribution.
    label_smoothing: If greater than `0` then smooth the labels.
    reduction: Type of `tf.losses.Reduction` to apply to loss. Default value is
      `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
               name=None):
    super(BinaryCrossentropy, self).__init__(reduction=reduction, name=name)
    self.from_logits = from_logits
    self.label_smoothing = label_smoothing

  def call(self, y_true, y_pred):
    """Returns the element-wise binary cross entropy losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    smoothing = self.label_smoothing
    if smoothing > 0:
      # Squeeze the labels towards 0.5 by the smoothing factor.
      y_true = y_true * (1 - smoothing) + 0.5 * smoothing
    return binary_crossentropy(y_true, y_pred, from_logits=self.from_logits)
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(Loss):
  """Computes categorical cross entropy loss between the `y_true` and `y_pred`.

  Standalone usage:

  ```python
  cce = tf.keras.losses.CategoricalCrossentropy()
  loss = cce(
      [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
      [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])
  print('Loss: ', loss.numpy())  # Loss: 0.3239
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
  ```

  Args:
    from_logits: Whether `output` is expected to be a logits tensor. By default,
      we consider that `output` encodes a probability distribution.
    label_smoothing: If greater than `0` then smooth the labels. This option is
      currently not supported when `y_pred` is a sparse input (not one-hot).
    reduction: Type of `tf.losses.Reduction` to apply to loss. Default value is
      `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
               name=None):
    super(CategoricalCrossentropy, self).__init__(
        reduction=reduction, name=name)
    self.from_logits = from_logits
    self.label_smoothing = label_smoothing

  def call(self, y_true, y_pred):
    """Returns the element-wise categorical cross entropy losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true)
    if y_pred.shape != y_true.shape:
      # Mismatched shapes mean sparse (integer-index) labels; label
      # smoothing is not supported on this path.
      return sparse_categorical_crossentropy(
          y_true, y_pred, from_logits=self.from_logits)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    if self.label_smoothing > 0:
      # Distribute the smoothing mass uniformly over all classes.
      num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
      y_true = (y_true * (1.0 - self.label_smoothing) +
                self.label_smoothing / num_classes)
    return categorical_crossentropy(
        y_true, y_pred, from_logits=self.from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(Loss):
  """Computes the hinge loss between `y_true` and `y_pred`.

  Standalone usage:

  ```python
  h = tf.losses.Hinge()
  loss = h([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 0.66
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.losses.Hinge())
  ```
  """

  def call(self, y_true, y_pred):
    """Returns the element-wise hinge losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return hinge(math_ops.cast(y_true, y_pred.dtype), y_pred)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(Loss):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  Standalone usage:

  ```python
  sh = tf.losses.SquaredHinge()
  loss = sh([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 0.66
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.losses.SquaredHinge())
  ```
  """

  def call(self, y_true, y_pred):
    """Returns the element-wise squared hinge losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return squared_hinge(math_ops.cast(y_true, y_pred.dtype), y_pred)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(Loss):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  Standalone usage:

  ```python
  ch = tf.losses.CategoricalHinge()
  loss = ch([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 1.0
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.losses.CategoricalHinge())
  ```
  """

  def call(self, y_true, y_pred):
    """Returns the element-wise categorical hinge losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return categorical_hinge(math_ops.cast(y_true, y_pred.dtype), y_pred)
class LogLoss(Loss):
  """Computes the log loss between `y_true` and `y_pred`.

  `logloss = -y log(p) - (1-y) log(1-p)`

  Standalone usage:

  ```python
  l = tf.losses.LogLoss()
  loss = l([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 10.745
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.losses.LogLoss())
  ```

  Args:
    epsilon: A small increment to add to avoid taking a log of zero.
    reduction: Type of `tf.losses.Reduction` to apply to loss. Default value is
      `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.
  """

  def __init__(self,
               epsilon=1e-7,
               reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
               name=None):
    super(LogLoss, self).__init__(reduction=reduction, name=name)
    self.epsilon = epsilon

  def call(self, y_true, y_pred):
    """Returns the element-wise log losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return logloss(
        math_ops.cast(y_true, y_pred.dtype), y_pred, epsilon=self.epsilon)
@keras_export('keras.metrics.mean_squared_error',
              'keras.metrics.mse',
              'keras.metrics.MSE',
              'keras.losses.mean_squared_error',
              'keras.losses.mse',
              'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
  """Computes the mean squared error over the last axis."""
  squared_difference = math_ops.square(y_pred - y_true)
  return K.mean(squared_difference, axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
              'keras.metrics.mae',
              'keras.metrics.MAE',
              'keras.losses.mean_absolute_error',
              'keras.losses.mae',
              'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
  """Computes the mean absolute error over the last axis."""
  absolute_difference = math_ops.abs(y_pred - y_true)
  return K.mean(absolute_difference, axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
              'keras.metrics.mape',
              'keras.metrics.MAPE',
              'keras.losses.mean_absolute_percentage_error',
              'keras.losses.mape',
              'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
  """Computes the mean absolute percentage error over the last axis."""
  # Clip |y_true| away from zero to avoid division by zero.
  denominator = K.clip(math_ops.abs(y_true), K.epsilon(), None)
  diff = math_ops.abs((y_true - y_pred) / denominator)
  return 100. * K.mean(diff, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
              'keras.metrics.msle',
              'keras.metrics.MSLE',
              'keras.losses.mean_squared_logarithmic_error',
              'keras.losses.msle',
              'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
  """Computes the mean squared log error over the last axis."""
  # Clip both inputs away from zero before taking log1p-style logs.
  log_pred = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  log_true = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(math_ops.square(log_pred - log_true), axis=-1)
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
  """Computes the squared hinge loss over the last axis."""
  margin = math_ops.maximum(1. - y_true * y_pred, 0.)
  return K.mean(math_ops.square(margin), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
  """Computes the hinge loss over the last axis."""
  margin = math_ops.maximum(1. - y_true * y_pred, 0.)
  return K.mean(margin, axis=-1)
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
  """Computes the categorical hinge loss over the last axis."""
  positive = math_ops.reduce_sum(y_true * y_pred, axis=-1)
  negative = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
  return math_ops.maximum(0., negative - positive + 1.)
def logloss(y_true, y_pred, epsilon=1e-7):
  """Computes `-y*log(p) - (1-y)*log(1-p)`, averaged over the last axis."""
  # epsilon keeps the logs finite when predictions hit exactly 0 or 1.
  positive_term = math_ops.multiply(y_true, math_ops.log(y_pred + epsilon))
  negative_term = math_ops.multiply(
      (1 - y_true), math_ops.log(1 - y_pred + epsilon))
  return K.mean(-(positive_term + negative_term), axis=-1)
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately `(x ** 2) / 2` for small `x` and
  `abs(x) - log(2)` for large `x`, so this behaves mostly like the mean
  squared error while being less affected by the occasional wildly
  incorrect prediction.

  Arguments:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.

  Returns:
    Tensor with one scalar loss entry per sample.
  """

  def _logcosh(x):
    # Numerically stable form: log(cosh(x)) = x + softplus(-2x) - log(2).
    return x + nn.softplus(-2. * x) - math_ops.log(2.)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true, y_pred, from_logits=False):
  """Computes categorical cross entropy via the backend implementation."""
  return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
@keras_export('keras.metrics.sparse_categorical_crossentropy',
              'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False):
  """Computes cross entropy for integer-index labels via the backend."""
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits)
@keras_export('keras.metrics.binary_crossentropy',
              'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False):
  """Computes binary cross entropy, averaged over the last axis."""
  per_element = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
  return K.mean(per_element, axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
              'keras.metrics.kld',
              'keras.metrics.KLD',
              'keras.losses.kullback_leibler_divergence',
              'keras.losses.kld',
              'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
  """Computes `sum(y_true * log(y_true / y_pred))` over the last axis."""
  # Clip both distributions into (epsilon, 1] so the log stays finite.
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  log_ratio = math_ops.log(y_true / y_pred)
  return math_ops.reduce_sum(y_true * log_ratio, axis=-1)
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
  """Computes `mean(y_pred - y_true * log(y_pred))` over the last axis."""
  log_pred = math_ops.log(y_pred + K.epsilon())
  return K.mean(y_pred - y_true * log_pred, axis=-1)
@keras_export('keras.metrics.cosine_proximity',
              'keras.metrics.cosine',
              'keras.losses.cosine_proximity',
              'keras.losses.cosine')
def cosine_proximity(y_true, y_pred):
  """Computes the negative cosine similarity between `y_true` and `y_pred`.

  Both inputs are L2-normalized along the last axis, so the result lies in
  [-1, 1]; more similar vectors yield more negative losses.
  """
  y_true = nn.l2_normalize(y_true, axis=-1)
  y_pred = nn.l2_normalize(y_pred, axis=-1)
  return -math_ops.reduce_sum(y_true * y_pred, axis=-1)
@keras_export('keras.losses.CosineProximity')
class CosineProximity(Loss):
  """Computes the cosine distance between `y_true` and `y_pred`.

  Standalone usage:

  ```python
  cosine_loss = tf.losses.CosineProximity()
  loss = cosine_loss([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: -0.5
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.losses.CosineProximity())
  ```
  """

  def call(self, y_true, y_pred):
    """Returns the element-wise cosine distance losses."""
    y_pred = ops.convert_to_tensor(y_pred)
    return cosine_proximity(math_ops.cast(y_true, y_pred.dtype), y_pred)
# Aliases.
# Short / upper-case spellings accepted wherever a loss name can be passed
# (e.g. `model.compile(loss='mse')`).
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
cosine = cosine_proximity
@keras_export('keras.losses.serialize')
def serialize(loss):
  """Serializes a loss function or `Loss` instance to a Keras config."""
  return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  """Deserializes a loss by name or config dict.

  Args:
    name: A loss name, config dict, or serialized object.
    custom_objects: Optional dict mapping names to custom objects, consulted
      in addition to this module's globals.
  """
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
  """Retrieves a Keras loss by identifier.

  Args:
    identifier: None, a string name, a config dict, or a callable.

  Returns:
    A loss callable, or None when `identifier` is None.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    return deserialize(str(identifier))
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret loss function identifier:', identifier)
# Losses whose labels are integer class indices (not one-hot vectors),
# mapped to the dtype their label tensors should be cast to.
LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.BatchMatMul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BatchMatmulOpTest(test.TestCase):
  """Compares tf.matmul on batched inputs against a NumPy reference."""

  # Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
  def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
    # output's shape depends on adj[0] and adj[1]
    d0 = x.shape[-2] if not adjoint_a else x.shape[-1]
    d2 = y.shape[-1] if not adjoint_b else y.shape[-2]
    batch_dims = x.shape[:-2]
    num = np.prod(batch_dims)
    z = np.empty(list(batch_dims) + [d0, d2], dtype=x.dtype)
    xr = x.reshape([num, x.shape[-2], x.shape[-1]])
    yr = y.reshape([num, y.shape[-2], y.shape[-1]])
    zr = z.reshape([num, z.shape[-2], z.shape[-1]])
    for i in range(num):
      # Fix: use plain ndarrays + np.dot instead of the deprecated np.matrix
      # class; '*' on np.matrix was matrix multiplication.
      a = xr[i, :, :]
      if adjoint_a:
        a = a.transpose().conj()
      b = yr[i, :, :]
      if adjoint_b:
        b = b.transpose().conj()
      zr[i, :, :] = np.dot(a, b)
    return z

  # Test _npBatchMatMul works.
  def testNpVersion(self):
    x = np.array([0., 1., 2., 3.]).reshape([1, 2, 2])
    y = np.array([1., 2., 3., 4.]).reshape([1, 2, 2])
    z0 = self._npBatchMatmul(x, y, False, False)
    z1 = np.array([3., 4., 11., 16.]).reshape([1, 2, 2])
    self.assertTrue(np.array_equal(z0, z1))
    x = np.array([1., (1j), (-1.), (-1j)]).reshape([1, 2, 2])
    # Fix: np.complex (a plain alias of the builtin) was removed in
    # NumPy 1.24; use complex() directly.
    y = x * complex(1, 1)  # rotate x 90 degree
    z0 = self._npBatchMatmul(x, y, False, False)
    z1 = np.array([2., (2.j), -2., (-2.j)]).reshape([1, 2, 2])
    self.assertTrue(np.array_equal(z0, z1))
    z0 = self._npBatchMatmul(x, y, False, True)
    z1 = np.array([(2. - 2.j), (-2. + 2.j), (-2. + 2.j), (2. - 2.j)]).reshape(
        [1, 2, 2])
    self.assertTrue(np.array_equal(z0, z1))
    z0 = self._npBatchMatmul(x, y, True, False)
    z1 = np.array([(2. + 2.j), (-2. + 2.j), (2. - 2.j), (2. + 2.j)]).reshape(
        [1, 2, 2])
    self.assertTrue(np.array_equal(z0, z1))

  # Compares _tfpBatchMatmul(x, y, alpha, adj) and _npBatchMatMul(x, y, alpha,
  # adj)
  def _compare(self, x_in, y_in, adjoint_a, adjoint_b, static_shape=True):
    x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
    y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
    x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
    y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
    is_floating = x.dtype != np.int32
    tol = 100 * np.finfo(x.dtype).eps if is_floating else 0
    with self.test_session(use_gpu=is_floating) as sess:
      if static_shape:
        z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
        z0_val = z0.eval()
      else:
        x_ph = array_ops.placeholder(x.dtype)
        y_ph = array_ops.placeholder(y.dtype)
        z0 = math_ops.matmul(
            x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
        z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
      z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
      self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)

  def _rand(self, shape, dtype):
    """Returns random values of the given shape/dtype (complex if needed)."""
    vals = np.array(np.random.normal(-10, 10, np.prod(shape)), dtype=dtype)
    if dtype in (np.complex64, np.complex128):
      imag = np.array(np.random.normal(-10, 10, np.prod(shape)), dtype=dtype)
      vals += 1j * imag
    return vals.reshape(shape)

  def _testNonEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):

    def compareNonEmpty(self, a_shape, b_shape):
      self._compare(
          self._rand(a_shape, dtype),
          self._rand(b_shape, dtype), adjoint_a, adjoint_b, use_static_shape)

    compareNonEmpty(self, [1, 2, 3], [1, 3, 5])
    compareNonEmpty(self, [1, 2, 3], [1, 3, 1])
    compareNonEmpty(self, [1, 2, 3], [1, 3, 5])
    compareNonEmpty(self, [7, 1, 3], [7, 3, 5])
    compareNonEmpty(self, [7, 2, 3], [7, 3, 1])
    compareNonEmpty(self, [7, 2, 3], [7, 3, 5])
    compareNonEmpty(self, [10, 64, 75], [10, 75, 30])
    compareNonEmpty(self, [5, 7, 2, 3], [5, 7, 3, 5])

  def _testEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):

    def compareEmpty(self, a_shape, b_shape):
      self._compare(
          np.zeros(a_shape).astype(dtype),
          np.zeros(b_shape).astype(dtype), adjoint_a, adjoint_b,
          use_static_shape)

    compareEmpty(self, [0, 3, 2], [0, 2, 4])
    compareEmpty(self, [3, 0, 2], [3, 2, 5])
    compareEmpty(self, [3, 3, 2], [3, 2, 0])
def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
def Test(self):
np.random.seed(42)
self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
self._testEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
return Test
class BatchMatmulGradientTest(test.TestCase):
  """Checks analytic vs. numeric gradients of batched matmul."""

  # loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
  # gradient checker.
  def _checkGrad(self, x_in, y_in, adjoint_a, adjoint_b):
    x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
    y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
    x = x_in.reshape(x_t_shape) if adjoint_a else x_in
    y = y_in.reshape(y_t_shape) if adjoint_b else y_in
    epsilon = np.finfo(x.dtype).eps
    # Cube root of machine epsilon balances truncation vs. rounding error
    # in the finite-difference step.
    delta = epsilon**(1.0 / 3.0)
    with self.test_session(use_gpu=True):
      inx = constant_op.constant(x)
      iny = constant_op.constant(y)
      z = math_ops.matmul(inx, iny, adjoint_a, adjoint_b)
      loss = math_ops.reduce_sum(z)
      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
           [inx, iny], [x.shape, y.shape],
           loss, [1],
           x_init_value=[x, y],
           delta=delta)
      tol = 20 * delta
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=tol, atol=tol)
      self.assertAllClose(y_jacob_t, y_jacob_n, rtol=tol, atol=tol)

  # Tests a batched matmul of x, and y: x is a 3D tensor of shape [b, n, k];
  # y is a 3D tensor of shape [b, k, m]; the result z has shape [b, n, m]
  # where z[i, :, :] = x[i, :, :] matmul y[i, :, :].
  def _compare(self, b, n, k, m, dtype, adjoint_a, adjoint_b):
    np.random.seed(42)
    x = np.random.normal(0, 1, b * n * k).astype(dtype).reshape([b, n, k])
    y = np.random.normal(0, 1, b * k * m).astype(dtype).reshape([b, k, m])
    self._checkGrad(x, y, adjoint_a, adjoint_b)
def _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b):
def Test(self):
self._compare(1, 2, 3, 5, dtype, adjoint_a, adjoint_b)
self._compare(3, 4, 7, 10, dtype, adjoint_a, adjoint_b)
return Test
if __name__ == "__main__":
  # Register one generated test method per (dtype, adjoint_a, adjoint_b[,
  # static-shape mode]) combination on the test classes, then run them.
  for dtype_ in [
      np.float16, np.float32, np.float64, np.complex64, np.complex128, np.int32
  ]:
    for adjoint_a_ in False, True:
      for adjoint_b_ in False, True:
        name = "%s_%s_%s" % (dtype_.__name__, adjoint_a_, adjoint_b_)
        for use_static_shape in True, False:
          setattr(BatchMatmulOpTest,
                  "testBatchMatmulOp_" + name + ("_%s" % use_static_shape),
                  _GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,
                                        use_static_shape))
        # Gradients are only defined for the non-integer dtypes.
        if dtype_ is not np.int32:
          setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
                  _GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))
  test.main()
| |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile, shutil
from datetime import date
import prepare_rebtel_sdk
# Resolve the directory containing this script and make it the cwd so that
# all relative paths below (manifest, assets/, build/) are module-relative.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Keys that must be present in the module manifest.
# Fix: 'copyright' was listed twice; the duplicate entry is removed.
required_module_keys = ['name', 'version', 'moduleid', 'description',
                        'copyright', 'license', 'platform', 'minsdk',
                        'rebtel_sdk_download_url']
# Default manifest values; shipping these unchanged triggers a warning.
module_defaults = {
	'description':'My module',
	'author': 'Your Name',
	'license' : 'Specify your license',
	'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Placeholder text shipped in LICENSE; warn if it was never replaced.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
	"""Returns the Titanium SDK path from config, with ~ and $VARS expanded."""
	sdk_path = config['TITANIUM_SDK']
	return os.path.expandvars(os.path.expanduser(sdk_path))
def replace_vars(config, token):
	"""Expands $(KEY) placeholders in token using values from config.

	Scanning stops at the first unknown key or unterminated placeholder,
	leaving the remaining text untouched (original behavior preserved).
	"""
	idx = token.find('$(')
	while idx != -1:
		idx2 = token.find(')', idx + 2)
		if idx2 == -1:
			break
		key = token[idx + 2:idx2]
		# Fix: dict.has_key() is Python-2-only; 'in' works on both 2 and 3.
		if key not in config:
			break
		token = token.replace('$(%s)' % key, config[key])
		idx = token.find('$(')
	return token
def read_ti_xcconfig():
	"""Parses titanium.xcconfig into a dict, expanding $(VAR) references.

	Returns:
		dict mapping xcconfig keys to their (variable-expanded) values.
	"""
	# Fix: use a with-block so the file handle is closed deterministically
	# (the original leaked the open file object).
	with open(os.path.join(cwd, 'titanium.xcconfig')) as f:
		contents = f.read()
	config = {}
	for line in contents.splitlines(False):
		line = line.strip()
		# skip // comment lines
		if line[0:2] == '//':
			continue
		idx = line.find('=')
		if idx > 0:
			key = line[0:idx].strip()
			value = line[idx + 1:].strip()
			# earlier keys can be referenced as $(KEY) in later values
			config[key] = replace_vars(config, value)
	return config
def generate_doc(config):
	"""Renders README.md to HTML for inclusion in the module documentation.

	Returns a list with one {"index.html": html} dict.
	"""
	try:
		# Prefer markdown2 when available; fall back to the markdown package.
		import markdown2 as markdown
	except ImportError:
		import markdown
	# use the README as doc generation input,
	file_path = os.path.join(cwd, "README.md")
	html = markdown.markdown(open(file_path).read())
	return [{"index.html": html}]
def compile_js(manifest,config):
	"""Compiles the module's CommonJS JS assets and splices the generated
	Objective-C snippets into ComRebtelSdkModuleAssets.m, then writes the
	compiler's discovered exports to metadata.json.

	No-op when assets/com.rebtel.sdk.js does not exist.
	"""
	js_file = os.path.join(cwd,'assets','com.rebtel.sdk.js')
	if not os.path.exists(js_file): return
	from compiler import Compiler
	try:
		import json
	except:
		import simplejson as json
	compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
	root_asset, module_assets = compiler.compile_module()
	# Objective-C body returning the bytes of the root (bootstrap) asset.
	root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
	# Objective-C body resolving an arbitrary module asset path via `map`.
	module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
	from tools import splice_code
	assets_router = os.path.join(cwd,'Classes','ComRebtelSdkModuleAssets.m')
	splice_code(assets_router, 'asset', root_asset_content)
	splice_code(assets_router, 'resolve_asset', module_asset_content)
	# Generate the exports after crawling all of the available JS source
	exports = open('metadata.json','w')
	json.dump({'exports':compiler.exports }, exports)
	exports.close()
def die(msg):
	"""Prints msg and aborts the build with a non-zero exit code."""
	print msg
	sys.exit(1)
def warn(msg):
	"""Prints a non-fatal warning prefixed with [WARN]."""
	print "[WARN] %s" % msg
def validate_license():
	"""Warns if LICENSE still contains the placeholder license text."""
	# Fix: with-block closes the file handle (the original leaked it).
	with open(os.path.join(cwd, 'LICENSE')) as f:
		c = f.read()
	if c.find(module_license_default) != -1:
		warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	"""Parses and validates the module manifest.

	Dies on a missing manifest or missing required keys; warns when default
	values were left unchanged.

	Returns:
		(manifest dict, path to the manifest file).
	"""
	path = os.path.join(cwd, 'manifest')
	# Fix: check for existence BEFORE opening. The original opened the file
	# first, so a missing manifest raised IOError instead of the friendly
	# die() message.
	if not os.path.exists(path):
		die("missing %s" % path)
	manifest = {}
	with open(path) as f:
		for line in f.readlines():
			line = line.strip()
			if line[0:1] == '#':
				continue
			if line.find(':') < 0:
				continue
			key, value = line.split(':', 1)
			manifest[key.strip()] = value.strip()
	for key in required_module_keys:
		# Fix: dict.has_key() is Python-2-only; 'in' works on both 2 and 3.
		if key not in manifest:
			die("missing required manifest key '%s'" % key)
		if key in module_defaults:
			defvalue = module_defaults[key]
			curvalue = manifest[key]
			if curvalue == defvalue:
				warn("please update the manifest key: '%s' to a non-default value" % key)
	return manifest, path
# File names excluded from the packaged zip.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
# Directory names pruned from the os.walk traversal in zip_dir.
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignoreExt=None):
	"""Recursively adds dir to the zipfile zf under basepath/dir.

	Args:
		zf: An open zipfile.ZipFile.
		dir: Directory to walk; silently skipped if it does not exist.
		basepath: Path prefix used inside the archive.
		ignoreExt: Optional list of file extensions (e.g. ['.pyc']) to skip.
	"""
	# Fix: avoid a mutable default argument ([] is shared between calls).
	if ignoreExt is None:
		ignoreExt = []
	if not os.path.exists(dir):
		return
	for root, dirs, files in os.walk(dir):
		for name in ignoreDirs:
			if name in dirs:
				dirs.remove(name)  # don't visit ignored directories
		for file in files:
			if file in ignoreFiles:
				continue
			e = os.path.splitext(file)
			if len(e) == 2 and e[1] in ignoreExt:
				continue
			from_ = os.path.join(root, file)
			# archive path: replace the leading dir with basepath/dir
			to_ = from_.replace(dir, '%s/%s' % (basepath, dir), 1)
			zf.write(from_, to_)
def glob_libfiles():
	"""Returns paths of all Release-configuration static libraries in build/."""
	return [f for f in glob.glob('build/**/*.a') if f.find('Release-') != -1]
def build_module(manifest,config):
	"""Builds the module for device and simulator, then lipo-merges the
	per-architecture static libraries into build/lib<moduleid>.a.
	"""
	from tools import ensure_dev_path
	ensure_dev_path()
	rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	# build the merged library using lipo
	moduleid = manifest['moduleid']
	libpaths = ''
	for libfile in glob_libfiles():
		libpaths+='%s ' % libfile
	os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
	"""Assembles the distributable <moduleid>-iphone-<version>.zip archive.

	Packs the manifest, merged static library, generated HTML docs, the
	assets/example/platform directories, LICENSE, module.xcconfig, and
	metadata.json (when present) under modules/iphone/<moduleid>/<version>.
	"""
	name = manifest['name'].lower()
	moduleid = manifest['moduleid'].lower()
	version = manifest['version']
	modulezip = '%s-iphone-%s.zip' % (moduleid,version)
	# always rebuild the archive from scratch
	if os.path.exists(modulezip): os.remove(modulezip)
	zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
	modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
	zf.write(mf,'%s/manifest' % modulepath)
	libname = 'lib%s.a' % moduleid
	zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
	docs = generate_doc(config)
	if docs!=None:
		for doc in docs:
			for file, html in doc.iteritems():
				# docs are rendered from .md sources; store them as .html
				filename = string.replace(file,'.md','.html')
				zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
	zip_dir(zf,'assets',modulepath,['.pyc','.js'])
	zip_dir(zf,'example',modulepath,['.pyc'])
	zip_dir(zf,'platform',modulepath,['.pyc','.js'])
	zf.write('LICENSE','%s/LICENSE' % modulepath)
	zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
	exports_file = 'metadata.json'
	if os.path.exists(exports_file):
		zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
	zf.close()
if __name__ == '__main__':
	# Build pipeline: validate inputs, fetch the native Rebtel SDK, compile
	# the bundled JS, build the static library, then produce the module zip.
	manifest,mf = validate_manifest()
	validate_license()
	config = read_ti_xcconfig()
	prepare_rebtel_sdk.prepare(manifest['rebtel_sdk_download_url'])
	sdk = find_sdk(config)
	# make the Titanium SDK's python helpers importable
	sys.path.insert(0,os.path.join(sdk,'iphone'))
	sys.path.append(os.path.join(sdk, "common"))
	compile_js(manifest,config)
	build_module(manifest,config)
	package_module(manifest,mf,config)
	sys.exit(0)
| |
"""
6.00.2x Problem Set 4
"""
# pylint: disable=invalid-name
import pylab
# pylint: disable=E0401, E0602, W0401, W0612
from ps3b import *
#
# PROBLEM 1
#
# pylint: disable=too-many-locals
def simulationDelayedTreatment(numTrials):
"""
Runs simulations and make histograms for problem 1.
Runs numTrials simulations to show the relationship between delayed
treatment and patient outcome using a histogram.
Histograms of final total virus populations are displayed for delays of 300,
150, 75, 0 timesteps (followed by an additional 150 timesteps of
simulation).
numTrials: number of simulation runs to execute (an integer)
"""
# define parameters for patient
numViruses = 100
maxPop = 1000
# define parameters for resistant virus
clearProb = 0.05
maxBirthProb = 0.1
mutProb = 0.005
resistances = {'guttagonol': False, 'grimpex': False}
# define step sizes for variable administration
step1 = [300, 150, 75, 0]
step2 = 150
# for runtime improvement, use tmpStep tmpTrial in while than for loop
tmpStep = 0
tmpTrial = 0
# to use subplot function
subPlot = 0
# for each variable step size run trials
for step in step1:
print "Simulation for step size : ", step
# initialize viruses
viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, \
mutProb)]* numViruses
# initialize virus count to take virus population at end of trial
virusCount = []
tmpTrial = numTrials
# run number of trials
while tmpTrial > 0:
# initialize patient for given trial
patient = TreatedPatient(viruses, maxPop)
# run 150 timesteps to check growth of viruses
tmpStep = step
while tmpStep > 0:
patient.update()
tmpStep -= 1
# administer guttagonol to check impact of it on viruses
patient.addPrescription("guttagonol")
# run 150 timesteps to check growth of viruses
tmpStep = step2
while tmpStep > 0:
patient.update()
tmpStep -= 1
# check and log the virus population at end of trial
virusCount.append(patient.getTotalPop())
tmpTrial -= 1
# end of a given trial run
print "End of Trial for step size : ", step
# preparation of histogram for the variable step size
subPlot += 1
pylab.subplot(2, 2, subPlot)
# each bin of size 50
pylab.hist(virusCount, bins=12, range=(0, 600))
pylab.title("Drug ddministration delay : %s"%step)
pylab.xlabel("Final virus count")
pylab.ylabel("# trials")
# end of trial for variable step size
pylab.tight_layout()
pylab.show()
return None
# pylint: enable=too-many-locals
#
# PROBLEM 2
#
# pylint: disable=too-many-locals
def simulationTwoDrugsDelayedTreatment(numTrials):
"""
Runs simulations and make histograms for problem 2.
Runs numTrials simulations to show the relationship between administration
of multiple drugs and patient outcome.
Histograms of final total virus populations are displayed for lag times of
300, 150, 75, 0 timesteps between adding drugs (followed by an additional
150 timesteps of simulation).
numTrials: number of simulation runs to execute (an integer)
"""
# define parameters for patient
numViruses = 100
maxPop = 1000
# define parameters for resistant virus
clearProb = 0.05
maxBirthProb = 0.1
mutProb = 0.005
resistances = {'guttagonol': False, 'grimpex': False}
# define step sizes for variable administration
step1 = 150
step2 = [300, 150, 75, 0]
step3 = 150
# for runtime improvement, use tmpStep tmpTrial in while than for loop
tmpStep = 0
tmpTrial = 0
# to use subplot function
subPlot = 0
# for each variable step size run trials
for step in step2:
print "Simulation for step size : ", step
# initialize viruses
viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, \
mutProb)]* numViruses
# initialize virus count to take virus population at end of trial
virusCount = []
tmpTrial = numTrials
# run number of trials
while tmpTrial > 0:
# initialize patient for given trial
patient = TreatedPatient(viruses, maxPop)
# run 150 timesteps to check growth of viruses
tmpStep = step1
while tmpStep > 0:
patient.update()
tmpStep -= 1
# administer guttagonol to check impact of it on viruses
patient.addPrescription("guttagonol")
# run trial specific timesteps to check impact of it on viruses
tmpStep = step
while tmpStep > 0:
patient.update()
tmpStep -= 1
# administer grimpex to check impact of it on viruses
patient.addPrescription("grimpex")
# run 150 timesteps to check growth of viruses
tmpStep = step3
while tmpStep > 0:
patient.update()
tmpStep -= 1
# check and log the virus population at end of trial
virusCount.append(patient.getTotalPop())
tmpTrial -= 1
# end of a given trial run
print "End of Trial for step size : ", step
# preparation of histogram for the variable step size
subPlot += 1
pylab.subplot(2, 2, subPlot)
pylab.hist(virusCount, bins=50)
pylab.title("Drug Administration Delay : %s"%step)
pylab.xlabel("Virus Frequency")
pylab.ylabel("Trials")
# end of trial for variable step size
pylab.tight_layout()
pylab.show()
return None
# pylint: enable=too-many-locals
# this implementation is slower by 3 seconds when running with 100 samples.
## PROBLEM 2
##
## pylint: disable=too-many-locals
#def simulationTwoDrugsDelayedTreatment(numTrials):
# """
# Runs simulations and make histograms for problem 2.
#
# Runs numTrials simulations to show the relationship between administration
# of multiple drugs and patient outcome.
#
# Histograms of final total virus populations are displayed for lag times of
# 300, 150, 75, 0 timesteps between adding drugs (followed by an additional
# 150 timesteps of simulation).
#
# numTrials: number of simulation runs to execute (an integer)
# """
# # define parameters for patient
# numViruses = 100
# maxPop = 1000
# # define parameters for resistant virus
# clearProb = 0.05
# maxBirthProb = 0.1
# mutProb = 0.005
# resistances = {'guttagonol': False, 'grimpex': False}
# # define step sizes for variable administration
# step1 = 150
# step2 = [300, 150, 75, 0]
# step3 = 150
# # for runtime improvement, use tmpStep tmpTrial in while than for loop
# tmpCount = 0
# tmpTrial = 0
# # to use subplot function
# subPlot = 0
# # for each variable step size run trials
# for step in step2:
# print "Simulation for step size : ", step
# # initialize viruses
# viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, \
# mutProb)]* numViruses
# # initialize virus count to take virus population at end of trial
# virusCount = []
# tmpTrial = numTrials
# # run number of trials
# while tmpTrial > 0:
# totalStep = step1 + step + step3
# tmpCount = runOneTrialTwoDrugSimulation(numViruses, maxPop, maxBirthProb,
# clearProb, resistances, mutProb, step1,step,
# totalStep)
# virusCount.append(tmpCount)
# tmpTrial -= 1
# # end of a given trial run
# print "End of Trial for step size : ", step
# # preparation of histogram for the variable step size
# subPlot += 1
# pylab.subplot(2, 2, subPlot)
# # each bin of size 50
# pylab.hist(virusCount, bins=12, range=(0, 600))
# pylab.title("Drug ddministration delay : %s"%step)
# pylab.xlabel("Final virus Count")
# pylab.ylabel("# trials")
# # end of trial for variable step size
# pylab.tight_layout()
# pylab.show()
# return None
#
#def runOneTrialTwoDrugSimulation(numViruses, maxPop, maxBirthProb, clearProb, \
# resistances, mutProb, \
# numStepsBeforeDrugOneApplied, \
# numStepsBeforeDrugTwoApplied, \
# totalNumSteps):
# """
# Helper function for doing one actual simulation run with drugs applied
# """
#
# viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, \
# mutProb)]* numViruses
#
# patient = TreatedPatient(viruses, maxPop)
#
#
#
# step1 = totalNumSteps - numStepsBeforeDrugOneApplied
# step2 = step1 - numStepsBeforeDrugTwoApplied
#
#
# while totalNumSteps > 0:
# if step1 == totalNumSteps:
# patient.addPrescription("guttagonol")
# if step2 == totalNumSteps:
# patient.addPrescription("grimpex")
# patient.update()
# totalNumSteps -= 1
#
# return patient.getTotalPop()
# Guard the simulation run so importing this module no longer triggers a
# full plotting run as a side effect; behavior when run as a script is
# unchanged.
if __name__ == '__main__':
    simulationDelayedTreatment(100)
    #simulationTwoDrugsDelayedTreatment(100)
| |
# -*- coding: utf-8 -*-
"""Test triggers"""
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
# plpgsql body for a row-level trigger function that stamps column c3
# with the current timestamp.
FUNC_SRC = "BEGIN NEW.c3 := CURRENT_TIMESTAMP; RETURN NEW; END"
# plpgsql body for an INSTEAD OF trigger that redirects inserts on a view
# into the underlying table t1.
FUNC_INSTEAD_SRC = "BEGIN INSERT INTO t1 VALUES (NEW.c1, NEW.c2, now()); " \
    "RETURN NULL; END"
# SQL statements shared by the test cases below.
CREATE_TABLE_STMT = "CREATE TABLE t1 (c1 integer, c2 text, " \
    "c3 timestamp with time zone)"
CREATE_FUNC_STMT = "CREATE FUNCTION f1() RETURNS trigger LANGUAGE plpgsql " \
    "AS $_$%s$_$" % FUNC_SRC
CREATE_STMT = "CREATE TRIGGER tr1 BEFORE INSERT OR UPDATE ON t1 " \
    "FOR EACH ROW EXECUTE PROCEDURE f1()"
DROP_TABLE_STMT = "DROP TABLE IF EXISTS t1"
DROP_FUNC_STMT = "DROP FUNCTION IF EXISTS f1()"
COMMENT_STMT = "COMMENT ON TRIGGER tr1 ON t1 IS 'Test trigger tr1'"
class TriggerToMapTestCase(DatabaseToMapTestCase):
    """Test mapping of existing triggers"""

    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if a subclass ever inherits this setUp.
        super(TriggerToMapTestCase, self).setUp()
        if self.db.version < 90000:
            if not self.db.is_plpgsql_installed():
                self.db.execute_commit("CREATE LANGUAGE plpgsql")

    def test_map_trigger(self):
        "Map a simple trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers'] == {
            'tr1': {'timing': 'before', 'events': ['insert', 'update'],
                    'level': 'row', 'procedure': 'f1()'}}

    def test_map_trigger2(self):
        "Map another simple trigger with different attributes"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE TRIGGER tr1 AFTER DELETE OR TRUNCATE ON t1 "
                 "EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers'] == {
            'tr1': {'timing': 'after', 'events': ['delete', 'truncate'],
                    'level': 'statement', 'procedure': 'f1()'}}

    def test_map_trigger_update_cols(self):
        "Map trigger with UPDATE OF columns"
        if self.db.version < 90000:
            self.skipTest('Only available on PG 9.0')
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE TRIGGER tr1 AFTER INSERT OR UPDATE OF c1, c2 ON t1 "
                 "FOR EACH ROW EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers'] == {
            'tr1': {'timing': 'after', 'events': ['insert', 'update'],
                    'columns': ['c1', 'c2'], 'level': 'row',
                    'procedure': 'f1()'}}

    def test_map_trigger_conditional(self):
        "Map trigger with a WHEN qualification"
        if self.db.version < 90000:
            self.skipTest('Only available on PG 9.0')
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE TRIGGER tr1 AFTER UPDATE ON t1 FOR EACH ROW "
                 "WHEN (OLD.c2 IS DISTINCT FROM NEW.c2) "
                 "EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers'] == {
            'tr1': {'timing': 'after', 'events': ['update'],
                    'level': 'row', 'procedure': 'f1()',
                    'condition': '(old.c2 IS DISTINCT FROM new.c2)'}}

    def test_map_trigger_instead(self):
        "Map an INSTEAD OF trigger"
        if self.db.version < 90100:
            self.skipTest('Only available on PG 9.1')
        stmts = [CREATE_TABLE_STMT, "CREATE VIEW v1 AS SELECT c1, c2 FROM t1",
                 "CREATE FUNCTION f1() RETURNS trigger LANGUAGE plpgsql AS "
                 "$_$%s$_$" % FUNC_INSTEAD_SRC,
                 "CREATE TRIGGER tr1 INSTEAD OF INSERT ON v1 "
                 "FOR EACH ROW EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['view v1']['triggers'] == {
            'tr1': {'timing': 'instead of', 'events': ['insert'],
                    'level': 'row', 'procedure': 'f1()'}}

    def test_map_trigger_comment(self):
        "Map a trigger comment"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT,
                 COMMENT_STMT]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers']['tr1'][
            'description'] == 'Test trigger tr1'
class ConstraintTriggerToMapTestCase(DatabaseToMapTestCase):
    """Test mapping of existing constraint triggers"""

    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if a subclass ever inherits this setUp.
        super(ConstraintTriggerToMapTestCase, self).setUp()
        if self.db.version < 90000:
            if not self.db.is_plpgsql_installed():
                self.db.execute_commit("CREATE LANGUAGE plpgsql")

    def test_map_trigger(self):
        "Map a simple constraint trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE CONSTRAINT TRIGGER tr1 AFTER INSERT OR UPDATE ON t1 "
                 "FOR EACH ROW EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers'] == {
            'tr1': {'constraint': True, 'timing': 'after',
                    'events': ['insert', 'update'], 'level': 'row',
                    'procedure': 'f1()'}}

    def test_map_trigger_deferrable(self):
        "Map a deferrable, initially deferred constraint trigger"
        stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT,
                 "CREATE CONSTRAINT TRIGGER tr1 AFTER INSERT OR UPDATE ON t1 "
                 "DEFERRABLE INITIALLY DEFERRED "
                 "FOR EACH ROW EXECUTE PROCEDURE f1()"]
        dbmap = self.to_map(stmts)
        assert dbmap['schema public']['table t1']['triggers'] == {
            'tr1': {'constraint': True, 'deferrable': True,
                    'initially_deferred': True, 'timing': 'after',
                    'events': ['insert', 'update'], 'level': 'row',
                    'procedure': 'f1()'}}
class TriggerToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input triggers"""
def setUp(self):
super(self.__class__, self).setUp()
if self.db.version < 90000:
if not self.db.is_plpgsql_installed():
self.db.execute_commit("CREATE LANGUAGE plpgsql")
def test_create_trigger(self):
"Create a simple trigger"
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {
'timing': 'before', 'events': ['insert', 'update'],
'level': 'row', 'procedure': 'f1()'}}}})
sql = self.to_sql(inmap)
crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
assert fix_indent(sql[2]) == CREATE_STMT
def test_create_trigger2(self):
"Create another simple trigger with"
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {'timing': 'after',
'events': ['delete', 'truncate'],
'procedure': 'f1()'}}}})
sql = self.to_sql(inmap)
crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 AFTER DELETE OR " \
"TRUNCATE ON t1 FOR EACH STATEMENT EXECUTE PROCEDURE f1()"
def test_create_trigger_update_cols(self):
"Create a trigger with UPDATE OF columns"
if self.db.version < 90000:
self.skipTest('Only available on PG 9.0')
inmap = self.std_map()
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {'timing': 'before', 'events': [
'insert', 'update'], 'columns': ['c1', 'c2'], 'level': 'row',
'procedure': 'f1()'}}}})
sql = self.to_sql(inmap)
crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 BEFORE INSERT OR " \
"UPDATE OF c1, c2 ON t1 FOR EACH ROW EXECUTE PROCEDURE f1()"
def test_create_trigger_conditional(self):
"Create a trigger with a WHEN qualification"
if self.db.version < 90000:
self.skipTest('Only available on PG 9.0')
inmap = self.std_map()
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {'timing': 'before', 'events': [
'update'], 'level': 'row', 'procedure': 'f1()',
'condition': '(old.c2 IS DISTINCT FROM new.c2)'}}}})
sql = self.to_sql(inmap)
crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 BEFORE UPDATE " \
"ON t1 FOR EACH ROW WHEN ((old.c2 IS DISTINCT FROM new.c2)) " \
"EXECUTE PROCEDURE f1()"
def test_create_trigger_instead(self):
"Create an INSTEAD OF trigger"
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger',
'source': FUNC_INSTEAD_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}]},
'view v1': {'definition': "SELECT c1, c2 FROM t1",
'triggers': {'tr1': {'timing': 'instead of',
'events': ['insert'],
'level': 'row',
'procedure': 'f1()'}}}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_TABLE_STMT
cr1, cr2 = (1, 2) if 'VIEW' in sql[1] else (2, 1)
assert fix_indent(sql[cr1]) == \
"CREATE VIEW v1 AS SELECT c1, c2 FROM t1"
assert fix_indent(sql[cr2]) == "CREATE FUNCTION f1() RETURNS " \
"trigger LANGUAGE plpgsql AS $_$%s$_$" % FUNC_INSTEAD_SRC
assert fix_indent(sql[3]) == "CREATE TRIGGER tr1 INSTEAD OF INSERT " \
"ON v1 FOR EACH ROW EXECUTE PROCEDURE f1()"
def test_create_trigger_in_schema(self):
"Create a trigger within a non-public schema"
inmap = self.std_map(plpgsql_installed=True)
inmap.update({'schema s1': {'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC},
'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {
'timing': 'before', 'events': ['insert', 'update'],
'level': 'row', 'procedure': 'f1()'}}}}})
sql = self.to_sql(inmap, ["CREATE SCHEMA s1"])
assert fix_indent(sql[2]) == "CREATE TRIGGER tr1 BEFORE INSERT OR " \
"UPDATE ON s1.t1 FOR EACH ROW EXECUTE PROCEDURE f1()"
def test_drop_trigger(self):
"Drop an existing trigger"
stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}]}})
sql = self.to_sql(inmap, stmts)
assert sql == ["DROP TRIGGER tr1 ON t1"]
def test_drop_trigger_table(self):
"Drop an existing trigger and the related table"
stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
sql = self.to_sql(inmap, stmts)
assert sql[0] == "DROP TRIGGER tr1 ON t1"
assert sql[1] == "DROP TABLE t1"
def test_trigger_with_comment(self):
"Create a trigger with a comment"
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {
'description': 'Test trigger tr1',
'timing': 'before', 'events': ['insert', 'update'],
'level': 'row', 'procedure': 'f1()'}}}})
sql = self.to_sql(inmap)
crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
assert fix_indent(sql[2]) == CREATE_STMT
assert sql[3] == COMMENT_STMT
def test_comment_on_trigger(self):
"Create a comment on an existing trigger"
stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT]
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {
'description': 'Test trigger tr1',
'timing': 'before', 'events': ['insert', 'update'],
'level': 'row', 'procedure': 'f1()'}}}})
sql = self.to_sql(inmap, stmts)
assert sql == [COMMENT_STMT]
def test_drop_trigger_comment(self):
"Drop a comment on an existing trigger"
stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT,
COMMENT_STMT]
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {
'timing': 'before', 'events': ['insert', 'update'],
'level': 'row', 'procedure': 'f1()'}}}})
sql = self.to_sql(inmap, stmts)
assert sql == ["COMMENT ON TRIGGER tr1 ON t1 IS NULL"]
def test_change_trigger_comment(self):
"Change existing comment on a trigger"
stmts = [CREATE_TABLE_STMT, CREATE_FUNC_STMT, CREATE_STMT,
COMMENT_STMT]
inmap = self.std_map(plpgsql_installed=True)
inmap['schema public'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'c3': {'type': 'timestamp with time zone'}}],
'triggers': {'tr1': {
'description': 'Changed trigger tr1',
'timing': 'before', 'events': ['insert', 'update'],
'level': 'row', 'procedure': 'f1()'}}}})
sql = self.to_sql(inmap, stmts)
assert sql == ["COMMENT ON TRIGGER tr1 ON t1 IS 'Changed trigger tr1'"]
class ConstraintTriggerToSqlTestCase(InputMapToSqlTestCase):
    """Test SQL generation from input constraint triggers"""

    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if a subclass ever inherits this setUp.
        super(ConstraintTriggerToSqlTestCase, self).setUp()
        if self.db.version < 90000:
            if not self.db.is_plpgsql_installed():
                self.db.execute_commit("CREATE LANGUAGE plpgsql")

    def test_create_trigger(self):
        "Create a constraint trigger"
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema public'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema public'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'timestamp with time zone'}}],
            'triggers': {'tr1': {
                'constraint': True, 'timing': 'after',
                'events': ['insert', 'update'], 'level': 'row',
                'procedure': 'f1()'}}}})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == "CREATE CONSTRAINT TRIGGER tr1 AFTER " \
            "INSERT OR UPDATE ON t1 FOR EACH ROW EXECUTE PROCEDURE f1()"

    def test_create_trigger_deferrable(self):
        "Create a deferrable constraint trigger"
        inmap = self.std_map(plpgsql_installed=True)
        inmap['schema public'].update({'function f1()': {
            'language': 'plpgsql', 'returns': 'trigger', 'source': FUNC_SRC}})
        inmap['schema public'].update({'table t1': {
            'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                        {'c3': {'type': 'timestamp with time zone'}}],
            'triggers': {'tr1': {
                'constraint': True, 'deferrable': True,
                'initially_deferred': True, 'timing': 'after',
                'events': ['insert', 'update'], 'level': 'row',
                'procedure': 'f1()'}}}})
        sql = self.to_sql(inmap)
        crt0, crt1 = (0, 1) if 'TABLE' in sql[0] else (1, 0)
        assert fix_indent(sql[crt0]) == CREATE_TABLE_STMT
        assert fix_indent(sql[crt1]) == CREATE_FUNC_STMT
        assert fix_indent(sql[2]) == "CREATE CONSTRAINT TRIGGER tr1 " \
            "AFTER INSERT OR UPDATE ON t1 DEFERRABLE INITIALLY " \
            "DEFERRED FOR EACH ROW EXECUTE PROCEDURE f1()"
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Datastore backed Blobstore API stub.
Class:
BlobstoreServiceStub: BlobstoreService stub backed by datastore.
"""
import os
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import users
from google.appengine.api import blobstore
from google.appengine.api.blobstore import blobstore_service_pb
from google.appengine.runtime import apiproxy_errors
# Public interface of this module.
__all__ = ['BlobStorage',
           'BlobstoreServiceStub',
           'ConfigurationError',
           'CreateUploadSession',
           'Error',
          ]
class Error(Exception):
  """Base blobstore error type.

  Base class for the errors defined in this module.
  """
class ConfigurationError(Error):
  """Raised when environment is not correctly configured.

  Raised by BlobstoreServiceStub._GetEnviron when a required environment
  variable (e.g. SERVER_NAME) is missing.
  """
# Datastore kind under which this stub persists upload sessions.
_UPLOAD_SESSION_KIND = '__BlobUploadSession__'
def CreateUploadSession(creation, success_path, user):
  """Create upload session in datastore.

  Creates an upload session and puts it in Datastore to be referenced by
  upload handler later.

  Args:
    creation: Creation timestamp.
    success_path: Path in users application to call upon success.
    user: User that initiated this upload, if any.

  Returns:
    String encoded key of new Datastore entity.
  """
  session = datastore.Entity(_UPLOAD_SESSION_KIND)
  # Every session starts out in the 'init' state.
  session['creation'] = creation
  session['success_path'] = success_path
  session['user'] = user
  session['state'] = 'init'
  datastore.Put(session)
  return str(session.key())
class BlobStorage(object):
  """Base class for defining how blobs are stored.

  This base class merely defines an interface that all stub blob-storage
  mechanisms must implement.  Every method raises NotImplementedError and
  must be overridden by a concrete subclass.
  """

  def StoreBlob(self, blob_key, blob_stream):
    """Store blob stream.

    Implement this method to persist blob data.

    Args:
      blob_key: Blob key of blob to store.
      blob_stream: Stream or stream-like object that will generate blob content.
    """
    raise NotImplementedError('Storage class must override StoreBlob method.')

  def OpenBlob(self, blob_key):
    """Open blob for streaming.

    Args:
      blob_key: Blob-key of existing blob to open for reading.

    Returns:
      Open file stream for reading blob.  Caller is responsible for closing
      file.
    """
    raise NotImplementedError('Storage class must override OpenBlob method.')

  def DeleteBlob(self, blob_key):
    """Delete blob data from storage.

    Args:
      blob_key: Blob-key of existing blob to delete.
    """
    raise NotImplementedError('Storage class must override DeleteBlob method.')
class BlobstoreServiceStub(apiproxy_stub.APIProxyStub):
  """Datastore backed Blobstore service stub.

  This stub stores and manages upload sessions in the Datastore and must be
  provided with a blob_storage object to know where the actual blob
  records can be found after having been uploaded.

  This stub does not handle the actual creation of blobs, neither the BlobInfo
  in the Datastore nor creation of blob data in the blob_storage.  It does,
  however, assume that another part of the system has created these and
  uses these objects for deletion.

  An upload session is created when the CreateUploadURL request is handled and
  put in the Datastore under the __BlobUploadSession__ kind.  There is no
  analog for this kind on a production server.  Other than creation, this stub
  does not work with session objects.  The URLs created by this service stub
  are:

    http://<appserver-host>:<appserver-port>/<uploader-path>/<session-info>

  This is very similar to what the URL is on a production server.  The session
  info is the string encoded version of the session entity.
  """

  def __init__(self,
               blob_storage,
               time_function=time.time,
               service_name='blobstore',
               uploader_path='_ah/upload/'):
    """Constructor.

    Args:
      blob_storage: BlobStorage class instance used for blob storage.
      time_function: Used for dependency injection in tests.
      service_name: Service name expected for all calls.
      uploader_path: Path to upload handler pointed to by URLs generated
        by this service stub.
    """
    super(BlobstoreServiceStub, self).__init__(service_name)
    self.__storage = blob_storage
    self.__time_function = time_function
    # NOTE(review): __next_session_id is initialized but not used in any
    # method visible here -- presumably a leftover; confirm before removal.
    self.__next_session_id = 1
    self.__uploader_path = uploader_path

  @property
  def storage(self):
    """Access BlobStorage used by service stub.

    Returns:
      BlobStorage instance used by blobstore service stub.
    """
    return self.__storage

  def _GetEnviron(self, name):
    """Helper method ensures environment configured as expected.

    Args:
      name: Name of environment variable to get.

    Returns:
      Environment variable associated with name.

    Raises:
      ConfigurationError if required environment variable is not found.
    """
    try:
      return os.environ[name]
    except KeyError:
      raise ConfigurationError('%s is not set in environment.' % name)

  def _CreateSession(self, success_path, user):
    """Create new upload session.

    Args:
      success_path: Application path to call upon successful POST.
      user: User that initiated the upload session.

    Returns:
      String encoded key of a new upload session created in the datastore.
    """
    return CreateUploadSession(self.__time_function(),
                               success_path,
                               user)

  def _Dynamic_CreateUploadURL(self, request, response):
    """Create upload URL implementation.

    Create a new upload session.  The upload session key is encoded in the
    resulting POST URL.  This URL is embedded in a POST form by the application
    which contacts the uploader when the user posts.

    Args:
      request: A fully initialized CreateUploadURLRequest instance.
      response: A CreateUploadURLResponse instance.
    """
    session = self._CreateSession(request.success_path(),
                                  users.get_current_user())
    # Host and port come from the environment (raises ConfigurationError
    # if either is missing).
    response.set_url('http://%s:%s/%s%s' % (self._GetEnviron('SERVER_NAME'),
                                            self._GetEnviron('SERVER_PORT'),
                                            self.__uploader_path,
                                            session))

  def _Dynamic_DeleteBlob(self, request, response):
    """Delete a blob by its blob-key.

    Delete a blob from the blobstore using its blob-key.  Deleting blobs that
    do not exist is a no-op.

    Args:
      request: A fully initialized DeleteBlobRequest instance.
      response: Not used but should be a VoidProto.
    """
    for blob_key in request.blob_key_list():
      # Remove both the BlobInfo entity and the stored blob bytes.
      key = datastore_types.Key.from_path(blobstore.BLOB_INFO_KIND,
                                          str(blob_key))
      datastore.Delete(key)
      self.__storage.DeleteBlob(blob_key)

  def _Dynamic_FetchData(self, request, response):
    """Fetch a blob fragment from a blob by its blob-key.

    Fetches a blob fragment using its blob-key.  Start index is inclusive,
    end index is inclusive.  Valid requests for information outside of
    the range of the blob return a partial string or empty string if entirely
    out of range.

    Args:
      request: A fully initialized FetchDataRequest instance.
      response: A FetchDataResponse instance.

    Raises:
      ApplicationError when application has the following errors:
        INDEX_OUT_OF_RANGE: Index is negative or end > start.
        BLOB_FETCH_SIZE_TOO_LARGE: Request blob fragment is larger than
          MAX_BLOB_FRAGMENT_SIZE.
        BLOB_NOT_FOUND: If invalid blob-key is provided or is not found.
    """
    start_index = request.start_index()
    if start_index < 0:
      raise apiproxy_errors.ApplicationError(
          blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE)

    end_index = request.end_index()
    if end_index < start_index:
      raise apiproxy_errors.ApplicationError(
          blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE)

    # Both indexes are inclusive, hence the +1.
    fetch_size = end_index - start_index + 1
    if fetch_size > blobstore.MAX_BLOB_FETCH_SIZE:
      raise apiproxy_errors.ApplicationError(
          blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE)

    # Existence check: the BlobInfo entity must be present before reading
    # bytes from storage.
    blob_key = request.blob_key()
    blob_info_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND, blob_key)
    try:
      datastore.Get(blob_info_key)
    except datastore_errors.EntityNotFoundError, err:
      raise apiproxy_errors.ApplicationError(
          blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND)

    blob_file = self.__storage.OpenBlob(blob_key)
    blob_file.seek(start_index)
    response.set_data(blob_file.read(fetch_size))
| |
# mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2011 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 0.7.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
# Public names exported by ``from mock import *``.
__all__ = (
    'Mock',
    'MagicMock',
    'mocksignature',
    'patch',
    'patch_object',
    'sentinel',
    'DEFAULT'
)

__version__ = '0.7.0'

# NOTE(review): presumably marks this module's frames for omission from
# unittest failure tracebacks -- confirm against the unittest version in use.
__unittest = True
import sys
import warnings
# Compatibility shims: probe for names that differ between Python versions
# and platforms, providing fallbacks where possible.
try:
    import inspect
except ImportError:
    # for alternative platforms that
    # may not have inspect
    inspect = None

try:
    BaseException
except NameError:
    # Python 2.4 compatibility
    BaseException = Exception

try:
    from functools import wraps
except ImportError:
    # Python 2.4 compatibility: minimal stand-in copying the same
    # metadata (name, docstring, module) that functools.wraps copies.
    def wraps(original):
        def inner(f):
            f.__name__ = original.__name__
            f.__doc__ = original.__doc__
            f.__module__ = original.__module__
            return f
        return inner

try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str

try:
    long
except NameError:
    # Python 3
    long = int

# True when running under Python 3.x.
inPy3k = sys.version_info[0] == 3

# Name of the attribute holding a bound method's instance; used by
# _getsignature to detect bound methods.
if inPy3k:
    self = '__self__'
else:
    self = 'im_self'
# getsignature and mocksignature heavily "inspired" by
# the decorator module: http://pypi.python.org/pypi/decorator/
# by Michele Simionato
def _getsignature(func, skipfirst):
    """Return ``(signature_string, callable)`` for `func`.

    Classes are resolved to their ``__init__`` (forcing the first argument
    to be skipped) and non-function callables to their ``__call__``.  The
    returned signature omits default values and never contains the
    reserved name ``_mock_``.
    """
    if inspect is None:
        raise ImportError('inspect module not available')

    if inspect.isclass(func):
        func = func.__init__
        # a constructor always takes self first
        skipfirst = True
    elif not (inspect.ismethod(func) or inspect.isfunction(func)):
        func = func.__call__

    args, star_args, star_kwargs, arg_defaults = inspect.getargspec(func)

    # bound methods must lose their self argument
    if getattr(func, self, None) is not None:
        args = args[1:]

    reserved = "_mock_ is a reserved argument name, can't mock signatures using _mock_"
    assert '_mock_' not in args, reserved
    if star_args is not None:
        assert '_mock_' not in star_args, reserved
    if star_kwargs is not None:
        assert '_mock_' not in star_kwargs, reserved

    if skipfirst:
        args = args[1:]

    sig = inspect.formatargspec(args, star_args, star_kwargs, arg_defaults,
                                formatvalue=lambda value: "")
    # strip the surrounding parentheses
    return sig[1:-1], func
def _copy_func_details(func, funcopy):
    """Copy identifying metadata from `func` onto `funcopy`.

    Transfers the name, docstring, attribute dict, module and default
    argument values so the copy presents the same public face as the
    original function.
    """
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    funcopy.__dict__.update(func.__dict__)
    funcopy.__module__ = func.__module__
    if inPy3k:
        # Python 3 spells these with dunders and adds keyword-only defaults.
        funcopy.__defaults__ = func.__defaults__
        funcopy.__kwdefaults__ = func.__kwdefaults__
    else:
        funcopy.func_defaults = func.func_defaults
def mocksignature(func, mock=None, skipfirst=False):
    """
    mocksignature(func, mock=None, skipfirst=False)
    Create a new function with the same signature as `func` that delegates
    to `mock`. If `skipfirst` is True the first argument is skipped, useful
    for methods where `self` needs to be omitted from the new function.
    If you don't pass in a `mock` then one will be created for you.
    The mock is set as the `mock` attribute of the returned function for easy
    access.
    `mocksignature` can also be used with classes. It copies the signature of
    the `__init__` method.
    When used with callable objects (instances) it copies the signature of the
    `__call__` method.
    """
    if mock is None:
        mock = Mock()
    signature, func = _getsignature(func, skipfirst)
    # Build a lambda whose parameter list matches func and which forwards
    # every argument, positionally and by keyword, to the mock.
    src = "lambda %(signature)s: _mock_(%(signature)s)" % {
        'signature': signature
    }
    # eval with _mock_ bound in the globals of the generated lambda.
    funcopy = eval(src, dict(_mock_=mock))
    _copy_func_details(func, funcopy)
    funcopy.mock = mock
    return funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class SentinelObject(object):
    """A named, unique marker object."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<SentinelObject "%s">' % self.name
class Sentinel(object):
    """Namespace of named sentinels: each attribute access yields the one
    SentinelObject registered under that name, creating it on first use."""
    def __init__(self):
        self._sentinels = {}
    def __getattr__(self, name):
        if name == '__bases__':
            # Without this help(mock) raises an exception
            raise AttributeError
        try:
            return self._sentinels[name]
        except KeyError:
            marker = self._sentinels[name] = SentinelObject(name)
            return marker
sentinel = Sentinel()
# DEFAULT is the library-wide "no value supplied" marker.
DEFAULT = sentinel.DEFAULT
# On Python 2 this captures the old-style class type (types.ClassType);
# on Python 3 it is simply `type`. Used below to build class_types.
class OldStyleClass:
    pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
# Types that count as "a class" for isinstance checks throughout the module
# (new-style only on Python 3; new-style plus old-style on Python 2).
if inPy3k:
    class_types = type
else:
    class_types = (type, ClassType)
class Mock(object):
    """
    Create a new ``Mock`` object. ``Mock`` takes several optional arguments
    that specify the behaviour of the Mock object:
    * ``spec``: This can be either a list of strings or an existing object (a
      class or instance) that acts as the specification for the mock object. If
      you pass in an object then a list of strings is formed by calling dir on
      the object (excluding unsupported magic attributes and methods). Accessing
      any attribute not in this list will raise an ``AttributeError``.
      If ``spec`` is an object (rather than a list of strings) then
      `mock.__class__` returns the class of the spec object. This allows mocks
      to pass `isinstance` tests.
    * ``spec_set``: A stricter variant of ``spec``. If used, attempting to *set*
      or get an attribute on the mock that isn't on the object passed as
      ``spec_set`` will raise an ``AttributeError``.
    * ``side_effect``: A function to be called whenever the Mock is called. See
      the :attr:`Mock.side_effect` attribute. Useful for raising exceptions or
      dynamically changing return values. The function is called with the same
      arguments as the mock, and unless it returns :data:`DEFAULT`, the return
      value of this function is used as the return value.
      Alternatively ``side_effect`` can be an exception class or instance. In
      this case the exception will be raised when the mock is called.
    * ``return_value``: The value returned when the mock is called. By default
      this is a new Mock (created on first access). See the
      :attr:`Mock.return_value` attribute.
    * ``wraps``: Item for the mock object to wrap. If ``wraps`` is not None
      then calling the Mock will pass the call through to the wrapped object
      (returning the real result and ignoring ``return_value``). Attribute
      access on the mock will return a Mock object that wraps the corresponding
      attribute of the wrapped object (so attempting to access an attribute that
      doesn't exist will raise an ``AttributeError``).
      If the mock has an explicit ``return_value`` set then calls are not passed
      to the wrapped object and the ``return_value`` is returned instead.
    * ``name``: If the mock has a name then it will be used in the repr of the
      mock. This can be useful for debugging. The name is propagated to child
      mocks.
    """
    def __new__(cls, *args, **kw):
        # every instance has its own class
        # so we can create magic methods on the
        # class without stomping on other mocks
        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
        return object.__new__(new)
    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
                 wraps=None, name=None, spec_set=None, parent=None):
        self._parent = parent
        self._name = name
        _spec_class = None
        # spec_set implies spec; remember that the stricter mode is on.
        if spec_set is not None:
            spec = spec_set
            spec_set = True
        # A non-list spec is an object/class: record its class for
        # __class__ spoofing and reduce it to its attribute names.
        if spec is not None and not isinstance(spec, list):
            if isinstance(spec, class_types):
                _spec_class = spec
            else:
                _spec_class = spec.__class__
            spec = dir(spec)
        self._spec_class = _spec_class
        self._spec_set = spec_set
        self._methods = spec
        self._children = {}
        self._return_value = return_value
        self.side_effect = side_effect
        self._wraps = wraps
        # reset_mock initialises called/call_args/... ; __setattr__ relies on
        # 'method_calls' appearing in __dict__ to know init is complete.
        self.reset_mock()
    @property
    def __class__(self):
        # Report the spec's class (if any) so isinstance checks pass.
        if self._spec_class is None:
            return type(self)
        return self._spec_class
    def reset_mock(self):
        "Restore the mock object to its initial state."
        self.called = False
        self.call_args = None
        self.call_count = 0
        self.call_args_list = []
        self.method_calls = []
        for child in self._children.values():
            child.reset_mock()
        # Avoid infinite recursion when return_value is the mock itself.
        if isinstance(self._return_value, Mock):
            if not self._return_value is self:
                self._return_value.reset_mock()
    def __get_return_value(self):
        # Lazily create the default return value on first access.
        if self._return_value is DEFAULT:
            self._return_value = self._get_child_mock()
        return self._return_value
    def __set_return_value(self, value):
        self._return_value = value
    __return_value_doc = "The value to be returned when the mock is called."
    return_value = property(__get_return_value, __set_return_value,
                            __return_value_doc)
    def __call__(self, *args, **kwargs):
        # Record the call on this mock ...
        self.called = True
        self.call_count += 1
        self.call_args = callargs((args, kwargs))
        self.call_args_list.append(callargs((args, kwargs)))
        # ... and on every ancestor, with a dotted method name.
        parent = self._parent
        name = self._name
        while parent is not None:
            parent.method_calls.append(callargs((name, args, kwargs)))
            if parent._parent is None:
                break
            name = parent._name + '.' + name
            parent = parent._parent
        ret_val = DEFAULT
        if self.side_effect is not None:
            # Exception instance, or exception class: raise it.
            if (isinstance(self.side_effect, BaseException) or
                isinstance(self.side_effect, class_types) and
                issubclass(self.side_effect, BaseException)):
                raise self.side_effect
            ret_val = self.side_effect(*args, **kwargs)
            if ret_val is DEFAULT:
                ret_val = self.return_value
        # Delegate to the wrapped object only while no explicit
        # return_value has been set.
        if self._wraps is not None and self._return_value is DEFAULT:
            return self._wraps(*args, **kwargs)
        if ret_val is DEFAULT:
            ret_val = self.return_value
        return ret_val
    def __getattr__(self, name):
        # Guard against recursion before __init__ has set _methods.
        if name == '_methods':
            raise AttributeError(name)
        elif self._methods is not None:
            # spec'd mock: only attributes in the spec are allowed.
            if name not in self._methods or name in _all_magics:
                raise AttributeError("Mock object has no attribute '%s'" % name)
        elif _is_magic(name):
            raise AttributeError(name)
        # Auto-create child mocks on first access and cache them.
        if name not in self._children:
            wraps = None
            if self._wraps is not None:
                wraps = getattr(self._wraps, name)
            self._children[name] = self._get_child_mock(parent=self, name=name, wraps=wraps)
        return self._children[name]
    def __repr__(self):
        if self._name is None and self._spec_class is None:
            return object.__repr__(self)
        name_string = ''
        spec_string = ''
        if self._name is not None:
            # Build a dotted path through the parent chain for the repr.
            def get_name(name):
                if name is None:
                    return 'mock'
                return name
            parent = self._parent
            name = self._name
            while parent is not None:
                name = get_name(parent._name) + '.' + name
                parent = parent._parent
            name_string = ' name=%r' % name
        if self._spec_class is not None:
            spec_string = ' spec=%r'
            if self._spec_set:
                spec_string = ' spec_set=%r'
            spec_string = spec_string % self._spec_class.__name__
        return "<%s%s%s id='%s'>" % (type(self).__name__,
                                     name_string,
                                     spec_string,
                                     id(self))
    def __setattr__(self, name, value):
        if not 'method_calls' in self.__dict__:
            # allow all attribute setting until initialisation is complete
            return object.__setattr__(self, name, value)
        # spec_set mode: refuse to set attributes outside the spec.
        if (self._spec_set and self._methods is not None and name not in
            self._methods and name not in self.__dict__ and
            name != 'return_value'):
            raise AttributeError("Mock object has no attribute '%s'" % name)
        if name in _unsupported_magics:
            msg = 'Attempting to set unsupported magic method %r.' % name
            raise AttributeError(msg)
        elif name in _all_magics:
            if self._methods is not None and name not in self._methods:
                raise AttributeError("Mock object has no attribute '%s'" % name)
            # Magic methods must live on the (per-instance) class to be
            # found by the interpreter, not on the instance.
            if not isinstance(value, Mock):
                setattr(type(self), name, _get_method(name, value))
                original = value
                real = lambda *args, **kw: original(self, *args, **kw)
                value = mocksignature(value, real, skipfirst=True)
            else:
                setattr(type(self), name, value)
        return object.__setattr__(self, name, value)
    def __delattr__(self, name):
        # Remove magic methods from the per-instance class as well.
        if name in _all_magics and name in type(self).__dict__:
            delattr(type(self), name)
        return object.__delattr__(self, name)
    def assert_called_with(self, *args, **kwargs):
        """
        assert that the mock was called with the specified arguments.
        Raises an AssertionError if the args and keyword args passed in are
        different to the last call to the mock.
        """
        if self.call_args is None:
            raise AssertionError('Expected: %s\nNot called' % ((args, kwargs),))
        if not self.call_args == (args, kwargs):
            raise AssertionError(
                'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args)
            )
    def assert_called_once_with(self, *args, **kwargs):
        """
        assert that the mock was called exactly once and with the specified
        arguments.
        """
        if not self.call_count == 1:
            msg = ("Expected to be called once. Called %s times." %
                   self.call_count)
            raise AssertionError(msg)
        return self.assert_called_with(*args, **kwargs)
    def _get_child_mock(self, **kw):
        # __mro__[1] is the original class (the per-instance class created
        # in __new__ sits at __mro__[0]); children share the user's class.
        klass = type(self).__mro__[1]
        return klass(**kw)
class callargs(tuple):
    """
    Tuple subclass recording one mock call, either ``(args, kwargs)`` or
    ``(name, args, kwargs)``.
    Empty args or kwargs may be omitted from the comparand, so comparisons
    stay terse::
        callargs('name', (), {}) == ('name',)
        callargs('name', (1,), {}) == ('name', (1,))
        callargs((), {'a': 'b'}) == ({'a': 'b'},)
    """
    def __eq__(self, other):
        if len(self) == 3:
            # Named form: the method names must match first.
            if other[0] != self[0]:
                return False
            mine = self[1:]
            theirs = other[1:]
        else:
            mine = tuple(self)
            theirs = other
        # Normalise the comparand to a full (args, kwargs) pair.
        count = len(theirs)
        if count == 0:
            normalized = ((), {})
        elif count == 1:
            only = theirs[0]
            if isinstance(only, tuple):
                normalized = (only, {})
            else:
                normalized = ((), only)
        else:
            args_part, kwargs_part = theirs
            normalized = (args_part, kwargs_part)
        return tuple(mine) == normalized
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
class _patch(object):
    """Implements one attribute patch: replaces ``attribute`` on ``target``
    with ``new`` (or a fresh Mock) for the duration of a decorated call or
    a with-block, then restores the original."""
    def __init__(self, target, attribute, new, spec, create,
                 mocksignature, spec_set):
        self.target = target
        self.attribute = attribute
        self.new = new
        self.spec = spec
        self.create = create
        self.has_local = False
        self.mocksignature = mocksignature
        self.spec_set = spec_set
    def copy(self):
        # Fresh instance so each decorated test method patches independently.
        return _patch(self.target, self.attribute, self.new, self.spec,
                      self.create, self.mocksignature, self.spec_set)
    def __call__(self, func):
        if isinstance(func, class_types):
            return self.decorate_class(func)
        else:
            return self.decorate_callable(func)
    def decorate_class(self, klass):
        # Apply a copy of this patch to every test* method on the class.
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if attr.startswith("test") and hasattr(attr_value, "__call__"):
                setattr(klass, attr, self.copy()(attr_value))
        return klass
    def decorate_callable(self, func):
        # Stacked @patch decorators share one wrapper; just accumulate.
        if hasattr(func, 'patchings'):
            func.patchings.append(self)
            return func
        @wraps(func)
        def patched(*args, **keywargs):
            # don't use a with here (backwards compatability with 2.5)
            extra_args = []
            for patching in patched.patchings:
                arg = patching.__enter__()
                # Only auto-created mocks are passed to the function.
                if patching.new is DEFAULT:
                    extra_args.append(arg)
            args += tuple(extra_args)
            try:
                return func(*args, **keywargs)
            finally:
                # Unwind in reverse order of application.
                for patching in reversed(getattr(patched, 'patchings', [])):
                    patching.__exit__()
        patched.patchings = [self]
        if hasattr(func, 'func_code'):
            # not in Python 3
            patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno",
                                                    func.func_code.co_firstlineno)
        return patched
    def get_original(self):
        """Return ``(original, local)``: the current value of the attribute
        and whether it lives in target's own __dict__ (vs inherited)."""
        target = self.target
        name = self.attribute
        original = DEFAULT
        local = False
        try:
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, DEFAULT)
        else:
            local = True
        if not self.create and original is DEFAULT:
            raise AttributeError("%s does not have the attribute %r" % (target, name))
        return original, local
    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        original, local = self.get_original()
        if new is DEFAULT:
            # XXXX what if original is DEFAULT - shouldn't use it as a spec
            inherit = False
            if spec_set == True:
                spec_set = original
                if isinstance(spec_set, class_types):
                    inherit = True
            elif spec == True:
                # set spec to the object we are replacing
                spec = original
                if isinstance(spec, class_types):
                    inherit = True
            new = Mock(spec=spec, spec_set=spec_set)
            if inherit:
                # spec is a class: instances of the mock get the spec too.
                new.return_value = Mock(spec=spec, spec_set=spec_set)
        new_attr = new
        if self.mocksignature:
            new_attr = mocksignature(original, new)
        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        return new
    def __exit__(self, *_):
        """Undo the patch."""
        if self.is_local and self.temp_original is not DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        else:
            delattr(self.target, self.attribute)
            if not self.create and not hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)
        del self.temp_original
        del self.is_local
    start = __enter__
    stop = __exit__
def _patch_object(target, attribute, new=DEFAULT, spec=None, create=False,
                  mocksignature=False, spec_set=None):
    """
    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                 mocksignature=False, spec_set=None)
    Patch the member named `attribute` on the object `target` with a mock
    object for the duration of a decorated call or with-block.
    The `new`, `spec`, `create`, `mocksignature` and `spec_set` arguments
    behave exactly as they do for `patch`.
    """
    return _patch(
        target, attribute, new, spec, create, mocksignature, spec_set)
def patch_object(*args, **kwargs):
    """Deprecated alias: use ``patch.object(...)`` instead."""
    warnings.warn('Please use patch.object instead.', DeprecationWarning, 2)
    return _patch_object(*args, **kwargs)
def patch(target, new=DEFAULT, spec=None, create=False,
          mocksignature=False, spec_set=None):
    """
    ``patch`` works as a function decorator, a TestCase class decorator or a
    context manager. While the decorated function (or the with-block) runs,
    ``target`` -- a dotted string like `'PackageName.ModuleName.ClassName'` --
    is replaced with ``new``; the original is restored on exit.
    The target module is imported here, so it must be importable from the
    calling environment.
    When ``new`` is omitted a fresh ``Mock`` is created and passed as an
    extra positional argument to the decorated function. ``spec`` and
    ``spec_set`` are forwarded to that ``Mock``; passing ``spec=True`` or
    ``spec_set=True`` uses the object being replaced as the spec.
    With ``mocksignature=True`` the replacement is a function mocked from the
    signature of the original (``__init__`` for classes, ``__call__`` for
    callable objects).
    By default patching an attribute that does not exist raises; with
    ``create=True`` the attribute is created for the call and deleted
    afterwards -- useful for attributes created at runtime, but dangerous,
    since tests can then pass against APIs that don't exist.
    Applied to a TestCase class, ``patch`` decorates every test method.
    Used in a ``with`` statement, the patch covers the indented block and the
    ``as`` target (if any) is bound to the replacement object.
    See also `patch.dict(...)` and `patch.object(...)`.
    """
    try:
        target, attribute = target.rsplit('.', 1)
    except (TypeError, ValueError):
        raise TypeError("Need a valid target to patch. You supplied: %r" %
                        (target,))
    return _patch(_importer(target), attribute, new, spec, create,
                  mocksignature, spec_set)
class _patch_dict(object):
    """
    Patch a dictionary and restore the dictionary to its original state after
    the test.
    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.
    `in_dict` can also be a string specifying the name of the dictionary, which
    will then be fetched by importing it.
    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of ``(key, value)`` pairs.
    If `clear` is True then the dictionary will be cleared before the new
    values are set.
    """
    def __init__(self, in_dict, values=(), clear=False):
        # A string names the dict; import it lazily.
        if isinstance(in_dict, basestring):
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.clear = clear
        self._original = None
    def __call__(self, f):
        if isinstance(f, class_types):
            return self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            self._patch_dict()
            try:
                return f(*args, **kw)
            finally:
                self._unpatch_dict()
        return _inner
    def decorate_class(self, klass):
        # Wrap every test* method with a fresh copy of this patcher.
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if attr.startswith("test") and hasattr(attr_value, "__call__"):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        return klass
    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()
    def _patch_dict(self):
        """Snapshot the dict, then apply `clear` and the new `values`."""
        values = self.values
        in_dict = self.in_dict
        clear = self.clear
        try:
            original = in_dict.copy()
        except AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            for key in in_dict:
                original[key] = in_dict[key]
        self._original = original
        if clear:
            _clear_dict(in_dict)
        try:
            in_dict.update(values)
        except AttributeError:
            # dict like object with no update method
            for key in values:
                in_dict[key] = values[key]
    def _unpatch_dict(self):
        # Restore the snapshot taken by _patch_dict.
        in_dict = self.in_dict
        original = self._original
        _clear_dict(in_dict)
        try:
            in_dict.update(original)
        except AttributeError:
            for key in original:
                in_dict[key] = original[key]
    def __exit__(self, *args):
        self._unpatch_dict()
        return False
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
# Expose the alternate patchers as attributes of patch.
patch.object = _patch_object
patch.dict = _patch_dict
# Space-separated bare names (no underscores) of the magic methods that
# MagicMock configures by default.
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    "divmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
)
numerics = "add sub mul div truediv floordiv mod lshift rshift and xor or pow "
# In-place (__iadd__ ...) and reflected (__radd__ ...) variants.
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
extra = ''
if inPy3k:
    extra = 'bool next '
else:
    extra = 'unicode long nonzero oct hex '
# __truediv__ and __rtruediv__ not available in Python 3 either
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
    'dir', 'format', 'get', 'set', 'delete', 'reversed',
    'missing', 'reduce', 'reduce_ex', 'getinitargs',
    'getnewargs', 'getstate', 'setstate', 'getformat',
    'setformat', 'repr'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
# Dunder names MagicMock supports by default, built from the tables above.
_magics = set(
    '__%s__' % method for method in
    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
# Every magic name Mock.__setattr__ will accept (default + settable-only).
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__'
'__instancecheck__', '__subclasscheck__',
'__del__'
])
# Magic methods whose default return value depends on the mock instance
# itself; each entry computes the value from object's implementation.
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
    '__unicode__': lambda self: unicode(object.__str__(self)),
}
# Fixed default return values for magic methods on a MagicMock.
_return_values = {
    '__int__': 1,
    '__contains__': False,
    '__len__': 0,
    '__iter__': iter([]),
    '__exit__': False,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': True,
    '__nonzero__': True,
    '__oct__': '1',
    '__hex__': '0x1',
    '__long__': long(1),
    '__index__': 1,
}
def _set_return_value(mock, method, name):
    """Give *method* (the Mock standing in for magic method *name* on *mock*)
    its canonical default return value; do nothing for unknown names."""
    if name in _return_values:
        method.return_value = _return_values[name]
        return
    if name in _calculate_return_value:
        try:
            value = _calculate_return_value[name](mock)
        except AttributeError:
            # Store the error instance as the return value.
            value = AttributeError(name)
        method.return_value = value
class MagicMock(Mock):
    """
    A ``Mock`` subclass with default implementations pre-configured for most
    magic methods, so no manual setup of them is needed.
    When ``spec`` or ``spec_set`` is used, *only* the magic methods present
    in the spec are created.
    Attributes and the return value of a `MagicMock` are also `MagicMocks`.
    """
    def __init__(self, *args, **kw):
        Mock.__init__(self, *args, **kw)
        if self._methods is None:
            wanted = _magics
        else:
            wanted = _magics.intersection(self._methods)
        for magic in wanted:
            # each magic method gets a plain Mock child
            # (could specify parent?)
            stub = Mock()
            setattr(self, magic, stub)
            _set_return_value(self, stub, magic)
| |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Generate the class definition corresponding to a given IFMap DB object.
"""
from type_classgen import TypeClassGenerator, TypeImplGenerator
from ifmap_global import CppTypeMap, GetModuleName
from ifmap_model import IFMapIdentifier, IFMapProperty, IFMapLink, IFMapLinkAttr, MemberInfo, SimpleTypeWrapper
class IFMapGenBase(object):
    """Base for IFMap code generators: provides the DBTable wrapper class
    declaration and implementation common to all generated IFMap types.
    Subclasses supply getName()/getElementName()."""
    def __init__(self):
        pass
    def getName(self):
        # Overridden: the generated C++ class name.
        pass
    def getElementName(self):
        # Overridden: the IF-MAP XML element name.
        pass
    def TableClassDefn(self, file, component):
        """Write the DBTable_<component>_<class> declaration to *file*."""
        cdecl = """
class DBTable_%(impl)s_%(class)s : public IFMap%(impl)sTable {
public:
    DBTable_%(impl)s_%(class)s(DB *db, const std::string &name, DBGraph *graph);
    IFMapObject *AllocObject();
    virtual const char *Typename() const;
    static DBTable *CreateTable(DB *db, const std::string &name, DBGraph *graph);
private:
    DISALLOW_COPY_AND_ASSIGN(DBTable_%(impl)s_%(class)s);
};
""" % {'impl':component, 'class':self.getName()}
        file.write(cdecl)
    def TableClassImpl(self, file, impl):
        """Write the DBTable_<impl>_<class> method definitions to *file*."""
        cdecl = """
DBTable_%(impl)s_%(class)s::DBTable_%(impl)s_%(class)s(DB *db, const std::string &name, DBGraph *graph)
        : IFMap%(impl)sTable(db, name, graph) {
}
const char *DBTable_%(impl)s_%(class)s::Typename() const {
    return "%(elementname)s";
}
IFMapObject *DBTable_%(impl)s_%(class)s::AllocObject() {
    return new %(class)s();
}
DBTable *DBTable_%(impl)s_%(class)s::CreateTable(DB *db, const string &name, DBGraph *graph) {
    DBTable *tbl = new DBTable_%(impl)s_%(class)s(db, name, graph);
    tbl->Init();
    return tbl;
}
""" % {'impl': impl, 'class': self.getName(),
       'elementname': self.getElementName()}
        file.write(cdecl)
class IFMapGenIdentifier(IFMapGenBase):
    """Generates the C++ class (declaration and implementation) for an
    IFMap identifier type from its schema model."""
    def __init__(self, TypeDict, identifier):
        # TypeDict maps type names to complex-type info; identifier is the
        # IFMapIdentifier model object this generator emits code for.
        self._TypeDict = TypeDict
        self._identifier = identifier
    def getName(self):
        # Generated C++ class name.
        return self._identifier.getCppName()
    def getElementName(self):
        # IF-MAP XML element name.
        return self._identifier.getName()
    def ServerClassDefn(self, file):
        """Write the identifier's C++ class declaration to *file*."""
        header = """
class %s : public IFMapIdentifier {
public:
""" % self.getName()
        file.write(header)
        self._GenTypedefs(file)
        public_methods = """
    %(class)s();
    virtual std::string ToString() const;
    virtual void EncodeUpdate(pugi::xml_node *parent) const;
    static bool Decode(const pugi::xml_node &parent, std::string *id_name,
                       %(class)s *ptr);
    virtual boost::crc_32_type::value_type CalculateCrc() const;
""" % {'class': self.getName() }
        file.write(public_methods)
        property_methods = """
    virtual bool SetProperty(const std::string &property, AutogenProperty *data);
    virtual void ClearProperty(const std::string &property);
"""
        file.write(property_methods)
        property_tests = """
    bool IsPropertySet(PropertyId property) const {
        return property_set_.test(property);
    }
    virtual bool empty() const;
"""
        # Property-presence helpers only make sense when properties exist.
        if len(self._identifier.getProperties()) > 0:
            file.write(property_tests)
        # accessors
        for prop in self._identifier.getProperties():
            info = prop.getMemberInfo()
            file.write('    const %s &%s() const { return %s; }\n'
                       % (info.ctypename, prop.getPropertyName(),
                          info.membername))
        file.write('\nprivate:\n')
        self._GenServerAttributes(file)
        footer = """
    DISALLOW_COPY_AND_ASSIGN(%s);
};
""" % (self.getName())
        file.write(footer)
    def _GenTypedefs(self, file):
        """Emit typedefs for complex data types, wrapper structs for simple
        property types, and the PropertyId enum."""
        gen_types = {}
        for ctype in self._identifier.getDataTypes():
            # de-duplicate: emit each typedef once
            if ctype in gen_types:
                continue
            file.write('    typedef autogen::%s %s;\n' % (ctype, ctype))
            gen_types[ctype] = ctype
        properties = self._identifier.getProperties()
        for prop in properties:
            info = prop.getMemberInfo()
            assert info
            if not info.isComplex:
                if info.ctypename in gen_types:
                    continue
                # Simple types get an AutogenProperty wrapper struct.
                cdecl = """
    struct %(typename)s : public AutogenProperty {
        %(ctype)s data;
    };
""" % {'typename': SimpleTypeWrapper(info), 'ctype': info.ctypename}
                file.write(cdecl)
                gen_types[info.ctypename] = info.ctypename
        if len(properties) > 0:
            file.write('    enum PropertyId {\n')
            for prop in properties:
                file.write('        %s,\n' % prop.getPropertyId())
        if len(properties) > 0:
            file.write('        PROPERTY_ID_COUNT\n    };\n')
    def _GenServerPrimaryKey(self, file):
        """ Generate the data members that define the object primary key
        Identifier _key_members contains (string, name) which is already
        present in IFMapIdentifier.
        """
        pass
    def _GenServerAttributes(self, file):
        """ Generate the data members that define the object properties
        """
        for member in self._identifier.getDataMembers():
            file.write('    %s %s;\n' % (member.ctypename, member.membername))
    def ServerClassImpl(self, file):
        """Write all method definitions for the identifier class."""
        self._GenConstructor(file)
        self._GenSetProperty(file)
        self._GenClearProperty(file)
        self._GenToString(file)
        self._GenEmpty(file);
        self._GenProcessPropertyDiff(file)
    def _GenConstructor(self, file):
        """Emit the constructor: sizes the property bitset and default
        initializes each data member by type."""
        file.write('%s::%s() ' %
                   (self.getName(), self.getName()))
        if len(self._identifier.getProperties()) > 0:
            file.write(': IFMapIdentifier(PROPERTY_ID_COUNT) ')
        file.write('{\n')
        # TODO: key members
        for member in self._identifier.getDataMembers():
            if not member.isComplex:
                # simple members: clear strings, zero/false the rest
                if member.ctypename == 'std::string':
                    file.write('    %s.clear();\n' % member.membername)
                elif member.ctypename == 'bool':
                    file.write('    %s = false;\n' % member.membername)
                else:
                    file.write('    %s = 0;\n' % member.membername)
            elif member.isSequence:
                file.write('    %s.clear();\n' % member.membername)
            else:
                # complex members expose an autogen Clear()
                file.write('    %s.Clear();\n' % member.membername)
        file.write('}\n')
    def _GenSetProperty(self, file):
        """Emit SetProperty: dispatch on the property name, store the value
        and set the corresponding bit; returns whether anything changed."""
        header = """
bool %s::SetProperty(const string &property, AutogenProperty *data) {
""" % self.getName()
        file.write(header)
        test_else = ''
        for prop in self._identifier.getProperties():
            membername = prop.getPropertyName() + '_'
            file.write('    %sif (property == "%s") {\n' %
                       (test_else, prop.getName()))
            test_else = 'else '
            # TODO: compare with previous value and return false if unchanged
            info = prop.getMemberInfo()
            assert info
            indent = ' ' * 8
            if info.isSequence:
                # sequences: swap the vector out of the decoded data
                file.write(indent +
                    'autogen::%(type)s *v = static_cast<autogen::%(type)s *>(data);\n' %
                    {'type': prop._xelement.getType()})
                cinfo = self._TypeDict[prop._xelement.getType()]
                file.write(indent + '%s.swap(v->%s);\n' %
                           (membername, cinfo._data_members[0].membername))
            elif info.isComplex:
                file.write(indent + '%s.Copy(*static_cast<const %s *>(data));\n' % (membername, info.ctypename))
            else:
                # simple types arrive inside a SimpleTypeWrapper struct
                file.write(indent +
                           '%s = static_cast<const %s *>(data)->data;\n' %
                           (membername, SimpleTypeWrapper(info)))
            cdecl = """
        property_set_.set(%s);
    }
""" % prop.getPropertyId()
            file.write(cdecl)
        retval = """
    if (property_set_.is_subset_of(old_property_set_)) {
        return false;
    } else {
        return true;
    }
}
"""
        file.write(retval)
    def _GenClearProperty(self, file):
        """Emit ClearProperty: reset the named property's bit."""
        cdecl = """
void %s::ClearProperty(const string &property) {
""" % self.getName()
        file.write(cdecl)
        elsestmt = ''
        for prop in self._identifier.getProperties():
            cdecl = """
    %sif (property == "%s") {
        property_set_.reset(%s);
    }
""" % (elsestmt, prop.getName(), prop.getPropertyId())
            file.write(cdecl)
            elsestmt = 'else '
        file.write('}\n')
    def _GenToString(self, file):
        """ Generate the method ToString.
        """
        # NOTE: currently returns an empty string; 'elementname' is unused
        # by this template.
        fn = """
string %(typename)s::ToString() const {
    string repr;
    return repr;
}
""" % {'typename': self.getName(), 'elementname' : self.getElementName() }
        file.write(fn)
    def _GenEmpty(self, file):
        """Emit empty(): true when no property bit is set. Only generated
        when the identifier has properties."""
        if len(self._identifier.getProperties()) > 0:
            cdecl = """
bool %s::empty() const {
    return property_set_.none();
}
""" % self.getName()
            file.write(cdecl)
    def _GenProcessPropertyDiff(self, file):
        """Emit CalculateCrc(): folds every property value into a boost
        crc_32 checksum (0xffffffff when there are no properties)."""
        if len(self._identifier.getProperties()) > 0:
            header = """
boost::crc_32_type::value_type %s::CalculateCrc() const {
""" % self.getName()
            file.write(header)
            indent_l1 = ' ' * 4
            indent_l11 = ' ' * 9
            indent_l2 = ' ' * 8
            file.write(indent_l1 + 'boost::crc_32_type crc;\n')
            for prop in self._identifier.getProperties():
                membername = prop.getPropertyName() + '_'
                info = prop.getMemberInfo()
                assert info
                if info.isSequence:
                    # iterate the vector, hashing each element by kind
                    file.write(indent_l1 + 'for (%s::const_iterator iter = \n'
                               %(info.ctypename))
                    file.write(indent_l11 + '%s.begin();\n' %(membername))
                    file.write(indent_l11 + 'iter != %s.end(); ++iter) {\n'
                               %(membername))
                    # code inside the for loop
                    sequencetype = info.sequenceType
                    if sequencetype == 'int':
                        file.write(indent_l2 +
                            'const %s *obj = static_cast<const %s *>(iter.operator->());\n'
                            %(sequencetype, sequencetype))
                        file.write(indent_l2 +
                                   'crc.process_bytes(obj, sizeof(*obj));\n')
                    elif sequencetype == 'std::string':
                        file.write(indent_l2 + 'const std::string &str = *iter;\n');
                        file.write(indent_l2 +
                                   'crc.process_bytes(str.c_str(), str.size());\n')
                    elif info.isComplex:
                        # vector of non-basic type
                        file.write(indent_l2 +
                            'const %s *obj = iter.operator->();\n' %sequencetype)
                        file.write(indent_l2 + 'obj->CalculateCrc(&crc);\n')
                    else:
                        # assert() asserts an empty tuple and thus always
                        # fails: deliberate "unhandled sequence type" guard
                        assert()
                    file.write(indent_l1 + '}\n')
                elif info.isComplex:
                    file.write(indent_l1 + '%s.CalculateCrc(&crc);\n'
                               % (membername))
                else:
                    cpptype = info.ctypename
                    if (cpptype == 'int' or cpptype == 'bool' or
                        cpptype == 'uint64_t'):
                        file.write(indent_l1 +
                                   'crc.process_bytes(&%s, sizeof(%s));\n'
                                   %(membername, membername));
                    elif (cpptype == 'std::string'):
                        file.write(indent_l1 +
                                   'crc.process_bytes(%s.c_str(), %s.size());\n'
                                   %(membername, membername));
                    else:
                        # always-failing guard, as above
                        assert()
            file.write(indent_l1 + 'return crc.checksum();\n');
            retval = "}\n\n"
            file.write(retval)
        else:
            function = """
boost::crc_32_type::value_type %s::CalculateCrc() const {
    return 0xffffffff;
}\n
""" % self.getName()
            file.write(function)
class IFMapGenLinkAttr(IFMapGenBase):
    """Code generator for an IFMap link-attribute metadata element.

    Emits the C++ class (declaration and method definitions) that stores
    the attribute data carried on a link between two identifiers.
    """

    def __init__(self, TypeDict, meta):
        # NOTE(review): callers construct this with the enclosing generator
        # object as the TypeDict argument (see IFMapClassGenerator.Generate);
        # _TypeDict is not used in this class -- confirm intent.
        self._TypeDict = TypeDict
        self._meta = meta

    def getName(self):
        """Return the generated C++ class name."""
        return self._meta.getCppName()

    def getElementName(self):
        """Return the XML element name of the metadata."""
        return self._meta.getName()

    def ServerClassDefn(self, file):
        """Emit the C++ class declaration for this link attribute."""
        ctypename = self._meta.getCTypename()
        cdef = """
class %s : public IFMapLinkAttr {
public:
""" % self.getName()
        file.write(cdef)
        if self._meta.getCType():
            # Complex attribute type: alias the autogen struct into the class.
            file.write(' typedef autogen::%s %s;\n' %
                       (ctypename, ctypename))
        else:
            # Simple type: wrap the raw value in an AutogenProperty struct.
            cdef = """
struct %sData : public AutogenProperty {
%s data;
};
""" % (self.getName(), ctypename)
            file.write(cdef)
        cdef = """
%(class)s();
virtual std::string ToString() const;
virtual void EncodeUpdate(pugi::xml_node *parent) const;
static bool Decode(const pugi::xml_node &parent, std::string *id_name,
%(class)s *ptr);
virtual boost::crc_32_type::value_type CalculateCrc() const;
virtual bool SetData(const AutogenProperty *data);
const %(datatype)s &data() const { return data_; }
""" % {'class':self.getName(), 'datatype': ctypename}
        file.write(cdef)
        if not self._meta.getCType():
            # Simple types additionally need a metadata parser entry point.
            cdef = """
static bool ParseMetadata(const pugi::xml_node &parent,
std::auto_ptr<AutogenProperty> *resultp);
"""
            file.write(cdef)
        cdef = """
private:
%s data_;
DISALLOW_COPY_AND_ASSIGN(%s);
};
""" % (ctypename, self.getName())
        file.write(cdef)

    def ServerClassImpl(self, file):
        """Emit the C++ method definitions for this link attribute."""
        self._GenConstructor(file)
        self._GenToString(file)
        self._GenSetData(file)
        self._GenProcessPropertyDiff(file)

    def _GenConstructor(self, file):
        """Emit the constructor, which clears the data member."""
        # '%c' formats to 'C' or 'c', producing data_.Clear() for autogen
        # complex types and data_.clear() for plain wrapped values.
        if self._meta.getCType():
            ccase = 'C'
        else:
            ccase = 'c'
        ctor = """
%s::%s() {
data_.%clear();
}
""" % (self.getName(), self.getName(), ccase)
        file.write(ctor)

    def _GenToString(self, file):
        """Emit ToString(); currently returns an empty string."""
        fn = """
string %(typename)s::ToString() const {
string repr;
return repr;
}
""" % {'typename': self.getName(), 'elementname': self._meta.getName()}
        file.write(fn)

    def _GenSetData(self, file):
        """Emit SetData(), which copies the AutogenProperty payload into data_."""
        cdecl = """
bool %s::SetData(const AutogenProperty *data) {
if (data == NULL) {
return false;
}
""" % self.getName()
        file.write(cdecl)
        ctypename = self._meta.getCTypename()
        if self._meta.getCType():
            cdecl = """
data_ = *static_cast<const %s *>(data);
""" % ctypename
        else:
            cdecl = """
const %sData *var = static_cast<const %sData *>(data);
data_ = var->data;
""" % (self.getName(), self.getName())
        file.write(cdecl)
        cdecl = """
return true;
}
"""
        file.write(cdecl)

    def _GenProcessPropertyDiff(self, file):
        """Emit CalculateCrc() for the link attribute."""
        header = """
boost::crc_32_type::value_type %s::CalculateCrc() const { """ % self.getName()
        file.write(header)
        ctypename = self._meta.getCTypename()
        if self._meta.getCType():
            cdecl = """
boost::crc_32_type crc;
data_.CalculateCrc(&crc);
return crc.checksum();
}
"""
        else:
            # NOTE(review): for non-CType attributes the emitted function body
            # is empty -- the generated C++ has no return statement; confirm
            # this is intentional.
            cdecl = """
}
"""
        file.write(cdecl)
class IFMapClassGenerator(object):
    """Generates the autogenerated ``<module>_types.h`` header.

    Emits the dependent C++ types for properties, the class declarations
    for identifiers and link-attribute metadata, the graph-filter helper
    struct, and the per-module init/parser entry-point prototypes.
    """

    def __init__(self, cTypeDict):
        self._cTypeDict = cTypeDict
        self._generated_types = { }
        self._TypeGenerator = TypeClassGenerator(cTypeDict)
        # Guards against emitting the same simple-property wrapper twice.
        self._generated_props = { }

    def _GenerateProperty(self, file, prop):
        """Emit the C++ type that backs a complex property."""
        ctype = prop.getCType()
        self._TypeGenerator.GenerateType(file, ctype)

    def _GenerateSimpleProperty(self, file, prop):
        """Emit a wrapper struct for a simple-typed property (deduplicated
        by generated class name)."""
        name = prop.getCppName() + 'Type'
        if name in self._generated_props:
            return
        self._generated_props[name] = name
        info = prop.getMemberInfo()
        cdecl = """
class %(class)s {
public:
struct %(typename)s : public AutogenProperty {
%(ctype)s data;
};
};
""" % {'class': name,
       'typename': SimpleTypeWrapper(info),
       'ctype': info.ctypename}
        file.write(cdecl)

    def Generate(self, file, IdentifierDict, MetaDict):
        """Write the complete _types.h content for the module to *file*."""
        module_name = GetModuleName(file, '_types.h')
        #include <boost/cstdint.hpp> // for boost::uint16_t
        header = """
// autogenerated file --- DO NOT EDIT ---
#ifndef __SCHEMA__%(modname)s_TYPES_H__
#define __SCHEMA__%(modname)s_TYPES_H__
#include <vector>
#include <boost/dynamic_bitset.hpp>
#include <boost/crc.hpp> // for boost::crc_32_type
namespace pugi {
class xml_node;
} // namespace pugi
#include "ifmap/autogen.h"
#include "ifmap/ifmap_object.h"
class DB;
class DBGraph;
class IFMapServerParser;
class IFMapAgentParser;
namespace autogen {
""" % {'modname': module_name.upper()}
        file.write(header)
        for idn in IdentifierDict.values():
            # generate all dependent types
            properties = idn.getProperties()
            for prop in properties:
                if prop._xelement.isComplex():
                    self._GenerateProperty(file, prop)
                elif prop.getParent() == 'all':
                    self._GenerateSimpleProperty(file, prop)
        for meta in MetaDict.values():
            if type(meta) is IFMapLinkAttr:
                ctype = meta.getCType()
                if ctype:
                    self._TypeGenerator.GenerateType(file, ctype)
        for idn in IdentifierDict.values():
            if not idn._xelement:
                # cross-ref'd id from another file
                continue
            generator = IFMapGenIdentifier(self._cTypeDict, idn)
            generator.ServerClassDefn(file)
        for meta in MetaDict.values():
            if type(meta) is IFMapLinkAttr:
                # NOTE(review): `self` is passed as the TypeDict argument of
                # IFMapGenLinkAttr -- confirm intent.
                generator = IFMapGenLinkAttr(self, meta)
                generator.ServerClassDefn(file)
        file.write('} // namespace autogen\n')
        # Graph-filter helper struct and module entry-point prototypes.
        file.write('\nstruct %s_GraphFilterInfo {\n' % module_name)
        file.write(' %s_GraphFilterInfo(std::string left, std::string right,\n' % module_name)
        file.write(' std::string meta, bool linkattr) :\n')
        file.write(' left_(left), right_(right), metadata_(meta), linkattr_(linkattr) { }\n')
        file.write(' std::string left_;\n')
        file.write(' std::string right_;\n')
        file.write(' std::string metadata_;\n')
        file.write(' bool linkattr_;\n')
        file.write('};\n')
        file.write('typedef std::vector<%s_GraphFilterInfo> %s_FilterInfo;\n\n' % (module_name, module_name))
        file.write('extern void %s_Server_ModuleInit(DB *, DBGraph *);\n'
                   % module_name)
        file.write('extern void %s_Server_GenerateGraphFilter(%s_FilterInfo *);\n'
                   % (module_name, module_name))
        file.write('extern void %s_Agent_ModuleInit(DB *, DBGraph *);\n'
                   % module_name)
        file.write('extern void %s_Agent_ParserInit(DB *, IFMapAgentParser *);\n' % module_name)
        file.write('extern void %s_ParserInit(IFMapServerParser *);\n' % module_name)
        file.write('#endif // __SCHEMA__%s_TYPES_H__\n' %
                   module_name.upper())
class IFMapImplGenerator(object):
    """Generates the autogenerated ``<module>_types.cc`` implementation and
    the per-component (Server/Client/Agent) module-init sources."""

    def __init__(self, cTypeDict):
        self._cTypeDict = cTypeDict
        self._TypeImplGenerator = TypeImplGenerator(None)
        # (table_name, cpp_class) pairs collected while emitting classes;
        # consumed later by _GenerateComponent to register the DB tables.
        self._DBTableList = []
        self._module_name = ''

    def Generate(self, file, hdrname, IdentifierDict, MetaDict):
        """Emit type implementations and server classes for all identifiers
        and link-attribute metadata into the _types.cc file."""
        header = """
// autogenerated file --- DO NOT EDIT ---
#include "%s"
#include "ifmap/autogen.h"
#include <pugixml/pugixml.hpp>
using namespace std;
namespace autogen {
""" % hdrname
        file.write(header)
        for ctype in self._cTypeDict.values():
            self._TypeImplGenerator.GenerateType(file, ctype)
        self._module_name = GetModuleName(file, '_types.cc')
        for idn in IdentifierDict.values():
            if not idn._xelement:
                # cross-ref'd id from another file
                continue
            generator = IFMapGenIdentifier(self._cTypeDict, idn)
            generator.ServerClassImpl(file)
            tbl = (idn.getCIdentifierName(), idn.getCppName())
            self._DBTableList.append(tbl)
        for meta in MetaDict.values():
            if type(meta) is IFMapLinkAttr:
                generator = IFMapGenLinkAttr(self, meta)
                generator.ServerClassImpl(file)
                tbl = (meta.getCIdentifierName(), meta.getCppName())
                self._DBTableList.append(tbl)
        file.write('} // namespace autogen\n')
    # end

    def _GenerateGraphFilter(self, file, hdrname, component, IdentifierDict,
                             MetaDict):
        """Emit <module>_<component>_GenerateGraphFilter(): one entry per
        identifier link (left node, right node, metadata name, and whether
        the link carries attribute data)."""
        cdecl = """
void %(module)s_%(comp)s_GenerateGraphFilter(%(module)s_FilterInfo *filter_info) {
""" % { 'module': self._module_name, 'comp': component}
        file.write(cdecl)
        for idn in IdentifierDict.values():
            links = idn.getLinksInfo()
            for link_info in links:
                to_ident = idn.getLinkTo(link_info)
                link_meta = idn.getLink(link_info)
                # Links whose metadata has an XSD type carry attribute data.
                if link_meta.getXsdType():
                    linkattr = "true"
                else:
                    linkattr = "false"
                fmt = ' filter_info->push_back(%s_GraphFilterInfo("%s", "%s", "%s", %s));\n'
                file.write(fmt % (self._module_name, idn.getName(),
                                  to_ident.getName(), link_meta.getName(),
                                  linkattr))
        file.write('}\n')
    # end

    def _GenerateComponent(self, file, hdrname, component,
                           IdentifierDict, MetaDict):
        """Shared body of GenerateServer/Client/Agent: emits the table
        classes, the graph filter, and <module>_<component>_ModuleInit()."""
        header = """
// autogenerated file --- DO NOT EDIT ---
#include "%(hdrname)s"
#include <boost/bind.hpp>
#include "db/db.h"
#include "ifmap/ifmap_%(comp)s_table.h"
#include "ifmap/ifmap_%(comp)s_parser.h"
using namespace std;
namespace autogen {
""" % {'hdrname': hdrname, 'comp': component.lower()}
        file.write(header)
        for idn in IdentifierDict.values():
            if not idn._xelement:
                # cross-ref'd id from another file
                continue
            generator = IFMapGenIdentifier(self._cTypeDict, idn)
            generator.TableClassDefn(file, component)
            generator.TableClassImpl(file, component)
        for meta in MetaDict.values():
            if type(meta) is IFMapLinkAttr:
                generator = IFMapGenLinkAttr(self, meta)
                generator.TableClassDefn(file, component)
                generator.TableClassImpl(file, component)
        file.write('} // namespace autogen\n')
        self._GenerateGraphFilter(file, hdrname, component, IdentifierDict,
                                  MetaDict)
        cdecl = """
void %(module)s_%(comp)s_ModuleInit(DB *db, DBGraph *graph) {
DBTable *table;
""" % { 'module': self._module_name, 'comp': component}
        file.write(cdecl)
        # Register one DB table per identifier / link-attribute class
        # collected by Generate().
        for tbl in self._DBTableList:
            cdecl = """
table = autogen::DBTable_%(impl)s_%(class)s::CreateTable(
db, "__ifmap__.%(tablename)s.0", graph);
db->AddTable(table);
""" % {'impl': component, 'tablename': tbl[0], 'class':tbl[1]}
            file.write(cdecl)
        file.write('}\n')

    def GenerateServer(self, file, hdrname, IdentifierDict, MetaDict):
        """Emit the Server component source."""
        self._GenerateComponent(file, hdrname, 'Server',
                                IdentifierDict, MetaDict)

    def GenerateClient(self, file, hdrname, IdentifierDict, MetaDict):
        """Emit the Client component source."""
        self._GenerateComponent(file, hdrname, 'Client',
                                IdentifierDict, MetaDict)

    def GenerateAgent(self, file, hdrname, IdentifierDict, MetaDict):
        """Emit the Agent component source."""
        self._GenerateComponent(file, hdrname, 'Agent',
                                IdentifierDict, MetaDict)
| |
"""
Plotting vectors
================
Plotting vectors is handled by :meth:`pygmt.Figure.plot`.
"""
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import pygmt
########################################################################################
# Plot Cartesian Vectors
# ----------------------
#
# Create a simple Cartesian vector using a starting point through
# ``x``, ``y``, and ``direction`` parameters.
# On the shown figure, the plot is projected on a 10cm X 10cm region,
# which is specified by the ``projection`` parameter.
# The direction is specified
# by a list of two 1d arrays structured as ``[[angle_in_degrees], [length]]``.
# The angle is measured in degrees and moves counter-clockwise from the
# horizontal.
# The length of the vector uses centimeters by default but
# could be changed using :meth:`pygmt.config`
# (Check the next examples for unit changes).
#
# Notice that the ``v`` in the ``style`` parameter stands for
# vector; it distinguishes it from regular lines and allows for
# different customization. ``0c`` is used to specify the size
# of the arrow head which explains why there is no arrow on either
# side of the vector.

fig = pygmt.Figure()
fig.plot(
    region=[0, 10, 0, 10],
    projection="X10c/10c",
    frame="ag",
    x=2,
    y=8,
    style="v0c",
    direction=[[-45], [6]],
)
fig.show()

########################################################################################
# In this example, we apply the same concept shown previously to plot multiple
# vectors. Notice that instead of passing int/float to ``x`` and ``y``, a list
# of all x and y coordinates will be passed. Similarly, the length of the
# direction list will increase accordingly.
#
# Additionally, we change the style of the vector to include a red
# arrow head at the end (**+e**) of the vector and increase the
# thickness (``pen="2p"``) of the vector stem. A list of different
# styling attributes can be found in
# :doc:`Vector heads and tails </gallery/lines/vector_heads_tails>`.

fig = pygmt.Figure()
fig.plot(
    region=[0, 10, 0, 10],
    projection="X10c/10c",
    frame="ag",
    x=[2, 4],
    y=[8, 1],
    style="v0.6c+e",
    direction=[[-45, 23], [6, 3]],
    pen="2p",
    color="red3",
)
fig.show()

########################################################################################
# The default unit of vector length is centimeters,
# however, this can be changed to inches or points. Note that, in PyGMT,
# one point is defined as 1/72 inch.
#
# In this example, the graphed region is 5in X 5in, but
# the length of the first vector is still graphed in centimeters.
# Using ``pygmt.config(PROJ_LENGTH_UNIT="i")``, the default unit
# can be changed to inches in the second plotted vector.

fig = pygmt.Figure()
# Vector 1 with default unit as cm
fig.plot(
    region=[0, 10, 0, 10],
    projection="X5i/5i",
    frame="ag",
    x=2,
    y=8,
    style="v1c+e",
    direction=[[0], [3]],
    pen="2p",
    color="red3",
)
# Vector 2 after changing default unit to inch
with pygmt.config(PROJ_LENGTH_UNIT="i"):
    fig.plot(
        x=2,
        y=7,
        direction=[[0], [3]],
        style="v1c+e",
        pen="2p",
        color="red3",
    )
fig.show()
########################################################################################
# Vectors can also be plotted by including all the information
# about a vector in a single list. However, this requires creating
# a 2D list or numpy array containing all vectors.
# Each vector list contains the information structured as:
# ``[x_start, y_start, direction_degrees, length]``.
#
# If this approach is chosen, the ``data`` parameter must be
# used instead of ``x``, ``y`` and ``direction``.

# Create a list of lists that include each vector information
vectors = [[2, 3, 45, 4]]

fig = pygmt.Figure()
fig.plot(
    region=[0, 10, 0, 10],
    projection="X10c/10c",
    frame="ag",
    data=vectors,
    style="v0.6c+e",
    pen="2p",
    color="red3",
)
fig.show()

########################################################################################
# Using the functionality mentioned in the previous example,
# multiple vectors can be plotted at the same time. Another
# vector could be simply added to the 2D list or numpy
# array object and passed using the ``data`` parameter.

# Vector specifications structured as: [x_start, y_start, direction_degrees, length]
vector_1 = [2, 3, 45, 4]
vector_2 = [7.5, 8.3, -120.5, 7.2]
# Create a list of lists that include each vector information
vectors = [vector_1, vector_2]
# Vectors structure: [[2, 3, 45, 4], [7.5, 8.3, -120.5, 7.2]]

fig = pygmt.Figure()
fig.plot(
    region=[0, 10, 0, 10],
    projection="X10c/10c",
    frame="ag",
    data=vectors,
    style="v0.6c+e",
    pen="2p",
    color="red3",
)
fig.show()

########################################################################################
# In this example, cartesian vectors are plotted over a Mercator
# projection of the continental US. The x values represent the
# longitude and y values represent the latitude where the vector starts.
#
# This example also shows some of the styles a vector supports.
# The beginning point of the vector (**+b**)
# should take the shape of a circle (**c**). Similarly, the end
# point of the vector (**+e**) should have an arrow shape (**a**)
# (to draw a plain arrow, use **A** instead). Lastly, the **+a**
# specifies the angle of the vector head apex (30 degrees in
# this example).

# Create a plot with coast, Mercator projection (M) over the continental US
fig = pygmt.Figure()
fig.coast(
    region=[-127, -64, 24, 53],
    projection="M10c",
    frame="ag",
    borders=1,
    shorelines="0.25p,black",
    area_thresh=4000,
    land="grey",
    water="lightblue",
)

# Plot a vector using the x, y, direction parameters
style = "v0.4c+bc+ea+a30"
fig.plot(
    x=-110,
    y=40,
    style=style,
    direction=[[-25], [3]],
    pen="1p",
    color="red3",
)

# Vector specifications structured as: [x_start, y_start, direction_degrees, length]
vector_2 = [-82, 40.5, 138, 2.5]
vector_3 = [-71.2, 45, -115.7, 4]
# Create a list of lists that include each vector information
vectors = [vector_2, vector_3]

# Plot vectors using the data parameter.
fig.plot(
    data=vectors,
    style=style,
    pen="1p",
    color="yellow",
)
fig.show()
########################################################################################
# Another example of plotting cartesian vectors over a coast plot. This time
# a Transverse Mercator projection is used. Additionally, :func:`numpy.linspace`
# is used to create 5 vectors with equal stops.

x = np.linspace(36, 42, 5)  # x values = [36. 37.5 39. 40.5 42.]
y = np.linspace(39, 39, 5)  # y values = [39. 39. 39. 39. 39.]
direction = np.linspace(-90, -90, 5)  # direction values = [-90. -90. -90. -90. -90.]
length = np.linspace(1.5, 1.5, 5)  # length values = [1.5 1.5 1.5 1.5 1.5]

# Create a plot with coast, Transverse Mercator projection (T) centered on 35E
fig = pygmt.Figure()
fig.coast(
    region=[20, 50, 30, 45],
    projection="T35/10c",
    frame=True,
    borders=1,
    shorelines="0.25p,black",
    area_thresh=4000,
    land="lightbrown",
    water="lightblue",
)
fig.plot(
    x=x,
    y=y,
    style="v0.4c+ea+bc",
    direction=[direction, length],
    pen="0.6p",
    color="red3",
)
fig.show()
########################################################################################
# Plot Circular Vectors
# ---------------------
#
# When plotting circular vectors, all of the information for a single vector is
# to be stored in a list. Each circular vector list is structured as:
# ``[x_start, y_start, radius, degree_start, degree_stop]``. The first two values in
# the vector list represent the origin of the circle that will be plotted.
# The next value is the radius which is represented on the plot in centimeters.
#
# The last two values in the vector list represent the degree at which the plot
# will start and stop. These values are measured counter-clockwise from the horizontal
# axis. In this example, the result shown is the left half of a circle as the
# plot starts at 90 degrees and goes until 270. Notice that the ``m`` in the
# ``style`` parameter stands for circular vectors.

fig = pygmt.Figure()
circular_vector_1 = [0, 0, 2, 90, 270]
data = [circular_vector_1]
fig.plot(
    region=[-5, 5, -5, 5],
    projection="X10c",
    frame="ag",
    data=data,
    style="m0.5c+ea",
    pen="2p",
    color="red3",
)

# Another example using np.array()
circular_vector_2 = [0, 0, 4, -90, 90]
data = np.array([circular_vector_2])
fig.plot(
    data=data,
    style="m0.5c+ea",
    pen="2p",
    color="red3",
)
fig.show()

########################################################################################
# When plotting multiple circular vectors, a two dimensional array or numpy array
# object should be passed as the ``data`` parameter. In this example, :func:`numpy.column_stack`
# is used to generate this two dimensional array. Other numpy objects are used to
# generate linear values for the ``radius`` parameter and evenly spaced values for
# the ``degree_stop`` parameter discussed in the previous example. This is
# the reason in which each vector has
# a different appearance on the projection.

vector_num = 5
radius = 3 - (0.5 * np.arange(0, vector_num))
startdir = np.full(vector_num, 90)
stopdir = 180 + (50 * np.arange(0, vector_num))
data = np.column_stack(
    [np.full(vector_num, 0), np.full(vector_num, 0), radius, startdir, stopdir]
)

fig = pygmt.Figure()
fig.plot(
    region=[-5, 5, -5, 5],
    projection="X10c",
    frame="ag",
    data=data,
    style="m0.5c+ea",
    pen="2p",
    color="red3",
)
fig.show()

########################################################################################
# Much like when plotting Cartesian vectors, the default unit used is centimeters.
# When this is changed to inches, the size of the plot appears larger when the
# projection units do not change. Below is an example of two circular vectors.
# One is plotted using the default unit, and the second is plotted using inches.
# Despite using the same list to plot the vectors, a different measurement unit
# causes one to be larger than the other.

circular_vector = [6, 5, 1, 90, 270]

fig = pygmt.Figure()
fig.plot(
    region=[0, 10, 0, 10],
    projection="X10c",
    frame="ag",
    data=[circular_vector],
    style="m0.5c+ea",
    pen="2p",
    color="red3",
)
with pygmt.config(PROJ_LENGTH_UNIT="i"):
    fig.plot(
        data=[circular_vector],
        style="m0.5c+ea",
        pen="2p",
        color="red3",
    )
fig.show()
########################################################################################
# Plot Geographic Vectors
# -----------------------
#
# On this map,
# ``point_1`` and ``point_2`` are coordinate pairs used to set the
# start and end points of the geographic vector.
# The geographic vector is going from Idaho to
# Chicago. To style geographic
# vectors, use ``=`` at the beginning of the ``style`` parameter.
# Other styling features such as vector stem thickness and head color
# can be passed into the ``pen`` and ``color`` parameters.
#
# Note that the **+s** is added to use a start point and an end point
# to represent the vector instead of input angle and length.

point_1 = [-114.7420, 44.0682]
point_2 = [-87.6298, 41.8781]
data = np.array([point_1 + point_2])

fig = pygmt.Figure()
fig.coast(
    region=[-127, -64, 24, 53],
    projection="M10c",
    frame=True,
    borders=1,
    shorelines="0.25p,black",
    area_thresh=4000,
)
fig.plot(
    data=data,
    style="=0.5c+ea+s",
    pen="2p",
    color="red3",
)
fig.show()

########################################################################################
# Using the same technique shown in the previous example,
# multiple vectors can be plotted in a chain where the endpoint
# of one is the starting point of another. This can be done
# by adding the coordinate lists together to create this structure:
# ``[[start_longitude, start_latitude, end_longitude, end_latitude]]``.
# Each list within the 2D list contains the start and end information
# for each vector.

# Coordinate pairs for all the locations used
ME = [-69.4455, 45.2538]
CHI = [-87.6298, 41.8781]
SEA = [-122.3321, 47.6062]
NO = [-90.0715, 29.9511]
KC = [-94.5786, 39.0997]
CA = [-119.4179, 36.7783]

# Add array to piece together the vectors
data = [ME + CHI, CHI + SEA, SEA + KC, KC + NO, NO + CA]

fig = pygmt.Figure()
fig.coast(
    region=[-127, -64, 24, 53],
    projection="M10c",
    frame=True,
    borders=1,
    shorelines="0.25p,black",
    area_thresh=4000,
)
fig.plot(
    data=data,
    style="=0.5c+ea+s",
    pen="2p",
    color="red3",
)
fig.show()

################################################################################
# This example plots vectors over a Mercator projection. The starting points are
# located at SA which is South Africa and going to four different
# locations.

SA = [22.9375, -30.5595]
EUR = [15.2551, 54.5260]
ME = [-69.4455, 45.2538]
AS = [100.6197, 34.0479]
NM = [-105.8701, 34.5199]
data = np.array([SA + EUR, SA + ME, SA + AS, SA + NM])

fig = pygmt.Figure()
fig.coast(
    region=[-180, 180, -80, 80],
    projection="M0/0/12c",
    frame="afg",
    land="lightbrown",
    water="lightblue",
)
fig.plot(
    data=data,
    style="=0.5c+ea+s",
    pen="2p",
    color="red3",
)
fig.show()
| |
import numpy as np
import time
import utils
from scipy.special import gammaln, psi
#epsilon
eps = 1e-100
class hdsp:
"""
hierarchical dirichlet scaling process (hdsp)
"""
    def __init__(self, num_topics, num_words, num_labels, dir_prior=0.5):
        """Initialize the variational parameters of the model.

        num_topics: truncation level K of the stick-breaking process
        num_words: vocabulary size N
        num_labels: number of labels J
        dir_prior: symmetric Dirichlet prior on topic-word distributions
        """
        self.K = num_topics  # number of topics
        self.N = num_words  # vocabulary size
        self.J = num_labels  # num labels
        self.V = np.zeros(self.K)
        # for even p: choose stick-breaking fractions V so that the initial
        # topic weights p_k are uniform (1/K)
        self.V[0] = 1./self.K
        for k in xrange(1, self.K-1):
            self.V[k] = (1./self.K)/np.prod(1.-self.V[:k])
        self.V[self.K-1] = 1.
        self.p = self.getP(self.V)
        self.alpha = 5.
        self.alpha_1 = 1  # prior for alpha (gamma shape)
        self.alpha_2 = 1e-3  # prior for alpha (gamma rate)
        self.beta = 5.
        self.beta_1 = 1
        self.beta_2 = 1e-3
        self.dir_prior = dir_prior
        # variational Dirichlet parameters of the topic-word distributions (N x K)
        self.gamma = np.random.gamma(shape=1, scale=1, size=[self.N, self.K]) + self.dir_prior
        self.c_a_max_step = 10  # coordinate-ascent iterations for update_V
        self.is_plot = False
        self.is_verbose = True
        self.is_compute_lb = True
        self.ll_diff_frac = 1e-3  # relative lower-bound change used as convergence test
def run_variational_EM(self, max_iter, corpus, directory=None, logger=None):
if self.is_plot:
import matplotlib.pyplot as plt
plt.ion()
lbs = list()
curr = time.clock()
for iter in xrange(max_iter):
lb = 0
lb += self.update_C(corpus)
lb += self.update_Z(corpus)
lb += self.newton_W(corpus)
lb += self.update_V(corpus)
self.update_alpha()
self.update_beta(corpus)
if corpus.heldout_ids != None:
perp = self.heldout_perplexity(corpus)
if self.is_verbose:
print '%d iter, %d topics, %.2f time, %.2f lower_bound %.3f perplexity' % (iter, self.K, time.clock()-curr, lb, perp)
if logger:
logger.write('%d,%d,%f,%f,%f,%f\n'%(iter, self.K, self.dir_prior, time.clock()-curr, lb, perp))
elif corpus.heldout_ids == None and self.is_verbose:
print '%d iter, %d topics, %.2f time, %.2f lower_bound' % (iter, self.K, time.clock()-curr, lb)
if iter > 0:
lbs.append(lb)
if self.is_plot:
plt.close
plt.plot(lbs)
plt.draw()
if iter > 30:
if (abs(lbs[-1] - lbs[-2])/abs(lbs[-2])) < self.ll_diff_frac :
break
if directory:
self.save_result(directory, corpus)
return lbs
def getStickLeft(self, V):
stl = np.ones(self.K)
stl[1:] = np.cumprod(1.-V)[:-1]
return stl
def getP(self, V):
one_v = np.ones(self.K)
one_v[1:] = (1.-V)[:-1]
p = V * np.cumprod(one_v)
return p
    #update per word v.d. phi (c denoted by z in the icml paper)
    def update_C(self, corpus):
        """Update the per-word topic-assignment variationals (C) and the
        topic-word Dirichlet variational gamma; returns this update's
        contribution to the lower bound."""
        corpus.phi_doc = np.zeros([corpus.M, self.K])
        psiGamma = psi(self.gamma)
        gammaSum = np.sum(self.gamma, 0)
        psiGammaSum = psi(np.sum(self.gamma, 0))
        lnZ = psi(corpus.A) - np.log(corpus.B)  # E[log Z] under q(Z)
        Z = corpus.A/corpus.B  # E[Z] under q(Z)
        lb = 0
        if (self.is_compute_lb):
            # entropy of q(eta)
            lb += -np.sum(gammaln(gammaSum)) + np.sum(gammaln(self.gamma)) - np.sum((self.gamma - 1)*(psiGamma - psiGammaSum))
            # expectation of eta over variational q(eta)
            lb += self.K * gammaln(self.dir_prior*self.N) - self.K * self.N * gammaln(self.dir_prior) - np.sum((self.dir_prior-1)*(psiGamma-psiGammaSum))
        # reset gamma to the prior; re-accumulated from the new C below
        self.gamma = np.zeros([self.N, self.K]) + self.dir_prior  # multinomial topic distribution prior
        for m in xrange(corpus.M):
            ids = corpus.word_ids[m]
            cnt = corpus.word_cnt[m]
            # C = len(ids) x K
            E_ln_eta = psiGamma[ids,:] - psiGammaSum
            C = np.exp(E_ln_eta + lnZ[m,:])
            C = C/np.sum(C,1)[:,np.newaxis]  # normalize each word's topic distribution
            self.gamma[ids,:] += cnt[:,np.newaxis] * C
            corpus.phi_doc[m,:] = np.sum(cnt[:,np.newaxis] * C,0)
            # expectation of p(X) over variational q
            lb += np.sum(cnt[:,np.newaxis] * C * E_ln_eta)
            # entropy of q(C)
            lb -= np.sum(cnt[:,np.newaxis] * C * np.log(C+eps))
            # expectation of p(C) over variational q
            lb += np.sum(cnt[:,np.newaxis] * C * (lnZ[m,:] - np.log(np.sum(Z[m,:]))) )
        if self.is_verbose:
            print 'p(x,c)-q(c) %f' %lb
        return lb
    #update variational gamma prior a and b for Z_mk (z denoted by \pi in the icml paper)
    def update_Z(self, corpus):
        """Update the gamma variational parameters (corpus.A, corpus.B) of
        the per-document topic scaling variables Z; returns this update's
        contribution to the lower bound."""
        lb = 0
        bp = self.beta*self.p
        corpus.A = bp + corpus.phi_doc
        # taylor approximation on E[\sum lnZ]
        xi = np.sum(corpus.A/corpus.B, 1)
        E_exp_wr = np.exp(np.dot(corpus.R, corpus.w))
        E_wr = np.dot(corpus.R,corpus.w)  # M x K
        corpus.B = E_exp_wr + (corpus.Nm / xi)[:,np.newaxis]
        # expectation of p(Z)
        lb += np.sum(-bp * E_wr + (bp-1)*(psi(corpus.A)-np.log(corpus.B)) - E_exp_wr*(corpus.A/corpus.B) - gammaln(bp))
        # entropy of q(Z)
        lb -= np.sum(corpus.A*np.log(corpus.B) + (corpus.A-1)*(psi(corpus.A) - np.log(corpus.B)) - corpus.A - gammaln(corpus.A))
        if self.is_verbose:
            print 'p(z)-q(z) %f' %lb
        return lb
    def newton_W(self, corpus):
        """One Newton-Raphson step per topic (visited in random order) for
        the label-weight matrix corpus.w (J x K), with a standard-normal
        prior on each weight; returns the contribution to the lower bound."""
        lb = 0
        bp = self.beta * self.p
        Z = corpus.A/corpus.B
        lnZ = psi(corpus.A)-np.log(corpus.B)
        for ki in np.random.permutation(corpus.K):
            E_exp_wr = np.exp(np.dot(corpus.R, corpus.w))  # M x K
            E_wr = np.dot(corpus.R,corpus.w)  # M x K
            det_w = np.zeros([self.J])
            H = np.zeros([self.J,self.J])
            new_second = corpus.R*(E_exp_wr[:,ki][:,np.newaxis])*(Z[:,ki][:,np.newaxis])  # M x J
            # gradient and Hessian for topic ki
            det_w = np.sum(bp[ki]*corpus.R - new_second, 0) - corpus.w[:,ki]  # with normal prior mean 0 and variance 1
            H = - np.dot(new_second.T, corpus.R) - np.identity(self.J)  # - identity for normal
            # for ji in xrange(corpus.J):
            #     H[:,ji] = np.sum(- corpus.R * new_second[:,ji][:,np.newaxis], 0)
            #     second = corpus.R[:,ji]*E_exp_wr[:,ki]*Z[:,ki] # M-dim
            #     det_w[ji] = np.sum(bp[ki]*corpus.R[:,ji] - second) # - 2.0 * corpus.w[ji,ki] # normal prior
            #     for ji2 in xrange(corpus.J):
            #         H[ji2,ji] = np.sum(- corpus.R[:,ji2] * corpus.R[:,ji] * E_exp_wr[:,ki]*Z[:,ki])
            invH = np.linalg.inv(H)
            # Newton update: w <- w - H^{-1} * gradient
            corpus.w[:,ki] = corpus.w[:,ki] - np.dot(invH, det_w)
        E_exp_wr = np.exp(np.dot(corpus.R, corpus.w))  # M x K
        E_wr = np.dot(corpus.R,corpus.w)  # M x K
        lb = np.sum(-bp * E_wr + (bp-1)*(lnZ) - E_exp_wr*(Z))
        if self.is_verbose:
            print 'p(w)-q(w) %f, max %f, min %f' % (lb, np.max(corpus.w), np.min(corpus.w))
        return lb
    #coordinate ascent for w_jk
    def update_W(self, corpus):
        """Gradient-ascent alternative to newton_W: line search over a
        log-spaced grid of step sizes; returns the contribution to the
        lower bound."""
        lb = 0
        bp = self.beta * self.p
        Z = corpus.A/corpus.B
        lnZ = psi(corpus.A)-np.log(corpus.B)
        for iter in xrange(10):
            E_exp_wr = np.exp(np.dot(corpus.R, corpus.w))  # M x K
            E_wr = np.dot(corpus.R,corpus.w)  # M x K
            old_lb = np.sum(-bp * E_wr - E_exp_wr*(Z))
            del_w = np.zeros([corpus.J, self.K])
            # gradient w.r.t. each w[ji, ki]
            for ji in xrange(corpus.J):
                for ki in xrange(corpus.K):
                    del_w[ji,ki] = np.sum(bp[ki]*corpus.R[:,ji] - corpus.R[:,ji]*E_exp_wr[:,ki]*Z[:,ki])
            stepsize = 1.0/np.max(np.abs(del_w))
            steps = np.logspace(-10,0)
            ll = list()
            for si in xrange(len(steps)):
                step = steps[si]
                new_w = corpus.w + step * stepsize * del_w
                E_exp_wr = np.exp(np.dot(corpus.R, new_w))
                E_wr = np.dot(corpus.R,new_w)  # M x K
                new_lb = np.sum(-bp * E_wr - E_exp_wr*(Z))
                if np.isnan(new_lb):
                    # larger steps only overflow further; stop scanning
                    break
                ll.append(new_lb)
            ll = np.array(ll)
            idx = ll.argsort()[::-1][0]  # index of the best-scoring step
            corpus.w = corpus.w + steps[idx]*stepsize*del_w
            # NOTE(review): this print is not guarded by self.is_verbose,
            # unlike the other updates -- confirm intent.
            print '\t%d w old new diff %f \t %f \t %f \t%f \t%f \t%f' %(iter, (ll[idx] - old_lb), stepsize, np.max(np.abs(del_w)), np.max(np.abs(del_w))*stepsize, np.max(corpus.w), np.min(corpus.w) )
            if np.abs(ll[idx] - old_lb) < 0.1:
                break
        lb = np.sum(-bp * E_wr + (bp-1)*(lnZ) - E_exp_wr*(Z))
        if self.is_verbose:
            print 'p(w)-q(w) %f' % lb
        return lb
    #coordinate ascent for V
    def update_V(self, corpus):
        """Coordinate ascent on the stick-breaking fractions V (and hence the
        topic weights p), using getstepSTICK for the line search; returns
        the contribution to the lower bound."""
        lb = 0
        sumLnZ = np.sum(psi(corpus.A) - np.log(corpus.B), 0)  # K dim
        tmp = np.dot(corpus.R, corpus.w)  # M x K
        sum_r_w = np.sum(tmp, 0)
        assert len(sum_r_w) == self.K
        for i in xrange(self.c_a_max_step):
            one_V = 1-self.V
            stickLeft = self.getStickLeft(self.V)  # prod(1-V_(dim-1))
            p = self.V * stickLeft
            psiV = psi(self.beta * p)
            # gradient of the objective w.r.t. each V_k
            vVec = - self.beta*stickLeft*sum_r_w + self.beta*stickLeft*sumLnZ - corpus.M*self.beta*stickLeft*psiV;
            for k in xrange(self.K):
                # contributions of the sticks after k (they depend on V_k
                # through the remaining stick length)
                tmp1 = self.beta*sum(sum_r_w[k+1:]*p[k+1:]/one_V[k]);
                tmp2 = self.beta*sum(sumLnZ[k+1:]*p[k+1:]/one_V[k]);
                tmp3 = corpus.M*self.beta*sum(psiV[k+1:]*p[k+1:]/one_V[k]);
                vVec[k] = vVec[k] + tmp1 - tmp2;
                vVec[k] = vVec[k] + tmp3;
                vVec[k] = vVec[k]  # NOTE(review): no-op retained from original
            vVec[:self.K-2] -= (self.alpha-1)/one_V[:self.K-2];  # Beta(1, alpha) prior term
            vVec[self.K-1] = 0;  # last stick is fixed at 1
            step_stick = self.getstepSTICK(self.V,vVec,sum_r_w,sumLnZ,self.beta,self.alpha,corpus.M);
            self.V = self.V + step_stick*vVec;
            self.p = self.getP(self.V)
        lb += self.K*gammaln(self.alpha+1) - self.K*gammaln(self.alpha) + np.sum((self.alpha-1)*np.log(1-self.V[:self.K-1]))
        if self.is_verbose:
            print 'p(V)-q(V) %f' % lb
        return lb
def update_alpha(self):
old = (self.K-1) * gammaln(self.alpha + 1) - (self.K-1) * gammaln(self.alpha) + np.sum(self.alpha*(1-self.V[:-1])) + self.alpha_1*np.log(self.alpha_2) + (self.alpha_1 - 1)*np.log(self.alpha) - self.alpha_2 * self.alpha - gammaln(self.alpha_1)
self.alpha = (self.K + self.alpha_1 -2)/(self.alpha_2 - np.sum(np.log(1-self.V[:-1]+eps)))
new = (self.K-1) * gammaln(self.alpha + 1) - (self.K-1) * gammaln(self.alpha) + np.sum(self.alpha*(1-self.V[:-1])) + self.alpha_1*np.log(self.alpha_2) + (self.alpha_1 - 1)*np.log(self.alpha) - self.alpha_2 * self.alpha - gammaln(self.alpha_1)
if self.is_verbose:
print 'new alpha = %.2f, %.2f' % (self.alpha, (new-old))
def update_beta(self, corpus):
E_wr = np.dot(corpus.R, corpus.w) #M x K
lnZ = psi(corpus.A) - np.log(corpus.B)
first = self.p * E_wr
# since beta does not change a lot, this way is more efficient
candidate = np.linspace(-1, 1, 31)
f = np.zeros(len(candidate))
for i in xrange(len(candidate)):
step = candidate[i]
new_beta = self.beta + self.beta*step
if new_beta < 0:
f[i] = -np.inf
else:
bp = new_beta * self.p
f[i] = np.sum(new_beta * first) + np.sum((bp - 1) * lnZ) - np.sum(corpus.M * gammaln(bp))
best_idx = f.argsort()[-1]
maxstep = candidate[best_idx]
self.beta += self.beta*maxstep
if self.is_verbose:
print 'new beta = %.2f, %.2f' % (self.beta, candidate[best_idx])
# get stick length to update the gradient
def getstepSTICK(self, curr, grad, sumMu, sumlnZ, beta, alpha, M):
    """Line search for the stick-breaking weights V along `grad`.

    Finds a non-negative step size that keeps every stick proportion
    inside (0, 1) and maximises the variational objective; when the
    coarse grid's smallest candidate wins, backtracks further while
    the objective keeps improving.

    Args:
        curr: current stick values V (last entry is fixed and excluded).
        grad: gradient with respect to V.
        sumMu, sumlnZ: per-topic sufficient statistics.
        beta, alpha: model hyperparameters.
        M: number of documents.
    Returns:
        Scalar step size (0 when no feasible positive step exists).
    """
    # Largest step that keeps every active coordinate strictly in (0, 1).
    _curr = curr[:len(curr) - 1]
    _grad = grad[:len(curr) - 1]
    _curr = _curr[_grad != 0]
    _grad = _grad[_grad != 0]
    step_zero = -_curr / _grad       # step at which a coordinate hits 0
    step_one = (1 - _curr) / _grad   # step at which a coordinate hits 1
    min_zero = 1
    min_one = 1
    if np.sum(step_zero >= 0) > 0:
        min_zero = min(step_zero[step_zero >= 0])
    if np.sum(step_one >= 0) > 0:
        min_one = min(step_one[step_one >= 0])
    max_step = min([min_zero, min_one])

    # BUG FIX: the original fell through to `if b == 0` with `b` never
    # assigned whenever max_step <= 0 (empty candidate grid), raising
    # NameError.  Bail out with a zero step instead.
    if max_step <= 0:
        return 0

    # Coarse grid over fractions of the largest feasible step.
    step_check_vec = np.array([.01, .125, .25, .375, .5, .625, .75, .875]) * max_step
    f = np.zeros(len(step_check_vec))
    for ite in xrange(len(step_check_vec)):
        step_check = step_check_vec[ite]
        vec_check = curr + step_check * grad
        p = self.getP(vec_check)
        f[ite] = -np.sum(beta * p * sumMu) - M * np.sum(gammaln(beta * p)) \
            + np.sum((beta * p - 1) * sumlnZ) \
            + (alpha - 1.) * np.sum(np.log(1. - vec_check[:-1] + eps))
    b = f.argsort()[-1]  # best grid candidate
    step = step_check_vec[b]

    if b == 0:
        # Smallest candidate won: keep shrinking while improving.
        rho = .5
        keep_shrinking = 1  # renamed from `bool`, which shadowed the builtin
        fold = f[b]
        while keep_shrinking:
            step = rho * step
            vec_check = curr + step * grad
            tmp = np.zeros(vec_check.size)
            tmp[1:] = vec_check[:-1]
            p = vec_check * np.cumprod(1 - tmp)  # stick-breaking proportions
            fnew = -np.sum(beta * p * sumMu) - M * np.sum(gammaln(beta * p)) \
                + np.sum((beta * p - 1) * sumlnZ) \
                + (alpha - 1.) * np.sum(np.log(1. - vec_check[:-1] + eps))
            if fnew > fold:
                fold = fnew
            else:
                keep_shrinking = 0
        # undo the last (non-improving) shrink
        step = step / rho
    return step
def write_top_words(self, corpus, filepath):
    """Write each topic's 20 most probable words to final_top_words.csv.

    Topics are ordered by their posterior usage (column sums of gamma);
    each row is: topic id, stick proportion p, then the top words.
    """
    with open(filepath + '/final_top_words.csv', 'w') as out:
        topic_mass = np.sum(self.gamma, 0)
        for topic in topic_mass.argsort()[::-1]:
            words = corpus.vocab[self.gamma[:, topic].argsort()[::-1][:20]]
            out.write('%d,%f' % (topic, self.p[topic]))
            for word in words:
                out.write(',' + word)
            out.write('\n')
def write_label_top_words(self, corpus, filepath):
    """Write per-label top-word summaries to two CSV files.

    final_label_top_words_all.csv: one row per (label, topic) pair with
    the label's effective weight on that topic.
    final_label_top_words.csv: for each label, only the topics with the
    minimum and maximum weight in w.
    """
    bp = self.beta * self.p
    with open(filepath + '/final_label_top_words.csv', 'w') as f, open(filepath + '/final_label_top_words_all.csv', 'w') as f2:
        mean = corpus.w
        for li in xrange(corpus.J):
            for ki in xrange(corpus.K):
                # 20 most probable words of topic ki
                top_words = corpus.vocab[self.gamma[:,ki].argsort()[::-1][:20]]
                # commas inside label names would break the CSV layout
                f2.write('%s,%d,%f' % (corpus.label_names[li].replace(',',' '), ki, mean[li,ki]*bp[ki]))
                for word in top_words:
                    f2.write(',' + word)
                f2.write('\n')
            # topics with extreme weights for this label
            min_topic = mean[li,:].argsort()[0]
            max_topic = mean[li,:].argsort()[-1]
            top_words = corpus.vocab[self.gamma[:,min_topic].argsort()[::-1][:20]]
            f.write('min,%s,%f'%(corpus.label_names[li].replace(',',' '), mean[li,min_topic] ))
            for word in top_words:
                f.write(',' + word)
            f.write('\n')
            f.write('max,%s,%f'%(corpus.label_names[li].replace(',',' '), mean[li,max_topic] ))
            top_words = corpus.vocab[self.gamma[:,max_topic].argsort()[::-1][:20]]
            for word in top_words:
                f.write(',' + word)
            f.write('\n')
def save_result(self, folder, corpus):
    """Dump fitted model and corpus statistics to `folder` as CSV files.

    Writes label-topic weights (w), stick weights (V), topic-word
    parameters (gamma), the per-document variational parameters (A, B),
    and then the human-readable top-word summaries.
    """
    # cPickle removed from the import: it was never used (the pickle
    # dump below is commented out) and it does not exist on Python 3.
    import os
    if not os.path.exists(folder):
        os.mkdir(folder)
    np.savetxt(folder + '/final_w.csv', corpus.w, delimiter=',')
    np.savetxt(folder + '/final_V.csv', self.V, delimiter=',')
    np.savetxt(folder + '/gamma.csv', self.gamma, delimiter=',')
    np.savetxt(folder + '/A.csv', corpus.A, delimiter=',')
    np.savetxt(folder + '/B.csv', corpus.B, delimiter=',')
    self.write_top_words(corpus, folder)
    self.write_label_top_words(corpus, folder)
    #cPickle.dump([self,corpus], open(folder+'/model_corpus.pkl','w'))
def heldout_perplexity(self, corpus):
    """Compute per-word perplexity on the held-out documents.

    Mixes each held-out document's expected topic proportions (from its
    response vector) with the normalised topic-word distributions and
    returns exp(-log-likelihood / total word count).
    """
    num_hdoc = len(corpus.heldout_ids)
    topic = self.gamma / np.sum(self.gamma, 0)  # column-normalised topic-word dists
    bp = self.beta * self.p

    perp = 0
    cnt_sum = 0

    wr = np.dot(corpus.heldout_responses, corpus.w)  # m x k

    for di in xrange(num_hdoc):
        doc = corpus.heldout_ids[di]
        cnt = corpus.heldout_cnt[di]

        # Removed dead stores from the original: `mean = corpus.w` was
        # never read, and Z was zero-initialised then immediately
        # overwritten by the expression below.
        Z = bp / np.exp(wr[di, :])
        Z /= np.sum(Z)  # expected topic proportions for this document
        if np.sum(cnt) != 0:
            perp -= np.sum(np.log(np.dot(topic[doc, :], Z) + eps) * cnt)
            cnt_sum += np.sum(cnt)

    return np.exp(perp / cnt_sum)
class hdsp_corpus:
    """Document collection plus labels/responses for the HDSP model.

    Holds the raw word id/count arrays, the M x J label matrix R, the
    randomly initialised variational parameters A/B, and optional
    held-out documents for perplexity evaluation.
    """

    def __init__(self, vocab, word_ids, word_cnt, num_topics, labels,
                 label_names=None, heldout_ids=None, heldout_cnt=None,
                 heldout_responses=None):
        """Build a corpus container.

        Args:
            vocab: list or ndarray of vocabulary words.
            word_ids: per-document sequences of word indices.
            word_cnt: per-document counts aligned with word_ids.
            num_topics: truncation level K.
            labels: M x J label/response matrix.
            label_names: optional J label names (defaults to "0", "1", ...).
            heldout_ids/heldout_cnt/heldout_responses: optional held-out data.
        """
        # isinstance instead of `type(...) == list`; ndarrays pass through.
        if isinstance(vocab, list):
            self.vocab = np.array(vocab)
        else:
            self.vocab = vocab
        # Coerce plain Python lists to ndarrays once, up front.
        if not isinstance(word_ids[0], np.ndarray):
            word_ids = [np.array(ids) for ids in word_ids]
            word_cnt = [np.array(cnt) for cnt in word_cnt]
        if label_names is None:  # `is None` instead of `== None`
            label_names = [str(i) for i in range(labels.shape[1])]

        self.word_ids = word_ids
        self.word_cnt = word_cnt
        self.R = labels          # M x J matrix
        self.K = num_topics      # num topics
        self.N = len(vocab)      # num voca
        self.M = len(word_ids)   # num documents
        self.J = labels.shape[1]
        # Random Gamma(1, 1) initialisation of the variational parameters.
        self.A = np.random.gamma(shape=1, scale=1, size=[self.M, self.K])
        self.B = np.random.gamma(shape=1, scale=1, size=[self.M, self.K])
        self.w = np.zeros([self.J, self.K])
        self.r_j = np.sum(self.R, 0)
        self.label_names = label_names
        self.heldout_ids = heldout_ids
        self.heldout_cnt = heldout_cnt
        self.heldout_responses = heldout_responses
        # Total word count per document.
        self.Nm = np.zeros(self.M)
        for i in range(self.M):
            self.Nm[i] = np.sum(word_cnt[i])
def plot_expected_topics(model, corpus, labels, save_path=None, num_words = 10, num_topics = 20):
    """plot expected topics

    Draws a bar chart of the expected topic proportions implied by a
    single label/response vector, ticking each bar with that topic's
    most probable words.

    Args:
        model: fitted hdsp model (uses model.p and model.gamma).
        corpus: hdsp_corpus providing vocab and label_names.
        labels: one label vector of length corpus.J.
        save_path: if given, the figure is also saved there as PDF.
        num_words: words shown per topic tick.
        num_topics: number of top topics plotted.
    """
    import matplotlib.pyplot as plt
    # clip requested sizes to what the model/corpus actually has
    if model.K < num_topics:
        num_topics = model.K
    if corpus.N < num_words:
        num_words = corpus.N

    legend_size = 15
    word_size = 10
    width = 20
    height = 3

    wr = np.exp(np.dot(labels, corpus.w))
    Z = model.p / (wr)
    Z /= np.sum(Z) #expected topic proportion given 'labels'
    rank = model.p.argsort()[::-1]  # topics ordered by global proportion

    fig = plt.gcf()
    fig.set_size_inches(width,height)
    ax = plt.gca()
    # legend lists only the non-zero entries of the label vector
    l_names = ['%s:%.2f'%(corpus.label_names[i],labels[i]) for i in xrange(0,corpus.J) if labels[i] != 0]
    plt.bar(range(0,num_topics), Z[rank[:num_topics]], label='label={%s}'%(', '.join(l_names)), alpha=0.5)
    plt.legend(prop={'size':legend_size})
    ax.set_xticks(np.arange(num_topics)+0.4)
    ax.set_xticklabels(['\n'.join(corpus.vocab[model.gamma[:,i].argsort()[::-1][:num_words]]) for i in rank[:num_topics]], size=word_size)
    plt.plot()
    if save_path:
        plt.savefig(save_path, format='PDF', bbox_inches='tight', dpi=720)
def test():
    """Smoke-test: fit the model on a tiny synthetic corpus and plot."""
    #corpus parameters
    num_topics = 5
    num_words = 6
    num_labels = 2
    num_docs = 3
    voca = [str(i) for i in xrange(num_words)]
    corpus_ids = [[0,1,2],[1,2,3],[3,4,5]] # word ids for each document
    corpus_cnt = [[2,3,1],[1,3,2],[3,2,1]] # word count corresponding to word ids for each document
    labels = np.random.random([num_docs,num_labels])

    #model parameters
    max_iter = 10
    output_dir = 'result'

    corpus = hdsp_corpus(voca, corpus_ids, corpus_cnt, num_topics, labels)
    model = hdsp(num_topics, num_words, num_labels)
    model.run_variational_EM(max_iter, corpus, output_dir) # run variational inference
    plot_expected_topics(model, corpus, labels[0], save_path='result/expected_topics.pdf')

if __name__ == '__main__':
    #test with toy problem
    test()
| |
"""Alexa capabilities."""
from datetime import datetime
import logging
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
)
import homeassistant.components.climate.const as climate
from homeassistant.components import (
light,
fan,
cover,
)
import homeassistant.util.color as color_util
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
)
from .errors import UnsupportedProperty
_LOGGER = logging.getLogger(__name__)
class AlexaCapibility:
    """Base class for Alexa capability interfaces.

    The Smart Home Skills API defines a number of "capability interfaces",
    roughly analogous to domains in Home Assistant. The supported interfaces
    describe what actions can be performed on a particular device.

    (NOTE(review): the class name misspells "Capability"; renaming would
    break every subclass in this module, so it is left as-is.)

    https://developer.amazon.com/docs/device-apis/message-guide.html
    """

    def __init__(self, entity):
        """Initialize an Alexa capability wrapper around a HA entity."""
        self.entity = entity

    def name(self):
        """Return the Alexa API name of this interface."""
        raise NotImplementedError

    @staticmethod
    def properties_supported():
        """Return what properties this entity supports."""
        return []

    @staticmethod
    def properties_proactively_reported():
        """Return True if properties asynchronously reported."""
        return False

    @staticmethod
    def properties_retrievable():
        """Return True if properties can be retrieved."""
        return False

    @staticmethod
    def get_property(name):
        """Read and return a property.

        Return value should be a dict, or raise UnsupportedProperty.

        Properties can also have a timeOfSample and uncertaintyInMilliseconds,
        but returning those metadata is not yet implemented.
        """
        raise UnsupportedProperty(name)

    @staticmethod
    def supports_deactivation():
        """Applicable only to scenes; None means "not applicable"."""
        return None

    def serialize_discovery(self):
        """Serialize according to the Discovery API."""
        result = {
            'type': 'AlexaInterface',
            'interface': self.name(),
            'version': '3',
            'properties': {
                'supported': self.properties_supported(),
                'proactivelyReported': self.properties_proactively_reported(),
                'retrievable': self.properties_retrievable(),
            },
        }

        # pylint: disable=assignment-from-none
        # supportsDeactivation is only included when a subclass opts in.
        supports_deactivation = self.supports_deactivation()
        if supports_deactivation is not None:
            result['supportsDeactivation'] = supports_deactivation
        return result

    def serialize_properties(self):
        """Yield each supported property serialized for an API response.

        Properties whose current value is None are skipped.
        """
        for prop in self.properties_supported():
            prop_name = prop['name']
            # pylint: disable=assignment-from-no-return
            prop_value = self.get_property(prop_name)
            if prop_value is not None:
                yield {
                    'name': prop_name,
                    'namespace': self.name(),
                    'value': prop_value,
                    'timeOfSample': datetime.now().strftime(DATE_FORMAT),
                    'uncertaintyInMilliseconds': 0
                }
class AlexaEndpointHealth(AlexaCapibility):
    """Implements Alexa.EndpointHealth.

    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.EndpointHealth'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'connectivity'}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return False

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the connectivity property."""
        if name != 'connectivity':
            raise UnsupportedProperty(name)

        unreachable = self.entity.state == STATE_UNAVAILABLE
        return {'value': 'UNREACHABLE' if unreachable else 'OK'}
class AlexaPowerController(AlexaCapibility):
    """Implements Alexa.PowerController.

    https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.PowerController'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'powerState'}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the powerState property."""
        if name != 'powerState':
            raise UnsupportedProperty(name)

        # Climate entities use their own "off" state constant.
        if self.entity.domain == climate.DOMAIN:
            off_state = climate.HVAC_MODE_OFF
        else:
            off_state = STATE_OFF
        return 'OFF' if self.entity.state == off_state else 'ON'
class AlexaLockController(AlexaCapibility):
    """Implements Alexa.LockController.

    https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.LockController'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'lockState'}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def get_property(self, name):
        """Read and return the lockState property.

        Any state other than locked/unlocked is reported as JAMMED.
        """
        if name != 'lockState':
            raise UnsupportedProperty(name)

        state_map = {STATE_LOCKED: 'LOCKED', STATE_UNLOCKED: 'UNLOCKED'}
        return state_map.get(self.entity.state, 'JAMMED')
class AlexaSceneController(AlexaCapibility):
    """Implements Alexa.SceneController.

    https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
    """

    def __init__(self, entity, supports_deactivation):
        """Initialize the entity.

        `supports_deactivation` (bool) is stored as a zero-argument
        callable so this instance attribute shadows the base class's
        static supports_deactivation(), which returns None.
        """
        super().__init__(entity)
        self.supports_deactivation = lambda: supports_deactivation

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.SceneController'
class AlexaBrightnessController(AlexaCapibility):
    """Implements Alexa.BrightnessController.

    https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.BrightnessController'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'brightness'}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return brightness as a 0-100 percentage."""
        if name != 'brightness':
            raise UnsupportedProperty(name)

        attrs = self.entity.attributes
        if 'brightness' not in attrs:
            return 0
        # HA stores brightness as 0-255; Alexa wants 0-100.
        return round(attrs['brightness'] / 255.0 * 100)
class AlexaColorController(AlexaCapibility):
    """Implements Alexa.ColorController.

    https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.ColorController'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'color'}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the color as hue/saturation/brightness."""
        if name != 'color':
            raise UnsupportedProperty(name)

        attrs = self.entity.attributes
        hue, saturation = attrs.get(light.ATTR_HS_COLOR, (0, 0))
        # HA saturation is 0-100 and brightness 0-255; Alexa wants 0-1.
        brightness = attrs.get(light.ATTR_BRIGHTNESS, 0) / 255.0
        return {
            'hue': hue,
            'saturation': saturation / 100.0,
            'brightness': brightness,
        }
class AlexaColorTemperatureController(AlexaCapibility):
    """Implements Alexa.ColorTemperatureController.

    https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.ColorTemperatureController'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'colorTemperatureInKelvin'}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the color temperature converted to Kelvin."""
        if name != 'colorTemperatureInKelvin':
            raise UnsupportedProperty(name)

        attrs = self.entity.attributes
        if 'color_temp' not in attrs:
            return 0
        # HA stores color temperature in mireds.
        return color_util.color_temperature_mired_to_kelvin(
            attrs['color_temp'])
class AlexaPercentageController(AlexaCapibility):
    """Implements Alexa.PercentageController.

    https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.PercentageController'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'percentage'}]

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a percentage for fans and covers."""
        if name != 'percentage':
            raise UnsupportedProperty(name)

        domain = self.entity.domain
        if domain == fan.DOMAIN:
            # Map the named fan speed onto a percentage.
            speed = self.entity.attributes.get(fan.ATTR_SPEED)
            return PERCENTAGE_FAN_MAP.get(speed, 0)
        if domain == cover.DOMAIN:
            return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
        return 0
class AlexaSpeaker(AlexaCapibility):
    """Implements Alexa.Speaker.

    Only the interface name is exposed here; the base-class defaults
    (no retrievable properties) apply.

    https://developer.amazon.com/docs/device-apis/alexa-speaker.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.Speaker'
class AlexaStepSpeaker(AlexaCapibility):
    """Implements Alexa.StepSpeaker.

    Only the interface name is exposed here; the base-class defaults
    (no retrievable properties) apply.

    https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.StepSpeaker'
class AlexaPlaybackController(AlexaCapibility):
    """Implements Alexa.PlaybackController.

    Only the interface name is exposed here; the base-class defaults
    (no retrievable properties) apply.

    https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.PlaybackController'
class AlexaInputController(AlexaCapibility):
    """Implements Alexa.InputController.

    Only the interface name is exposed here; the base-class defaults
    (no retrievable properties) apply.

    https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
    """

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.InputController'
class AlexaTemperatureSensor(AlexaCapibility):
    """Implements Alexa.TemperatureSensor.

    https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.TemperatureSensor'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'temperature'}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the temperature property.

        Returns None (which serialize_properties skips) when the entity
        has no current temperature, instead of crashing on float(None).
        """
        if name != 'temperature':
            raise UnsupportedProperty(name)

        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        temp = self.entity.state
        if self.entity.domain == climate.DOMAIN:
            unit = self.hass.config.units.temperature_unit
            temp = self.entity.attributes.get(
                climate.ATTR_CURRENT_TEMPERATURE)
        # BUG FIX: a climate entity without ATTR_CURRENT_TEMPERATURE
        # yields temp=None and float(None) raised TypeError.
        if temp is None:
            return None
        return {
            'value': float(temp),
            'scale': API_TEMP_UNITS[unit],
        }
class AlexaContactSensor(AlexaCapibility):
    """Implements Alexa.ContactSensor.

    The Alexa.ContactSensor interface describes the properties and events used
    to report the state of an endpoint that detects contact between two
    surfaces. For example, a contact sensor can report whether a door or window
    is open.

    https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.ContactSensor'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'detectionState'}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the detectionState property."""
        if name != 'detectionState':
            raise UnsupportedProperty(name)

        detected = self.entity.state == STATE_ON
        return 'DETECTED' if detected else 'NOT_DETECTED'
class AlexaMotionSensor(AlexaCapibility):
    """Implements Alexa.MotionSensor.

    https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.MotionSensor'

    def properties_supported(self):
        """Return what properties this entity supports."""
        return [{'name': 'detectionState'}]

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return the detectionState property."""
        if name != 'detectionState':
            raise UnsupportedProperty(name)

        detected = self.entity.state == STATE_ON
        return 'DETECTED' if detected else 'NOT_DETECTED'
class AlexaThermostatController(AlexaCapibility):
    """Implements Alexa.ThermostatController.

    https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
    """

    def __init__(self, hass, entity):
        """Initialize the entity."""
        super().__init__(entity)
        self.hass = hass

    def name(self):
        """Return the Alexa API name of this interface."""
        return 'Alexa.ThermostatController'

    def properties_supported(self):
        """Return what properties this entity supports.

        Setpoint properties are advertised only when the climate entity
        reports the matching SUPPORT_* feature flags.
        """
        properties = [{'name': 'thermostatMode'}]
        supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if supported & climate.SUPPORT_TARGET_TEMPERATURE:
            properties.append({'name': 'targetSetpoint'})
        if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
            properties.append({'name': 'lowerSetpoint'})
            properties.append({'name': 'upperSetpoint'})
        return properties

    def properties_proactively_reported(self):
        """Return True if properties asynchronously reported."""
        return True

    def properties_retrievable(self):
        """Return True if properties can be retrieved."""
        return True

    def get_property(self, name):
        """Read and return a property.

        Raises UnsupportedProperty for unknown names and for HVAC
        states without an Alexa equivalent; setpoints return None when
        the entity does not expose the attribute.
        """
        if name == 'thermostatMode':
            # A matching preset (e.g. eco) takes precedence over the
            # raw HVAC state.
            preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)

            if preset in API_THERMOSTAT_PRESETS:
                mode = API_THERMOSTAT_PRESETS[preset]
            else:
                mode = API_THERMOSTAT_MODES.get(self.entity.state)
                if mode is None:
                    _LOGGER.error(
                        "%s (%s) has unsupported state value '%s'",
                        self.entity.entity_id, type(self.entity),
                        self.entity.state)
                    raise UnsupportedProperty(name)
            return mode

        unit = self.hass.config.units.temperature_unit
        if name == 'targetSetpoint':
            temp = self.entity.attributes.get(ATTR_TEMPERATURE)
        elif name == 'lowerSetpoint':
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_LOW)
        elif name == 'upperSetpoint':
            temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_HIGH)
        else:
            raise UnsupportedProperty(name)
        if temp is None:
            return None

        return {
            'value': float(temp),
            'scale': API_TEMP_UNITS[unit],
        }
| |
import numpy as np
from pandas._libs import (index as libindex,
algos as libalgos, join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal,
pandas_dtype,
is_float_dtype,
is_object_dtype,
is_integer_dtype,
is_bool,
is_bool_dtype,
is_scalar)
from pandas.core.common import _asarray_tuplesafe, _values_from_object
from pandas import compat
from pandas.core import algorithms
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.indexes.base as ibase
_num_index_shared_docs = dict()
class NumericIndex(Index):
    """
    Provide numeric type operations

    This is an abstract class

    Concrete subclasses set _default_dtype and the C-level engine/join
    helpers; construction coerces input data to that dtype.
    """
    _is_numeric_dtype = True

    def __new__(cls, data=None, dtype=None, copy=False, name=None,
                fastpath=False):
        # fastpath skips validation/coercion entirely; callers must
        # already hold an ndarray of the right dtype.
        if fastpath:
            return cls._simple_new(data, name=name)

        # isscalar, generators handled in coerce_to_ndarray
        data = cls._coerce_to_ndarray(data)

        if issubclass(data.dtype.type, compat.string_types):
            cls._string_data_error(data)

        if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
            subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
            # guard against silent truncation (e.g. float -> int)
            cls._assert_safe_casting(data, subarr)
        else:
            subarr = data

        # inherit the name from e.g. a Series/Index input
        if name is None and hasattr(data, 'name'):
            name = data.name
        return cls._simple_new(subarr, name=name)

    @Appender(_index_shared_docs['_maybe_cast_slice_bound'])
    def _maybe_cast_slice_bound(self, label, side, kind):
        assert kind in ['ix', 'loc', 'getitem', None]

        # we will try to coerce to integers
        return self._maybe_cast_indexer(label)

    def _convert_for_op(self, value):
        """ Convert value to be insertable to ndarray """
        if is_bool(value) or is_bool_dtype(value):
            # force conversion to object
            # so we don't lose the bools
            raise TypeError

        return value

    def _convert_tolerance(self, tolerance):
        # tolerance used by get_loc/get_indexer must be numeric
        try:
            return float(tolerance)
        except ValueError:
            raise ValueError('tolerance argument for %s must be numeric: %r' %
                             (type(self).__name__, tolerance))

    @classmethod
    def _assert_safe_casting(cls, data, subarr):
        """
        Subclasses need to override this only if the process of casting data
        from some accepted dtype to the internal dtype(s) bears the risk of
        truncation (e.g. float to int).
        """
        pass

    @property
    def is_all_dates(self):
        """
        Checks that all the labels are datetime objects
        """
        return False
# Shared class docstring template; %-formatted with the per-class
# *_descr_args dicts defined below.
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects.
See also
--------
Index : The base pandas Index type
"""

# Substitution values for Int64Index's class docstring.
_int64_descr_args = dict(
    klass='Int64Index',
    ltype='integer',
    dtype='int64',
    extra=''
)
class Int64Index(NumericIndex):
    __doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args

    _typ = 'int64index'
    _arrmap = libalgos.arrmap_int64
    # C-level join/indexer routines specialised for int64
    _left_indexer_unique = libjoin.left_join_indexer_unique_int64
    _left_indexer = libjoin.left_join_indexer_int64
    _inner_indexer = libjoin.inner_join_indexer_int64
    _outer_indexer = libjoin.outer_join_indexer_int64

    _can_hold_na = False
    _engine_type = libindex.Int64Engine
    _default_dtype = np.int64

    @property
    def inferred_type(self):
        return 'integer'

    @property
    def asi8(self):
        # do not cache or you'll create a memory leak
        return self.values.view('i8')

    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

        # don't coerce ilocs to integers
        if kind != 'iloc':
            key = self._maybe_cast_indexer(key)
        return (super(Int64Index, self)
                ._convert_scalar_indexer(key, kind=kind))

    def _wrap_joined_index(self, joined, other):
        # keep the index name only when both sides agree
        name = self.name if self.name == other.name else None
        return Int64Index(joined, name=name)

    @classmethod
    def _assert_safe_casting(cls, data, subarr):
        """
        Ensure incoming data can be represented as ints.
        """
        if not issubclass(data.dtype.type, np.signedinteger):
            if not np.array_equal(data, subarr):
                raise TypeError('Unsafe NumPy casting, you must '
                                'explicitly cast')
# Attach the shared arithmetic and logical methods to Int64Index.
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()

# Substitution values for UInt64Index's class docstring.
_uint64_descr_args = dict(
    klass='UInt64Index',
    ltype='unsigned integer',
    dtype='uint64',
    extra=''
)
class UInt64Index(NumericIndex):
    __doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args

    _typ = 'uint64index'
    _arrmap = libalgos.arrmap_uint64
    # C-level join/indexer routines specialised for uint64
    _left_indexer_unique = libjoin.left_join_indexer_unique_uint64
    _left_indexer = libjoin.left_join_indexer_uint64
    _inner_indexer = libjoin.inner_join_indexer_uint64
    _outer_indexer = libjoin.outer_join_indexer_uint64

    _can_hold_na = False
    _na_value = 0
    _engine_type = libindex.UInt64Engine
    _default_dtype = np.uint64

    @property
    def inferred_type(self):
        return 'integer'

    @property
    def asi8(self):
        # do not cache or you'll create a memory leak
        return self.values.view('u8')

    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

        # don't coerce ilocs to integers
        if kind != 'iloc':
            key = self._maybe_cast_indexer(key)
        return (super(UInt64Index, self)
                ._convert_scalar_indexer(key, kind=kind))

    @Appender(_index_shared_docs['_convert_arr_indexer'])
    def _convert_arr_indexer(self, keyarr):
        # Cast the indexer to uint64 if possible so
        # that the values returned from indexing are
        # also uint64.
        keyarr = _asarray_tuplesafe(keyarr)
        if is_integer_dtype(keyarr):
            return _asarray_tuplesafe(keyarr, dtype=np.uint64)
        return keyarr

    @Appender(_index_shared_docs['_convert_index_indexer'])
    def _convert_index_indexer(self, keyarr):
        # Cast the indexer to uint64 if possible so
        # that the values returned from indexing are
        # also uint64.
        if keyarr.is_integer():
            return keyarr.astype(np.uint64)
        return keyarr

    def _wrap_joined_index(self, joined, other):
        # keep the index name only when both sides agree
        name = self.name if self.name == other.name else None
        return UInt64Index(joined, name=name)

    @classmethod
    def _assert_safe_casting(cls, data, subarr):
        """
        Ensure incoming data can be represented as uints.
        """
        if not issubclass(data.dtype.type, np.unsignedinteger):
            if not np.array_equal(data, subarr):
                raise TypeError('Unsafe NumPy casting, you must '
                                'explicitly cast')
# Attach the shared arithmetic and logical methods to UInt64Index.
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()

# Substitution values for Float64Index's class docstring.
_float64_descr_args = dict(
    klass='Float64Index',
    dtype='float64',
    ltype='float',
    extra=''
)
class Float64Index(NumericIndex):
    __doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args

    _typ = 'float64index'
    _engine_type = libindex.Float64Engine
    _arrmap = libalgos.arrmap_float64
    # C-level join/indexer routines specialised for float64
    _left_indexer_unique = libjoin.left_join_indexer_unique_float64
    _left_indexer = libjoin.left_join_indexer_float64
    _inner_indexer = libjoin.inner_join_indexer_float64
    _outer_indexer = libjoin.outer_join_indexer_float64
    _default_dtype = np.float64

    @property
    def inferred_type(self):
        return 'floating'

    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True):
        dtype = pandas_dtype(dtype)
        if is_float_dtype(dtype):
            values = self._values.astype(dtype, copy=copy)
        elif is_integer_dtype(dtype):
            # integer dtypes cannot represent NaN
            if self.hasnans:
                raise ValueError('cannot convert float NaN to integer')
            values = self._values.astype(dtype, copy=copy)
        elif is_object_dtype(dtype):
            values = self._values.astype('object', copy=copy)
        else:
            raise TypeError('Setting %s dtype to anything other than '
                            'float64 or object is not supported' %
                            self.__class__)
        return Index(values, name=self.name, dtype=dtype)

    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

        if kind == 'iloc':
            return self._validate_indexer('positional', key, kind)

        return key

    @Appender(_index_shared_docs['_convert_slice_indexer'])
    def _convert_slice_indexer(self, key, kind=None):
        # if we are not a slice, then we are done
        if not isinstance(key, slice):
            return key

        if kind == 'iloc':
            return super(Float64Index, self)._convert_slice_indexer(key,
                                                                    kind=kind)

        # translate to locations
        return self.slice_indexer(key.start, key.stop, key.step, kind=kind)

    def _format_native_types(self, na_rep='', float_format=None, decimal='.',
                             quoting=None, **kwargs):
        from pandas.io.formats.format import FloatArrayFormatter
        formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
                                        float_format=float_format,
                                        decimal=decimal, quoting=quoting,
                                        fixed_width=False)
        return formatter.get_result_as_array()

    def get_value(self, series, key):
        """ we always want to get an index value, never a value """
        if not is_scalar(key):
            raise InvalidIndexError

        k = _values_from_object(key)
        loc = self.get_loc(k)
        new_values = _values_from_object(series)[loc]

        return new_values

    def equals(self, other):
        """
        Determines if two Index objects contain the same elements.
        """
        if self is other:
            return True

        if not isinstance(other, Index):
            return False

        # need to compare nans locations and make sure that they are the same
        # since nans don't compare equal this is a bit tricky
        try:
            if not isinstance(other, Float64Index):
                other = self._constructor(other)
            if (not is_dtype_equal(self.dtype, other.dtype) or
                    self.shape != other.shape):
                return False
            left, right = self._values, other._values
            return ((left == right) | (self._isnan & other._isnan)).all()
        except (TypeError, ValueError):
            return False

    def __contains__(self, other):
        if super(Float64Index, self).__contains__(other):
            return True

        try:
            # if other is a sequence this throws a ValueError
            return np.isnan(other) and self.hasnans
        except ValueError:
            try:
                return len(other) <= 1 and ibase._try_get_item(other) in self
            except TypeError:
                return False
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit / KeyboardInterrupt; only ordinary exceptions
        # should be treated as "not contained".
        except Exception:
            return False

    @Appender(_index_shared_docs['get_loc'])
    def get_loc(self, key, method=None, tolerance=None):
        try:
            if np.all(np.isnan(key)):
                nan_idxs = self._nan_idxs
                try:
                    return nan_idxs.item()
                except (ValueError, IndexError):
                    # should only need to catch ValueError here but on numpy
                    # 1.7 .item() can raise IndexError when NaNs are present
                    if not len(nan_idxs):
                        raise KeyError(key)
                    return nan_idxs
        except (TypeError, NotImplementedError):
            pass
        return super(Float64Index, self).get_loc(key, method=method,
                                                 tolerance=tolerance)

    @cache_readonly
    def is_unique(self):
        # at most one NaN may be present for the index to stay unique
        return super(Float64Index, self).is_unique and self._nan_idxs.size < 2

    @Appender(Index.isin.__doc__)
    def isin(self, values, level=None):
        if level is not None:
            self._validate_index_level(level)
        return algorithms.isin(np.array(self), values)
# Attach the shared arithmetic methods; the logical reductions are
# explicitly disabled for float indexes.
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
| |
from __future__ import absolute_import
from builtins import str
import re
import sys
import traceback
from flask import current_app
from flask import request
import json
from requests.exceptions import Timeout, ConnectTimeout, ReadTimeout, ConnectionError
import timeout_decorator
import datetime
from .client import client
def do_ned_object_lookup(url, oname):
    """Look up a single object name in the NED Object Lookup service.

    :param url: NED object lookup endpoint
    :param oname: object name to resolve
    :return: parsed JSON response from NED on success, otherwise a dict
             with "Error" and "Error Info" keys describing the failure
    """
    # Prepare the payload and headers for the query
    payload = {
        "name": {"v": "{object}".format(object=oname)}
    }
    headers = {
        'User-Agent': 'ADS Object Service (Object Search)',
        'Content-type': 'application/json',
        'Accept': 'text/plain'
    }
    # Get timeout for request from the config (use 1 second if not found)
    TIMEOUT = current_app.config.get('OBJECTS_NED_TIMEOUT', 1)
    try:
        r = current_app.client.post(url, data=json.dumps(payload), headers=headers, timeout=TIMEOUT)
    # Also catch the generic requests.Timeout (consistent with
    # get_NED_refcodes); previously only the two subclasses were caught.
    except (ConnectTimeout, ReadTimeout, Timeout) as err:
        current_app.logger.info('NED request to %s timed out! Request took longer than %s second(s)'%(url, TIMEOUT))
        return {"Error": "Unable to get results!", "Error Info": "NED request timed out: {0}".format(str(err))}
    except Exception as err:
        current_app.logger.error("NED request to %s failed (%s)"%(url, err))
        return {"Error": "Unable to get results!", "Error Info": "NED request failed ({0})".format(err)}
    # Check if we got a 200 status code back
    if r.status_code != 200:
        current_app.logger.info('NED request to %s failed! Status code: %s'%(url, r.status_code))
        return {"Error": "Unable to get results!", "Error Info": "NED returned status %s" % r.status_code}
    # Return query results; guard against a non-JSON body so a malformed
    # upstream response produces the usual error dict instead of a 500
    try:
        return r.json()
    except ValueError as err:
        current_app.logger.error("NED response from %s was not valid JSON (%s)" % (url, err))
        return {"Error": "Unable to get results!", "Error Info": "NED returned invalid JSON ({0})".format(err)}
def get_ned_data(id_list, input_type):
    """Resolve NED identifiers or object names to canonical NED names.

    :param id_list: list of identifiers/object names; spaces are encoded
                    as underscores by the caller
    :param input_type: one of 'identifiers', 'objects' or 'simple'
    :return: dict with 'data' (input -> {'id', 'canonical'}) and 'skipped'
             (inputs NED could not resolve), or a dict with an "Error" key
             when the NED service fails or the input type is unknown
    """
    QUERY_URL = current_app.config.get('OBJECTS_NED_URL')
    current_app.logger.info('URL used to get NED data: %s'%QUERY_URL)
    results = {}
    results['data'] = {}
    results['skipped'] = []
    # Establish the NED query, based on the type of input
    if input_type in ['identifiers', 'objects']:
        for ident in id_list:
            # Since all spaces in the identifiers were replaced by underscores, we have to undo this
            odata = do_ned_object_lookup(QUERY_URL, ident.strip().replace('_',' '))
            if "Error" in odata:
                # NED query failed. This failure either means timeout or service problems
                # We return nothing for the entire query, with the proper error message
                return odata
            # Did we get a successful result back?
            statuscode = odata.get("StatusCode", 999)
            if statuscode == 100:
                # success
                resultcode = odata.get("ResultCode", 999)
                if resultcode == 3:
                    # Proper object name, known by NED
                    if input_type == 'identifiers':
                        results['data'][ident] = {'id': ident, 'canonical': odata['Preferred']['Name']}
                    else:
                        results['data'][ident] = {'id': odata['Preferred']['Name'].strip().replace(' ','_'), 'canonical': odata['Preferred']['Name']}
                elif resultcode in [0,1,2]:
                    # Unable to create usable results
                    results['skipped'].append(ident)
                    current_app.logger.info('NED returned result code {rcode} for object {object}'.format(rcode=resultcode, object=ident))
                    continue
                else:
                    # Unexpected result code!
                    results['skipped'].append(ident)
                    current_app.logger.info('Unexpected result code from NED! NED returned result code {rcode} for object {object}'.format(rcode=resultcode, object=ident))
                    continue
            else:
                # NED query was not successful
                results['skipped'].append(ident)
                current_app.logger.info('NED query failed! NED returned status code {rcode} for object {object}'.format(rcode=statuscode, object=ident))
                continue
    elif input_type == 'simple':
        # We just take the indexed NED identifier value and remove the
        # underscores. ('results' is already initialized above; the previous
        # redundant re-initialization here was removed.)
        for ident in id_list:
            results['data'][ident] = {'id': ident, 'canonical': ident.strip().replace('_',' ')}
    else:
        return {"Error": "Unable to get results!", "Error Info": "Unknown input type specified!"}
    return results
def ned_position_query(COORD, RADIUS):
    """Perform a NED cone search around a sky position.

    :param COORD: coordinate object supporting ``to_string('hmsdms')``
    :param RADIUS: angle object exposing a ``degree`` attribute
    :return: list of NED identifiers (spaces replaced by underscores),
             capped at OBJECTS_NED_MAX_NUMBER; on request failure a dict
             with an "Error" key is returned instead
    """
    RA, DEC = COORD.to_string('hmsdms').split()
    QUERY_URL = current_app.config.get('OBJECTS_NED_OBJSEARCH')
    TIMEOUT = current_app.config.get('OBJECTS_NED_TIMEOUT',1)
    MAX_RADIUS = float(current_app.config.get('OBJECTS_NED_MAX_RADIUS'))
    MAX_OBJECTS = int(current_app.config.get('OBJECTS_NED_MAX_NUMBER'))
    # set headers for query
    headers = {
        'User-Agent': 'ADS Object Service (Cone Search)',
        'Content-type': 'application/json',
        'Accept': 'text/plain'
    }
    # First set the default query parameters
    query_params = {
        'of':'ascii_bar',
        'search_type':'Near Position Search',
        'img_stamp':'NO',
        'list_limit':'5',
        'zv_breaker':'30000.0',
        'obj_sort':'Distance to search center',
        'out_equinox':'J2000.0',
        'out_csys':'Equatorial',
        'nmp_op':'ANY',
        'ot_include':'ANY',
        'z_unit':'z',
        'z_value1':'',
        'z_value2':'',
        'z_constraint':'Unconstrained',
        'corr_z':'1',
        'omegav':'0.73',
        'omegam':'0.27',
        'hconst':'73',
        'radius':'2',
        'in_equinox':'J2000.0',
        'in_csys':'Equatorial',
    }
    # Now add the position information
    query_params['lon'] = RA
    query_params['lat'] = DEC
    # NOTE(review): the original comment claimed NED wants the radius in
    # arcminutes, but the value passed is in degrees (RADIUS.degree) capped
    # at OBJECTS_NED_MAX_RADIUS -- confirm the unit against the NED
    # objsearch documentation before changing.
    query_params['radius'] = min(float(RADIUS.degree), MAX_RADIUS)
    # Do the query (also catch the generic requests.Timeout, consistent
    # with get_NED_refcodes)
    try:
        response = current_app.client.get(QUERY_URL, headers=headers, params=query_params, timeout=TIMEOUT)
    except (ConnectTimeout, ReadTimeout, Timeout) as err:
        current_app.logger.info('NED cone search to %s timed out! Request took longer than %s second(s)'%(QUERY_URL, TIMEOUT))
        return {"Error": "Unable to get results!", "Error Info": "NED cone search timed out: {0}".format(str(err))}
    except Exception as err:
        current_app.logger.error("NED cone search to %s failed (%s)"%(QUERY_URL, err))
        return {"Error": "Unable to get results!", "Error Info": "NED cone search failed ({0})".format(err)}
    data = response.text.split('\n')
    # Object names are in the second column of the bar-delimited rows
    nedids = [e.split('|')[1].strip().replace(' ','_') for e in data if e.find('|') > -1]
    # Drop the header row when present; list.remove raises ValueError when
    # the value is absent, so catch exactly that (was a bare except)
    try:
        nedids.remove('Object_Name')
    except ValueError:
        pass
    return nedids[:MAX_OBJECTS]
def get_NED_refcodes(obj_data):
    """Retrieve bibcodes for a set of object names via NED + Solr.

    Resolves each submitted object name to its canonical NED name, then
    performs one Solr query over ``nedid:`` identifiers (optionally
    filtered by year range, bibstems and refereed status).

    :param obj_data: dict with an 'objects' list and optional 'start_year',
                     'end_year', 'journals', 'refereed_status' keys
    :return: dict with 'data' (bibcodes) and 'ambiguous' (aliases for
             ambiguous names), or a dict with an "Error" key on failure
    """
    # NED endpoint to get data
    ned_url = current_app.config.get('OBJECTS_NED_URL')
    # Where we will store results
    result = {}
    # For ambiguous object names we will return lists of aliases
    result['ambiguous'] = []
    # Canonical object names returned from NED
    canonicals = []
    # Parameters for NED query
    payload = {}
    # Headers for request
    headers = {
        'User-Agent': 'ADS Object Service (Classic Object Search)',
        'Content-type': 'application/json',
        'Accept': 'text/plain'
    }
    # We're here, so the data submitted has an 'objects' attribute
    objects = obj_data.get('objects')
    # Let's just check to be sure that the list actually contains entries
    if len(objects) == 0:
        return {"Error": "Unable to get results!",
                "Error Info": "No object names provided"}
    # Get timeout for request from the config (use 1 second if not found);
    # loop-invariant, so read it once instead of per object
    TIMEOUT = current_app.config.get('OBJECTS_NED_TIMEOUT',1)
    # Now attempt to retrieve refcodes for each of the object names submitted
    for object_name in objects:
        # Payload per NED documentation: https://ned.ipac.caltech.edu/ui/Documents/ObjectLookup
        payload["name"] = {"v": "{0}".format(object_name)}
        # Query NED API to retrieve the canonical object names for the ones
        # provided (if known to NED)
        try:
            r = current_app.client.post(ned_url, data=json.dumps(payload), headers=headers, timeout=TIMEOUT)
        except (ConnectTimeout, ReadTimeout, Timeout) as err:
            current_app.logger.info('NED request to %s timed out! Request took longer than %s second(s)'%(ned_url, TIMEOUT))
            return {"Error": "Unable to get results!", "Error Info": "NED request timed out: {0}".format(str(err))}
        except Exception as err:
            current_app.logger.error("NED request to %s failed (%s)"%(ned_url, err))
            return {"Error": "Unable to get results!", "Error Info": "NED request failed ({0})".format(err)}
        # Check if we got a 200 status code back
        if r.status_code != 200:
            current_app.logger.info('NED request to %s failed! Status code: %s'%(ned_url, r.status_code))
            return {"Error": "Unable to get results!", "Error Info": "NED returned status %s" % r.status_code}
        # We got a proper response back with data
        ned_data = r.json()
        # We are not interested in these cases: either not a valid object name, or a known one, but there
        # is no entry in the NED database
        if ned_data['ResultCode'] in [0,2]:
            # No or no unique result
            continue
        # This is still a useless case, but we want to send some data back for potential use. These are
        # "ambiguous" cases, where there are a number of potential candidates. This info is sent back.
        elif ned_data['ResultCode'] == 1:
            result['ambiguous'].append({object_name:ned_data['Interpreted']['Aliases']})
        else:
            # We have a canonical name. Store it in the appropriate list, so that we can query Solr with
            # it and retrieve bibcodes
            canonicals.append(ned_data['Preferred']['Name'].strip())
    # We retrieve bibcodes with one Solr query, using "nedid:" (we use canonical object names as identifiers,
    # with spaces replaced by underscores)
    obj_list = " OR ".join(["nedid:%s" % a.replace(' ','_') for a in canonicals])
    q = '%s' % obj_list
    # Format the date range for filtering: year:YYYY-YYYY
    date_range = "{0}-{1}".format(obj_data.get('start_year', str(1800)), obj_data.get('end_year', str(datetime.datetime.now().year)))
    q += ' year:{0}'.format(date_range)
    # Did we get a bibstem filter?
    if 'journals' in obj_data:
        jrnl_list = " OR ".join(["%s" % a for a in obj_data['journals']])
        q += ' bibstem:({0})'.format(jrnl_list)
    # Do we want refereed publications only?
    if 'refereed_status' in obj_data:
        q += ' property:{0}'.format(obj_data['refereed_status'])
    # Get the information from Solr
    headers = {'X-Forwarded-Authorization': request.headers.get('Authorization')}
    params = {'wt': 'json', 'q': q, 'fl': 'bibcode',
              'rows': current_app.config.get('OBJECT_SOLR_MAX_HITS')}
    response = client().get(current_app.config.get('OBJECTS_SOLRQUERY_URL'), params=params,headers=headers)
    # See if our request was successful
    if response.status_code != 200:
        return {"Error": "Unable to get results!",
                "Error Info": response.text,
                "Status Code": response.status_code}
    # Retrieve the bibcodes from the data sent back. Throw an error if no bibcodes were found (should not happen)
    resp = response.json()
    # A missing/odd response structure raises KeyError or TypeError here;
    # catch exactly those (was a bare except)
    try:
        result['data'] = [d['bibcode'] for d in resp['response']['docs']]
        return result
    except (KeyError, TypeError):
        return {"Error": "Unable to get results!", "Error Info": "No bibcodes returned for query: {0}".format(q)}
| |
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import glob
import os
import time
import urllib2
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
# Configuration options for the libvirt volume drivers, registered under
# the 'libvirt' group.
volume_opts = [
    cfg.IntOpt('num_iscsi_scan_tries',
               default=5,
               help='Number of times to rescan iSCSI target to find volume'),
    cfg.IntOpt('num_iser_scan_tries',
               default=5,
               help='Number of times to rescan iSER target to find volume'),
    cfg.StrOpt('rbd_user',
               help='The RADOS client name for accessing rbd volumes'),
    cfg.StrOpt('rbd_secret_uuid',
               # FIX: the implicit string concatenation was missing the
               # separating space ("rbd_uservolumes")
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('nfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the NFS volume is mounted on the'
                    ' compute node'),
    cfg.StrOpt('nfs_mount_options',
               # FIX: typo "passedf" -> "passed"
               help='Mount options passed to the NFS client. See section '
                    'of the nfs man page for details'),
    cfg.IntOpt('num_aoe_discover_tries',
               default=3,
               help='Number of times to rediscover AoE target to find volume'),
    cfg.StrOpt('glusterfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the glusterfs volume is mounted on the '
                    'compute node'),
    cfg.BoolOpt('iscsi_use_multipath',
                default=False,
                help='Use multipath connection of the iSCSI volume'),
    cfg.BoolOpt('iser_use_multipath',
                default=False,
                help='Use multipath connection of the iSER volume'),
    cfg.StrOpt('scality_sofs_config',
               help='Path or URL to Scality SOFS configuration file'),
    cfg.StrOpt('scality_sofs_mount_point',
               default='$state_path/scality',
               help='Base dir where Scality SOFS shall be mounted'),
    cfg.ListOpt('qemu_allowed_storage_drivers',
                default=[],
                help='Protocols listed here will be accessed directly '
                     'from QEMU. Currently supported protocols: [gluster]')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
    """Base class for volume drivers."""
    def __init__(self, connection, is_block_dev):
        # connection: the libvirt driver instance owning this volume driver
        self.connection = connection
        self.is_block_dev = is_block_dev
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        Builds the common part of the libvirt disk configuration (driver,
        target, block-size tuning, QoS, access mode) from
        ``connection_info``/``disk_info``; subclasses fill in the
        source-specific fields.
        """
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.driver_name = libvirt_utils.pick_disk_driver_name(
            self.connection._get_hypervisor_version(),
            self.is_block_dev
        )
        conf.source_device = disk_info['type']
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')
        # Support for block size tuning
        data = {}
        if 'data' in connection_info:
            data = connection_info['data']
        if 'logical_block_size' in data:
            conf.logical_block_size = data['logical_block_size']
        if 'physical_block_size' in data:
            conf.physical_block_size = data['physical_block_size']
        # Extract rate_limit control parameters
        if 'qos_specs' in data and data['qos_specs']:
            tune_opts = ['total_bytes_sec', 'read_bytes_sec',
                         'write_bytes_sec', 'total_iops_sec',
                         'read_iops_sec', 'write_iops_sec']
            specs = data['qos_specs']
            if isinstance(specs, dict):
                # six.iteritems works on both Python 2 and 3
                # (dict.iteritems is Python 2 only); six is already
                # imported by this module
                for k, v in six.iteritems(specs):
                    if k in tune_opts:
                        new_key = 'disk_' + k
                        setattr(conf, new_key, v)
            else:
                LOG.warn(_LW('Unknown content in connection_info/'
                             'qos_specs: %s'), specs)
        # Extract access_mode control parameters
        if 'access_mode' in data and data['access_mode']:
            access_mode = data['access_mode']
            if access_mode in ('ro', 'rw'):
                conf.readonly = access_mode == 'ro'
            else:
                LOG.error(_LE('Unknown content in '
                              'connection_info/access_mode: %s'),
                          access_mode)
                raise exception.InvalidVolumeAccessMode(
                    access_mode=access_mode)
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        return self.get_config(connection_info, disk_info)
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
    """Class for volumes backed by local file."""
    def __init__(self, connection):
        super(LibvirtVolumeDriver, self).__init__(connection,
                                                  is_block_dev=True)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtVolumeDriver, self).get_config(connection_info,
                                                           disk_info)
        device_path = connection_info['data']['device_path']
        conf.source_type = "block"
        conf.source_path = device_path
        return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach fake volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtFakeVolumeDriver, self).__init__(connection,
                                                      is_block_dev=True)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtFakeVolumeDriver, self).get_config(
            connection_info, disk_info)
        # Everything about the source is a stand-in network volume
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Network volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtNetVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        Fills in the network source (protocol/name/hosts/ports) and, for
        rbd, the authentication parameters.
        """
        conf = super(LibvirtNetVolumeDriver,
                     self).get_config(connection_info, disk_info)
        netdisk_properties = connection_info['data']
        conf.source_type = "network"
        conf.source_protocol = connection_info['driver_volume_type']
        conf.source_name = netdisk_properties.get('name')
        conf.source_hosts = netdisk_properties.get('hosts', [])
        conf.source_ports = netdisk_properties.get('ports', [])
        auth_enabled = netdisk_properties.get('auth_enabled')
        # For rbd, a locally configured secret/user takes precedence over
        # what the volume service sent, and forces authentication on.
        if (conf.source_protocol == 'rbd' and
                CONF.libvirt.rbd_secret_uuid):
            conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
            auth_enabled = True  # Force authentication locally
            if CONF.libvirt.rbd_user:
                conf.auth_username = CONF.libvirt.rbd_user
        if auth_enabled:
            # Fall back to the credentials supplied by the volume service
            # for any value not set locally above.
            conf.auth_username = (conf.auth_username or
                                  netdisk_properties['auth_username'])
            conf.auth_secret_type = netdisk_properties['secret_type']
            conf.auth_secret_uuid = (conf.auth_secret_uuid or
                                     netdisk_properties['secret_uuid'])
        return conf
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach iSCSI volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtISCSIVolumeDriver, self).__init__(connection,
                                                       is_block_dev=True)
        self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
        self.use_multipath = CONF.libvirt.iscsi_use_multipath
    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        """Run iscsiadm against the target/portal from iscsi_properties."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' %
               {'command': iscsi_command, 'out': out, 'err': err})
        # NOTE(bpokorny): iscsi_command can contain passwords so we need to
        # sanitize the password in the message.
        LOG.debug(logging.mask_password(msg))
        return (out, err)
    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        """Update a single node property via 'iscsiadm --op update'."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
    def _get_target_portals_from_iscsiadm_output(self, output):
        # return both portals and iqns
        #
        # as we are parsing a command line utility, allow for the
        # possibility that additional debug data is spewed in the
        # stream, and only grab actual ip / iqn lines.
        targets = []
        for data in [line.split() for line in output.splitlines()]:
            if len(data) == 2 and data[1].startswith('iqn.'):
                targets.append(data)
        return targets
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtISCSIVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['host_device']
        return conf
    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name."""
        iscsi_properties = connection_info['data']
        if self.use_multipath:
            # multipath installed, discovering other targets if available
            # multipath should be configured on the nova-compute node,
            # in order to fit storage vendor
            out = self._run_iscsiadm_bare(['-m',
                                           'discovery',
                                           '-t',
                                           'sendtargets',
                                           '-p',
                                           iscsi_properties['target_portal']],
                                          check_exit_code=[0, 255])[0] \
                or ""
            for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
                props = iscsi_properties.copy()
                props['target_portal'] = ip
                props['target_iqn'] = iqn
                self._connect_to_iscsi_portal(props)
            self._rescan_iscsi()
        else:
            self._connect_to_iscsi_portal(iscsi_properties)
            # Detect new/resized LUNs for existing sessions
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
        host_device = self._get_host_device(iscsi_properties)
        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        disk_dev = disk_info['dev']
        while not os.path.exists(host_device):
            if tries >= self.num_scan_tries:
                raise exception.NovaException(_("iSCSI device not found at %s")
                                              % (host_device))
            LOG.warn(_LW("ISCSI volume not yet found at: %(disk_dev)s. "
                         "Will rescan & retry. Try number: %(tries)s"),
                     {'disk_dev': disk_dev, 'tries': tries})
            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
            tries = tries + 1
            # Quadratic backoff before re-checking
            if not os.path.exists(host_device):
                time.sleep(tries ** 2)
        if tries != 0:
            LOG.debug("Found iSCSI node %(disk_dev)s "
                      "(after %(tries)s rescans)",
                      {'disk_dev': disk_dev,
                       'tries': tries})
        if self.use_multipath:
            # we use the multipath device instead of the single path device
            self._rescan_multipath()
            multipath_device = self._get_multipath_device_name(host_device)
            if multipath_device is not None:
                host_device = multipath_device
        connection_info['data']['host_device'] = host_device
        return self.get_config(connection_info, disk_info)
    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, disk_dev):
        """Detach the volume from instance_name."""
        iscsi_properties = connection_info['data']
        host_device = self._get_host_device(iscsi_properties)
        multipath_device = None
        if self.use_multipath:
            multipath_device = self._get_multipath_device_name(host_device)
        super(LibvirtISCSIVolumeDriver,
              self).disconnect_volume(connection_info, disk_dev)
        if self.use_multipath and multipath_device:
            return self._disconnect_volume_multipath_iscsi(iscsi_properties,
                                                           multipath_device)
        # NOTE(vish): Only disconnect from the target if no luns from the
        #             target are in use.
        device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
                         (iscsi_properties['target_portal'],
                          iscsi_properties['target_iqn']))
        devices = self.connection._get_all_block_devices()
        devices = [dev for dev in devices if dev.startswith(device_prefix)]
        if not devices:
            self._disconnect_from_iscsi_portal(iscsi_properties)
        elif host_device not in devices:
            # Delete device if LUN is not in use by another instance
            self._delete_device(host_device)
    def _delete_device(self, device_path):
        """Ask the kernel to drop the SCSI device backing device_path."""
        device_name = os.path.basename(os.path.realpath(device_path))
        delete_control = '/sys/block/' + device_name + '/device/delete'
        if os.path.exists(delete_control):
            # Copy '1' from stdin to the device delete control file
            utils.execute('cp', '/dev/stdin', delete_control,
                          process_input='1', run_as_root=True)
        else:
            LOG.warn(_LW("Unable to delete volume device %s"), device_name)
    def _remove_multipath_device_descriptor(self, disk_descriptor):
        """Flush the multipath map for disk_descriptor (best effort)."""
        disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
        try:
            self._run_multipath(['-f', disk_descriptor],
                                check_exit_code=[0, 1])
        except processutils.ProcessExecutionError as exc:
            # Because not all cinder drivers need to remove the dev mapper,
            # here just logs a warning to avoid affecting those drivers in
            # exceptional cases.
            LOG.warn(_LW('Failed to remove multipath device descriptor '
                         '%(dev_mapper)s. Exception message: %(msg)s')
                     % {'dev_mapper': disk_descriptor,
                        'msg': exc.message})
    def _disconnect_volume_multipath_iscsi(self, iscsi_properties,
                                           multipath_device):
        """Tear down a multipath volume without breaking shared targets."""
        self._rescan_iscsi()
        self._rescan_multipath()
        block_devices = self.connection._get_all_block_devices()
        devices = []
        for dev in block_devices:
            if "/mapper/" in dev:
                devices.append(dev)
            else:
                mpdev = self._get_multipath_device_name(dev)
                if mpdev:
                    devices.append(mpdev)
        # Do a discovery to find all targets.
        # Targets for multiple paths for the same multipath device
        # may not be the same.
        out = self._run_iscsiadm_bare(['-m',
                                       'discovery',
                                       '-t',
                                       'sendtargets',
                                       '-p',
                                       iscsi_properties['target_portal']],
                                      check_exit_code=[0, 255])[0] \
            or ""
        ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
        if not devices:
            # disconnect if no other multipath devices
            self._disconnect_mpath(iscsi_properties, ips_iqns)
            return
        # Get a target for all other multipath devices
        other_iqns = [self._get_multipath_iqn(device)
                      for device in devices]
        # Get all the targets for the current multipath device
        current_iqns = [iqn for ip, iqn in ips_iqns]
        in_use = False
        for current in current_iqns:
            if current in other_iqns:
                in_use = True
                break
        # If no other multipath device attached has the same iqn
        # as the current device
        if not in_use:
            # disconnect if no other multipath devices with same iqn
            self._disconnect_mpath(iscsi_properties, ips_iqns)
            return
        elif multipath_device not in devices:
            # delete the devices associated w/ the unused multipath
            self._delete_mpath(iscsi_properties, multipath_device, ips_iqns)
        # else do not disconnect iscsi portals,
        # as they are used for other luns,
        # just remove multipath mapping device descriptor
        self._remove_multipath_device_descriptor(multipath_device)
        return
    def _connect_to_iscsi_portal(self, iscsi_properties):
        # NOTE(vish): If we are on the same host as nova volume, the
        #             discovery makes the target so we don't need to
        #             run --op new. Therefore, we check to see if the
        #             target exists, and if we get 255 (Not Found), then
        #             we run --op new. This will also happen if another
        #             volume is using the same target.
        try:
            self._run_iscsiadm(iscsi_properties, ())
        except processutils.ProcessExecutionError as exc:
            # iscsiadm returns 21 for "No records found" after version 2.0-871
            if exc.exit_code in [21, 255]:
                self._reconnect(iscsi_properties)
            else:
                raise
        if iscsi_properties.get('auth_method'):
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.authmethod",
                                  iscsi_properties['auth_method'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.username",
                                  iscsi_properties['auth_username'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.password",
                                  iscsi_properties['auth_password'])
        # duplicate logins crash iscsiadm after load,
        # so we scan active sessions to see if the node is logged in.
        out = self._run_iscsiadm_bare(["-m", "session"],
                                      run_as_root=True,
                                      check_exit_code=[0, 1, 21])[0] or ""
        portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
                   for p in out.splitlines() if p.startswith("tcp:")]
        stripped_portal = iscsi_properties['target_portal'].split(",")[0]
        if len(portals) == 0 or len([s for s in portals
                                     if stripped_portal ==
                                     s['portal'].split(",")[0]
                                     and
                                     s['iqn'] ==
                                     iscsi_properties['target_iqn']]
                                    ) == 0:
            try:
                self._run_iscsiadm(iscsi_properties,
                                   ("--login",),
                                   check_exit_code=[0, 255])
            except processutils.ProcessExecutionError as err:
                # as this might be one of many paths,
                # only set successful logins to startup automatically
                if err.exit_code in [15]:
                    self._iscsiadm_update(iscsi_properties,
                                          "node.startup",
                                          "automatic")
                    return
                else:
                    # FIX: previously any other login failure was silently
                    # swallowed and the node was still marked automatic;
                    # propagate the failure instead.
                    raise
            self._iscsiadm_update(iscsi_properties,
                                  "node.startup",
                                  "automatic")
    def _disconnect_from_iscsi_portal(self, iscsi_properties):
        """Log out of the portal and delete the node record."""
        self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
                              check_exit_code=[0, 21, 255])
        self._run_iscsiadm(iscsi_properties, ("--logout",),
                           check_exit_code=[0, 21, 255])
        self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
                           check_exit_code=[0, 21, 255])
    def _get_multipath_device_name(self, single_path_device):
        """Return /dev/mapper/<name> for a single path device, or None."""
        device = os.path.realpath(single_path_device)
        out = self._run_multipath(['-ll',
                                   device],
                                  check_exit_code=[0, 1])[0]
        mpath_line = [line for line in out.splitlines()
                      if "scsi_id" not in line]  # ignore udev errors
        if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
            return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
        return None
    def _get_iscsi_devices(self):
        """List the ip-* entries under /dev/disk/by-path."""
        try:
            devices = list(os.walk('/dev/disk/by-path'))[0][-1]
        except IndexError:
            return []
        return [entry for entry in devices if entry.startswith("ip-")]
    def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns):
        """Delete the single-path devices backing an unused multipath."""
        entries = self._get_iscsi_devices()
        # Loop through ips_iqns to construct all paths
        iqn_luns = []
        for ip, iqn in ips_iqns:
            iqn_lun = '%s-lun-%s' % (iqn,
                                     iscsi_properties.get('target_lun', 0))
            iqn_luns.append(iqn_lun)
        # (comprehension variable renamed: it used to shadow the loop
        # variable 'dev')
        for dev in ['/dev/disk/by-path/%s' % entry for entry in entries]:
            for iqn_lun in iqn_luns:
                if iqn_lun in dev:
                    self._delete_device(dev)
        self._rescan_multipath()
    def _disconnect_mpath(self, iscsi_properties, ips_iqns):
        """Log out of every portal participating in the multipath."""
        for ip, iqn in ips_iqns:
            props = iscsi_properties.copy()
            props['target_portal'] = ip
            props['target_iqn'] = iqn
            self._disconnect_from_iscsi_portal(props)
        self._rescan_multipath()
    def _get_multipath_iqn(self, multipath_device):
        """Return the iqn of the target backing multipath_device."""
        entries = self._get_iscsi_devices()
        for entry in entries:
            entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            entry_multipath = self._get_multipath_device_name(entry_real_path)
            if entry_multipath == multipath_device:
                return entry.split("iscsi-")[1].split("-lun")[0]
        return None
    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        """Run iscsiadm without the node/target/portal arguments."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)
    def _run_multipath(self, multipath_command, **kwargs):
        """Run the multipath CLI with the given arguments."""
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('multipath',
                                   *multipath_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("multipath %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': multipath_command, 'out': out, 'err': err})
        return (out, err)
    def _rescan_iscsi(self):
        """Rescan all iSCSI nodes and sessions for new/changed LUNs."""
        self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
        self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
    def _rescan_multipath(self):
        """Force multipathd to re-evaluate its maps."""
        self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
    def _get_host_device(self, iscsi_properties):
        """Compute the expected /dev/disk/by-path node for the LUN."""
        return ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
                (iscsi_properties['target_portal'],
                 iscsi_properties['target_iqn'],
                 iscsi_properties.get('target_lun', 0)))
    def _reconnect(self, iscsi_properties):
        """Create a fresh node record for the target."""
        self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
    """Driver to attach Network volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtISERVolumeDriver, self).__init__(connection)
        # Override the iSCSI settings with their iSER equivalents
        self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
        self.use_multipath = CONF.libvirt.iser_use_multipath
    def _get_multipath_iqn(self, multipath_device):
        # Same as the iSCSI version, but by-path entries contain "iser-"
        # instead of "iscsi-"
        entries = self._get_iscsi_devices()
        for entry in entries:
            entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            entry_multipath = self._get_multipath_device_name(entry_real_path)
            if entry_multipath == multipath_device:
                return entry.split("iser-")[1].split("-lun")[0]
        return None
    def _get_host_device(self, iser_properties):
        # Unlike the iSCSI base class, the path is located via glob since
        # the exact by-path name may differ for iSER.
        # NOTE(review): the sleep presumably gives udev time to create the
        # by-path symlink -- confirm before removing.
        time.sleep(1)
        host_device = None
        device = ("ip-%s-iscsi-%s-lun-%s" %
                  (iser_properties['target_portal'],
                   iser_properties['target_iqn'],
                   iser_properties.get('target_lun', 0)))
        look_for_device = glob.glob('/dev/disk/by-path/*%s' % device)
        if look_for_device:
            host_device = look_for_device[0]
        return host_device
    def _reconnect(self, iser_properties):
        # iSER requires the transport interface to be given explicitly
        self._run_iscsiadm(iser_properties,
                           ('--interface', 'iser', '--op', 'new'))
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for NFS."""
    def __init__(self, connection):
        """Create back-end to nfs."""
        super(LibvirtNFSVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtNFSVolumeDriver,
                     self).get_config(connection_info, disk_info)
        # The image lives at <mount_point_base>/<hash of export>/<name>
        path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                            utils.get_hash_str(connection_info['data']['export']))
        path = os.path.join(path, connection_info['data']['name'])
        conf.source_type = 'file'
        conf.source_path = path
        # Honor the volume's format when given; default to raw
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        options = connection_info['data'].get('options')
        self._ensure_mounted(connection_info['data']['export'], options)
        return self.get_config(connection_info, disk_info)
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(export))
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # Another instance may still use the share; that is expected
            # and only logged at debug level
            if ('device is busy' in exc.message or
                'target is busy' in exc.message):
                LOG.debug("The NFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
    def _ensure_mounted(self, nfs_export, options=None):
        """@type nfs_export: string
        @type options: string
        """
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(nfs_export))
        if not libvirt_utils.is_mounted(mount_path, nfs_export):
            self._mount_nfs(mount_path, nfs_export, options, ensure=True)
        return mount_path
    def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
        """Mount nfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        # Construct the NFS mount command.
        nfs_cmd = ['mount', '-t', 'nfs']
        if CONF.libvirt.nfs_mount_options is not None:
            nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
        if options is not None:
            nfs_cmd.extend(options.split(' '))
        nfs_cmd.extend([nfs_share, mount_path])
        try:
            utils.execute(*nfs_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True tolerate races where another volume on the
            # same export mounted it first
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_LW("%s is already mounted"), nfs_share)
            else:
                raise
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach AoE volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtAOEVolumeDriver,
              self).__init__(connection, is_block_dev=True)

    def _aoe_discover(self):
        """Call aoe-discover (aoe-tools) AoE Discover."""
        (out, err) = utils.execute('aoe-discover',
                                   run_as_root=True, check_exit_code=0)
        return (out, err)

    def _aoe_revalidate(self, aoedev):
        """Revalidate the LUN Geometry (When an AoE ID is reused)."""
        (out, err) = utils.execute('aoe-revalidate', aoedev,
                                   run_as_root=True, check_exit_code=0)
        return (out, err)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtAOEVolumeDriver,
                     self).get_config(connection_info, disk_info)
        shelf = connection_info['data']['target_shelf']
        lun = connection_info['data']['target_lun']
        # AoE devices are exposed as /dev/etherd/e<shelf>.<lun>.
        aoedev = 'e%s.%s' % (shelf, lun)
        aoedevpath = '/dev/etherd/%s' % (aoedev)
        conf.source_type = "block"
        conf.source_path = aoedevpath
        return conf

    def connect_volume(self, connection_info, mount_device):
        """Discover (or revalidate) the AoE device and wait for it.

        :param connection_info: dict whose 'data' carries 'target_shelf'
            and 'target_lun'.
        :param mount_device: device name, used for config generation.
        :returns: libvirt config from get_config().
        :raises: exception.NovaException after
            CONF.libvirt.num_aoe_discover_tries failed rediscoveries.
        """
        shelf = connection_info['data']['target_shelf']
        lun = connection_info['data']['target_lun']
        aoedev = 'e%s.%s' % (shelf, lun)
        aoedevpath = '/dev/etherd/%s' % (aoedev)

        if os.path.exists(aoedevpath):
            # NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
            self._aoe_revalidate(aoedev)
        else:
            # NOTE(jbr_): If aoedevpath does not exist, do a discover.
            self._aoe_discover()

        # NOTE(jbr_): Device path is not always present immediately
        def _wait_for_device_discovery(aoedevpath, mount_device):
            # Retry state lives on self so it survives across the
            # periodic invocations of this closure by the looping call.
            tries = self.tries
            if os.path.exists(aoedevpath):
                raise loopingcall.LoopingCallDone()

            if self.tries >= CONF.libvirt.num_aoe_discover_tries:
                raise exception.NovaException(_("AoE device not found at %s") %
                                              (aoedevpath))
            LOG.warn(_LW("AoE volume not yet found at: %(aoedevpath)s. "
                         "Try number: %(tries)s"),
                     {'aoedevpath': aoedevpath, 'tries': tries})

            self._aoe_discover()
            self.tries = self.tries + 1

        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, aoedevpath, mount_device)
        timer.start(interval=2).wait()

        tries = self.tries
        if tries != 0:
            LOG.debug("Found AoE device %(aoedevpath)s "
                      "(after %(tries)s rediscover)",
                      {'aoedevpath': aoedevpath,
                       'tries': tries})

        return self.get_config(connection_info, mount_device)
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for GlusterFS."""

    def __init__(self, connection):
        """Create back-end to glusterfs."""
        super(LibvirtGlusterfsVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtGlusterfsVolumeDriver,
                     self).get_config(connection_info, disk_info)

        data = connection_info['data']

        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # qemu talks to gluster directly over the network; the export
            # string is split as '<host>:/<volume>'.
            vol_name = data['export'].split('/')[1]
            source_host = data['export'].split('/')[0][:-1]

            conf.source_ports = ['24007']
            conf.source_type = 'network'
            conf.source_protocol = 'gluster'
            conf.source_hosts = [source_host]
            conf.source_name = '%s/%s' % (vol_name, data['name'])
        else:
            # Fall back to a FUSE mount and expose the volume as a file.
            path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                utils.get_hash_str(data['export']))
            path = os.path.join(path, data['name'])

            conf.source_type = 'file'
            conf.source_path = path

        conf.driver_format = connection_info['data'].get('format', 'raw')

        return conf

    def connect_volume(self, connection_info, mount_device):
        data = connection_info['data']
        # Only the FUSE path needs an actual mount; the network path is
        # handled entirely by qemu.
        if 'gluster' not in CONF.libvirt.qemu_allowed_storage_drivers:
            self._ensure_mounted(data['export'], data.get('options'))
        return self.get_config(connection_info, mount_device)

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            return

        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(export))

        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # NOTE(review): exc.message is a Python 2-only attribute.
            if 'target is busy' in exc.message:
                LOG.debug("The GlusterFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the GlusterFS share %s"),
                              export)

    def _ensure_mounted(self, glusterfs_export, options=None):
        """@type glusterfs_export: string
        @type options: string
        """
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(glusterfs_export))
        if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
            self._mount_glusterfs(mount_path, glusterfs_export,
                                  options, ensure=True)
        return mount_path

    def _mount_glusterfs(self, mount_path, glusterfs_share,
                         options=None, ensure=False):
        """Mount glusterfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)

        gluster_cmd = ['mount', '-t', 'glusterfs']
        if options is not None:
            gluster_cmd.extend(options.split(' '))
        gluster_cmd.extend([glusterfs_share, mount_path])

        try:
            utils.execute(*gluster_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True an already-mounted share is not an error.
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_LW("%s is already mounted"), glusterfs_share)
            else:
                raise
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Fibre Channel Network volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtFibreChannelVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def _get_pci_num(self, hba):
        """Return the PCI address element from an HBA's device path.

        Returns the path component immediately before the first 'host*'
        entry, or None if it cannot be determined.
        """
        # NOTE(walter-boring)
        # device path is in format of
        # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
        # sometimes an extra entry exists before the host2 value
        # we always want the value prior to the host2 value
        pci_num = None
        if hba is not None:
            if "device_path" in hba:
                index = 0
                device_path = hba['device_path'].split('/')
                for value in device_path:
                    if value.startswith('host'):
                        break
                    index = index + 1

                if index > 0:
                    pci_num = device_path[index - 1]

        return pci_num

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtFibreChannelVolumeDriver,
                     self).get_config(connection_info, disk_info)

        # connect_volume() is expected to have filled in device_path.
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf

    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name."""
        fc_properties = connection_info['data']
        mount_device = disk_info["dev"]

        ports = fc_properties['target_wwn']
        wwns = []
        # we support a list of wwns or a single wwn
        if isinstance(ports, list):
            for wwn in ports:
                wwns.append(str(wwn))
        elif isinstance(ports, six.string_types):
            wwns.append(str(ports))

        # We need to look for wwns on every hba
        # because we don't know ahead of time
        # where they will show up.
        hbas = libvirt_utils.get_fc_hbas_info()
        host_devices = []
        for hba in hbas:
            pci_num = self._get_pci_num(hba)
            if pci_num is not None:
                for wwn in wwns:
                    target_wwn = "0x%s" % wwn.lower()
                    host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                                   (pci_num,
                                    target_wwn,
                                    fc_properties.get('target_lun', 0)))
                    host_devices.append(host_device)

        if len(host_devices) == 0:
            # this is empty because we don't have any FC HBAs
            msg = _("We are unable to locate any Fibre Channel devices")
            raise exception.NovaException(msg)

        # The /dev/disk/by-path/... node is not always present immediately
        # We only need to find the first device.  Once we see the first device
        # multipath will have any others.
        def _wait_for_device_discovery(host_devices, mount_device):
            # Retry/result state is kept on self so it is shared between
            # the looping-call invocations and the code after wait().
            tries = self.tries
            for device in host_devices:
                LOG.debug("Looking for Fibre Channel dev %(device)s",
                          {'device': device})
                if os.path.exists(device):
                    self.host_device = device
                    # get the /dev/sdX device.  This is used
                    # to find the multipath device.
                    self.device_name = os.path.realpath(device)
                    raise loopingcall.LoopingCallDone()

            # NOTE(review): reuses the iSCSI scan-tries limit rather than
            # a dedicated FC option.
            if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
                msg = _("Fibre Channel device not found.")
                raise exception.NovaException(msg)

            LOG.warn(_LW("Fibre volume not yet found at: %(mount_device)s. "
                         "Will rescan & retry.  Try number: %(tries)s"),
                     {'mount_device': mount_device, 'tries': tries})

            linuxscsi.rescan_hosts(hbas)
            self.tries = self.tries + 1

        self.host_device = None
        self.device_name = None
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, host_devices, mount_device)
        timer.start(interval=2).wait()

        tries = self.tries
        if self.host_device is not None and self.device_name is not None:
            LOG.debug("Found Fibre Channel volume %(mount_device)s "
                      "(after %(tries)s rescans)",
                      {'mount_device': mount_device,
                       'tries': tries})

        # see if the new drive is part of a multipath
        # device.  If so, we'll use the multipath device.
        mdev_info = linuxscsi.find_multipath_device(self.device_name)
        if mdev_info is not None:
            LOG.debug("Multipath device discovered %(device)s",
                      {'device': mdev_info['device']})
            device_path = mdev_info['device']
            connection_info['data']['device_path'] = device_path
            connection_info['data']['devices'] = mdev_info['devices']
            connection_info['data']['multipath_id'] = mdev_info['id']
        else:
            # we didn't find a multipath device.
            # so we assume the kernel only sees 1 device
            device_path = self.host_device
            device_info = linuxscsi.get_device_info(self.device_name)
            connection_info['data']['device_path'] = device_path
            connection_info['data']['devices'] = [device_info]

        return self.get_config(connection_info, disk_info)

    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, mount_device):
        """Detach the volume from instance_name."""
        super(LibvirtFibreChannelVolumeDriver,
              self).disconnect_volume(connection_info, mount_device)

        # If this is a multipath device, we need to search again
        # and make sure we remove all the devices. Some of them
        # might not have shown up at attach time.
        if 'multipath_id' in connection_info['data']:
            multipath_id = connection_info['data']['multipath_id']
            mdev_info = linuxscsi.find_multipath_device(multipath_id)
            devices = mdev_info['devices']
            LOG.debug("devices to remove = %s", devices)
        else:
            # only needed when multipath-tools work improperly
            devices = connection_info['data'].get('devices', [])
            LOG.warn(_LW("multipath-tools probably work improperly. "
                         "devices to remove = %s.") % devices)

        # There may have been more than 1 device mounted
        # by the kernel for this volume.  We have to remove
        # all of them
        for device in devices:
            linuxscsi.remove_device(device)
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
    """Scality SOFS Nova driver. Provide hypervisors with access
    to sparse files on SOFS.
    """

    def __init__(self, connection):
        """Create back-end to SOFS and check connection."""
        super(LibvirtScalityVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtScalityVolumeDriver,
                     self).get_config(connection_info, disk_info)
        path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
                            connection_info['data']['sofs_path'])
        conf.source_type = 'file'
        conf.source_path = path

        # The default driver cache policy is 'none', and this causes
        # qemu/kvm to open the volume file with O_DIRECT, which is
        # rejected by FUSE (on kernels older than 3.3). Scality SOFS
        # is FUSE based, so we must provide a more sensible default.
        conf.driver_cache = 'writethrough'

        return conf

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        self._check_prerequisites()
        self._mount_sofs()
        return self.get_config(connection_info, disk_info)

    def _check_prerequisites(self):
        """Sanity checks before attempting to mount SOFS."""
        # config is mandatory
        config = CONF.libvirt.scality_sofs_config
        if not config:
            msg = _LW("Value required for 'scality_sofs_config'")
            LOG.warn(msg)
            raise exception.NovaException(msg)

        # config can be a file path or a URL, check it
        if urlparse.urlparse(config).scheme == '':
            # turn local path into URL
            config = 'file://%s' % config
        try:
            # NOTE(review): Python 2 urllib2 API; the 5s probe only
            # checks that the config is reachable, the body is discarded.
            urllib2.urlopen(config, timeout=5).close()
        except urllib2.URLError as e:
            msg = _LW("Cannot access 'scality_sofs_config': %s") % e
            LOG.warn(msg)
            raise exception.NovaException(msg)

        # mount.sofs must be installed
        if not os.access('/sbin/mount.sofs', os.X_OK):
            msg = _LW("Cannot execute /sbin/mount.sofs")
            LOG.warn(msg)
            raise exception.NovaException(msg)

    def _mount_sofs(self):
        """Mount SOFS if it is not already mounted.

        The presence of the 'sys' directory under the mount point is
        used as the signal that SOFS is mounted.
        """
        config = CONF.libvirt.scality_sofs_config
        mount_path = CONF.libvirt.scality_sofs_mount_point
        sysdir = os.path.join(mount_path, 'sys')

        if not os.path.isdir(mount_path):
            utils.execute('mkdir', '-p', mount_path)
        if not os.path.isdir(sysdir):
            utils.execute('mount', '-t', 'sofs', config, mount_path,
                          run_as_root=True)
        if not os.path.isdir(sysdir):
            msg = _LW("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warn(msg)
            raise exception.NovaException(msg)
| |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.utils.metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.utils import metrics
import tensorflow as tf
class MetricsTest(tf.test.TestCase):
  """Unit tests for the metric functions in tensor2tensor.utils.metrics."""

  def testAccuracyMetric(self):
    """padded_accuracy matches a NumPy elementwise-equality mean."""
    predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
    targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
    expected = np.mean((predictions == targets).astype(float))
    with self.test_session() as session:
      scores, _ = metrics.padded_accuracy(
          tf.one_hot(predictions, depth=5, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      actual = session.run(a)
    self.assertAlmostEqual(actual, expected)

  def testAccuracyTopKMetric(self):
    """Top-1 equals plain accuracy; top-7 is perfect since depth is 5."""
    predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
    targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
    expected = np.mean((predictions == targets).astype(float))
    with self.test_session() as session:
      predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
      scores1, _ = metrics.padded_accuracy_topk(
          predicted, tf.constant(targets, dtype=tf.int32), k=1)
      scores2, _ = metrics.padded_accuracy_topk(
          predicted, tf.constant(targets, dtype=tf.int32), k=7)
      a1 = tf.reduce_mean(scores1)
      a2 = tf.reduce_mean(scores2)
      session.run(tf.global_variables_initializer())
      actual1, actual2 = session.run([a1, a2])
    self.assertAlmostEqual(actual1, expected)
    self.assertAlmostEqual(actual2, 1.0)

  def testSequenceAccuracyMetric(self):
    """A sequence counts as correct only if every position matches."""
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    # Product over the sequence axes: 1.0 iff all positions agree.
    expected = np.mean(
        np.prod((predictions == targets).astype(float), axis=(1, 2)))
    with self.test_session() as session:
      scores, _ = metrics.padded_sequence_accuracy(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      actual = session.run(a)
    self.assertEqual(actual, expected)

  def testRMSEMetric(self):
    """padded_rmse of constant predictions/targets equals |diff|."""
    predictions = np.full((10, 1), 1)  # All 1's
    targets = np.full((10, 1), 3)  # All 3's
    expected = np.sqrt(np.mean((predictions - targets)**2))  # RMSE = 2.0
    with self.test_session() as session:
      rmse, _ = metrics.padded_rmse(
          tf.constant(predictions, dtype=tf.int32),
          tf.constant(targets, dtype=tf.int32))
      session.run(tf.global_variables_initializer())
      actual = session.run(rmse)
    self.assertEqual(actual, expected)

  def testSequenceEditDistanceMetric(self):
    """Edit distance is summed errors over summed target length."""
    predictions = np.array([[3, 4, 5, 1, 0, 0],
                            [2, 1, 3, 4, 0, 0],
                            [2, 1, 3, 4, 0, 0]])
    # Targets are just a bit different:
    #  - first sequence has a different prediction
    #  - second sequence has a different prediction and one extra step
    #  - third sequence is identical
    targets = np.array([[5, 4, 5, 1, 0, 0],
                        [2, 5, 3, 4, 1, 0],
                        [2, 1, 3, 4, 0, 0]])
    # Reshape to match expected input format by metric fns.
    predictions = np.reshape(predictions, [3, 6, 1, 1])
    targets = np.reshape(targets, [3, 6, 1, 1])
    with self.test_session() as session:
      scores, weight = metrics.sequence_edit_distance(
          tf.one_hot(predictions, depth=6, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      session.run(tf.global_variables_initializer())
      actual_scores, actual_weight = session.run([scores, weight])
    # 3 edits over a total target length of 13.
    self.assertAlmostEqual(actual_scores, 3.0 / 13)
    self.assertEqual(actual_weight, 13)

  def testNegativeLogPerplexity(self):
    """padded_neg_log_perplexity runs and returns a scalar."""
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    with self.test_session() as session:
      scores, _ = metrics.padded_neg_log_perplexity(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      actual = session.run(a)
    self.assertEqual(actual.shape, ())

  def testSigmoidAccuracyOneHot(self):
    """Two of four rows are classified correctly -> accuracy 0.5."""
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [-1., 1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [1, 0],
        [1, 0],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)
    with self.test_session() as session:
      score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertEqual(s, 0.5)

  def testSigmoidPrecisionOneHot(self):
    """One true positive out of four positive predictions -> 0.25."""
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [0, 1],
        [0, 1],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)
    with self.test_session() as session:
      score, _ = metrics.sigmoid_precision_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertEqual(s, 0.25)

  def testSigmoidRecallOneHot(self):
    """One true positive out of four actual positives -> 0.25."""
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [0, 1],
        [0, 1],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)
    with self.test_session() as session:
      score, _ = metrics.sigmoid_recall_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertEqual(s, 0.25)

  def testSigmoidCrossEntropyOneHot(self):
    """Mean sigmoid cross-entropy for this fixture is ~0.688."""
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [1, 0],
        [0, 0],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)
    with self.test_session() as session:
      score, _ = metrics.sigmoid_cross_entropy_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertAlmostEqual(s, 0.688, places=3)

  def testRocAuc(self):
    """ROC AUC for this fixture is 0.75."""
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [1],
        [0],
        [1],
        [0]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)
    with self.test_session() as session:
      score, _ = metrics.roc_auc(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertAlmostEqual(s, 0.750, places=3)

  def testMultilabelMatch3(self):
    """multilabel_accuracy_match3 against a NumPy reference computation."""
    predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
    targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
    weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
    targets *= weights

    predictions_repeat = np.repeat(predictions, 10, axis=1)
    expected = (predictions_repeat == targets).astype(float)
    expected = np.sum(expected, axis=(1, 2, 3))
    # Cap the per-example score at 3 matches.
    expected = np.minimum(expected / 3.0, 1.)
    expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
    with self.test_session() as session:
      scores, weights_ = metrics.multilabel_accuracy_match3(
          tf.one_hot(predictions, depth=5, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a, a_op = tf.metrics.mean(scores, weights_)
      session.run(tf.local_variables_initializer())
      session.run(tf.global_variables_initializer())
      _ = session.run(a_op)
      actual = session.run(a)
    self.assertAlmostEqual(actual, expected, places=6)
if __name__ == '__main__':
  # Run all test cases in this module via the TensorFlow test runner.
  tf.test.main()
| |
from __future__ import print_function
import os
import time
import json
import argparse
import densenet
import numpy as np
import keras.backend as K
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.utils import np_utils
def run_cifar10(batch_size,
                nb_epoch,
                depth,
                nb_dense_block,
                nb_filter,
                growth_rate,
                dropout_rate,
                learning_rate,
                weight_decay,
                plot_architecture):
    """ Run CIFAR10 experiments

    :param batch_size: int -- batch size
    :param nb_epoch: int -- number of training epochs
    :param depth: int -- network depth
    :param nb_dense_block: int -- number of dense blocks
    :param nb_filter: int -- initial number of conv filter
    :param growth_rate: int -- number of new filters added by conv layers
    :param dropout_rate: float -- dropout rate
    :param learning_rate: float -- learning rate
    :param weight_decay: float -- weight decay
    :param plot_architecture: bool -- whether to plot network architecture
    """

    ###################
    # Data processing #
    ###################

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    nb_classes = len(np.unique(y_train))
    img_dim = X_train.shape[1:]

    if K.image_dim_ordering() == "th":
        n_channels = X_train.shape[1]
    else:
        n_channels = X_train.shape[-1]

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    # Normalisation
    # NOTE(review): per-channel statistics are computed over train+test
    # combined; kept as-is to preserve the original experiment setup.
    X = np.vstack((X_train, X_test))
    # 2 cases depending on the image ordering
    if K.image_dim_ordering() == "th":
        for i in range(n_channels):
            mean = np.mean(X[:, i, :, :])
            std = np.std(X[:, i, :, :])
            X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
            X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std

    elif K.image_dim_ordering() == "tf":
        for i in range(n_channels):
            mean = np.mean(X[:, :, :, i])
            std = np.std(X[:, :, :, i])
            X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
            X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std

    ###################
    # Construct model #
    ###################

    model = densenet.DenseNet(nb_classes,
                              img_dim,
                              depth,
                              nb_dense_block,
                              growth_rate,
                              nb_filter,
                              dropout_rate=dropout_rate,
                              weight_decay=weight_decay)
    # Model output
    model.summary()

    # Build optimizer
    opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=["accuracy"])

    if plot_architecture:
        from keras.utils.visualize_util import plot
        plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)

    ####################
    # Network training #
    ####################

    print("Training")

    list_train_loss = []
    list_test_loss = []
    list_learning_rate = []

    for e in range(nb_epoch):

        # Step-wise learning-rate schedule: divide by 10 at 50% and by
        # 100 at 75% of the total number of epochs.
        if e == int(0.5 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))

        if e == int(0.75 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))

        split_size = batch_size
        # BUG FIX: use floor division. Under Python 3, "/" produces a
        # float and np.array_split() requires an integer section count.
        # max(1, ...) also guards against batch_size > dataset size.
        num_splits = max(1, X_train.shape[0] // split_size)
        arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)

        l_train_loss = []
        start = time.time()

        for batch_idx in arr_splits:

            X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
            train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)

            l_train_loss.append([train_logloss, train_acc])

        test_logloss, test_acc = model.evaluate(X_test,
                                                Y_test,
                                                verbose=0,
                                                batch_size=64)
        list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
        list_test_loss.append([test_logloss, test_acc])
        list_learning_rate.append(float(K.get_value(model.optimizer.lr)))
        # to convert numpy array to json serializable
        print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))

    d_log = {}
    d_log["batch_size"] = batch_size
    d_log["nb_epoch"] = nb_epoch
    d_log["optimizer"] = opt.get_config()
    d_log["train_loss"] = list_train_loss
    d_log["test_loss"] = list_test_loss
    d_log["learning_rate"] = list_learning_rate

    json_file = os.path.join('./log/experiment_log_cifar10.json')
    with open(json_file, 'w') as fp:
        json.dump(d_log, fp, indent=4, sort_keys=True)
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Run CIFAR10 experiment')
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size')
    parser.add_argument('--nb_epoch', default=30, type=int,
                        help='Number of epochs')
    parser.add_argument('--depth', type=int, default=7,
                        help='Network depth')
    parser.add_argument('--nb_dense_block', type=int, default=1,
                        help='Number of dense blocks')
    parser.add_argument('--nb_filter', type=int, default=16,
                        help='Initial number of conv filters')
    parser.add_argument('--growth_rate', type=int, default=12,
                        help='Number of new filters added by conv layers')
    parser.add_argument('--dropout_rate', type=float, default=0.2,
                        help='Dropout rate')
    parser.add_argument('--learning_rate', type=float, default=1E-3,
                        help='Learning rate')
    parser.add_argument('--weight_decay', type=float, default=1E-4,
                        help='L2 regularization on weights')
    # NOTE(review): type=bool is an argparse pitfall -- any non-empty
    # string (including "False") parses as True. Kept for backward
    # compatibility with existing invocations.
    parser.add_argument('--plot_architecture', type=bool, default=False,
                        help='Save a plot of the network architecture')

    args = parser.parse_args()

    print("Network configuration:")
    # FIX: reuse the already-parsed namespace instead of calling
    # parser.parse_args() a second time.
    for name, value in args._get_kwargs():
        print(name, value)

    # Make sure the output directories exist before training starts.
    list_dir = ["./log", "./figures"]
    for d in list_dir:
        if not os.path.exists(d):
            os.makedirs(d)

    run_cifar10(args.batch_size,
                args.nb_epoch,
                args.depth,
                args.nb_dense_block,
                args.nb_filter,
                args.growth_rate,
                args.dropout_rate,
                args.learning_rate,
                args.weight_decay,
                args.plot_architecture)
| |
import json
import logging
import os
import subprocess
import textwrap
import uuid
import boto3
import passlib.hash
import pytest
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
# dcos_installer requires BOOTSTRAP_ID to be set; the tests below only
# need some value to be present, not a real bootstrap id.
os.environ["BOOTSTRAP_ID"] = "12345"
def test_password_hash():
    """Verify that --hash-password emits a hash that matches the password."""
    password = 'DcosTestingPassword!@#'
    # The installer writes the hash to STDOUT only.
    raw_output = subprocess.check_output(
        ['dcos_installer', '--hash-password', password])
    print(raw_output)
    hashed = raw_output.decode('ascii').strip('\n')
    assert passlib.hash.sha512_crypt.verify(password, hashed), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
    """Test that --set-superuser-hash works"""
    with tmpdir.as_cwd():
        tmpdir.join('genconf').ensure(dir=True)

        # TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml

        # Setting in a non-empty config.yaml which has no password set
        make_default_config_if_needed('genconf/config.yaml')
        assert 'superuser_password_hash' not in Config('genconf/config.yaml').config

        # Set the password
        create_fake_build_artifacts(tmpdir)
        subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))

        # Check that config.yaml has the password set (stored as a
        # sha512_crypt hash, never in plain text).
        config = Config('genconf/config.yaml')
        assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
    """--generate-node-upgrade-script prints a script URL and requires a version."""
    upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
    create_config(upgrade_config, tmpdir)
    create_fake_build_artifacts(tmpdir)

    # Success path: the last line of output carries the script URL.
    output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
    assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
        .endswith("dcos_node_upgrade.sh")

    # Omitting the installed version must fail with an explanatory message.
    try:
        subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
    except subprocess.CalledProcessError as e:
        print(e.output)
        assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
    else:
        raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
    """--version output is JSON carrying the version and bootstrap variant."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
    raw = subprocess.check_output(['dcos_installer', '--version'])
    expected = {
        'version': '1.12-dev',
        'variant': 'some-variant'
    }
    assert json.loads(raw.decode()) == expected
def test_good_create_config_from_post(tmpdir):
    """
    Test that it creates the config
    """
    # Create a temp config
    workspace = tmpdir.strpath
    temp_config_path = workspace + '/config.yaml'
    make_default_config_if_needed(temp_config_path)

    temp_ip_detect_path = workspace + '/ip-detect'
    # FIX: write via a context manager so the file is closed (and its
    # contents flushed to disk) before backend.create_config_from_post()
    # reads it; the original left the handle open and unflushed.
    with open(temp_ip_detect_path, "w") as f:
        f.write("#/bin/bash foo")

    good_post_data = {
        "agent_list": ["10.0.0.2"],
        "master_list": ["10.0.0.1"],
        "cluster_name": "Good Test",
        "resolvers": ["4.4.4.4"],
        "ip_detect_filename": temp_ip_detect_path
    }
    # A valid post yields no validation messages.
    expected_good_messages = {}

    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        messages = backend.create_config_from_post(
            post_data=good_post_data,
            config_path=temp_config_path)

    assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
    """Invalid post data is rejected with one message per bad field."""
    temp_config_path = tmpdir.strpath + '/config.yaml'
    make_default_config_if_needed(temp_config_path)

    bad_post_data = {
        "agent_list": "foo",
        "master_list": ["foo"],
    }
    expected_bad_messages = {
        "agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
                      "one because of: Expecting value: line 1 column 1 (char 0)",
        "master_list": 'Invalid IPv4 addresses in list: foo',
    }
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        messages = backend.create_config_from_post(
            post_data=bad_post_data,
            config_path=temp_config_path)
    assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
    """Validating an untouched default config reports every missing key."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')

    # Create a temp config
    genconf_dir = tmpdir.join('genconf')
    genconf_dir.ensure(dir=True)
    temp_config_path = str(genconf_dir.join('config.yaml'))

    # Initialize with defaults
    make_default_config_if_needed(temp_config_path)
    create_fake_build_artifacts(tmpdir)

    expected_output = {
        'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
        'ssh_user': 'Must set ssh_user, no way to calculate value.',
        'master_list': 'Must set master_list, no way to calculate value.',
        'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
    }
    with tmpdir.as_cwd():
        assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
    """A freshly written default config loads with the documented defaults."""
    config_file = tmpdir.strpath + '/config.yaml'
    make_default_config_if_needed(config_file)
    defaults = {
        'cluster_name': 'DC/OS',
        'master_discovery': 'static',
        'exhibitor_storage_backend': 'static',
        'resolvers': ['8.8.8.8', '8.8.4.4'],
        'ssh_port': 22,
        'process_timeout': 10000,
        'bootstrap_url': 'file:///opt/dcos_install_tmp',
    }
    assert Config(config_file).config == defaults
def test_determine_config_type(tmpdir):
    """The default created config is classified as the 'minimal' type."""
    config_file = tmpdir.strpath + '/config.yaml'
    make_default_config_if_needed(config_file)
    result = backend.determine_config_type(config_path=config_file)
    assert result == {'message': '', 'type': 'minimal'}
def test_success():
    """success() reports the cluster URL and node counts, or HTTP 400 when empty."""
    config = to_config({
        'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
        'agent_list': ['10.0.0.3', '10.0.0.4'],
    })
    ok_out, ok_code = backend.success(config)
    assert ok_out == {
        "success": "http://10.0.0.1",
        "master_count": 3,
        "agent_count": 2,
    }
    assert ok_code == 200
    # Empty node lists mean no cluster: blank URL, zero counts, HTTP 400.
    config.update({'master_list': '', 'agent_list': ''})
    bad_out, bad_code = backend.success(config)
    assert bad_out == {"success": "", "master_count": 0, "agent_count": 0}
    assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
    """Unknown config keys posted by the user are stored verbatim."""
    config_file = tmpdir.strpath + '/config.yaml'
    key, value = 'fake_test_param_name', 'fake_test_param_value'
    make_default_config_if_needed(config_file)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        messages = backend.create_config_from_post(
            post_data={key: value},
            config_path=config_file)
    assert not messages, "unexpected validation error: {}".format(messages)
    assert Config(config_path=config_file)[key] == value
# Minimal but complete configuration used by the do_configure tests below.
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
    """do_configure exits 0 for a complete, valid configuration."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    create_config(simple_full_config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        result = backend.do_configure(config_path='genconf/config.yaml')
    assert result == 0
# AWS storage settings with fake credentials; upload is disabled so nothing
# actually talks to S3 during the test.
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(release_config_aws, tmpdir, monkeypatch):
    """do_aws_cf_configure exits 0 for the canned AWS base config."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    create_config(aws_base_config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        result = backend.do_aws_cf_configure()
    assert result == 0
@pytest.fixture
def valid_storage_config(release_config_aws):
    """Yield a config string pointing at a unique S3 prefix; clean up after.

    Uses the settings from dcos-release.config.yaml ['testing'] to create a
    new upload and then deletes everything under the generated prefix when
    the test is over.
    """
    s3_bucket_name = release_config_aws['bucket']
    # Unique prefix so concurrent test runs cannot collide.
    bucket_path = str(uuid.uuid4())
    yield """---
master_list:
- 127.0.0.1
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: {bucket_path}
aws_template_upload: true
""".format(
        # Consistency fix: reuse the name bound above instead of re-reading
        # release_config_aws['bucket'] a second time.
        bucket=s3_bucket_name,
        bucket_path=bucket_path)
    # Teardown: remove every object the test uploaded under the prefix.
    session = boto3.session.Session()
    s3 = session.resource('s3')
    s3_bucket = s3.Bucket(s3_bucket_name)
    for o in s3_bucket.objects.filter(Prefix=bucket_path):
        o.delete()
def test_do_aws_cf_configure_valid_storage_config(release_config_aws, valid_storage_config, tmpdir, monkeypatch):
    """A valid storage config configures cleanly end to end."""
    # TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
    # ended up with the correct region where the above testing bucket was created.
    exit_code = aws_cf_configure(valid_storage_config, tmpdir, monkeypatch)
    assert exit_code == 0
def test_override_aws_template_storage_region_name(release_config_aws, valid_storage_config, tmpdir, monkeypatch):
    """An explicit region override on top of a valid config is accepted."""
    region = os.environ['AWS_DEFAULT_REGION']
    augmented = valid_storage_config + '\naws_template_storage_region_name: {}'.format(region)
    assert aws_cf_configure(augmented, tmpdir, monkeypatch) == 0
def aws_cf_configure(config, tmpdir, monkeypatch):
    """Write *config* under tmpdir's genconf/ and run do_aws_cf_configure there.

    Returns the backend's integer exit status (0 on success).
    """
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    create_config(config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        return backend.do_aws_cf_configure()
def test_do_configure_valid_config_no_duplicate_logging(tmpdir, monkeypatch, caplog):
    """Each log message of a successful do_configure run appears exactly once."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    create_config(simple_full_config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        assert backend.do_configure(config_path='genconf/config.yaml') == 0
    # The message comes from gen.get_dcosconfig_source_target_and_templates().
    expected = 'Generating configuration files...'
    occurrences = [rec.message for rec in caplog.records if rec.message == expected]
    assert occurrences == [expected]
def test_do_configure_logs_validation_errors(tmpdir, monkeypatch, caplog):
    """Configuration validation failures are logged once, at ERROR level."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    # exhibitor_storage_backend is deliberately left out to force a failure.
    invalid_config = textwrap.dedent("""---
cluster_name: DC/OS
master_discovery: static
# Remove `exhibitor_storage_backend` from configuration
# exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
""")
    create_config(invalid_config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        assert backend.do_configure(config_path='genconf/config.yaml') == 1
    expected = (
        'exhibitor_storage_backend: Must set exhibitor_storage_backend, '
        'no way to calculate value.'
    )
    error_logs = [rec for rec in caplog.records if rec.message == expected]
    assert len(error_logs) == 1
    assert error_logs[0].levelno == logging.ERROR
def create_config(config_str, tmpdir):
    """Write config_str to genconf/config.yaml plus a stub ip-detect script."""
    genconf = tmpdir.join('genconf')
    genconf.ensure(dir=True)
    genconf.join('config.yaml').write(config_str)
    genconf.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
    """Lay down the minimal bootstrap/package artifact tree the backend expects."""
    complete_json = '{"bootstrap": "12345", "packages": ["package--version"]}'
    bootstrap = tmpdir.join('artifacts/bootstrap')
    bootstrap.ensure(dir=True)
    bootstrap.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
    bootstrap.join('12345.active.json').write('["package--version"]', ensure=True)
    bootstrap.join('test_variant.bootstrap.latest').write("12345")
    # Both the variant-specific and the default "complete" metadata files.
    for name in ('test_variant.complete.latest.json', 'complete.latest.json'):
        tmpdir.join('artifacts/complete', name).write(complete_json, ensure=True)
    tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
| |
"""
Setup logging handler to write syslog-like messages to file or to syslog.
Logging handlers can be set upto write to a file or to the system syslog
facility. If writing to a file, then file rotation parameters are
configurable.
The logging handler is created and configured by calling one of the functions
in this module, and is then added to the logger specified by the base_name
argument.
All modules in a python program can use the logger and handler that is setup by
this module by doing the following:
import logging
logger = logging.getLogger(LOGGER_NAME)
In only one place, when the program first starts, does the logger need to be
configured with a handler by one of the functions in this module.
"""
__author__ = "Andrew Gillis"
import socket
import logging
import logging.handlers
def setup_file_logging(
        base_name, log_path=None, log_level=logging.INFO, show_name=False,
        log_perror=False, rotations=None, rotate_kb=None, rotate_time=None):
    """Configure and add file logging handler to logger with base_name.

    The log file written to is defined by the log_path parameter.  If
    log_path is None, then log messages are sent to stderr.

    If rotations is set to a value other than None, then log rotation is
    enabled.  If rotate_kb is specified, then the log file is rotated when
    it is near the specified size (in kB).  If rotate_time is specified
    (as a string 'when,interval'), then the log file is rotated on the
    given interval.  A log file may rotate by size or by time, but not
    both.  If both rotate_kb and rotate_time are specified, then
    rotate_time is ignored.  If both are None, the file is not rotated.

    Arguments:
    base_name   -- Name of base logger (lowest in hierarchy).
    log_path    -- Path/name of file to write log entries to.
    log_level   -- Level value or name. Write entries at or below this level.
    rotations   -- Number of rotated log files to keep.
    rotate_kb   -- Rotate when log file reaches this size (in kB).
    rotate_time -- Rotate log file on this interval ('when,interval').
    log_perror  -- Write log entries to stderr as well as file.
    show_name   -- Logger name is included in log message. This is useful
                   when using a naming hierarchy to distinguish different
                   sources of log messages.

    Returns:
    logger object.  This logger object, or its descendents, can be retrieved
    by calling logging.getLogger(logger_name), where logger_name is a
    hierarchical name beginning with base_name, and having child name(s)
    separated by '.' (dot).
    """
    logger = logging.getLogger(base_name)
    logger.setLevel(_get_log_level_value(log_level))

    # Setup logging format.
    if show_name:
        # Show logger name in log message.
        fmt = '%(asctime)s %(name)s %(levelname)-8s %(message)s'
    else:
        fmt = '%(asctime)s %(levelname)-8s %(message)s'
    log_formatter = logging.Formatter(fmt=fmt, datefmt='%c')

    # If logging to file.
    if log_path:
        file_handler = None
        if rotations is not None:
            # NOTE(review): asserts are stripped under -O; kept as-is to
            # preserve the existing failure behavior.
            assert(isinstance(rotations, int) and rotations >= 0)
            if rotate_kb:
                # Rotating at a specified size.
                file_handler = logging.handlers.RotatingFileHandler(
                    log_path, 'a', int(rotate_kb) << 10, rotations)
            elif rotate_time:
                # Rotating at a specified interval.
                # BUG FIX: the original read `rotate_time.lower.split(...)`,
                # missing the () call, which raised AttributeError.
                when, interval = rotate_time.lower().split(',', 1)
                assert(when in ('s', 'm', 'h', 'd', 'w', 'midnight'))
                interval = int(interval)
                file_handler = logging.handlers.TimedRotatingFileHandler(
                    log_path, when, interval, rotations)
        if file_handler is None:
            file_handler = logging.FileHandler(log_path, 'a')
        file_handler.setFormatter(log_formatter)
        logger.addHandler(file_handler)
    else:
        # No log file specified, so log to stderr.
        log_perror = True

    # If logging to stderr enabled, then create additional stream handler.
    if log_perror:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(log_formatter)
        logger.addHandler(stream_handler)
    return logger
def setup_syslog_logging(
        base_name, log_level=logging.INFO, show_name=False, log_perror=False,
        address=None, port=None, facility=None, socktype=None):
    """Configure and add syslog logging handler to logger with base_name.

    Arguments:
    base_name  -- Name of base logger (lowest in hierarchy).
    log_level  -- Level value or name. Write entries at or below this level.
    address    -- Syslog address, or path to unix domain socket.
    port       -- Port number if socktype is 'udp' or 'tcp'.
    facility   -- Syslog facility name. Defaults to LOG_USER.
    socktype   -- One of: 'unix', 'udp', 'tcp'. Defaults to 'unix'.
    log_perror -- Write log entries to stderr as well as syslog.
    show_name  -- Logger name is included in log message. This is useful
                  when using a naming hierarchy to distinguish different
                  sources of log messages.

    Returns:
    logger object.  This logger object, or its descendents, can be retrieved
    by calling logging.getLogger(logger_name), where logger_name is a
    hierarchical name beginning with base_name, and having child name(s)
    separated by '.' (dot).
    """
    logger = logging.getLogger(base_name)
    logger.setLevel(_get_log_level_value(log_level))

    # Resolve the facility name to its numeric value.
    if facility is not None:
        facility_num = logging.handlers.SysLogHandler.facility_names.get(
            facility)
        if facility_num is None:
            # BUG FIX: iterkeys() is Python 2 only and crashed this error
            # path on Python 3; keys() works on both.
            names = logging.handlers.SysLogHandler.facility_names.keys()
            raise Exception('ERROR: unsupported syslog facility. Must be '
                            'one of: %s' % ', '.join(names))
    else:
        facility_num = logging.handlers.SysLogHandler.LOG_USER

    if not socktype or socktype == 'unix':
        # Local syslog via unix domain socket.
        if not address:
            address = '/dev/log'
        else:
            address = str(address)
        syslog_handler = logging.handlers.SysLogHandler(
            address=address, facility=facility_num)
    else:
        # Remote syslog over UDP or TCP.
        if socktype == 'udp':
            if not port:
                port = logging.handlers.SysLogHandler.SYSLOG_UDP_PORT
            socktype = socket.SOCK_DGRAM
        elif socktype == 'tcp':
            if not port:
                port = logging.handlers.SysLogHandler.SYSLOG_TCP_PORT
            socktype = socket.SOCK_STREAM
        else:
            raise Exception('ERROR: unsupported socket type: %s' % socktype)
        if not address:
            address = 'localhost'
        syslog_handler = logging.handlers.SysLogHandler(
            address=(address, int(port)), facility=facility_num,
            socktype=socktype)

    # Setup logging format.
    if show_name:
        # Show logger name in log message.
        fmt = base_name + ': %(name)s %(levelname)-8s %(message)s'
    else:
        fmt = base_name + ': %(levelname)-8s %(message)s'
    log_formatter = logging.Formatter(fmt=fmt, datefmt='%c')
    syslog_handler.setFormatter(log_formatter)
    logger.addHandler(syslog_handler)

    # If logging to stderr, then create additional stderr stream handler.
    if log_perror:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(log_formatter)
        logger.addHandler(stream_handler)
    return logger
def _get_log_level_value(log_level):
    """Translate a level name or numeric value into a logging level int.

    Accepts either an int-compatible value or one of the names 'debug',
    'info', 'warning', 'error', 'critical' (case-insensitive).  Raises
    Exception for an unrecognized level name.
    """
    if not isinstance(log_level, str):
        # Log level is not a string, so make sure the value is an int.
        return int(log_level)
    # Convert the level name to its numeric value.
    name_to_level = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
    }
    try:
        return name_to_level[log_level.lower()]
    except KeyError:
        raise Exception('ERROR: unsupported log level: %s' % log_level)
| |
#!/usr/bin/env python3
"""
This file is part of Mbed TLS (https://tls.mbed.org)
Copyright (c) 2018, Arm Limited, All Rights Reserved
Purpose
This script checks the current state of the source code for minor issues,
including incorrect file permissions, presence of tabs, non-Unix line endings,
trailing whitespace, presence of UTF-8 BOM, and TODO comments.
Note: requires python 3, must be run from Mbed TLS root.
"""
import os
import argparse
import logging
import codecs
import sys
class FileIssueTracker(object):
    """Base class for file-wide issue tracking.

    To implement a checker that processes a file as a whole, inherit from
    this class and implement `check_file_for_issue` and define ``heading``.

    ``files_exemptions``: files whose name ends with a string in this set
    will not be checked.

    ``heading``: human-readable description of the issue.
    """

    files_exemptions = frozenset()
    # heading must be defined in derived classes.
    # pylint: disable=no-member

    def __init__(self):
        self.files_with_issues = {}

    def should_check_file(self, filepath):
        """Return False when filepath ends with an exempted suffix."""
        return not any(filepath.endswith(suffix)
                       for suffix in self.files_exemptions)

    def check_file_for_issue(self, filepath):
        raise NotImplementedError

    def record_issue(self, filepath, line_number):
        """Remember that filepath exhibits this issue at line_number."""
        self.files_with_issues.setdefault(filepath, []).append(line_number)

    def output_file_issues(self, logger):
        """Log the heading and each affected file (with line numbers if any)."""
        if not self.files_with_issues.values():
            return
        logger.info(self.heading)
        for filename, lines in sorted(self.files_with_issues.items()):
            if lines:
                logger.info("{}: {}".format(
                    filename, ", ".join(str(num) for num in lines)
                ))
            else:
                logger.info(filename)
        logger.info("")
class LineIssueTracker(FileIssueTracker):
    """Base class for line-by-line issue tracking.

    To implement a checker that processes files line by line, inherit from
    this class and implement `line_with_issue`.
    """

    def issue_with_line(self, line, filepath):
        raise NotImplementedError

    def check_file_line(self, filepath, line, line_number):
        # Delegate to the subclass predicate; record each hit.
        if self.issue_with_line(line, filepath):
            self.record_issue(filepath, line_number)

    def check_file_for_issue(self, filepath):
        """Run the line predicate over every line of the file (1-based)."""
        with open(filepath, "rb") as f:
            for number, line in enumerate(f, start=1):
                self.check_file_line(filepath, line, number)
class PermissionIssueTracker(FileIssueTracker):
    """Track files with bad permissions.

    Files that are not executable scripts must not be executable."""

    heading = "Incorrect permissions:"

    def check_file_for_issue(self, filepath):
        # Scripts (.sh/.pl/.py) must be executable; everything else must not.
        expected = filepath.endswith((".sh", ".pl", ".py"))
        actual = os.access(filepath, os.X_OK)
        if actual != expected:
            self.files_with_issues[filepath] = None
class EndOfFileNewlineIssueTracker(FileIssueTracker):
    """Track files that end with an incomplete line
    (no newline character at the end of the last line)."""

    heading = "Missing newline at end of file:"

    def check_file_for_issue(self, filepath):
        with open(filepath, "rb") as f:
            content = f.read()
        if not content.endswith(b"\n"):
            self.files_with_issues[filepath] = None
class Utf8BomIssueTracker(FileIssueTracker):
    """Track files that start with a UTF-8 BOM.

    Files should be ASCII or UTF-8. Valid UTF-8 does not start with a BOM."""

    heading = "UTF-8 BOM present:"

    def check_file_for_issue(self, filepath):
        # Only the first few bytes matter; no need to read the whole file.
        with open(filepath, "rb") as f:
            head = f.read(len(codecs.BOM_UTF8))
        if head.startswith(codecs.BOM_UTF8):
            self.files_with_issues[filepath] = None
class LineEndingIssueTracker(LineIssueTracker):
    """Track files with non-Unix line endings (i.e. files with CR)."""

    heading = "Non Unix line endings:"

    def issue_with_line(self, line, _filepath):
        # Any carriage return indicates DOS/Mac line endings.
        return line.find(b"\r") >= 0
class TrailingWhitespaceIssueTracker(LineIssueTracker):
    """Track lines with trailing whitespace."""

    heading = "Trailing whitespace:"
    # BUG FIX: frozenset(".md") iterated the string and built the character
    # set {'.', 'm', 'd'}, exempting any file whose name ends in '.', 'm'
    # or 'd' instead of only Markdown files. Wrap the suffix in a list so
    # the exemption is the literal ".md" suffix.
    files_exemptions = frozenset([".md"])

    def issue_with_line(self, line, _filepath):
        # Dirty when stripping all trailing whitespace removes more than
        # just the line-ending characters.
        return line.rstrip(b"\r\n") != line.rstrip()
class TabIssueTracker(LineIssueTracker):
    """Track lines with tabs."""

    heading = "Tabs present:"
    # Tabs are mandatory in Makefiles and kept in this generator script.
    files_exemptions = frozenset([
        "Makefile",
        "generate_visualc_files.pl",
    ])

    def issue_with_line(self, line, _filepath):
        return line.count(b"\t") > 0
class MergeArtifactIssueTracker(LineIssueTracker):
    """Track lines with merge artifacts.
    These are leftovers from a ``git merge`` that wasn't fully edited."""

    heading = "Merge artifact:"

    def issue_with_line(self, line, _filepath):
        # Leftover git conflict markers.
        if line.startswith(b'<<<<<<< ') or line.startswith(b'>>>>>>> '):
            return True
        # Middle marker produced by merge.conflictStyle=diff3.
        if line.startswith(b'||||||| '):
            return True
        # A bare '=======' is legitimate in Markdown (setext heading
        # underline), so only flag it outside .md files.
        return line.rstrip(b'\r\n') == b'=======' and \
            not _filepath.endswith('.md')
class TodoIssueTracker(LineIssueTracker):
    """Track lines containing ``TODO``."""

    heading = "TODO present:"
    # This script and a few known files mention TODO on purpose.
    files_exemptions = frozenset([
        os.path.basename(__file__),
        "benchmark.c",
        "pull_request_template.md",
    ])

    def issue_with_line(self, line, _filepath):
        # Case-insensitive match anywhere in the line.
        return b"todo" in line.lower()
class IntegrityChecker(object):
    """Sanity-check files under the current directory."""

    def __init__(self, log_file):
        """Instantiate the sanity checker.
        Check files under the current directory.
        Write a report of issues to log_file."""
        self.check_repo_path()
        self.logger = None
        self.setup_logger(log_file)
        # Only files whose name ends with one of these are examined.
        self.files_to_check = (
            ".c", ".h", ".sh", ".pl", ".py", ".md", ".function", ".data",
            "Makefile", "CMakeLists.txt", "ChangeLog"
        )
        # Directory names pruned from the walk wherever they occur.
        self.excluded_directories = ['.git', 'mbed-os']
        # Specific normalized paths (relative to the root) pruned as well.
        self.excluded_paths = list(map(os.path.normpath, [
            'cov-int',
            'examples',
        ]))
        # One instance of each issue tracker; every file is run through all.
        self.issues_to_check = [
            PermissionIssueTracker(),
            EndOfFileNewlineIssueTracker(),
            Utf8BomIssueTracker(),
            LineEndingIssueTracker(),
            TrailingWhitespaceIssueTracker(),
            TabIssueTracker(),
            MergeArtifactIssueTracker(),
            TodoIssueTracker(),
        ]

    @staticmethod
    def check_repo_path():
        # Heuristic: these three directories exist together only at the
        # repository root.
        if not all(os.path.isdir(d) for d in ["include", "library", "tests"]):
            raise Exception("Must be run from Mbed TLS root")

    def setup_logger(self, log_file, level=logging.INFO):
        """Send the report to log_file if given, otherwise to the console."""
        self.logger = logging.getLogger()
        self.logger.setLevel(level)
        if log_file:
            handler = logging.FileHandler(log_file)
            self.logger.addHandler(handler)
        else:
            console = logging.StreamHandler()
            self.logger.addHandler(console)

    def prune_branch(self, root, d):
        """Return True when directory d under root must not be descended into."""
        if d in self.excluded_directories:
            return True
        if os.path.normpath(os.path.join(root, d)) in self.excluded_paths:
            return True
        return False

    def check_files(self):
        """Walk the tree and run every tracker on each eligible file."""
        for root, dirs, files in os.walk("."):
            # os.walk honors in-place modification of dirs: pruned branches
            # are never visited, and sorting keeps the output deterministic.
            dirs[:] = sorted(d for d in dirs if not self.prune_branch(root, d))
            for filename in sorted(files):
                filepath = os.path.join(root, filename)
                if not filepath.endswith(self.files_to_check):
                    continue
                for issue_to_check in self.issues_to_check:
                    if issue_to_check.should_check_file(filepath):
                        issue_to_check.check_file_for_issue(filepath)

    def output_issues(self):
        """Emit all recorded issues; return 1 if any were found, else 0."""
        integrity_return_code = 0
        for issue_to_check in self.issues_to_check:
            if issue_to_check.files_with_issues:
                integrity_return_code = 1
            issue_to_check.output_file_issues(self.logger)
        return integrity_return_code
def run_main():
    """Command-line entry point: check the tree and exit with 0 or 1."""
    parser = argparse.ArgumentParser(
        description=(
            "This script checks the current state of the source code for "
            "minor issues, including incorrect file permissions, "
            "presence of tabs, non-Unix line endings, trailing whitespace, "
            "presence of UTF-8 BOM, and TODO comments. "
            "Note: requires python 3, must be run from Mbed TLS root."
        )
    )
    parser.add_argument(
        "-l", "--log_file", type=str, help="path to optional output log",
    )
    options = parser.parse_args()
    checker = IntegrityChecker(options.log_file)
    checker.check_files()
    sys.exit(checker.output_issues())


if __name__ == "__main__":
    run_main()
| |
'''
API for the laws app
'''
import logging
from django.core.urlresolvers import reverse
from tastypie.constants import ALL
import tastypie.fields as fields
from agendas.templatetags.agendas_tags import agendas_for
from apis.resources.base import BaseResource
from mks.models import Member, Party
from mks.api import MemberResource
from video.utils import get_videos_queryset
from video.api import VideoResource
from links.models import Link
from links.api import LinkResource
from models import Law, Bill, Vote, VoteAction, PrivateProposal
from simple.management.commands.syncdata_globals import p_explanation
from agendas.models import AgendaVote
from datetime import datetime, timedelta
# Module-level logger shared by the API resources defined below.
logger = logging.getLogger("open-knesset.laws.api")
class LawResource(BaseResource):
    """Read-only API resource exposing all Law objects."""

    class Meta(BaseResource.Meta):
        queryset = Law.objects.all()
        allowed_methods = ['get']  # read-only endpoint
class VoteActionResource(BaseResource):
    """Read-only resource for individual members' votes (VoteAction rows)."""

    class Meta(BaseResource.Meta):
        queryset = VoteAction.objects.all()
        allowed_methods = ['get']
        # 'type' is re-exposed as vote_type below; id is implied by the URI.
        excludes = ['type', 'id']
        include_resource_uri = False
        filtering = {
            'against_own_bill': ALL,
        }
        list_fields = [
            'member', 'party', 'vote', 'against_party', 'against_coalition', 'against_opposition', 'against_own_bill',
            'member_title', 'vote_title', 'member_url', 'vote_url', 'vote_time'
        ]

    # Renamed from the model field 'type' (excluded above) to a clearer name.
    vote_type = fields.CharField('type', null=True)
    member = fields.ToOneField(MemberResource, 'member', full=False)
    party = fields.ToOneField('mks.api.PartyResource', 'party', full=False)
    vote = fields.ToOneField('laws.api.VoteResource', 'vote', full=False)
    # Denormalized display values pulled from the related objects.
    member_title = fields.CharField('member')
    vote_title = fields.CharField('vote')
    member_url = fields.CharField('member__get_absolute_url')
    vote_url = fields.CharField('vote__get_absolute_url')
    vote_time = fields.DateTimeField('vote__time')
class VoteResource(BaseResource):
    """Read-only resource for Votes, filterable by member, tag and date."""

    class Meta(BaseResource.Meta):
        queryset = Vote.objects.all()
        allowed_methods = ['get']
        list_fields = [
            'time', 'title', 'vote_type', 'votes_count', 'for_votes_count',
            'against_votes_count', 'meeting_number', 'vote_number',
            # FIX: 'against_party ' had a trailing space, so the field name
            # never matched and was silently dropped from list output.
            'importance', 'controversy', 'against_party', 'against_coalition',
            'against_opposition', 'against_own_bill',
        ]
        filtering = dict(tag=('exact'),
                         member=ALL,
                         member_for=ALL,
                         member_against=ALL,
                         from_date=ALL,
                         to_date=ALL)

    votes = fields.ToManyField(VoteActionResource,
                               attribute=lambda bundle: VoteAction.objects.filter(
                                   vote=bundle.obj).select_related('member'),
                               null=True,
                               full=True)
    agendas = fields.ListField()
    tags = fields.ToManyField('auxiliary.api.TagResource',
                              attribute=lambda t: t.obj.tags,
                              null=True,
                              full=False)

    def build_filters(self, filters=None):
        """Translate API query parameters into ORM filter keywords.

        FIX: replaced the mutable default argument ``filters={}`` with None,
        and fixed the to_date format string which used '%Y-%M-%d' (%M is
        minutes, not month), so e.g. 2014-07-30 parsed with month=January.
        """
        if filters is None:
            filters = {}
        orm_filters = super(VoteResource, self).build_filters(filters)
        if 'member' in filters:
            orm_filters["voteaction__member"] = filters['member']
        if 'member_for' in filters:
            orm_filters["voteaction__member"] = filters['member_for']
            orm_filters["voteaction__type"] = 'for'
        if 'member_against' in filters:
            orm_filters["voteaction__member"] = filters['member_against']
            orm_filters["voteaction__type"] = 'against'
        if 'tag' in filters:
            # hard-coded the __in filter. not great, but works.
            orm_filters["tagged_items__tag__in"] = \
                filters["tag"].split(',')
        if 'from_date' in filters and filters.get('from_date'):
            orm_filters["time__gte"] = filters['from_date']
        if 'to_date' in filters:
            # Humans saying to_date=2014-07-30 mean "through the end of the
            # 30th" (midnight between 30 and 31), while Python parses it as
            # the start of the 30th -- so add one day.
            to_date = datetime.strptime(filters["to_date"], "%Y-%m-%d") + timedelta(days=1)
            orm_filters["time__lte"] = to_date.strftime("%Y-%m-%d")
        return orm_filters

    def dehydrate_agendas(self, bundle):
        """Return a list of agenda summary dicts attached to this vote."""
        agendavotes = bundle.obj.agendavotes.select_related('agenda')
        result = []
        for avote in agendavotes:
            agenda = avote.agenda
            resource_uri = reverse(
                'api_dispatch_detail',
                kwargs={
                    'resource_name': 'agenda', 'api_name': 'v2',
                    'pk': agenda.pk})
            result.append({
                'name': agenda.name,
                'image': agenda.image.url if agenda.image else None,
                'resource_uri': resource_uri,
                'score': avote.score,
                'importance': avote.importance,
                'reasoning': avote.reasoning,
            })
        return result
class PrivateProposalResource(BaseResource):
    """Read-only API resource exposing all PrivateProposal objects."""

    class Meta(BaseResource.Meta):
        queryset = PrivateProposal.objects.all()
        allowed_methods = ['get']
class BillAgendaResource(BaseResource):
    """Placeholder resource used as the target of BillResource.agendas."""
def detailed_agendas(agenda_list):
    """Expand AgendaVote-like objects into plain dicts for API output."""
    details = []
    for item in agenda_list:
        agenda = item.agenda
        uri = reverse(
            'api_dispatch_detail',
            kwargs={'resource_name': 'agenda', 'api_name': 'v2',
                    'pk': agenda.pk})
        details.append({
            'name': agenda.name,
            'image': agenda.image.url if agenda.image else None,
            'resource_uri': uri,
            'public_owner_name': agenda.public_owner_name,
            'reasoning': item.reasoning,
            'score': item.score,
            'importance': item.importance,
        })
    return details
class BillResource(BaseResource):
    ''' Bill API '''

    class Meta(BaseResource.Meta):
        queryset = Bill.objects.all()
        allowed_methods = ['get']
        ordering = ['stage_date', 'title']
        filtering = dict(stage=ALL, proposer=ALL, title=ALL, full_title=ALL)
        list_fields = [
            'title', 'full_title', 'popular_name', 'law', 'stage',
            'stage_date'
        ]
        include_absolute_url = True
        limit = 20

    explanation = fields.CharField()
    legal_code = fields.CharField()
    proposers = fields.ToManyField(MemberResource,
                                   'proposers',
                                   full=False)
    pre_votes = fields.ToManyField(VoteResource,
                                   'pre_votes',
                                   null=True,
                                   full=False)
    first_vote = fields.ToOneField(VoteResource,
                                   'first_vote',
                                   null=True,
                                   full=False)
    approval_vote = fields.ToOneField(VoteResource,
                                      'approval_vote',
                                      null=True,
                                      full=False)
    proposals = fields.ToManyField(PrivateProposalResource,
                                   'proposals',
                                   null=True,
                                   full=True)
    tags = fields.ToManyField('auxiliary.api.TagResource',
                              attribute=lambda t: t.obj.tags,
                              null=True,
                              full=False)
    # XXX : this adds the following select phrases
    #  [sql] SELECT ...
    #   FROM "agendas_agendabill"
    #   WHERE ("agendas_agendabill"."agenda_id" IN
    #            (SELECT ...
    #             FROM "agendas_agenda"
    #             WHERE "agendas_agenda"."is_public" = TRUE)
    #          AND "agendas_agendabill"."bill_id" = XXX)
    #  [sql] SELECT ...
    #   FROM "agendas_agenda"
    #   WHERE "agendas_agenda"."id" = YYY
    agendas = fields.ToManyField(BillAgendaResource,
                                 'agendas',
                                 null=True,
                                 full=False)

    def dehydrate_agendas(self, bundle):
        """Build the agenda summary dict for a bill, or None on failure.

        FIX: the bare ``except:`` was narrowed to ``except Exception`` and
        now logs through the module logger with the traceback, instead of
        calling logging.error with only a static message.
        """
        try:
            agendas_details = agendas_for(bundle.request.user, bundle.obj, 'bill')
            return {
                "agenda_list": detailed_agendas(agendas_details["agendas"]),
                # NOTE(review): the remaining values are passed through
                # unchanged; whether they also need detailed_agendas()
                # treatment was an open question in the original code.
                "suggest_agendas": agendas_details["suggest_agendas"],
                "formset": agendas_details["formset"],
                "suggested_agendas": agendas_details["suggested_agendas"],
                "suggest_agendas_login": agendas_details["suggest_agendas_login"],
            }
        except Exception:
            logger.exception('Got exception dehydrating agendas')
            return None

    def dehydrate_explanation(self, bundle):
        """Compute the explanation part of the bill text.

        NOTE(review): on success this method currently returns None -- the
        computed value is deliberately not returned (see the original
        'do we need this here????' TODO); only the error path returns "".
        Preserved as-is because API clients may depend on it -- confirm
        before changing.
        """
        try:
            self.get_src_parts(bundle)[1]
        except Exception:
            logger.exception('Got exception dehydrating explanation')
            return ""

    def dehydrate_legal_code(self, bundle):
        """Return the legal-code part of the latest proposal's text."""
        return self.get_src_parts(bundle)[0]

    def dehydrate_stage(self, bundle):
        """Return the human-readable display name of the bill's stage."""
        return bundle.obj.get_stage_display()

    def get_src_parts(self, bundle):
        """Split the latest proposal's HTML into [legal_code, explanation].

        The result is cached on the bundle so the dehydrate_* methods only
        parse once per bill.
        """
        try:
            return bundle.src_parts
        except AttributeError:
            parts = ['', '']
            bill = bundle.obj
            try:
                latest = bill.proposals.order_by('-date')[0]
                if latest.content_html:
                    parts = latest.content_html.split(p_explanation)
            except IndexError:
                # Bill has no proposals at all; keep the empty parts.
                pass
            bundle.src_parts = parts
            return parts

    def build_filters(self, filters=None):
        """Map the 'proposer' query parameter onto the ORM relation.

        FIX: replaced the mutable default argument ``filters={}`` with None.
        """
        if filters is None:
            filters = {}
        orm_filters = super(BillResource, self).build_filters(filters)
        if 'proposer' in filters:
            orm_filters["proposers"] = filters['proposer']
        return orm_filters
| |
# Main help from http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm
# also see http://effbot.org/zone/simple-top-down-parsing.htm
from abc import ABCMeta, abstractproperty, abstractmethod
import re
from logic.language import PropositionalConstant
from logic.syntax import SimpleSentence, Negation, Conjunction, Disjunction, \
Equivalence, Implication, Reduction
def parse(text):
    """Tokenise *text* and parse it into a sentence tree."""
    tokens = tokenise(text)
    return parse_program(tokens)
def parse_program(tokens):
    """Parse a complete token stream and return the single result operand."""
    state = ParseState(tokens, [SentinelToken()], [])
    if state.next_token == EndToken():
        raise ParsingError("Empty expression")
    # Parse one expression, then require the stream to be exhausted.
    state = expression(state)
    state = expect(state, EndToken())
    return state.next_operand
def tokenise(text):
    """Convert *text* into a tuple of tokens, always ending with EndToken."""
    # Alternatives are tried in order; 'invalid' catches any non-space junk
    # so create_token can raise a TokenisationError with its position.
    token_pattern = r"""
        (?P<constant>[a-z]{1}[a-zA-Z0-9\_]*)
        |(?P<negation>\-{1})
        |(?P<conjunction>\^{1})
        |(?P<disjunction>\|{1})
        |(?P<equivalence>\<\=\>{1})
        |(?P<implication>\=\>{1})
        |(?P<reduction>\<\={1})
        |(?P<left_parenthesis>\({1})
        |(?P<right_parenthesis>\){1})
        |(?P<invalid>[^\s]+)
    """
    tokens = [
        create_token(m.lastgroup, m.group(m.lastgroup), m.start(m.lastgroup))
        for m in re.finditer(token_pattern, text, re.VERBOSE)
    ]
    tokens.append(EndToken())
    return tuple(tokens)
def create_token(token_type, value, position):
    """Build the token object for one regex match.

    Raises TokenisationError for the 'invalid' catch-all group.
    """
    simple_tokens = {
        "negation": NegationToken,
        "conjunction": ConjunctionToken,
        "disjunction": DisjunctionToken,
        "equivalence": EquivalenceToken,
        "implication": ImplicationToken,
        "reduction": ReductionToken,
        "left_parenthesis": LeftParenthesisToken,
        "right_parenthesis": RightParenthesisToken,
    }
    make = simple_tokens.get(token_type)
    if make is not None:
        return make()
    if token_type == 'constant':
        # Constants carry their matched text.
        return ConstantToken(value)
    raise TokenisationError(
        "Unrecognised token: %s > %r at position %s" %
        (token_type, value, position)
    )
class ParsingError(Exception):
    """Base error for all tokenisation and parsing failures."""
class TokenisationError(ParsingError):
    """Raised when the input text contains an unrecognised token."""
class ParsingSyntaxError(ParsingError):
    """Raised when the token stream violates the expression grammar."""
class ParseState(object):
    """Immutable parser snapshot: a token stream plus two stacks.

    Every mutator returns a brand-new ParseState; the receiver is never
    modified, which makes backtracking and recursion safe.
    """

    def __init__(self, tokens, operators, operands):
        self._tokens = tuple(tokens)
        self._operators = tuple(operators)
        self._operands = tuple(operands)

    @property
    def next_token(self):
        assert self._tokens, "No tokens left"
        return self._tokens[0]

    @property
    def next_operand(self):
        assert self._operands, "No operands left"
        return self._operands[0]

    @property
    def next_operator(self):
        assert self._operators, "No operators left"
        return self._operators[0]

    def next_operands(self, amount):
        """Peek at the top *amount* operands (most recent first)."""
        return tuple(self._operands)[:amount]

    def pop_operand(self):
        return self.pop_operands(1)

    def pop_operands(self, amount):
        return ParseState(
            self._tokens, self._operators, self._operands[amount:])

    def pop_operator(self):
        return ParseState(
            self._tokens, self._operators[1:], self._operands)

    def push_operator(self, operator):
        return ParseState(
            self._tokens, (operator,) + self._operators, self._operands)

    def push_operand(self, operand):
        return ParseState(
            self._tokens, self._operators, (operand,) + self._operands)

    def consume_token(self):
        return ParseState(
            self._tokens[1:], self._operators, self._operands)
def expression(state):
    """Parse one complete expression (operator-precedence / shunting-yard)."""
    state = progress(state)
    # Keep folding in binary operators for as long as they appear.
    while state.next_token.is_binary:
        state = push_operator(state.next_token, state)
        state = progress(state.consume_token())
    # Reduce everything stacked above the sentinel into operand sentences.
    while state.next_operator != SentinelToken():
        state = pop_operator(state)
    return state
def progress(state):
    """Consume one operand: a constant, a parenthesised group, or a unary
    operator applied to a following operand.

    Raises:
        ParsingSyntaxError: when the next token cannot start an operand.
            (Previously a bare ``Exception``; ParsingSyntaxError is a
            subclass of Exception, so existing callers still catch it.)
    """
    next_token = state.next_token
    # hasattr() replaces the unidiomatic `"..." in dir(token)` duck-check.
    if hasattr(next_token, "create_value_sentence"):
        state = state.push_operand(next_token.create_value_sentence())
        state = state.consume_token()
    elif next_token == LeftParenthesisToken():
        # Parse the group under a fresh sentinel so operators inside the
        # parentheses cannot reduce across the boundary.
        state = state.consume_token()
        state = state.push_operator(SentinelToken())
        state = expression(state)
        state = expect(state, RightParenthesisToken())
        state = state.pop_operator()
    elif next_token.is_unary:
        state = push_operator(next_token, state)
        state = state.consume_token()
        state = progress(state)
    else:
        raise ParsingSyntaxError("Parsing issue with token %r" % next_token)
    return state
def expect(state, token):
    """Require that the next token equals *token*, then consume it.

    Raises:
        ParsingSyntaxError: on a mismatch.  (Previously a bare
            ``Exception``; the subclass keeps callers backward compatible
            while letting them distinguish syntax errors.)
    """
    if state.next_token != token:
        raise ParsingSyntaxError("Expected %r got %r" % (token, state.next_token))
    return state.consume_token()
def pop_operator(state):
    """Pop the top operator and replace its operand(s) with the sentence it builds."""
    operator = state.next_operator
    state = state.pop_operator()
    if operator.is_binary:
        # The operand stack is top-first, so the right operand comes off first.
        right, left = state.next_operands(2)
        sentence = operator.create_binary_sentence(left, right)
        return state.pop_operands(2).push_operand(sentence)
    if operator.is_unary:
        sentence = operator.create_unary_sentence(state.next_operand)
        return state.pop_operand().push_operand(sentence)
    raise Exception("Request a pop of a non-operator %r" % operator)
def push_operator(operator, state):
    """Push *operator*, first reducing any stacked operators that bind at
    least as tightly (>= makes the binary operators left-associative)."""
    while state.next_operator.binding_power >= operator.binding_power:
        state = pop_operator(state)
    return state.push_operator(operator)
class TokenBindingPower(object):
    """Relative operator precedence; a larger value binds more tightly."""
    LEVEL_1 = 100         # constants
    LEVEL_2 = 80          # negation
    LEVEL_3 = 60          # conjunction
    LEVEL_4 = 40          # disjunction
    LEVEL_5 = 20          # equivalence / implication / reduction
    LEVEL_6 = 0           # parentheses and end-of-input
    LEVEL_LOWEST = -9999  # sentinel: never reduced by push_operator
class AbstractToken(object):
    """Common behaviour shared by all token types.

    Tokens are value objects: equality compares concrete type plus instance
    state, and subclasses advertise their role via is_binary / is_unary.
    """
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration, kept as-is

    @abstractproperty
    def binding_power(self):
        """Precedence used to decide when this token is reduced."""

    @property
    def is_binary(self):
        return False

    @property
    def is_unary(self):
        return False

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)
class SentinelToken(AbstractToken):
    """Stack-bottom marker: binds weaker than everything, so push_operator
    never reduces past it."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_LOWEST
class LeftParenthesisToken(AbstractToken):
    """Opens a parenthesised sub-expression."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_6

    def __str__(self):
        return "("
class RightParenthesisToken(AbstractToken):
    """Closes a parenthesised sub-expression."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_6

    def __str__(self):
        return ")"
class ConstantToken(AbstractToken):
    """A propositional constant as written in the input (e.g. ``p``)."""

    def __init__(self, value):
        self._value = value  # the constant's name

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_1

    def create_value_sentence(self):
        """Wrap the constant in an operand sentence for the parser."""
        return SimpleSentence(PropositionalConstant(self._value))

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._value)
class NegationToken(AbstractToken):
    """Unary negation operator (``-``)."""

    @property
    def is_unary(self):
        return True

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_2

    def create_unary_sentence(self, target):
        return Negation(target)
class BinaryOperationToken(AbstractToken):
    """Base class for all two-operand operator tokens."""
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration, kept as-is

    @property
    def is_binary(self):
        return True

    @abstractmethod
    def create_binary_sentence(self, left, right):
        """Build the sentence node combining *left* and *right*."""
class ConjunctionToken(BinaryOperationToken):
    """Logical AND operator (``^``)."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_3

    def create_binary_sentence(self, left, right):
        return Conjunction(left, right)
class DisjunctionToken(BinaryOperationToken):
    """Logical OR operator (``|``)."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_4

    def create_binary_sentence(self, left, right):
        return Disjunction(left, right)
class EquivalenceToken(BinaryOperationToken):
    """Biconditional operator (``<=>``)."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_5

    def create_binary_sentence(self, left, right):
        return Equivalence(left, right)

    def __str__(self):
        return "<=>"
class ImplicationToken(BinaryOperationToken):
    """Implication operator (``=>``)."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_5

    def create_binary_sentence(self, left, right):
        return Implication(left, right)

    def __str__(self):
        return "=>"
class ReductionToken(BinaryOperationToken):
    """Reverse implication operator (``<=``)."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_5

    def create_binary_sentence(self, left, right):
        return Reduction(left, right)

    def __str__(self):
        return "<="
class EndToken(AbstractToken):
    """Marks the end of the token stream; appended by tokenise()."""

    @property
    def binding_power(self):
        return TokenBindingPower.LEVEL_6

    def __str__(self):
        return "(end)"
| |
from __future__ import unicode_literals
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils import six
import rest_framework.utils.model_meta
from rest_framework.compat import _resolve_model
from rest_framework.routers import SimpleRouter
from rest_framework.serializers import ModelSerializer
from rest_framework.utils import json
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from tests.models import BasicModel
class Root(APIView):
    """Bare view backing the root URL in the breadcrumb test URLConf."""
class ResourceRoot(APIView):
    """Bare view backing ``/resource/`` in the breadcrumb test URLConf."""
class ResourceInstance(APIView):
    """Bare view backing ``/resource/<key>`` in the breadcrumb test URLConf."""
class NestedResourceRoot(APIView):
    """Bare view backing ``/resource/<key>/`` in the breadcrumb test URLConf."""
class NestedResourceInstance(APIView):
    """Bare view backing ``/resource/<key>/<other>`` in the breadcrumb test URLConf."""
class CustomNameResourceInstance(APIView):
    """View overriding its display name, to test custom breadcrumb labels."""

    def get_view_name(self):
        return "Foo"
class ResourceViewSet(ModelViewSet):
    """Minimal viewset so router-generated routes appear in breadcrumbs."""
    serializer_class = ModelSerializer
    queryset = BasicModel.objects.all()
# URLConf consumed by the breadcrumb tests below via the
# ROOT_URLCONF='tests.test_utils' override.
router = SimpleRouter()
router.register(r'resources', ResourceViewSet)
urlpatterns = [
    url(r'^$', Root.as_view()),
    url(r'^resource/$', ResourceRoot.as_view()),
    url(r'^resource/customname$', CustomNameResourceInstance.as_view()),
    url(r'^resource/(?P<key>[0-9]+)$', ResourceInstance.as_view()),
    url(r'^resource/(?P<key>[0-9]+)/$', NestedResourceRoot.as_view()),
    url(r'^resource/(?P<key>[0-9]+)/(?P<other>[A-Za-z]+)$', NestedResourceInstance.as_view()),
]
# Append the router-generated /resources/ list and detail routes.
urlpatterns += router.urls
@override_settings(ROOT_URLCONF='tests.test_utils')
class BreadcrumbTests(TestCase):
    """
    Tests the breadcrumb functionality used by the HTML renderer.

    Each test asserts the full (label, url) trail that get_breadcrumbs()
    derives from the URLConf defined above in this module.
    """
    def test_root_breadcrumbs(self):
        url = '/'
        assert get_breadcrumbs(url) == [('Root', '/')]

    def test_resource_root_breadcrumbs(self):
        url = '/resource/'
        assert get_breadcrumbs(url) == [
            ('Root', '/'), ('Resource Root', '/resource/')
        ]

    def test_resource_instance_breadcrumbs(self):
        url = '/resource/123'
        assert get_breadcrumbs(url) == [
            ('Root', '/'),
            ('Resource Root', '/resource/'),
            ('Resource Instance', '/resource/123')
        ]

    def test_resource_instance_customname_breadcrumbs(self):
        # The 'Foo' label comes from CustomNameResourceInstance.get_view_name().
        url = '/resource/customname'
        assert get_breadcrumbs(url) == [
            ('Root', '/'),
            ('Resource Root', '/resource/'),
            ('Foo', '/resource/customname')
        ]

    def test_nested_resource_breadcrumbs(self):
        url = '/resource/123/'
        assert get_breadcrumbs(url) == [
            ('Root', '/'),
            ('Resource Root', '/resource/'),
            ('Resource Instance', '/resource/123'),
            ('Nested Resource Root', '/resource/123/')
        ]

    def test_nested_resource_instance_breadcrumbs(self):
        url = '/resource/123/abc'
        assert get_breadcrumbs(url) == [
            ('Root', '/'),
            ('Resource Root', '/resource/'),
            ('Resource Instance', '/resource/123'),
            ('Nested Resource Root', '/resource/123/'),
            ('Nested Resource Instance', '/resource/123/abc')
        ]

    def test_broken_url_breadcrumbs_handled_gracefully(self):
        # URLs that resolve to nothing should fall back to just the root crumb.
        url = '/foobar'
        assert get_breadcrumbs(url) == [('Root', '/')]

    def test_modelviewset_resource_instance_breadcrumbs(self):
        # Router-generated routes get 'Resource List' / 'Resource Instance' labels.
        url = '/resources/1/'
        assert get_breadcrumbs(url) == [
            ('Root', '/'),
            ('Resource List', '/resources/'),
            ('Resource Instance', '/resources/1/')
        ]
class ResolveModelTests(TestCase):
    """
    `_resolve_model` should return a Django model class given the
    provided argument is a Django model class itself, or a properly
    formatted string representation of one.
    """
    def test_resolve_django_model(self):
        # A model class is returned unchanged.
        resolved_model = _resolve_model(BasicModel)
        assert resolved_model == BasicModel

    def test_resolve_string_representation(self):
        # 'app_label.ModelName' strings resolve to the model class.
        resolved_model = _resolve_model('tests.BasicModel')
        assert resolved_model == BasicModel

    def test_resolve_unicode_representation(self):
        # Same as above, but explicitly exercising the text type on Python 2.
        resolved_model = _resolve_model(six.text_type('tests.BasicModel'))
        assert resolved_model == BasicModel

    def test_resolve_non_django_model(self):
        # Arbitrary classes are rejected.
        with self.assertRaises(ValueError):
            _resolve_model(TestCase)

    def test_resolve_improper_string_representation(self):
        # Strings without the 'app_label.' prefix are rejected.
        with self.assertRaises(ValueError):
            _resolve_model('BasicModel')
class ResolveModelWithPatchedDjangoTests(TestCase):
    """
    Test coverage for when Django's `get_model` returns `None`.
    Under certain circumstances Django may return `None` with `get_model`:
    http://git.io/get-model-source
    It usually happens with circular imports so it is important that DRF
    excepts early, otherwise fault happens downstream and is much more
    difficult to debug.
    """
    def setUp(self):
        """Monkeypatch get_model."""
        # Save the real implementation so tearDown can restore it even if
        # the test fails.
        self.get_model = rest_framework.compat.apps.get_model

        def get_model(app_label, model_name):
            return None

        rest_framework.compat.apps.get_model = get_model

    def tearDown(self):
        """Revert monkeypatching."""
        rest_framework.compat.apps.get_model = self.get_model

    def test_blows_up_if_model_does_not_resolve(self):
        # With get_model patched to return None, resolution must raise
        # ImproperlyConfigured rather than fail obscurely later.
        with self.assertRaises(ImproperlyConfigured):
            _resolve_model('tests.BasicModel')
class JsonFloatTests(TestCase):
    """
    Internally, wrapped json functions should adhere to strict float
    handling: non-finite values (inf/-inf/nan) are rejected.
    """
    def test_dumps(self):
        with self.assertRaises(ValueError):
            json.dumps(float('inf'))

        with self.assertRaises(ValueError):
            json.dumps(float('nan'))

    def test_loads(self):
        with self.assertRaises(ValueError):
            json.loads("Infinity")

        with self.assertRaises(ValueError):
            json.loads("NaN")
@override_settings(STRICT_JSON=False)
class NonStrictJsonFloatTests(JsonFloatTests):
    """
    Setting `STRICT_JSON = False` should not affect the internal json
    wrappers: the inherited strictness tests must still pass.
    """
| |
# Generated by Django 2.1.3 on 2018-11-20 22:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
import wagtail.core.models
import wagtail.images.models
import wagtail.search.index
# Functions from wagtail.images.migrations.0002_initial_data
def add_image_permissions_to_admin_groups(apps, schema_editor):
    """Grant add/change/delete image permissions to the default 'Editors'
    and 'Moderators' groups (forward data migration)."""
    ContentType = apps.get_model("contenttypes.ContentType")
    Permission = apps.get_model("auth.Permission")
    Group = apps.get_model("auth.Group")

    # Look up (or create) the image content type.
    image_content_type, _ = ContentType.objects.get_or_create(
        model="image", app_label="wagtailimages"
    )

    # Ensure the three image permissions exist, in the same order as before.
    image_permissions = [
        Permission.objects.get_or_create(
            content_type=image_content_type,
            codename=codename,
            defaults={"name": name},
        )[0]
        for codename, name in (
            ("add_image", "Can add image"),
            ("change_image", "Can change image"),
            ("delete_image", "Can delete image"),
        )
    ]

    # Assign them to the Editors and Moderators groups.
    for group in Group.objects.filter(name__in=["Editors", "Moderators"]):
        group.permissions.add(*image_permissions)
def remove_image_permissions(apps, schema_editor):
    """Reverse the above additions of permissions."""
    ContentType = apps.get_model("contenttypes.ContentType")
    Permission = apps.get_model("auth.Permission")

    image_content_type = ContentType.objects.get(
        model="image",
        app_label="wagtailimages",
    )

    # Deleting the Permission rows cascades to the Group m2m entries.
    permissions_to_remove = Permission.objects.filter(
        content_type=image_content_type,
        codename__in=("add_image", "change_image", "delete_image"),
    )
    permissions_to_remove.delete()
# Functions from wagtail.images.migrations.0012_copy_image_permissions_to_collections
def get_image_permissions(apps):
    """Return a queryset of the 'add_image' and 'change_image' permissions."""
    Permission = apps.get_model("auth.Permission")
    ContentType = apps.get_model("contenttypes.ContentType")

    image_content_type, _ = ContentType.objects.get_or_create(
        model="image",
        app_label="wagtailimages",
    )
    return Permission.objects.filter(
        content_type=image_content_type, codename__in=["add_image", "change_image"]
    )
def copy_image_permissions_to_collections(apps, schema_editor):
    """Mirror existing group image permissions onto the root collection."""
    Collection = apps.get_model("wagtailcore.Collection")
    Group = apps.get_model("auth.Group")
    GroupCollectionPermission = apps.get_model("wagtailcore.GroupCollectionPermission")

    # depth=1 is the root of the collection tree.
    root_collection = Collection.objects.get(depth=1)

    for permission in get_image_permissions(apps):
        for group in Group.objects.filter(permissions=permission):
            GroupCollectionPermission.objects.create(
                group=group, collection=root_collection, permission=permission
            )
def remove_image_permissions_from_collections(apps, schema_editor):
    """Reverse of copy_image_permissions_to_collections."""
    GroupCollectionPermission = apps.get_model("wagtailcore.GroupCollectionPermission")
    matching = GroupCollectionPermission.objects.filter(
        permission__in=get_image_permissions(apps)
    )
    matching.delete()
class Migration(migrations.Migration):
    """Squashed wagtailimages migration covering 0001 through 0021."""

    # Individual migrations this squash replaces.
    replaces = [
        ("wagtailimages", "0001_initial"),
        ("wagtailimages", "0002_initial_data"),
        ("wagtailimages", "0003_fix_focal_point_fields"),
        ("wagtailimages", "0004_make_focal_point_key_not_nullable"),
        ("wagtailimages", "0005_make_filter_spec_unique"),
        ("wagtailimages", "0006_add_verbose_names"),
        ("wagtailimages", "0007_image_file_size"),
        ("wagtailimages", "0008_image_created_at_index"),
        ("wagtailimages", "0009_capitalizeverbose"),
        ("wagtailimages", "0010_change_on_delete_behaviour"),
        ("wagtailimages", "0011_image_collection"),
        ("wagtailimages", "0012_copy_image_permissions_to_collections"),
        ("wagtailimages", "0013_make_rendition_upload_callable"),
        ("wagtailimages", "0014_add_filter_spec_field"),
        ("wagtailimages", "0015_fill_filter_spec_field"),
        ("wagtailimages", "0016_deprecate_rendition_filter_relation"),
        ("wagtailimages", "0017_reduce_focal_point_key_max_length"),
        ("wagtailimages", "0018_remove_rendition_filter"),
        ("wagtailimages", "0019_delete_filter"),
        ("wagtailimages", "0020_add-verbose-name"),
        ("wagtailimages", "0021_image_file_hash"),
    ]

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("wagtailcore", "0002_initial_data"),
        ("taggit", "0001_initial"),
        ("wagtailcore", "0026_group_collection_permission"),
    ]

    operations = [
        migrations.CreateModel(
            name="Image",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=255, verbose_name="title")),
                (
                    "file",
                    models.ImageField(
                        height_field="height",
                        upload_to=wagtail.images.models.get_upload_to,
                        verbose_name="file",
                        width_field="width",
                    ),
                ),
                ("width", models.IntegerField(editable=False, verbose_name="width")),
                ("height", models.IntegerField(editable=False, verbose_name="height")),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, db_index=True, verbose_name="created at"
                    ),
                ),
                # Optional focal point rectangle used for smart cropping.
                ("focal_point_x", models.PositiveIntegerField(blank=True, null=True)),
                ("focal_point_y", models.PositiveIntegerField(blank=True, null=True)),
                (
                    "focal_point_width",
                    models.PositiveIntegerField(blank=True, null=True),
                ),
                (
                    "focal_point_height",
                    models.PositiveIntegerField(blank=True, null=True),
                ),
                (
                    "tags",
                    taggit.managers.TaggableManager(
                        blank=True,
                        help_text=None,
                        through="taggit.TaggedItem",
                        to="taggit.Tag",
                        verbose_name="tags",
                    ),
                ),
                (
                    "uploaded_by_user",
                    models.ForeignKey(
                        blank=True,
                        editable=False,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="uploaded by user",
                    ),
                ),
                ("file_size", models.PositiveIntegerField(editable=False, null=True)),
                (
                    "collection",
                    models.ForeignKey(
                        default=wagtail.core.models.get_root_collection_id,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="+",
                        to="wagtailcore.Collection",
                        verbose_name="collection",
                    ),
                ),
                (
                    "file_hash",
                    models.CharField(blank=True, editable=False, max_length=40),
                ),
            ],
            options={
                "abstract": False,
                "verbose_name": "image",
                "verbose_name_plural": "images",
            },
            bases=(models.Model, wagtail.search.index.Indexed),
        ),
        migrations.CreateModel(
            name="Rendition",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "file",
                    models.ImageField(
                        height_field="height",
                        upload_to=wagtail.images.models.get_rendition_upload_to,
                        width_field="width",
                    ),
                ),
                ("width", models.IntegerField(editable=False)),
                ("height", models.IntegerField(editable=False)),
                (
                    "focal_point_key",
                    models.CharField(
                        blank=True, default="", editable=False, max_length=16
                    ),
                ),
                ("filter_spec", models.CharField(db_index=True, max_length=255)),
                (
                    "image",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="renditions",
                        to="wagtailimages.Image",
                    ),
                ),
            ],
        ),
        # One rendition per (image, filter, focal point) combination.
        migrations.AlterUniqueTogether(
            name="rendition",
            unique_together={("image", "filter_spec", "focal_point_key")},
        ),
        # Data migrations defined at module level above.
        migrations.RunPython(
            add_image_permissions_to_admin_groups, remove_image_permissions
        ),
        migrations.RunPython(
            copy_image_permissions_to_collections,
            remove_image_permissions_from_collections,
        ),
    ]
| |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import logging
import json
import os
import subprocess
import luigi
from luigi.contrib import bigquery, gcs
from luigi.task import MixinNaiveBulkComplete
logger = logging.getLogger('luigi-interface')
class DataflowParamKeys(metaclass=abc.ABCMeta):
    """
    Defines the naming conventions for Dataflow execution params.
    For example, the Java API expects param names in lower camel case, whereas
    the Python implementation expects snake case.

    Each abstract property returns the flag name (without the leading "--")
    that BeamDataflowJobTask._get_dataflow_args uses when serialising the
    corresponding task attribute into a command-line argument.
    """
    @property
    @abc.abstractmethod
    def runner(self):
        pass

    @property
    @abc.abstractmethod
    def project(self):
        pass

    @property
    @abc.abstractmethod
    def zone(self):
        pass

    @property
    @abc.abstractmethod
    def region(self):
        pass

    @property
    @abc.abstractmethod
    def staging_location(self):
        pass

    @property
    @abc.abstractmethod
    def temp_location(self):
        pass

    @property
    @abc.abstractmethod
    def gcp_temp_location(self):
        pass

    @property
    @abc.abstractmethod
    def num_workers(self):
        pass

    @property
    @abc.abstractmethod
    def autoscaling_algorithm(self):
        pass

    @property
    @abc.abstractmethod
    def max_num_workers(self):
        pass

    @property
    @abc.abstractmethod
    def disk_size_gb(self):
        pass

    @property
    @abc.abstractmethod
    def worker_machine_type(self):
        pass

    @property
    @abc.abstractmethod
    def worker_disk_type(self):
        pass

    @property
    @abc.abstractmethod
    def job_name(self):
        pass

    @property
    @abc.abstractmethod
    def service_account(self):
        pass

    @property
    @abc.abstractmethod
    def network(self):
        pass

    @property
    @abc.abstractmethod
    def subnetwork(self):
        pass

    @property
    @abc.abstractmethod
    def labels(self):
        pass
class _CmdLineRunner:
    """
    Executes a given command line class in a subprocess, logging its output.
    If more complex monitoring/logging is desired, user can implement their
    own launcher class and set it in BeamDataflowJobTask.cmd_line_runner.
    """

    @staticmethod
    def run(cmd, task=None):
        """Run *cmd*, streaming its combined stdout/stderr to the logger.

        Raises subprocess.CalledProcessError (with the captured output
        attached) when the process exits non-zero.
        """
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            close_fds=True
        )
        captured = []
        # Stream the child's output line by line until EOF.
        for raw_line in iter(process.stdout.readline, b""):
            decoded = raw_line.decode("utf-8")
            captured.append(decoded)
            logger.info(decoded.rstrip("\n"))
        process.stdout.close()
        exit_code = process.wait()
        if exit_code:
            raise subprocess.CalledProcessError(
                exit_code, cmd, output="".join(captured)
            )
class BeamDataflowJobTask(MixinNaiveBulkComplete, luigi.Task, metaclass=abc.ABCMeta):
    """
    Luigi wrapper for a Dataflow job. Must be overridden for each Beam SDK
    with that SDK's dataflow_executable().
    For more documentation, see:
    https://cloud.google.com/dataflow/docs/guides/specifying-exec-params
    The following required Dataflow properties must be set:
        project                 # GCP project ID
        temp_location           # Cloud storage path for temporary files
    The following optional Dataflow properties can be set:
        runner                  # PipelineRunner implementation for your Beam job.
                                  Default: DirectRunner
        num_workers             # The number of workers to start the task with
                                  Default: Determined by Dataflow service
        autoscaling_algorithm   # The Autoscaling mode for the Dataflow job
                                  Default: `THROUGHPUT_BASED`
        max_num_workers         # Used if the autoscaling is enabled
                                  Default: Determined by Dataflow service
        network                 # Network in GCE to be used for launching workers
                                  Default: a network named "default"
        subnetwork              # Subnetwork in GCE to be used for launching workers
                                  Default: Determined by Dataflow service
        disk_size_gb            # Remote worker disk size. Minimum value is 30GB
                                  Default: set to 0 to use GCP project default
        worker_machine_type     # Machine type to create Dataflow worker VMs
                                  Default: Determined by Dataflow service
        job_name                # Custom job name, must be unique across project's
                                  active jobs
        worker_disk_type        # Specify SSD for local disk or defaults to hard
                                  disk as a full URL of disk type resource
                                  Default: Determined by Dataflow service.
        service_account         # Service account of Dataflow VMs/workers
                                  Default: active GCE service account
        region                  # Region to deploy Dataflow job to
                                  Default: us-central1
        zone                    # Availability zone for launching workers instances
                                  Default: an available zone in the specified region
        staging_location        # Cloud Storage bucket for Dataflow to stage binary
                                  files
                                  Default: the value of temp_location
        gcp_temp_location       # Cloud Storage path for Dataflow to stage temporary
                                  files
                                  Default: the value of temp_location
        labels                  # Custom GCP labels attached to the Dataflow job
                                  Default: nothing
    """
    # Class-level defaults; subclasses override the ones they need.
    project = None
    runner = None
    temp_location = None
    staging_location = None
    gcp_temp_location = None
    num_workers = None
    autoscaling_algorithm = None
    max_num_workers = None
    network = None
    subnetwork = None
    disk_size_gb = None
    worker_machine_type = None
    job_name = None
    worker_disk_type = None
    service_account = None
    zone = None
    region = None
    # NOTE(review): mutable class-level default, shared by all subclasses
    # that do not override it — treat as read-only.
    labels = {}
    # Pluggable launcher; see _CmdLineRunner above.
    cmd_line_runner = _CmdLineRunner
    # Must be set by SDK-specific subclasses to a DataflowParamKeys instance.
    dataflow_params = None

    def __init__(self):
        if not isinstance(self.dataflow_params, DataflowParamKeys):
            raise ValueError("dataflow_params must be of type DataflowParamKeys")
        super(BeamDataflowJobTask, self).__init__()

    @abc.abstractmethod
    def dataflow_executable(self):
        """
        Command representing the Dataflow executable to be run.
        For example:
        return ['java', 'com.spotify.luigi.MyClass', '-Xmx256m']
        """
        pass

    def args(self):
        """
        Extra String arguments that will be passed to your Dataflow job.
        For example:
        return ['--setup_file=setup.py']
        """
        return []

    def before_run(self):
        """
        Hook that gets called right before the Dataflow job is launched.
        Can be used to setup any temporary files/tables, validate input, etc.
        """
        pass

    def on_successful_run(self):
        """
        Callback that gets called right after the Dataflow job has finished
        successfully but before validate_output is run.
        """
        pass

    def validate_output(self):
        """
        Callback that can be used to validate your output before it is moved to
        its final location. Returning false here will cause the job to fail, and
        output to be removed instead of published.
        """
        return True

    def file_pattern(self):
        """
        If one/some of the input target files are not in the pattern of part-*,
        we can add the key of the required target and the correct file pattern
        that should be appended in the command line here. If the input target key is not found
        in this dict, the file pattern will be assumed to be part-* for that target.

        :return A dictionary of overridden file pattern that is not part-* for the inputs
        """
        return {}

    def on_successful_output_validation(self):
        """
        Callback that gets called after the Dataflow job has finished
        successfully if validate_output returns True.
        """
        pass

    def cleanup_on_error(self, error):
        """
        Callback that gets called after the Dataflow job has finished
        unsuccessfully, or validate_output returns False.
        """
        pass

    def run(self):
        # Launch the Dataflow process, then run the success/validation hooks.
        cmd_line = self._mk_cmd_line()
        logger.info(' '.join(cmd_line))

        self.before_run()

        try:
            self.cmd_line_runner.run(cmd_line, self)
        except subprocess.CalledProcessError as e:
            logger.error(e, exc_info=True)
            self.cleanup_on_error(e)
            # Hard-exit with the subprocess's exit code, bypassing normal
            # interpreter cleanup.
            os._exit(e.returncode)

        self.on_successful_run()

        if self.validate_output():
            self.on_successful_output_validation()
        else:
            error = ValueError("Output validation failed")
            self.cleanup_on_error(error)
            raise error

    def _mk_cmd_line(self):
        # Executable + dataflow flags + user args + input/output args.
        cmd_line = self.dataflow_executable()
        cmd_line.extend(self._get_dataflow_args())
        cmd_line.extend(self.args())
        cmd_line.extend(self._format_input_args())
        cmd_line.extend(self._format_output_args())
        return cmd_line

    def _get_runner(self):
        # Validate the configured runner, defaulting to DirectRunner.
        if not self.runner:
            logger.warning("Runner not supplied to BeamDataflowJobTask. " +
                           "Defaulting to DirectRunner.")
            return "DirectRunner"
        elif self.runner in [
            "DataflowRunner",
            "DirectRunner"
        ]:
            return self.runner
        else:
            raise ValueError("Runner %s is unsupported." % self.runner)

    def _get_dataflow_args(self):
        # Serialise each configured attribute as "--<sdk param name>=<value>",
        # using the SDK-specific names supplied by self.dataflow_params.
        def f(key, value):
            return '--{}={}'.format(key, value)

        output = []

        output.append(f(self.dataflow_params.runner, self._get_runner()))

        if self.project:
            output.append(f(self.dataflow_params.project, self.project))
        if self.zone:
            output.append(f(self.dataflow_params.zone, self.zone))
        if self.region:
            output.append(f(self.dataflow_params.region, self.region))
        if self.staging_location:
            output.append(f(self.dataflow_params.staging_location, self.staging_location))
        if self.temp_location:
            output.append(f(self.dataflow_params.temp_location, self.temp_location))
        if self.gcp_temp_location:
            output.append(f(self.dataflow_params.gcp_temp_location, self.gcp_temp_location))
        if self.num_workers:
            output.append(f(self.dataflow_params.num_workers, self.num_workers))
        if self.autoscaling_algorithm:
            output.append(f(self.dataflow_params.autoscaling_algorithm, self.autoscaling_algorithm))
        if self.max_num_workers:
            output.append(f(self.dataflow_params.max_num_workers, self.max_num_workers))
        if self.disk_size_gb:
            output.append(f(self.dataflow_params.disk_size_gb, self.disk_size_gb))
        if self.worker_machine_type:
            output.append(f(self.dataflow_params.worker_machine_type, self.worker_machine_type))
        if self.worker_disk_type:
            output.append(f(self.dataflow_params.worker_disk_type, self.worker_disk_type))
        if self.network:
            output.append(f(self.dataflow_params.network, self.network))
        if self.subnetwork:
            output.append(f(self.dataflow_params.subnetwork, self.subnetwork))
        if self.job_name:
            output.append(f(self.dataflow_params.job_name, self.job_name))
        if self.service_account:
            output.append(f(self.dataflow_params.service_account, self.service_account))
        if self.labels:
            output.append(f(self.dataflow_params.labels, json.dumps(self.labels)))

        return output

    def _format_input_args(self):
        """
        Parses the result(s) of self.input() into a string-serialized
        key-value list passed to the Dataflow job. Valid inputs include:

        return FooTarget()
        return {"input1": FooTarget(), "input2": FooTarget2()}
        return ("input", FooTarget())
        return [("input1", FooTarget()), ("input2", FooTarget2())]
        return [FooTarget(), FooTarget2()]

        Unlabeled input are passed in with under the default key "input".
        """
        job_input = self.input()
        if isinstance(job_input, luigi.Target):
            job_input = {"input": job_input}
        elif isinstance(job_input, tuple):
            job_input = {job_input[0]: job_input[1]}
        elif isinstance(job_input, list):
            if all(isinstance(item, tuple) for item in job_input):
                job_input = dict(job_input)
            else:
                job_input = {"input": job_input}
        elif not isinstance(job_input, dict):
            raise ValueError("Invalid job input requires(). Supported types: ["
                             "Target, tuple of (name, Target), "
                             "dict of (name: Target), list of Targets]")
        if not isinstance(self.file_pattern(), dict):
            raise ValueError('file_pattern() must return a dict type')

        input_args = []
        for (name, targets) in job_input.items():
            uris = [
                self.get_target_path(uri_target) for uri_target in luigi.task.flatten(targets)
            ]
            if isinstance(targets, dict):
                """
                If targets is a dict that means it had multiple outputs.
                Make the input args in that case "<input key>-<task output key>"
                """
                names = ["%s-%s" % (name, key) for key in targets.keys()]
            else:
                names = [name] * len(uris)

            input_dict = {}
            for (arg_name, uri) in zip(names, uris):
                # The file pattern is looked up by the original input key,
                # not the expanded "<key>-<output key>" name.
                pattern = self.file_pattern().get(name, 'part-*')
                input_value = input_dict.get(arg_name, [])
                input_value.append(uri.rstrip('/') + '/' + pattern)
                input_dict[arg_name] = input_value

            for (key, paths) in input_dict.items():
                input_args.append("--%s=%s" % (key, ','.join(paths)))

        return input_args

    def _format_output_args(self):
        """
        Parses the result(s) of self.output() into a string-serialized
        key-value list passed to the Dataflow job. Valid outputs include:

        return FooTarget()
        return {"output1": FooTarget(), "output2": FooTarget2()}

        Unlabeled outputs are passed in with under the default key "output".
        """
        job_output = self.output()
        if isinstance(job_output, luigi.Target):
            job_output = {"output": job_output}
        elif not isinstance(job_output, dict):
            raise ValueError(
                "Task output must be a Target or a dict from String to Target")

        output_args = []

        for (name, target) in job_output.items():
            uri = self.get_target_path(target)
            output_args.append("--%s=%s" % (name, uri))

        return output_args

    @staticmethod
    def get_target_path(target):
        """
        Given a luigi Target, determine a stringly typed path to pass as a
        Dataflow job argument.
        """
        if isinstance(target, luigi.LocalTarget) or isinstance(target, gcs.GCSTarget):
            return target.path
        elif isinstance(target, bigquery.BigQueryTarget):
            # BigQuery tables are addressed as "project:dataset.table".
            return "{}:{}.{}".format(target.table.project_id, target.table.dataset_id, target.table.table_id)
        else:
            raise ValueError("Target %s not supported" % target)
| |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008 Adroll.com and Valentino Volonghi <dialtone@adroll.com>
# 20090113 Frank Scholz <coherence@beebits.net>
# renamed watch() kwarg autoAdd back to auto_add, to not break
# existing applications
from twisted.internet import defer, reactor
from twisted.python import filepath
from twisted.trial import unittest
import inotify
class TestINotify(unittest.TestCase):
    def setUp(self):
        # Fresh temporary directory plus a new INotify instance per test.
        self.dirname = filepath.FilePath(self.mktemp())
        self.dirname.createDirectory()
        self.inotify = inotify.INotify()
    def tearDown(self):
        # Release the inotify file descriptor before removing the directory.
        self.inotify.release()
        self.inotify = None
        self.dirname.remove()
    def test_notifications(self):
        """
        Test that a notification is actually delivered on a file
        creation.
        """
        NEW_FILENAME = "new_file.file"
        EXTRA_ARG = "HELLO"
        checkMask = inotify.IN_CREATE | inotify.IN_CLOSE_WRITE
        calls = []

        # We actually expect 2 calls here, one when we create
        # and one when we close the file after writing it.
        def _callback(wp, filename, mask, data):
            try:
                self.assertEquals(filename, NEW_FILENAME)
                self.assertEquals(data, EXTRA_ARG)
                calls.append(filename)
                if len(calls) == 2:
                    # Second event: the close-after-write.
                    self.assert_(mask & inotify.IN_CLOSE_WRITE)
                    d.callback(None)
                elif len(calls) == 1:
                    # First event: the creation.
                    self.assert_(mask & inotify.IN_CREATE)
            except Exception, e:
                d.errback(e)

        self.inotify.watch(
            self.dirname, mask=checkMask,
            callbacks=(_callback, EXTRA_ARG)
        )
        d = defer.Deferred()
        # Create, write and close the file to trigger both events.
        f = self.dirname.child(NEW_FILENAME).open('wb')
        f.write("hello darling")
        f.close()
        return d
def test_simpleSubdirectoryAutoAdd(self):
"""
Test that when a subdirectory is added to a watched directory
it is also added to the watched list.
"""
def _callback(wp, filename, mask, data):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assert_(self.inotify.isWatched(SUBDIR.path))
d.callback(None)
except Exception, e:
d.errback(e)
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, auto_add=True,
callbacks=(_callback, None)
)
SUBDIR = self.dirname.child('test')
d = defer.Deferred()
SUBDIR.createDirectory()
return d
def test_simpleDeleteDirectory(self):
"""
Test that when a subdirectory is added and then removed it is
also removed from the watchlist
"""
calls = []
def _callback(wp, filename, mask, data):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assert_(self.inotify.isWatched(SUBDIR.path))
SUBDIR.remove()
except Exception, e:
print e
d.errback(e)
def _eb():
# second call, we have just removed the subdir
try:
self.assert_(not self.inotify.isWatched(SUBDIR.path))
d.callback(None)
except Exception, e:
print e
d.errback(e)
if not calls:
# first call, it's the create subdir
calls.append(filename)
reactor.callLater(0.1, _)
else:
reactor.callLater(0.1, _eb)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, auto_add=True,
callbacks=(_callback, None)
)
SUBDIR = self.dirname.child('test')
d = defer.Deferred()
SUBDIR.createDirectory()
return d
def test_ignoreDirectory(self):
"""
Test that ignoring a directory correctly removes it from the
watchlist without removing it from the filesystem.
"""
self.inotify.watch(
self.dirname, auto_add=True
)
self.assert_(self.inotify.isWatched(self.dirname))
self.inotify.ignore(self.dirname)
self.assert_(not self.inotify.isWatched(self.dirname))
def test_watchPoint(self):
"""
Test that Watch methods work as advertised
"""
w = inotify.Watch('/tmp/foobar')
f = lambda : 5
w.addCallback(f)
self.assert_(w.callbacks, [(f, None)])
def test_flagToHuman(self):
"""
Test the helper function
"""
for mask, value in inotify._FLAG_TO_HUMAN.iteritems():
self.assert_(inotify.flag_to_human(mask)[0], value)
checkMask = inotify.IN_CLOSE_WRITE|inotify.IN_ACCESS|inotify.IN_OPEN
self.assert_(
len(inotify.flag_to_human(checkMask)),
3
)
def test_recursiveWatch(self):
"""
Test that a recursive watch correctly adds all the paths in
the watched directory.
"""
SUBDIR = self.dirname.child('test')
SUBDIR2 = SUBDIR.child('test2')
SUBDIR3 = SUBDIR2.child('test3')
SUBDIR3.makedirs()
DIRS = [SUBDIR, SUBDIR2, SUBDIR3]
self.inotify.watch(self.dirname, recursive=True)
# let's even call this twice so that we test that nothing breaks
self.inotify.watch(self.dirname, recursive=True)
for d in DIRS:
self.assert_(self.inotify.isWatched(d))
def test_noAutoAddSubdirectory(self):
"""
Test that if auto_add is off we don't add a new directory
"""
def _callback(wp, filename, mask, data):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assert_(not self.inotify.isWatched(SUBDIR.path))
d.callback(None)
except Exception, e:
d.errback(e)
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, auto_add=False,
callbacks=(_callback, None)
)
SUBDIR = self.dirname.child('test')
d = defer.Deferred()
SUBDIR.createDirectory()
return d
def test_complexSubdirectoryAutoAdd(self):
"""
Test that when we add one subdirectory with other new children
and files we end up with the notifications for those files and
with all those directories watched.
This is basically the most critical testcase for inotify.
"""
calls = set()
def _callback(wp, filename, mask, data):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assert_(self.inotify.isWatched(SUBDIR.path))
self.assert_(self.inotify.isWatched(SUBDIR2.path))
self.assert_(self.inotify.isWatched(SUBDIR3.path))
CREATED = SOME_FILES.union(
set([SUBDIR.basename(),
SUBDIR2.basename(),
SUBDIR3.basename()
])
)
self.assert_(len(calls), len(CREATED))
self.assertEquals(calls, CREATED)
except Exception, e:
d.errback(e)
else:
d.callback(None)
if not calls:
# Just some delay to be sure, given how the algorithm
# works for this we know that there's a new extra cycle
# every subdirectory
reactor.callLater(0.1, _)
calls.add(filename)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, auto_add=True,
callbacks=(_callback, None)
)
SUBDIR = self.dirname.child('test')
SUBDIR2 = SUBDIR.child('test2')
SUBDIR3 = SUBDIR2.child('test3')
SOME_FILES = set(["file1.dat", "file2.dat", "file3.dat"])
d = defer.Deferred()
SUBDIR3.makedirs()
# Add some files in pretty much all the directories so that we
# see that we process all of them.
for i, filename in enumerate(SOME_FILES):
if not i:
S = SUBDIR
if i == 1:
S = SUBDIR2
else:
S = SUBDIR3
S.child(filename).setContent(filename)
return d
| |
"""
file: gnuplot.py
authors: Matt Rasmussen
date: 11/30/05
Plotting classes and functions: GNUPLOT wrapper
"""
import copy
import os
INF = 1e1000
class Gnuplot:
class Plot:
def __init__(self, xlist, ylist, zlist, options):
self.xlist = copy.copy(xlist)
self.ylist = copy.copy(ylist)
self.zlist = copy.copy(zlist)
self.options = copy.copy(options)
def __init__(self):
self.data = []
self.stream = None
self.margin = .1
self.enable = True
self.options = {
# plot options
"style": "points",
"main": "",
"xlab": "",
"ylab": "",
"zlab": "",
"plab": "",
"eqn": None,
# graph options
"xmin": None,
"xmax": None,
"ymin": None,
"ymax": None,
"zmax": None,
"zmin": None,
"xtics": None,
"ytics": None,
"ztics": None,
"xlog": None,
"ylog": None,
"zlog": None,
"margin": None
}
self.DATA_ONLY_OPTION = ["err", "errlow", "errhi"]
def set(self, **options):
if "noreplot" in options:
noreplot = False
del options["noreplot"]
else:
noreplot = True
for key in options:
if key in self.DATA_ONLY_OPTION:
continue
self.options[key] = options[key]
if not noreplot:
self.replot()
def gnuplot(self, text):
self.stream.write(text)
def xrange(self, start=None, end=None):
self.options["xmin"] = start
self.options["xmax"] = end
self.replot()
def yrange(self, start=None, end=None):
self.options["ymin"] = start
self.options["ymax"] = end
self.replot()
def zrange(self, start=None, end=None):
self.options["zmin"] = start
self.options["zmax"] = end
self.replot()
def unlog(self):
self.options["xlog"] = False
self.options["ylog"] = False
self.options["zlog"] = False
self.replot()
def xlog(self, base=10):
self.options["xlog"] = base
self.replot()
def ylog(self, base=10):
self.options["ylog"] = base
self.replot()
def zlog(self, base=10):
self.options["zlog"] = base
self.replot()
def loglog(self, base=10):
self.options["xlog"] = base
self.options["ylog"] = base
self.replot()
def clear(self):
self.data = []
def save(self, filename="", format="x11"):
if not self.enable:
return
if filename == "":
tmpfile = self.setTerminal(filename, format)
self.replot()
# wait until plot appears
self.wait()
text = file(tmpfile).read()
os.remove(tmpfile)
else:
self.setTerminal(filename, format)
self.replot()
text = None
# reset format
print >>self.stream, "set terminal x11"
return text
def savedata(self, filename):
"""Save gnuplot commands in filename"""
self.stream = file(filename, "w")
self.replot()
self.enableOutput()
def savetab(self, filename):
"""Save data in tab delimited format"""
from ramsus import util
out = util.open_stream(filename, "w")
for data in self.data:
print >>out, data.options["plab"]
if len(data.ylist) > 0:
if len(data.zlist) > 0:
rows = zip(data.xlist, data.ylist, data.zlist)
labels = [data.options[i]
for i in ["xlab", "ylab", "zlab"]]
else:
rows = zip(data.xlist, data.ylist)
labels = [data.options[i]
for i in["xlab", "ylab"]]
print >>out, "\t".join(labels)
for row in rows:
print >>out, "\t".join(map(str, row))
print >>out
def saveall(self, filename):
"""
Save gnuplot commands, tad delimited, and plot image in the
following files:
<filename>.ps
<filename>.tab
<filename>.png
"""
if not self.enable:
return
self.savetab(filename + ".tab")
self.save(filename + ".png")
self.save(filename + ".ps")
def setTerminal(self, filename="", format="x11"):
if not self.enable:
return
from rasmus import util
# auto detect format from filename
if filename != "":
print >>self.stream, "set output \"%s\"" % filename
# determine format
if filename.endswith(".ps"):
format = "ps"
if filename.endswith(".pdf"):
format = "pdf"
if filename.endswith(".gif"):
format = "gif"
if filename.endswith(".png"):
format = "png"
if filename.endswith(".jpg"):
format = "jpg"
else:
tmpfile = util.tempfile(".", "gnuplot", ".ps")
print >>self.stream, "set output \"%s\"" % tmpfile
return tmpfile
# set terminal format
if format == "ps":
print >>self.stream, "set terminal postscript color"
elif format == "pdf":
print >>self.stream, "set terminal pdf"
elif format == "gif":
print >>self.stream, "set terminal gif"
elif format == "jpg":
print >>self.stream, "set terminal jpeg"
else:
print >>self.stream, "set terminal %s" % format
def wait(self):
"""Wait until all commands are known to be excuted"""
from rasmus import util
tmpfile = util.tempfile(".", "gnuplot", ".ps")
print >>self.stream, "set output '%s'" % tmpfile
print >>self.stream, "set terminal postscript color"
print >>self.stream, "plot '-'\n0 0\ne\n"
self.stream.flush()
while not os.path.isfile(tmpfile):
pass
os.remove(tmpfile)
def findRange(self):
bestLeft = INF
bestRight = -INF
bestTop = -INF
bestBottom = INF
# find ranges for each graph that is plotted
for graph in self.data:
if graph.options["eqn"]:
continue
list1 = graph.xlist
list2 = graph.ylist
# find border
top = max(list2)
bottom = min(list2)
left = min(list1)
right = max(list1)
# record biggest range thus far
if top > bestTop:
bestTop = top
if bottom < bestBottom:
bestBottom = bottom
if left < bestLeft:
bestLeft = left
if right > bestRight:
bestRight = right
# find margin
ymargin = (bestTop - bestBottom) * self.margin
xmargin = (bestRight - bestLeft) * self.margin
if xmargin == 0:
xmargin = 1
if ymargin == 0:
ymargin = 1
# add margin to border
if xmargin > 0 and ymargin > 0:
bestTop += ymargin
bestBottom -= ymargin
bestLeft -= xmargin
bestRight += xmargin
# auto scale
if bestLeft >= INF:
bestLeft = "*"
if bestRight <= -INF:
bestRight = "*"
if bestTop <= -INF:
bestTop = "*"
if bestBottom >= INF:
bestBottom = "*"
if bestLeft == bestRight:
bestLeft = bestRight = "*"
if bestTop == bestBottom:
bestTop = bestBottom = "*"
return (bestTop, bestBottom, bestLeft, bestRight)
def replot(self):
# do nothing if no data or plotting is not enabled
if (len(self.data) == 0 or
not self.enable):
return
# configure
print >>self.stream, "set mouse"
print >>self.stream, "set mxtics"
print >>self.stream, "set mytics"
print >>self.stream, "set mztics"
# margins
if self.options["margin"]:
print >>self.stream, "set tmargin %f" % self.options["margin"]
print >>self.stream, "set bmargin %f" % self.options["margin"]
print >>self.stream, "set lmargin %f" % self.options["margin"]
print >>self.stream, "set rmargin %f" % self.options["margin"]
else:
print >>self.stream, "set tmargin"
print >>self.stream, "set bmargin"
print >>self.stream, "set lmargin"
print >>self.stream, "set rmargin"
# tics
if self.options["xtics"] is None:
print >>self.stream, "set xtics autofreq"
else:
print >>self.stream, "set xtics %f" % self.options["xtics"]
if self.options["ytics"] is None:
print >>self.stream, "set ytics autofreq"
else:
print >>self.stream, "set ytics %f" % self.options["ytics"]
if self.options["ztics"] is None:
print >>self.stream, "set ztics autofreq"
else:
print >>self.stream, "set ztics %f" % self.options["ztics"]
# log scale
print >>self.stream, "unset logscale xyz"
if self.options["xlog"]:
print >>self.stream, "set logscale x %d" % self.options["xlog"]
if self.options["ylog"]:
print >>self.stream, "set logscale y %d" % self.options["ylog"]
if self.options["zlog"]:
print >>self.stream, "set logscale z %d" % self.options["zlog"]
# setup ranges
(maxy, miny, minx, maxx) = self.findRange()
if self.options["xmin"] is not None:
minx = self.options["xmin"]
if self.options["xmax"] is not None:
maxx = self.options["xmax"]
if self.options["ymin"] is not None:
miny = self.options["ymin"]
if self.options["ymax"] is not None:
maxy = self.options["ymax"]
print >>self.stream, (
"set xrange[%s:%s]" % tuple(map(str, [minx, maxx])))
print >>self.stream, (
"set yrange[%s:%s]" % tuple(map(str, [miny, maxy])))
# TODO: add range z
# set labels
if self.options["main"] != "":
print >>self.stream, "set title \"" + self.options["main"] + "\""
if self.options["xlab"] != "":
print >>self.stream, "set xlabel \"" + self.options["xlab"] + "\""
if self.options["ylab"] != "":
print >>self.stream, "set ylabel \"" + self.options["ylab"] + "\""
if self.options["zlab"] != "":
print >>self.stream, "set zlabel \"" + self.options["zlab"] + "\""
# give plot command
if self.data[0].zlist == []:
print >>self.stream, "plot ",
else:
print >>self.stream, "splot ",
for i in range(len(self.data)):
graph = self.data[i]
if graph.options["eqn"]:
# specify direct equation
print >>self.stream, graph.options["eqn"],
else:
# specify inline data
print >>self.stream, "\"-\" ",
# specify style
if graph.options["style"] != "":
print >>self.stream, "with ", graph.options["style"],
# specify plot label
if graph.options["plab"] != "":
print >>self.stream, \
" title \"" + graph.options["plab"] + "\"",
else:
print >>self.stream, " notitle",
if i < len(self.data) - 1:
print >>self.stream, ",",
print >>self.stream, ""
# output data
for graph in self.data:
if graph.options["eqn"]:
continue
self.outputData(
graph.xlist, graph.ylist, graph.zlist, graph.options)
# need to make sure gnuplot gets what we have written
self.stream.flush()
def prepareData(self, list1, list2=[], list3=[]):
if list2 == []:
list2 = list1
list1 = range(len(list1))
if len(list1) != len(list2):
raise Exception("ERROR: arrays are not same length")
return list1, list2, list3
def outputData(self, list1, list2, list3=[], options={}):
for i in range(len(list1)):
if list3 == []:
print >>self.stream, list1[i], list2[i],
else:
print >>self.stream, list1[i], list2[i], list3[i],
# error bars
if "err" in options:
print >>self.stream, options["err"][i],
if "errlow" in options and "errhi" in options:
print >>self.stream, options["errlow"][i], options["errhi"][i],
# newline
print >>self.stream
print >>self.stream, "e"
def enableOutput(self, enable=True):
self.enable = enable
if enable:
self.stream = os.popen("gnuplot", "w")
def plot(self, list1, list2=[], list3=[], **options):
self.set(**options)
options2 = copy.copy(self.options)
options2.update(options)
list1, list2, list3 = self.prepareData(list1, list2, list3)
self.data.append(self.Plot(list1, list2, list3, options2))
if self.enable:
self.stream = os.popen("gnuplot", "w")
self.replot()
def plotfunc(self, func, start, end, step, **options):
x = []
y = []
while start < end:
try:
y.append(func(start))
x.append(start)
except ZeroDivisionError:
pass
start += step
options.setdefault("style", "lines")
self.plot(x, y, **options)
def plotdiag(self, start=None, end=None, **options):
if start is None:
start = INF
for graph in self.data:
if graph.options["eqn"]:
continue
start = min(start, min(graph.xlist))
start = min(start, min(graph.ylist))
if end is None:
end = -INF
for graph in self.data:
if graph.options["eqn"]:
continue
end = max(end, max(graph.xlist))
end = max(end, max(graph.ylist))
options2 = {"style": "lines",
"plab": ""}
options2.update(options)
self.plot([start, end], [start, end], **options2)
def gfit(self, func, eqn, params, list1, list2=[], list3=[], ** options):
"""
all syntax should be valid GNUPLOT syntax
func - a string of the function call i.e. "f(x)"
eqn - a string of a GNUPLOT equation "a*x**b"
params - a dictionary of parameters in eqn and their initial values
ex: {"a": 1, "b": 3}
"""
from rasmus import util
self.set(** options)
print len(list1), len(list2), len(list3)
if not self.enable:
raise Exception("must be output must be enabled for fitting")
list1, list2, list3 = self.prepareData(list1, list2, list3)
# add data to graph
self.data.append(
self.Plot(list1, list2, list3, copy.copy(self.options)))
# perform fitting
self.stream = os.popen("gnuplot", "w")
print >>self.stream, "%s = %s" % (func, eqn)
for param, value in params.items():
print >>self.stream, "%s = %f" % (param, value)
print >>self.stream, "fit %s '-' via %s" % \
(func, ",".join(params.keys()))
self.outputData(list1, list2, list3)
# save and read parameters
outfile = util.tempfile(".", "plot", ".txt")
print >>self.stream, "save var '%s'" % outfile
print >>self.stream, "print 'done'"
self.stream.flush()
# wait for variable file
while not os.path.isfile(outfile):
pass
params = self.readParams(outfile)
os.remove(outfile)
# build eqn for plotting
paramlist = ""
for param, value in params.items():
paramlist += "%s = %s, " % (param, value)
self.options["eqn"] = paramlist + "%s = %s, %s" % \
(func, eqn, func)
self.options["style"] = "lines"
# add fitted eqn to graph
self.data.append(self.Plot([], [], [], copy.copy(self.options)))
self.replot()
def readParams(self, filename):
params = {}
for line in file(filename):
if line[0] == "#":
continue
var, value = line.split("=")
if not var.startswith("MOUSE_"):
params[var.replace(" ", "")] = float(value)
return params
def plot(list1, list2=[], list3=[], **options):
    """Plot the given data on options['plot'], creating a new Gnuplot
    when none was supplied; returns the Gnuplot used."""
    target = options.setdefault("plot", Gnuplot())
    target.plot(list1, list2, list3, **options)
    return target
def plotfunc(func, start, end, step, **options):
    """Plot a function 'func' over the range (start, end)"""
    target = options.setdefault("plot", Gnuplot())
    target.plotfunc(func, start, end, step, **options)
    return target
def plothist(array, ndivs=None, low=None, width=None, **options):
    """Plot a histogram of array"""
    from rasmus import util
    histogram = util.hist(array, ndivs, low, width)
    target = options.setdefault("plot", Gnuplot())
    options.setdefault("style", "boxes fill solid")
    target.plot(util.histbins(histogram[0]), histogram[1], **options)
    return target
def plotdistrib(array, ndivs=None, low=None, width=None, **options):
    """Plot a distribution of array"""
    from rasmus import util
    distribution = util.distrib(array, ndivs, low, width)
    target = options.setdefault("plot", Gnuplot())
    options.setdefault("style", "boxes")
    target.plot(util.histbins(distribution[0]), distribution[1], **options)
    return target
def gfit(func, eqn, params, list1, list2=[], list3=[], **options):
    """Fit eqn to the data via gnuplot, plotting on options['plot']
    (a new Gnuplot when none was supplied); returns the Gnuplot used."""
    target = options.setdefault("plot", Gnuplot())
    target.gfit(func, eqn, params, list1, list2, list3, **options)
    return target
| |
"""Debug Plugin for EasyEngine"""
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.shellexec import *
from ee.core.mysql import EEMysql
from ee.core.services import EEService
from ee.core.logging import Log
from ee.cli.plugins.site_functions import logwatch
from ee.core.variables import EEVariables
from ee.core.fileutils import EEFileUtils
import os
import configparser
import glob
import signal
import subprocess
def ee_debug_hook(app):
    """Cement hook entry point for the debug plugin; currently a no-op
    placeholder that receives the application object."""
    pass
class EEDebugController(CementBaseController):
class Meta:
label = 'debug'
description = 'Used for server level debugging'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['--stop'],
dict(help='Stop debug', action='store_true')),
(['--start'],
dict(help='Start debug', action='store_true')),
(['--import-slow-log'],
dict(help='Import MySQL slow log to Anemometer database',
action='store_true')),
(['--nginx'],
dict(help='start/stop debugging nginx server '
'configuration for site',
action='store' or 'store_const',
choices=('on', 'off'), const='on', nargs='?')),
(['--php'],
dict(help='start/stop debugging server php configuration',
action='store' or 'store_const',
choices=('on', 'off'), const='on', nargs='?')),
(['--fpm'],
dict(help='start/stop debugging fastcgi configuration',
action='store' or 'store_const',
choices=('on', 'off'), const='on', nargs='?')),
(['--mysql'],
dict(help='start/stop debugging mysql server',
action='store' or 'store_const',
choices=('on', 'off'), const='on', nargs='?')),
(['--wp'],
dict(help='start/stop wordpress debugging for site',
action='store' or 'store_const', choices=('on', 'off'),
const='on', nargs='?')),
(['--rewrite'],
dict(help='start/stop debugging nginx rewrite rules for site',
action='store' or 'store_const', choices=('on', 'off'),
const='on', nargs='?')),
(['--all'],
dict(help='start/stop debugging all server parameters',
action='store' or 'store_const', choices=('on', 'off'),
const='on', nargs='?')),
(['-i', '--interactive'],
dict(help='Interactive debug', action='store_true')),
(['--import-slow-log-interval'],
dict(help='Import MySQL slow log to Anemometer',
action='store', dest='interval')),
(['site_name'],
dict(help='Website Name', nargs='?', default=None))
]
usage = "ee debug [<site_name>] [options] "
@expose(hide=True)
def debug_nginx(self):
"""Start/Stop Nginx debug"""
# start global debug
if (self.app.pargs.nginx == 'on' and not self.app.pargs.site_name):
try:
debug_address = (self.app.config.get('stack', 'ip-address')
.split())
except Exception as e:
debug_address = ['0.0.0.0/0']
# Check if IP address is 127.0.0.1 then enable debug globally
if debug_address == ['127.0.0.1'] or debug_address == []:
debug_address = ['0.0.0.0/0']
for ip_addr in debug_address:
if not ("debug_connection "+ip_addr in open('/etc/nginx/'
'nginx.conf', encoding='utf-8').read()):
Log.info(self, "Setting up Nginx debug connection"
" for "+ip_addr)
EEShellExec.cmd_exec(self, "sed -i \"/events {{/a\\ \\ \\ "
"\\ $(echo debug_connection "
"{ip}\;)\" /etc/nginx/"
"nginx.conf".format(ip=ip_addr))
self.trigger_nginx = True
if not self.trigger_nginx:
Log.info(self, "Nginx debug connection already enabled")
self.msg = self.msg + ["/var/log/nginx/*.error.log"]
# stop global debug
elif (self.app.pargs.nginx == 'off' and not self.app.pargs.site_name):
if "debug_connection " in open('/etc/nginx/nginx.conf',
encoding='utf-8').read():
Log.info(self, "Disabling Nginx debug connections")
EEShellExec.cmd_exec(self, "sed -i \"/debug_connection.*/d\""
" /etc/nginx/nginx.conf")
self.trigger_nginx = True
else:
Log.info(self, "Nginx debug connection already disabled")
# start site specific debug
elif (self.app.pargs.nginx == 'on'and self.app.pargs.site_name):
config_path = ("/etc/nginx/sites-available/{0}"
.format(self.app.pargs.site_name))
if os.path.isfile(config_path):
if not EEShellExec.cmd_exec(self, "grep \"error.log debug\" "
"{0}".format(config_path)):
Log.info(self, "Starting NGINX debug connection for "
"{0}".format(self.app.pargs.site_name))
EEShellExec.cmd_exec(self, "sed -i \"s/error.log;/"
"error.log "
"debug;/\" {0}".format(config_path))
self.trigger_nginx = True
else:
Log.info(self, "Nginx debug for site already enabled")
self.msg = self.msg + ['{0}{1}/logs/error.log'
.format(EEVariables.ee_webroot,
self.app.pargs.site_name)]
else:
Log.info(self, "{0} domain not valid"
.format(self.app.pargs.site_name))
# stop site specific debug
elif (self.app.pargs.nginx == 'off' and self.app.pargs.site_name):
config_path = ("/etc/nginx/sites-available/{0}"
.format(self.app.pargs.site_name))
if os.path.isfile(config_path):
if EEShellExec.cmd_exec(self, "grep \"error.log debug\" {0}"
.format(config_path)):
Log.info(self, "Stoping NGINX debug connection for {0}"
.format(self.app.pargs.site_name))
EEShellExec.cmd_exec(self, "sed -i \"s/error.log debug;/"
"error.log;/\" {0}"
.format(config_path))
self.trigger_nginx = True
else:
Log.info(self, "Nginx debug for site already disabled")
else:
Log.info(self, "{0} domain not valid"
.format(self.app.pargs.site_name))
    @expose(hide=True)
    def debug_php(self):
        """Start/Stop global PHP debug.

        ``on``: point the php/debug/hhvm nginx upstreams at the debug
        pool (port 9001), enable the xdebug zend_extension and turn on
        the debug pool's FPM slow log; ``off``: restore the normal
        upstream ports (9000/9001/8000) and comment the zend_extension
        back out.  Raises self.trigger_php / self.trigger_nginx when a
        change was made and records the slow-log path in self.msg for
        the "on" case.
        """
        # PHP global debug start
        if (self.app.pargs.php == 'on' and not self.app.pargs.site_name):
            # Already enabled if the "upstream php" block mentions 9001.
            if not (EEShellExec.cmd_exec(self, "sed -n \"/upstream php"
                                         "{/,/}/p \" /etc/nginx/"
                                         "conf.d/upstream.conf "
                                         "| grep 9001")):
                Log.info(self, "Enabling PHP debug")
                # Only advertise the HHVM upstream in the template when
                # HHVM is actually installed.
                if os.path.isfile("/etc/nginx/common/wpfc-hhvm.conf"):
                    hhvmconf=True
                else:
                    hhvmconf=False
                # Point every upstream at the debug pool (9001).
                data = dict(php="9001", debug="9001", hhvm="9001",
                            hhvmconf=hhvmconf)
                Log.debug(self, 'Writting the Nginx debug configration to file'
                          ' /etc/nginx/conf.d/upstream.conf ')
                ee_nginx = open('/etc/nginx/conf.d/upstream.conf',
                                encoding='utf-8', mode='w')
                self.app.render((data), 'upstream.mustache', out=ee_nginx)
                ee_nginx.close()
                # Enable xdebug
                EEFileUtils.searchreplace(self, "/etc/php5/mods-available/"
                                          "xdebug.ini",
                                          ";zend_extension",
                                          "zend_extension")
                # Fix slow log is not enabled default in PHP5.6
                config = configparser.ConfigParser()
                config.read('/etc/php5/fpm/pool.d/debug.conf')
                config['debug']['slowlog'] = '/var/log/php5/slow.log'
                config['debug']['request_slowlog_timeout'] = '10s'
                with open('/etc/php5/fpm/pool.d/debug.conf',
                          encoding='utf-8', mode='w') as confifile:
                    Log.debug(self, "Writting debug.conf configuration into "
                              "/etc/php5/fpm/pool.d/debug.conf")
                    config.write(confifile)
                self.trigger_php = True
                self.trigger_nginx = True
            else:
                Log.info(self, "PHP debug is already enabled")
            self.msg = self.msg + ['/var/log/php5/slow.log']
        # PHP global debug stop
        elif (self.app.pargs.php == 'off' and not self.app.pargs.site_name):
            if EEShellExec.cmd_exec(self, " sed -n \"/upstream php {/,/}/p\" "
                                    "/etc/nginx/conf.d/upstream.conf "
                                    "| grep 9001"):
                Log.info(self, "Disabling PHP debug")
                # Only advertise the HHVM upstream in the template when
                # HHVM is actually installed.
                if os.path.isfile("/etc/nginx/common/wpfc-hhvm.conf"):
                    hhvmconf=True
                else:
                    hhvmconf=False
                # Restore the normal ports: php 9000, debug 9001, hhvm 8000.
                data = dict(php="9000", debug="9001", hhvm="8000",
                            hhvmconf=hhvmconf)
                Log.debug(self, 'Writting the Nginx debug configration to file'
                          ' /etc/nginx/conf.d/upstream.conf ')
                ee_nginx = open('/etc/nginx/conf.d/upstream.conf',
                                encoding='utf-8', mode='w')
                self.app.render((data), 'upstream.mustache', out=ee_nginx)
                ee_nginx.close()
                # Disable xdebug
                EEFileUtils.searchreplace(self, "/etc/php5/mods-available/"
                                          "xdebug.ini",
                                          "zend_extension",
                                          ";zend_extension")
                self.trigger_php = True
                self.trigger_nginx = True
            else:
                Log.info(self, "PHP debug is already disabled")
    @expose(hide=True)
    def debug_fpm(self):
        """Start/Stop global PHP5-FPM debug.

        ``on``: rewrite /etc/php5/fpm/php-fpm.conf with
        ``log_level = debug``; ``off``: restore ``log_level = notice``.
        Raises self.trigger_php when the file was changed and appends
        the FPM log path to self.msg in the "on" case.
        """
        # PHP5-FPM start global debug
        if (self.app.pargs.fpm == 'on' and not self.app.pargs.site_name):
            if not EEShellExec.cmd_exec(self, "grep \"log_level = debug\" "
                                        "/etc/php5/fpm/php-fpm.conf"):
                Log.info(self, "Setting up PHP5-FPM log_level = debug")
                config = configparser.ConfigParser()
                config.read('/etc/php5/fpm/php-fpm.conf')
                # The include option is removed and re-added so that it
                # ends up written after the updated log_level.
                config.remove_option('global', 'include')
                config['global']['log_level'] = 'debug'
                config['global']['include'] = '/etc/php5/fpm/pool.d/*.conf'
                with open('/etc/php5/fpm/php-fpm.conf',
                          encoding='utf-8', mode='w') as configfile:
                    Log.debug(self, "Writting php5-FPM configuration into "
                              "/etc/php5/fpm/php-fpm.conf")
                    config.write(configfile)
                self.trigger_php = True
            else:
                Log.info(self, "PHP5-FPM log_level = debug already setup")
            self.msg = self.msg + ['/var/log/php5/fpm.log']
        # PHP5-FPM stop global debug
        elif (self.app.pargs.fpm == 'off' and not self.app.pargs.site_name):
            if EEShellExec.cmd_exec(self, "grep \"log_level = debug\" "
                                    "/etc/php5/fpm/php-fpm.conf"):
                Log.info(self, "Disabling PHP5-FPM log_level = debug")
                config = configparser.ConfigParser()
                config.read('/etc/php5/fpm/php-fpm.conf')
                # Same remove/re-add dance as above, restoring the
                # default 'notice' level.
                config.remove_option('global', 'include')
                config['global']['log_level'] = 'notice'
                config['global']['include'] = '/etc/php5/fpm/pool.d/*.conf'
                with open('/etc/php5/fpm/php-fpm.conf',
                          encoding='utf-8', mode='w') as configfile:
                    Log.debug(self, "writting php5 configuration into "
                              "/etc/php5/fpm/php-fpm.conf")
                    config.write(configfile)
                self.trigger_php = True
            else:
                Log.info(self, "PHP5-FPM log_level = debug already disabled")
    @expose(hide=True)
    def debug_mysql(self):
        """Start/Stop global MySQL slow-query-log debug.

        ``on``: enable slow_query_log into /var/log/mysql/mysql-slow.log
        with a 2 second threshold and log queries not using indexes;
        ``off``: turn the slow log off, restore a 10 second threshold
        and remove the EasyEngine crontab section.  Appends the
        slow-log path to self.msg in the "on" case.
        """
        # MySQL start global debug
        if (self.app.pargs.mysql == 'on' and not self.app.pargs.site_name):
            # Skip if slow_query_log is already ON.
            if not EEShellExec.cmd_exec(self, "mysql -e \"show variables like"
                                        " \'slow_query_log\';\" | "
                                        "grep ON"):
                Log.info(self, "Setting up MySQL slow log")
                EEMysql.execute(self, "set global slow_query_log = "
                                "\'ON\';")
                EEMysql.execute(self, "set global slow_query_log_file = "
                                "\'/var/log/mysql/mysql-slow.log\';")
                EEMysql.execute(self, "set global long_query_time = 2;")
                EEMysql.execute(self, "set global log_queries_not_using"
                                "_indexes = \'ON\';")
            else:
                Log.info(self, "MySQL slow log is already enabled")
            self.msg = self.msg + ['/var/log/mysql/mysql-slow.log']
        # MySQL stop global debug
        elif (self.app.pargs.mysql == 'off' and not self.app.pargs.site_name):
            if EEShellExec.cmd_exec(self, "mysql -e \"show variables like \'"
                                    "slow_query_log\';\" | grep ON"):
                Log.info(self, "Disabling MySQL slow log")
                EEMysql.execute(self, "set global slow_query_log = \'OFF\';")
                EEMysql.execute(self, "set global slow_query_log_file = \'"
                                "/var/log/mysql/mysql-slow.log\';")
                EEMysql.execute(self, "set global long_query_time = 10;")
                EEMysql.execute(self, "set global log_queries_not_using_index"
                                "es = \'OFF\';")
                # Drop the crontab entries between the #EasyEngine
                # start/end markers (slow-log import job).
                EEShellExec.cmd_exec(self, "crontab -l | sed \'/#EasyEngine "
                                     "start/,/#EasyEngine end/d\' | crontab -")
            else:
                Log.info(self, "MySQL slow log already disabled")
@expose(hide=True)
def debug_wp(self):
"""Start/Stop WordPress debug"""
if (self.app.pargs.wp == 'on' and self.app.pargs.site_name):
wp_config = ("{0}/{1}/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
webroot = "{0}{1}".format(EEVariables.ee_webroot,
self.app.pargs.site_name)
# Check wp-config.php file into htdocs folder
if not os.path.isfile(wp_config):
wp_config = ("{0}/{1}/htdocs/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
if os.path.isfile(wp_config):
if not EEShellExec.cmd_exec(self, "grep \"\'WP_DEBUG\'\" {0} |"
" grep true".format(wp_config)):
Log.info(self, "Starting WordPress debug")
open("{0}/htdocs/wp-content/debug.log".format(webroot),
encoding='utf-8', mode='a').close()
EEShellExec.cmd_exec(self, "chown {1}: {0}/htdocs/wp-"
"content/debug.log"
"".format(webroot,
EEVariables.ee_php_user))
EEShellExec.cmd_exec(self, "sed -i \"s/define(\'WP_DEBUG\'"
".*/define(\'WP_DEBUG\', true);\\n"
"define(\'WP_DEBUG_DISPLAY\', false);"
"\\ndefine(\'WP_DEBUG_LOG\', true);"
"\\ndefine(\'SAVEQUERIES\', true);/\""
" {0}".format(wp_config))
EEShellExec.cmd_exec(self, "cd {0}/htdocs/ && wp"
" plugin --allow-root install "
"developer query-monitor"
.format(webroot))
EEShellExec.cmd_exec(self, "chown -R {1}: {0}/htdocs/"
"wp-content/plugins"
.format(webroot,
EEVariables.ee_php_user))
self.msg = self.msg + ['{0}{1}/htdocs/wp-content'
'/debug.log'
.format(EEVariables.ee_webroot,
self.app.pargs.site_name)]
else:
Log.info(self, "Unable to find wp-config.php for site: {0}"
.format(self.app.pargs.site_name))
elif (self.app.pargs.wp == 'off' and self.app.pargs.site_name):
wp_config = ("{0}{1}/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
webroot = "{0}{1}".format(EEVariables.ee_webroot,
self.app.pargs.site_name)
# Check wp-config.php file into htdocs folder
if not os.path.isfile(wp_config):
wp_config = ("{0}/{1}/htdocs/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
if os.path.isfile(wp_config):
if EEShellExec.cmd_exec(self, "grep \"\'WP_DEBUG\'\" {0} | "
"grep true".format(wp_config)):
Log.info(self, "Disabling WordPress debug")
EEShellExec.cmd_exec(self, "sed -i \"s/define(\'WP_DEBUG\'"
", true);/define(\'WP_DEBUG\', "
"false);/\" {0}".format(wp_config))
EEShellExec.cmd_exec(self, "sed -i \"/define(\'"
"WP_DEBUG_DISPLAY\', false);/d\" {0}"
.format(wp_config))
EEShellExec.cmd_exec(self, "sed -i \"/define(\'"
"WP_DEBUG_LOG\', true);/d\" {0}"
.format(wp_config))
EEShellExec.cmd_exec(self, "sed -i \"/define(\'"
"SAVEQUERIES\', "
"true);/d\" {0}".format(wp_config))
else:
Log.info(self, "WordPress debug all already disabled")
else:
Log.error(self, "Missing argument site name")
    @expose(hide=True)
    def debug_rewrite(self):
        """Start/Stop Nginx rewrite rules debug.

        Driven by ``--rewrite on|off``: toggles a ``rewrite_log on;``
        directive either globally in /etc/nginx/nginx.conf or per-site in
        /etc/nginx/sites-available/<site>, appends the matching error-log
        path to ``self.msg`` so the caller can tail it, and sets
        ``self.trigger_nginx`` whenever the configuration was changed so
        the caller reloads Nginx.
        """
        # Start Nginx rewrite debug globally
        if (self.app.pargs.rewrite == 'on' and not self.app.pargs.site_name):
            # grep succeeds only when the directive is already present.
            if not EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" "
                                        "/etc/nginx/nginx.conf"):
                Log.info(self, "Setting up Nginx rewrite logs")
                # sed 'a' appends the directive right after the "http {" line.
                EEShellExec.cmd_exec(self, "sed -i \'/http {/a \\\\t"
                                     "rewrite_log on;\' /etc/nginx/nginx.conf")
                self.trigger_nginx = True
            else:
                Log.info(self, "Nginx rewrite logs already enabled")
            # Record the global error-log glob for the tail/watch step.
            if '/var/log/nginx/*.error.log' not in self.msg:
                self.msg = self.msg + ['/var/log/nginx/*.error.log']
        # Stop Nginx rewrite debug globally
        elif (self.app.pargs.rewrite == 'off'
              and not self.app.pargs.site_name):
            if EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" "
                                    "/etc/nginx/nginx.conf"):
                Log.info(self, "Disabling Nginx rewrite logs")
                EEShellExec.cmd_exec(self, "sed -i \"/rewrite_log.*/d\""
                                     " /etc/nginx/nginx.conf")
                self.trigger_nginx = True
            else:
                Log.info(self, "Nginx rewrite logs already disabled")
        # Start Nginx rewrite for site
        elif (self.app.pargs.rewrite == 'on' and self.app.pargs.site_name):
            config_path = ("/etc/nginx/sites-available/{0}"
                           .format(self.app.pargs.site_name))
            if not EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" {0}"
                                        .format(config_path)):
                Log.info(self, "Setting up Nginx rewrite logs for {0}"
                         .format(self.app.pargs.site_name))
                # sed 'i' inserts the directive before the access_log line.
                EEShellExec.cmd_exec(self, "sed -i \"/access_log/i \\\\\\t"
                                     "rewrite_log on;\" {0}"
                                     .format(config_path))
                self.trigger_nginx = True
            else:
                Log.info(self, "Nginx rewrite logs for {0} already setup"
                         .format(self.app.pargs.site_name))
            # Record the per-site error log for the tail/watch step.
            if ('{0}{1}/logs/error.log'.format(EEVariables.ee_webroot,
                                               self.app.pargs.site_name)
               not in self.msg):
                self.msg = self.msg + ['{0}{1}/logs/error.log'
                                      .format(EEVariables.ee_webroot,
                                              self.app.pargs.site_name)]
        # Stop Nginx rewrite for site
        elif (self.app.pargs.rewrite == 'off' and self.app.pargs.site_name):
            config_path = ("/etc/nginx/sites-available/{0}"
                           .format(self.app.pargs.site_name))
            if EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" {0}"
                                    .format(config_path)):
                Log.info(self, "Disabling Nginx rewrite logs for {0}"
                         .format(self.app.pargs.site_name))
                EEShellExec.cmd_exec(self, "sed -i \"/rewrite_log.*/d\" {0}"
                                     .format(config_path))
                self.trigger_nginx = True
            else:
                Log.info(self, "Nginx rewrite logs for {0} already "
                         " disabled".format(self.app.pargs.site_name))
@expose(hide=True)
def signal_handler(self, signal, frame):
"""Handle Ctrl+c hevent for -i option of debug"""
self.start = False
if self.app.pargs.nginx:
self.app.pargs.nginx = 'off'
self.debug_nginx()
if self.app.pargs.php:
self.app.pargs.php = 'off'
self.debug_php()
if self.app.pargs.fpm:
self.app.pargs.fpm = 'off'
self.debug_fpm()
if self.app.pargs.mysql:
# MySQL debug will not work for remote MySQL
if EEVariables.ee_mysql_host is "localhost":
self.app.pargs.mysql = 'off'
self.debug_mysql()
else:
Log.warn(self, "Remote MySQL found, EasyEngine will not "
"enable remote debug")
if self.app.pargs.wp:
self.app.pargs.wp = 'off'
self.debug_wp()
if self.app.pargs.rewrite:
self.app.pargs.rewrite = 'off'
self.debug_rewrite()
# Reload Nginx
if self.trigger_nginx:
EEService.reload_service(self, 'nginx')
# Reload PHP
if self.trigger_php:
EEService.reload_service(self, 'php5-fpm')
self.app.close(0)
@expose(hide=True)
def default(self):
"""Default function of debug"""
# self.start = True
self.interactive = False
self.msg = []
self.trigger_nginx = False
self.trigger_php = False
if ((not self.app.pargs.nginx) and (not self.app.pargs.php)
and (not self.app.pargs.fpm) and (not self.app.pargs.mysql)
and (not self.app.pargs.wp) and (not self.app.pargs.rewrite)
and (not self.app.pargs.all)
and (not self.app.pargs.site_name)
and (not self.app.pargs.import_slow_log)
and (not self.app.pargs.interval)):
if self.app.pargs.stop or self.app.pargs.start:
print("--start/stop option is deprecated since ee3.0.5")
self.app.args.print_help()
else:
self.app.args.print_help()
if self.app.pargs.import_slow_log:
self.import_slow_log()
if self.app.pargs.interval:
try:
cron_time = int(self.app.pargs.interval)
except Exception as e:
cron_time = 5
try:
if not EEShellExec.cmd_exec(self, "crontab -l | grep "
"'ee debug --import-slow-log'"):
if not cron_time == 0:
Log.info(self, "setting up crontab entry,"
" please wait...")
EEShellExec.cmd_exec(self, "/bin/bash -c \"crontab -l "
"2> /dev/null | {{ cat; echo -e"
" \\\"#EasyEngine start MySQL "
"slow log \\n*/{0} * * * * "
"/usr/local/bin/ee debug"
" --import-slow-log\\n"
"#EasyEngine end MySQL slow log"
"\\\"; }} | crontab -\""
.format(cron_time))
else:
if not cron_time == 0:
Log.info(self, "updating crontab entry,"
" please wait...")
if not EEShellExec.cmd_exec(self, "/bin/bash -c "
"\"crontab "
"-l | sed '/EasyEngine "
"start MySQL slow "
"log/!b;n;c\*\/{0} "
"\* \* \* "
"\* \/usr"
"\/local\/bin\/ee debug "
"--import\-slow\-log' "
"| crontab -\""
.format(cron_time)):
Log.error(self, "failed to update crontab entry")
else:
Log.info(self, "removing crontab entry,"
" please wait...")
if not EEShellExec.cmd_exec(self, "/bin/bash -c "
"\"crontab "
"-l | sed '/EasyEngine "
"start MySQL slow "
"log/,+2d'"
"| crontab -\""
.format(cron_time)):
Log.error(self, "failed to remove crontab entry")
except CommandExecutionError as e:
Log.debug(self, str(e))
if self.app.pargs.all == 'on':
if self.app.pargs.site_name:
self.app.pargs.wp = 'on'
self.app.pargs.nginx = 'on'
self.app.pargs.php = 'on'
self.app.pargs.fpm = 'on'
self.app.pargs.mysql = 'on'
self.app.pargs.rewrite = 'on'
if self.app.pargs.all == 'off':
if self.app.pargs.site_name:
self.app.pargs.wp = 'off'
self.app.pargs.nginx = 'off'
self.app.pargs.php = 'off'
self.app.pargs.fpm = 'off'
self.app.pargs.mysql = 'off'
self.app.pargs.rewrite = 'off'
if ((not self.app.pargs.nginx) and (not self.app.pargs.php)
and (not self.app.pargs.fpm) and (not self.app.pargs.mysql)
and (not self.app.pargs.wp) and (not self.app.pargs.rewrite)
and self.app.pargs.site_name):
self.app.args.print_help()
# self.app.pargs.nginx = 'on'
# self.app.pargs.wp = 'on'
# self.app.pargs.rewrite = 'on'
if self.app.pargs.nginx:
self.debug_nginx()
if self.app.pargs.php:
self.debug_php()
if self.app.pargs.fpm:
self.debug_fpm()
if self.app.pargs.mysql:
# MySQL debug will not work for remote MySQL
if EEVariables.ee_mysql_host is "localhost":
self.debug_mysql()
else:
Log.warn(self, "Remote MySQL found, EasyEngine will not "
"enable remote debug")
if self.app.pargs.wp:
self.debug_wp()
if self.app.pargs.rewrite:
self.debug_rewrite()
if self.app.pargs.interactive:
self.interactive = True
# Reload Nginx
if self.trigger_nginx:
EEService.reload_service(self, 'nginx')
# Reload PHP
if self.trigger_php:
EEService.restart_service(self, 'php5-fpm')
if len(self.msg) > 0:
if not self.app.pargs.interactive:
disp_msg = ' '.join(self.msg)
Log.info(self, "Use following command to check debug logs:\n"
+ Log.ENDC + "tail -f {0}".format(disp_msg))
else:
signal.signal(signal.SIGINT, self.signal_handler)
watch_list = []
for w_list in self.msg:
watch_list = watch_list + glob.glob(w_list)
logwatch(self, watch_list)
    @expose(hide=True)
    def import_slow_log(self):
        """Default function for import slow log.

        Reads the Anemometer DB host/user/password out of its
        config.inc.php and feeds /var/log/mysql/mysql-slow.log into the
        slow_query_log database via pt-query-digest.  Errors out when
        Anemometer or the slow-log file is missing.
        """
        if os.path.isdir("{0}22222/htdocs/db/anemometer"
                         .format(EEVariables.ee_webroot)):
            if os.path.isfile("/var/log/mysql/mysql-slow.log"):
                # Get Anemometer user name and password
                Log.info(self, "Importing MySQL slow log to Anemometer")
                # Each credential is grepped out of config.inc.php: first
                # match, 4th single-quote-delimited field, newline stripped.
                host = os.popen("grep -e \"\'host\'\" {0}22222/htdocs/"
                                .format(EEVariables.ee_webroot)
                                + "db/anemometer/conf/config.inc.php "
                                "| head -1 | cut -d\\\' -f4 | "
                                "tr -d '\n'").read()
                user = os.popen("grep -e \"\'user\'\" {0}22222/htdocs/"
                                .format(EEVariables.ee_webroot)
                                + "db/anemometer/conf/config.inc.php "
                                "| head -1 | cut -d\\\' -f4 | "
                                "tr -d '\n'").read()
                password = os.popen("grep -e \"\'password\'\" {0}22222/"
                                    .format(EEVariables.ee_webroot)
                                    + "htdocs/db/anemometer/conf"
                                    "/config.inc.php "
                                    "| head -1 | cut -d\\\' -f4 | "
                                    "tr -d '\n'").read()
                # Import slow log Anemometer using pt-query-digest
                # NOTE(review): the password is interpolated into the
                # command line and is therefore visible in the process
                # list while pt-query-digest runs.
                try:
                    EEShellExec.cmd_exec(self, "pt-query-digest --user={0} "
                                         "--password={1} "
                                         "--review D=slow_query_log,"
                                         "t=global_query_review "
                                         "--history D=slow_query_log,t="
                                         "global_query_review_history "
                                         "--no-report --limit=0% "
                                         "--filter=\" \\$event->{{Bytes}} = "
                                         "length(\\$event->{{arg}}) "
                                         "and \\$event->{{hostname}}=\\\""
                                         "{2}\\\"\" "
                                         "/var/log/mysql/mysql-slow.log"
                                         .format(user, password, host))
                except CommandExecutionError as e:
                    Log.debug(self, str(e))
                    Log.error(self, "MySQL slow log import failed.")
            else:
                Log.error(self, "MySQL slow log file not found,"
                          " so not imported slow logs")
        else:
            Log.error(self, "Anemometer is not installed." +
                      Log.ENDC + "\nYou can install Anemometer with "
                      "this command "
                      + Log.BOLD + "\n `ee stack install --utils`"
                      + Log.ENDC)
def load(app):
    """Plugin entry point: wire the debug controller into the framework.

    Registers EEDebugController (only runs when the plugin is enabled)
    and hooks ee_debug_hook to run once argument parsing has finished.
    """
    handler.register(EEDebugController)
    hook.register('post_argument_parsing', ee_debug_hook)
| |
# VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Oct 28 21:10:10 2014 by generateDS.py version 2.12e.
#
# Command line options:
# ('-o', 'schema/vcim/sessionType.py')
#
# Command line arguments:
# schema/vcim/session.xsd
#
# Command line:
# ./generateDS-2.12e/generateDS.py -o "schema/vcim/sessionType.py" schema/vcim/session.xsd
#
# Current working directory (os.getcwd()):
# vchs-api-cli-cli
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with whichever etree backend was imported.

    When the backend is lxml and the caller did not supply a parser,
    force lxml's ElementTree-compatible parser so that, e.g., comments
    are ignored.  Returns the parsed document.
    """
    use_lxml = XMLParser_import_library == XMLParser_import_lxml
    if use_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
# Prefer a user-supplied GeneratedsSuper (from an optional generatedssuper
# module); otherwise fall back to the default implementation below, which
# supplies the gds_format_*/gds_validate_*/gds_parse_* helpers that the
# generated binding classes rely on.
try:
    from generatedssuper import GeneratedsSuper
except ImportError as exp:
    class GeneratedsSuper(object):
        """Fallback base class for generateDS-generated bindings."""
        # Matches a trailing timezone offset such as +05:30 or -14:00.
        tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
        class _FixedOffsetTZ(datetime_.tzinfo):
            # Minimal fixed-offset tzinfo used when parsing ISO timestamps.
            def __init__(self, offset, name):
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                return None
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            if not input_data:
                return ''
            else:
                return input_data
        def gds_format_base64(self, input_data, input_name=''):
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_integer_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                # NOTE: generated code validates integer lists with
                # float() -- so "1.5" would pass; kept as generated.
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data
        def gds_format_float(self, input_data, input_name=''):
            return ('%.15f' % input_data).rstrip('0')
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            return ('%s' % input_data).lower()
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(
                        node,
                        'Requires sequence of booleans '
                        '("true", "1", "false", "0")')
            return input_data
        def gds_validate_datetime(self, input_data, node, input_name=''):
            return input_data
        # Formats a datetime as ISO 8601, appending 'Z' or +/-HH:MM when
        # the value carries timezone information.
        def gds_format_datetime(self, input_data, input_name=''):
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        # Parses an ISO 8601 datetime string, honoring a trailing 'Z' or
        # +/-HH:MM offset via _FixedOffsetTZ.
        @classmethod
        def gds_parse_datetime(cls, input_data):
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            time_parts = input_data.split('.')
            if len(time_parts) > 1:
                micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
                input_data = '%s.%s' % (time_parts[0], micro_seconds, )
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt
        def gds_validate_date(self, input_data, node, input_name=''):
            return input_data
        def gds_format_date(self, input_data, input_name=''):
            _svalue = '%04d-%02d-%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
            )
            # A plain date has no tzinfo attribute; ignore in that case.
            try:
                if input_data.tzinfo is not None:
                    tzoff = input_data.tzinfo.utcoffset(input_data)
                    if tzoff is not None:
                        total_seconds = tzoff.seconds + (86400 * tzoff.days)
                        if total_seconds == 0:
                            _svalue += 'Z'
                        else:
                            if total_seconds < 0:
                                _svalue += '-'
                                total_seconds *= -1
                            else:
                                _svalue += '+'
                            hours = total_seconds // 3600
                            minutes = (total_seconds - (hours * 3600)) // 60
                            _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            except AttributeError:
                pass
            return _svalue
        @classmethod
        def gds_parse_date(cls, input_data):
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
            dt = dt.replace(tzinfo=tz)
            return dt.date()
        def gds_validate_time(self, input_data, node, input_name=''):
            return input_data
        def gds_format_time(self, input_data, input_name=''):
            if input_data.microsecond == 0:
                _svalue = '%02d:%02d:%02d' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%02d:%02d:%02d.%s' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        @classmethod
        def gds_parse_time(cls, input_data):
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            if len(input_data.split('.')) > 1:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt.time()
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Builds a root-to-node '/'-joined path of stripped tag names.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips a leading '{namespace}' from a tag name.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # node.getparent() is an lxml API; this walk requires the
            # lxml backend.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
        def get_class_obj_(self, node, default_class=None):
            # Resolves an xsi:type attribute to a class in this module,
            # falling back to default_class.
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
        def gds_build_any(self, node, type_name=None):
            return None
        @classmethod
        def gds_reverse_node_mapping(cls, mapping):
            # Python 2 dict API (iteritems); inverts {k: v} to {v: k}.
            return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'  # encoding applied to attribute values on export
Tag_pattern_ = re_.compile(r'({.*})?(.*)')  # splits optional '{ns}' from tag
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")  # runs of whitespace
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')  # captures ns and local name
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write ``4 * level`` spaces to *outfile*; no-op unless pretty-printing."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape *inStr* for use as XML character data.

    Replaces the XML-special characters with their predefined entities
    (``&`` first so entity ampersands are not double-escaped).  Non-string
    input is converted with ``'%s'``; falsy input yields ``''``.
    """
    if not inStr:
        return ''
    # basestring exists on Python 2 only; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    s1 = inStr if isinstance(s1_input := inStr, string_types) else '%s' % inStr
    # Restored entity replacements: the previous code replaced each
    # character with itself (corrupted escaping, a no-op).
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* and wrap it as an XML attribute value.

    Escapes ``&``, ``<`` and ``>`` to their entities, then chooses the
    quote style: double quotes normally; single quotes when the value
    contains a double quote; double quotes with ``&quot;`` escapes when
    it contains both quote characters.
    """
    # basestring exists on Python 2 only; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    s1 = inStr if isinstance(inStr, string_types) else '%s' % inStr
    # Restored entity replacements (previously corrupted to no-op
    # self-replacements, and '&quot;' had degraded to a bare quote).
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Wrap *inStr* as a Python string literal.

    Uses single quotes when the text has no apostrophe, otherwise double
    quotes (escaping any embedded double quotes); switches to the
    triple-quoted form of the chosen style when the text spans lines.
    """
    text = inStr
    if text.find("'") == -1:
        if text.find('\n') == -1:
            return "'%s'" % text
        return "'''%s'''" % text
    if text.find('"') != -1:
        text = text.replace('"', '\\"')
    if text.find('\n') == -1:
        return '"%s"' % text
    return '"""%s"""' % text
def get_all_text_(node):
    """Collect the mixed-content text of *node*: its own text plus every
    child's tail, concatenated in document order."""
    pieces = [node.text if node.text is not None else '']
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*.

    A plain name is fetched directly; a ``prefix:name`` is resolved
    through ``node.nsmap`` (lxml) to the Clark-notation key.  Returns
    None when the attribute is absent or the prefix is unknown.
    """
    parts = attr_name.split(':')
    if len(parts) == 1:
        return node.attrib.get(attr_name)
    if len(parts) == 2:
        prefix, local_name = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return node.attrib.get('{%s}%s' % (uri, local_name, ))
    return None
class GDSParseError(Exception):
    """Raised (via raise_parse_error) when parsed XML content fails validation."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, including the source line number
    when the lxml backend (which exposes ``sourceline``) is in use."""
    if XMLParser_import_library == XMLParser_import_lxml:
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holds one piece of mixed XML content (text, a simple-typed element,
    or a complex element) and knows how to serialize it to XML text, to an
    etree node, or to a Python-literal representation."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type parameter is ignored; the stored
        # value is returned (generated code, kept as-is).
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace, pretty_print=True):
        # Dispatch on category: raw text, simple element, or delegate to
        # the complex value's own export().
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name, pretty_print)
    def exportSimple(self, outfile, level, name):
        # Write <name>value</name> with a %-format chosen by content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name, base64.b64encode(self.value), self.name))
    def to_etree(self, element):
        # Attach this content to *element*: text goes into the previous
        # sibling's tail (or the element's text), simple/complex content
        # becomes a subelement.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:    # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        # Render the simple value as text, by content_type.
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        # Emit a model_.MixedContainer(...) constructor call as Python
        # source; complex values recurse into their own exportLiteral.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Description of one generated-class member: its name, its XML data
    type (a single value or a chain of types), and a container flag."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        return self.data_type
    def get_data_type(self):
        """Return the most-derived type: the last element of a type chain,
        'xs:string' for an empty chain, or the scalar value as-is."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if len(chain) > 0 else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class EntityType(GeneratedsSuper):
"""5.6 The base type for all objects in the VCHS model. Has an optional
list of links and href and type attributes. always The URI of
the entity. falsealways The MIME type of the entity. falsealways
The name type of the entity. false"""
subclass = None
superclass = None
def __init__(self, href=None, type_=None, name=None, Link=None, extensiontype_=None):
self.original_tagname_ = None
self.href = _cast(None, href)
self.type_ = _cast(None, type_)
self.name = _cast(None, name)
if Link is None:
self.Link = []
else:
self.Link = Link
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if EntityType.subclass:
return EntityType.subclass(*args_, **kwargs_)
else:
return EntityType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Link(self): return self.Link
def set_Link(self, Link): self.Link = Link
def add_Link(self, value): self.Link.append(value)
def insert_Link_at(self, index, value): self.Link.insert(index, value)
def replace_Link_at(self, index, value): self.Link[index] = value
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.Link
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='EntityType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='EntityType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityType'):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='EntityType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Link_ in self.Link:
Link_.export(outfile, level, namespace_, name_='Link', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='EntityType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
showIndent(outfile, level)
outfile.write('href="%s",\n' % (self.href,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Link=[\n')
level += 1
for Link_ in self.Link:
showIndent(outfile, level)
outfile.write('model_.LinkType(\n')
Link_.exportLiteral(outfile, level, name_='LinkType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
    """Read href/type/name and xsi:type attributes from *node*."""
    # (xml attribute name, python member name) pairs; 'type' maps to the
    # trailing-underscore member to avoid shadowing the builtin.
    for xml_name, member in (('href', 'href'), ('type', 'type_'), ('name', 'name')):
        value = find_attr_value_(xml_name, node)
        if value is not None and xml_name not in already_processed:
            already_processed.add(xml_name)
            setattr(self, member, value)
    value = find_attr_value_('xsi:type', node)
    if value is not None and 'xsi:type' not in already_processed:
        already_processed.add('xsi:type')
        self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    """Build a LinkType instance for each <Link> child element."""
    if nodeName_ != 'Link':
        return
    obj_ = LinkType.factory()
    obj_.build(child_)
    self.Link.append(obj_)
    obj_.original_tagname_ = 'Link'
# end class EntityType
class ReferenceType(GeneratedsSuper):
    """5.6 A reference to an entity.

    Carries an ``href`` attribute plus optional ``id``, ``name`` and
    ``type`` attributes.  ``href`` always contains the URI to the entity.
    ``id`` is expressed in URN format, uniquely identifies the resource,
    persists for the life of the entity and is never reused.  ``type``
    contains the MIME type and ``name`` the name of the entity.
    """
    subclass = None
    superclass = None
    # (python member name, xml attribute name) pairs handled generically below.
    _attr_spec = (('href', 'href'), ('type_', 'type'), ('id', 'id'), ('name', 'name'))

    def __init__(self, href=None, type_=None, id=None, name=None, extensiontype_=None):
        self.original_tagname_ = None
        self.href = _cast(None, href)
        self.type_ = _cast(None, type_)
        self.id = _cast(None, id)
        self.name = _cast(None, name)
        self.extensiontype_ = extensiontype_

    def factory(*args_, **kwargs_):
        """Instantiate ReferenceType.subclass when registered, else ReferenceType."""
        cls = ReferenceType.subclass or ReferenceType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_href(self):
        return self.href

    def set_href(self, href):
        self.href = href

    def get_type(self):
        return self.type_

    def set_type(self, type_):
        self.type_ = type_

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_extensiontype_(self):
        return self.extensiontype_

    def set_extensiontype_(self, extensiontype_):
        self.extensiontype_ = extensiontype_

    def hasContent_(self):
        # ReferenceType defines no child elements, only attributes.
        return False

    def export(self, outfile, level, namespace_='', name_='ReferenceType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, ' ' + namespacedef_ if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % eol_)
            self.exportChildren(outfile, level + 1, namespace_='', name_='ReferenceType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % eol_)

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferenceType'):
        """Write each non-None attribute exactly once."""
        for member, xml_name in self._attr_spec:
            value = getattr(self, member)
            if value is not None and member not in already_processed:
                already_processed.add(member)
                formatted = self.gds_format_string(quote_attrib(value).encode(ExternalEncoding), input_name=xml_name)
                outfile.write(' %s=%s' % (xml_name, formatted))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)

    def exportChildren(self, outfile, level, namespace_='', name_='ReferenceType', fromsubclass_=False, pretty_print=True):
        # No child elements to export.
        pass

    def exportLiteral(self, outfile, level, name_='ReferenceType'):
        """Emit a Python-literal rendering of this object one level deeper."""
        inner_level = level + 1
        seen = set()
        self.exportLiteralAttributes(outfile, inner_level, seen, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, inner_level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write each non-None attribute as a keyword-argument line."""
        for member, _unused in self._attr_spec:
            value = getattr(self, member)
            if value is not None and member not in already_processed:
                already_processed.add(member)
                showIndent(outfile, level)
                outfile.write('%s="%s",\n' % (member, value))

    def exportLiteralChildren(self, outfile, level, name_):
        # No child elements to export.
        pass

    def build(self, node):
        """Populate this object from an etree element node and return self."""
        self.buildAttributes(node, node.attrib, set())
        for child in node:
            tag = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, tag)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        """Read href/type/id/name and xsi:type attributes from *node*."""
        for member, xml_name in self._attr_spec:
            value = find_attr_value_(xml_name, node)
            if value is not None and xml_name not in already_processed:
                already_processed.add(xml_name)
                setattr(self, member, value)
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements to build.
        pass
# end class ReferenceType
class LinkType(ReferenceType):
    """5.6 Extends ReferenceType by adding a ``rel`` attribute.

    Defines a hyper-link with a relationship, a hyper-link reference and
    an optional MIME type.  The relationship describes how the link
    relates to the object that contains it: the name of an operation on
    the object, a reference to a contained or containing object, or a
    reference to an alternate representation of the object.  The
    relationship value implies the HTTP verb to use when the link's href
    is used as a request URL.  See the VCHS API Programming Guide for a
    list of links and link relations.
    """
    subclass = None
    superclass = ReferenceType

    def __init__(self, href=None, type_=None, id=None, name=None, rel=None):
        self.original_tagname_ = None
        super(LinkType, self).__init__(href, type_, id, name, )
        self.rel = _cast(None, rel)

    def factory(*args_, **kwargs_):
        """Instantiate LinkType.subclass when registered, else LinkType."""
        cls = LinkType.subclass or LinkType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_rel(self):
        return self.rel

    def set_rel(self, rel):
        self.rel = rel

    def hasContent_(self):
        # LinkType adds an attribute only; content comes from the base class.
        return bool(super(LinkType, self).hasContent_())

    def export(self, outfile, level, namespace_='', name_='LinkType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, ' ' + namespacedef_ if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.hasContent_():
            outfile.write('>%s' % eol_)
            self.exportChildren(outfile, level + 1, namespace_='', name_='LinkType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % eol_)

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LinkType'):
        """Write the inherited attributes, then ``rel``."""
        super(LinkType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            outfile.write(' rel=%s' % (self.gds_format_string(quote_attrib(self.rel).encode(ExternalEncoding), input_name='rel'),))

    def exportChildren(self, outfile, level, namespace_='', name_='LinkType', fromsubclass_=False, pretty_print=True):
        # No children of its own; delegate to the base class.
        super(LinkType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='LinkType'):
        """Emit a Python-literal rendering of this object one level deeper."""
        inner_level = level + 1
        seen = set()
        self.exportLiteralAttributes(outfile, inner_level, seen, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, inner_level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write ``rel`` first, then the inherited attributes."""
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            showIndent(outfile, level)
            outfile.write('rel="%s",\n' % (self.rel,))
        super(LinkType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(LinkType, self).exportLiteralChildren(outfile, level, name_)

    def build(self, node):
        """Populate this object from an etree element node and return self."""
        self.buildAttributes(node, node.attrib, set())
        for child in node:
            tag = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, tag)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        """Read ``rel``, then the inherited attributes, from *node*."""
        value = find_attr_value_('rel', node)
        if value is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            self.rel = value
        super(LinkType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(LinkType, self).buildChildren(child_, node, nodeName_, True)
# end class LinkType
class SessionType(EntityType):
    """5.6 Represents a VCHS session."""
    subclass = None
    superclass = EntityType

    def __init__(self, href=None, type_=None, name=None, Link=None):
        self.original_tagname_ = None
        super(SessionType, self).__init__(href, type_, name, Link, )

    def factory(*args_, **kwargs_):
        """Instantiate SessionType.subclass when registered, else SessionType."""
        cls = SessionType.subclass or SessionType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def hasContent_(self):
        # Content (Link children) is defined entirely by the base class.
        return bool(super(SessionType, self).hasContent_())

    def export(self, outfile, level, namespace_='', name_='SessionType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, ' ' + namespacedef_ if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='SessionType')
        if self.hasContent_():
            outfile.write('>%s' % eol_)
            self.exportChildren(outfile, level + 1, namespace_='', name_='SessionType', pretty_print=pretty_print)
            # Closing tag sits on its own line, so indent it to this level.
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % eol_)

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SessionType'):
        super(SessionType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='SessionType')

    def exportChildren(self, outfile, level, namespace_='', name_='SessionType', fromsubclass_=False, pretty_print=True):
        super(SessionType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='SessionType'):
        """Emit a Python-literal rendering of this object one level deeper."""
        inner_level = level + 1
        seen = set()
        self.exportLiteralAttributes(outfile, inner_level, seen, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, inner_level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(SessionType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(SessionType, self).exportLiteralChildren(outfile, level, name_)

    def build(self, node):
        """Populate this object from an etree element node and return self."""
        self.buildAttributes(node, node.attrib, set())
        for child in node:
            tag = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, tag)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(SessionType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(SessionType, self).buildChildren(child_, node, nodeName_, True)
# end class SessionType
# Maps XML root-element tag names to the generated classes that parse them;
# consulted by get_root_tag() before falling back to globals().
GDSClassesMapping = {
    'Session': SessionType,
    'Link': LinkType,
}
# Command-line help banner printed by usage().
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the usage banner and terminate with exit status 1."""
    # print() is a function call on Python 3 and a parenthesized print
    # statement on Python 2, so this works on both; the original bare
    # "print USAGE_TEXT" statement was a SyntaxError under Python 3.
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Return (tag name, generated class or None) for a root element."""
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    cls = GDSClassesMapping.get(tag)
    return tag, cls if cls is not None else globals().get(tag)
def parse(inFileName, silence=False):
    """Parse the XML file *inFileName* and return the built root object.

    Unless *silence* is true, the reconstructed document is echoed to
    stdout.
    """
    doc = parsexml_(inFileName)
    root_node = doc.getroot()
    rootTag, rootClass = get_root_tag(root_node)
    if rootClass is None:
        rootTag, rootClass = 'SessionType', SessionType
    rootObj = rootClass.factory()
    rootObj.build(root_node)
    doc = None  # drop the DOM so Python can reclaim its memory
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='',
            pretty_print=True)
    return rootObj
def parseEtree(inFileName, silence=False):
    """Parse *inFileName*; return (rootObj, rootElement, mapping, reverse_mapping)."""
    doc = parsexml_(inFileName)
    root_node = doc.getroot()
    rootTag, rootClass = get_root_tag(root_node)
    if rootClass is None:
        rootTag, rootClass = 'SessionType', SessionType
    rootObj = rootClass.factory()
    rootObj.build(root_node)
    doc = None  # drop the DOM so Python can reclaim its memory
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            rootElement, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse an XML document held in a string and return the root object."""
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    root_node = doc.getroot()
    rootTag, rootClass = get_root_tag(root_node)
    if rootClass is None:
        rootTag, rootClass = 'SessionType', SessionType
    rootObj = rootClass.factory()
    rootObj.build(root_node)
    doc = None  # drop the DOM so Python can reclaim its memory
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='')
    return rootObj
def parseLiteral(inFileName, silence=False):
    """Parse *inFileName* and, unless silenced, dump it as Python literals."""
    doc = parsexml_(inFileName)
    root_node = doc.getroot()
    rootTag, rootClass = get_root_tag(root_node)
    if rootClass is None:
        rootTag, rootClass = 'SessionType', SessionType
    rootObj = rootClass.factory()
    rootObj.build(root_node)
    doc = None  # drop the DOM so Python can reclaim its memory
    if not silence:
        sys.stdout.write('#from sessionType import *\n\n')
        sys.stdout.write('import sessionType as model_\n\n')
        sys.stdout.write('rootObj = model_.rootClass(\n')
        rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
        sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point: parse the single XML-file argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()  # exits with status 1
    parse(args[0])
# Script entry point: run the command-line parser when executed directly.
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated bindings module.
__all__ = [
    "EntityType",
    "LinkType",
    "ReferenceType",
    "SessionType"
]
| |
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
# Security middleware is prepended so its redirects/headers apply to every
# response; the Raven middleware reports 404s and tags error responses for
# Sentry.
SECURITY_MIDDLEWARE = (
    'djangosecure.middleware.SecurityMiddleware',
)
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
                    'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + \
    RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# This production config enforces HTTPS (SSL redirect + HSTS), so the
# session cookie must never travel over plain HTTP.  It was previously
# hard-coded to False, which would leak the session id on any http://
# request; default to True, with an env override for non-TLS staging.
SESSION_COOKIE_SECURE = env.bool("DJANGO_SESSION_COOKIE_SECURE", default=True)
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['parilis.com'])
# END SITE CONFIGURATION
# Gunicorn is the production WSGI server.
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
    'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
# Serve S3 objects via public URLs rather than signed querystrings.
AWS_QUERYSTRING_AUTH = False
# Path-style addressing (bucket in the URL path, not the hostname).
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
# Static files share the same S3 bucket and URL prefix as media.
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
STATIC_URL = MEDIA_URL
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
# Outbound mail goes through Mailgun.
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='parilis <noreply@parilis.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[parilis] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Cache compiled templates in memory; safe in production where templates
# do not change between deploys.
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            "IGNORE_EXCEPTIONS": True,  # mimics memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}
# Sentry Configuration
# Raises ImproperlyConfigured if DJANGO_SENTRY_DSN is not in the environment.
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Route warnings and above to Sentry; selected noisy loggers are pinned to
# the console instead so they never recurse into Sentry itself.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        # ERROR and above are shipped to Sentry.
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        # Everything sent to 'console' ends up in the platform log drain.
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        # Keep raven/sentry internals out of Sentry to avoid feedback loops.
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
    'DSN': SENTRY_DSN
}
# Your production stuff: Below this line define 3rd party library settings
| |
"""Contains the base class for flippers."""
from typing import List
from typing import Optional
from mpf.core.events import event_handler
from mpf.core.platform_controller import HardwareRule, EosRuleSettings
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.platform_controller import SwitchRuleSettings, DriverRuleSettings, PulseRuleSettings, HoldRuleSettings
from mpf.core.system_wide_device import SystemWideDevice
@DeviceMonitor(_enabled="enabled")
class Flipper(SystemWideDevice):
"""Represents a flipper in a pinball machine. Subclass of Device.
Contains several methods for actions that can be performed on this flipper,
like :meth:`enable`, :meth:`disable`, etc.
Flippers have several options, including player buttons, EOS swtiches,
multiple coil options (pulsing, hold coils, etc.)
Args:
----
machine: A reference to the machine controller instance.
name: A string of the name you'll refer to this flipper object as.
"""
__slots__ = ["_enabled", "_active_rules", "_sw_flipped"]
config_section = 'flippers'
collection = 'flippers'
class_label = 'flipper'
def __init__(self, machine, name):
"""Initialise flipper."""
super().__init__(machine, name)
self._enabled = False
self._active_rules = [] # type: List[HardwareRule]
self._sw_flipped = False
async def _initialize(self):
await super()._initialize()
if self.config['include_in_ball_search']:
self.config['playfield'].ball_search.register(
self.config['ball_search_order'], self._ball_search, self.name)
@event_handler(1)
def event_enable(self, **kwargs):
"""Handle enable control event.
To prevent multiple rules at the same time we prioritize disable > enable.
"""
del kwargs
self.enable()
def enable(self):
"""Enable the flipper by writing the necessary hardware rules to the hardware controller.
The hardware rules for coils can be kind of complex given all the
options, so we've mapped all the options out here. We literally have
methods to enable the various rules based on the rule letters here,
which we've implemented below. Keeps it easy to understand. :)
Note there's a platform feature saved at:
self.machine.config['platform']['hw_enable_auto_disable']. If True, it
means that the platform hardware rules will automatically disable a coil
that has been enabled when the trigger switch is disabled. If False, it
means the hardware platform needs its own rule to disable the coil when
the switch is disabled. Methods F and G below check for that feature
setting and will not be applied to the hardware if it's True.
Two coils, using EOS switch to indicate the end of the power stroke:
Rule Type Coil Switch Action
A. Enable Main Button active
D. Enable Hold Button active
E. Disable Main EOS active
One coil, using EOS switch:
Rule Type Coil Switch Action
A. Enable Main Button active
H. PWM Main EOS active
Two coils, not using EOS switch:
Rule Type Coil Switch Action
B. Pulse Main Button active
D. Enable Hold Button active
One coil, not using EOS switch:
Rule Type Coil Switch Action
C. Pulse/PWM Main button active
Use EOS switch for safety (for platforms that support multiple switch
rules). Note that this rule is the letter "i", not a numeral 1.
I. Enable power if button is active and EOS is not active
"""
# prevent duplicate enable
if self._enabled:
return
self._enabled = True
self.debug_log('Enabling flipper with config: %s', self.config)
# Apply the proper hardware rules for our config
if self.config['activation_switch']:
# only add rules if we are using a switch
if self.config['use_eos']:
self._enable_main_coil_eos_cutoff_rule()
elif self.config['hold_coil']:
self._enable_main_coil_pulse_rule()
else:
self._enable_single_coil_rule()
if self.config['hold_coil']:
self._enable_hold_coil_rule()
@event_handler(10)
def event_disable(self, **kwargs):
"""Handle disable control event.
To prevent multiple rules at the same time we prioritize disable > enable.
"""
del kwargs
self.disable()
def disable(self):
"""Disable the flipper.
This method makes it so the cabinet flipper buttons no longer control
the flippers. Used when no game is active and when the player has
tilted.
"""
if not self._enabled:
return
self.debug_log("Disabling")
for rule in self._active_rules:
# disable all rules
self.machine.platform_controller.clear_hw_rule(rule)
if self._sw_flipped:
# disable the coils if activated via sw_flip
self.sw_release()
self._active_rules = []
self._enabled = False
def _get_pulse_ms(self) -> Optional[int]:
"""Return pulse_ms."""
pulse_ms = self.config['main_coil_overwrite'].get("pulse_ms", None)
if self.config['power_setting_name']:
settings_factor = self.machine.settings.get_setting_value(self.config['power_setting_name'])
if not pulse_ms:
pulse_ms = self.machine.config['mpf']['default_pulse_ms']
return int(pulse_ms * settings_factor)
return pulse_ms
def _get_hold_pulse_ms(self) -> Optional[int]:
"""Return pulse_ms for hold coil."""
pulse_ms = self.config['hold_coil_overwrite'].get("pulse_ms", None)
if self.config['power_setting_name']:
settings_factor = self.machine.settings.get_setting_value(self.config['power_setting_name'])
if not pulse_ms:
pulse_ms = self.machine.config['mpf']['default_pulse_ms']
return int(pulse_ms * settings_factor)
return pulse_ms
def _get_pulse_power(self) -> Optional[float]:
"""Return pulse_power."""
pulse_power = self.config['main_coil_overwrite'].get("pulse_power", None)
return pulse_power
def _get_hold_pulse_power(self) -> Optional[float]:
"""Return pulse_power for hold coil."""
pulse_power = self.config['hold_coil_overwrite'].get("pulse_power", None)
return pulse_power
def _get_hold_power(self) -> Optional[float]:
"""Return hold_power."""
hold_power = self.config['main_coil_overwrite'].get("hold_power", None)
return hold_power
def _enable_single_coil_rule(self):
self.debug_log('Enabling single coil rule')
rule = self.machine.platform_controller.set_pulse_on_hit_and_enable_and_release_rule(
SwitchRuleSettings(switch=self.config['activation_switch'], debounce=False, invert=False),
DriverRuleSettings(driver=self.config['main_coil'], recycle=False),
PulseRuleSettings(duration=self._get_pulse_ms(), power=self._get_pulse_power()),
HoldRuleSettings(power=self._get_hold_power())
)
self._active_rules.append(rule)
def _enable_main_coil_pulse_rule(self):
self.debug_log('Enabling main coil pulse rule')
rule = self.machine.platform_controller.set_pulse_on_hit_and_release_rule(
SwitchRuleSettings(switch=self.config['activation_switch'], debounce=False, invert=False),
DriverRuleSettings(driver=self.config['main_coil'], recycle=False),
PulseRuleSettings(duration=self._get_pulse_ms(), power=self._get_pulse_power())
)
self._active_rules.append(rule)
def _enable_hold_coil_rule(self):
self.debug_log('Enabling hold coil rule')
rule = self.machine.platform_controller.set_pulse_on_hit_and_enable_and_release_rule(
SwitchRuleSettings(switch=self.config['activation_switch'], debounce=False, invert=False),
DriverRuleSettings(driver=self.config['hold_coil'], recycle=False),
PulseRuleSettings(duration=self._get_hold_pulse_ms(), power=self._get_hold_pulse_power()),
HoldRuleSettings(power=self._get_hold_power())
)
self._active_rules.append(rule)
def _enable_main_coil_eos_cutoff_rule(self):
if self.config['hold_coil']:
self.debug_log('Enabling main coil EOS cutoff rule w/o hold')
rule = self.machine.platform_controller.set_pulse_on_hit_and_release_and_disable_rule(
SwitchRuleSettings(switch=self.config['activation_switch'], debounce=False, invert=False),
SwitchRuleSettings(switch=self.config['eos_switch'], debounce=False, invert=False),
DriverRuleSettings(driver=self.config['main_coil'], recycle=False),
PulseRuleSettings(duration=self._get_hold_pulse_ms(), power=self._get_hold_pulse_power()),
EosRuleSettings(enable_repulse=self.config["repulse_on_eos_open"],
debounce_ms=self.config["eos_active_ms_before_repulse"])
)
self._active_rules.append(rule)
else:
self.debug_log('Enabling main coil EOS cutoff rule w/ hold')
rule = self.machine.platform_controller.set_pulse_on_hit_and_enable_and_release_and_disable_rule(
SwitchRuleSettings(switch=self.config['activation_switch'], debounce=False, invert=False),
SwitchRuleSettings(switch=self.config['eos_switch'], debounce=False, invert=False),
DriverRuleSettings(driver=self.config['main_coil'], recycle=False),
PulseRuleSettings(duration=self._get_hold_pulse_ms(), power=self._get_hold_pulse_power()),
HoldRuleSettings(power=self._get_hold_power()),
EosRuleSettings(enable_repulse=self.config["repulse_on_eos_open"],
debounce_ms=self.config["eos_active_ms_before_repulse"])
)
self._active_rules.append(rule)
@event_handler(6)
def event_sw_flip(self, **kwargs):
"""Handle sw_flip control event."""
del kwargs
self.sw_flip()
def sw_flip(self):
"""Activate the flipper via software as if the flipper button was pushed.
This is needed because the real flipper activations are handled in
hardware, so if you want to flip the flippers with the keyboard or OSC
interfaces, you have to call this method.
Note this method will keep this flipper enabled until you call
sw_release().
"""
if not self._enabled:
return
self._sw_flipped = True
if self.config['hold_coil']:
self.config['main_coil'].pulse()
self.config['hold_coil'].enable()
else:
self.config['main_coil'].enable()
@event_handler(5)
def event_sw_release(self, **kwargs):
    """Handle the sw_release control event.

    Delegates to sw_release(); any event keyword arguments are discarded.
    """
    del kwargs
    self.sw_release()
def sw_release(self):
    """Deactivate a software-activated flip.

    Counterpart of sw_flip(); see its documentation for details.
    """
    self._sw_flipped = False
    # De-energize whichever coil(s) this flipper uses.
    self.config['main_coil'].disable()
    hold_coil = self.config['hold_coil']
    if hold_coil:
        hold_coil.disable()
def _ball_search(self, phase, iteration):
    """Fire the flipper briefly as part of a ball search.

    *phase* and *iteration* belong to the ball-search callback signature
    but are not used here.  Returns True to signal participation.
    """
    del phase
    del iteration
    self.sw_flip()
    # Schedule the matching release so the coil is not left energized.
    delay_name = 'flipper_{}_ball_search'.format(self.name)
    self.machine.delay.add(self.config['ball_search_hold_time'],
                           self.sw_release,
                           delay_name)
    return True
| |
import cv2
import cv2.cv as cv
import numpy as np
import signal, os, subprocess, sys
import time
import threading
import requests
import io
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
#GPIO.setmode(GPIO.BCM)
from fractions import Fraction
#
#GPIO.setup(18, GPIO.OUT)
"""
# initialize the camera
cam = VideoCapture(0) # 0 -> index of camera
s, img = cam.read()
if s: # frame captured without any errors
'''namedWindow("cam-test",cv2.CV_WINDOW_AUTOSIZE)
imshow("cam-test",img)
waitKey(0)
destroyWindow("cam-test")'''
imwrite("filename.jpg",img) #save image
"""
camera = PiCamera()
camera.framerate = 32
#camera.framerate = Fraction(1,6)
raw_capture = PiRGBArray(camera)
output = PiRGBArray(camera)
time.sleep(0.1)
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(5,4), Fraction(4,3))
camera.shutter_speed = 32000
"""
#g = camera.awb_gains
g = (Fraction(1, 1), Fraction(1,1))
print g
camera.exposure_mode = 'off'
camera.shutter_speed = 500000
camera.awb_mode = 'off'
camera.awb_gains = g
camera.capture(output, format="bgr")
img = output.array
b,g,r = cv2.split(img)
cv2.imshow('frame',g)
key = cv2.waitKey(0) & 0xFF
"""
#redLower = np.array((0, 127, 58))
redLower = np.array((0,200, 200))
redUpper = np.array((330, 255,255))
#camera.awb_gains = (Fraction(2), Fraction(2))
# Main capture loop: mask out the red LED, locate it, find the brightest
# image quadrant, save an annotated frame and (optionally) upload it.
for video_frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
    frame = video_frame.array
    #to detect red LED
    #cv2.imshow('asdf', frame)
    #cv2.waitKey(0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    #cv2.imshow('s', s)
    #cv2.waitKey(0)
    #construct mask, dialation, and erosion to reduce noise
    #mask1 = cv2.in
    mask = cv2.inRange(hsv, redLower, redUpper)
    mask = cv2.dilate(mask, None, iterations = 4)
    mask = cv2.erode(mask, None, iterations = 2)
    #cv2.imshow('mask', mask)
    #cv2.waitKey(0)
    #find contours in mask, initialize current center
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # [-2] picks the contour list across OpenCV versions with different
    # findContours return signatures.
    cnts = contours[-2]
    center = None
    # Zero everything outside the mask so cv2.mean below averages over the
    # LED pixels only.
    b,g,r = cv2.split(frame)
    b = cv2.bitwise_and(b, mask)
    g = cv2.bitwise_and(g, mask)
    r = cv2.bitwise_and(r, mask)
    frame = cv2.merge((b,g,r))
    averagemask = cv2.mean(frame, mask= mask)
    #cv2.imshow('frame', new_frame)
    if len (cnts) > 0:
        #find largest contour, use it to compute min enclosed cirlce
        #and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        #proceed if radius is min size --NEED TO FIGURE OUT
        if radius > 1:
            #draw the circle and centroid on the frame,
            #then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    #update the points queue
    cv2.imshow('frame', frame)
    #cv2.imshow('masked', masked_frame)
    cv2.waitKey(5)
    #loop over the set of tracked points
    #cap = cv2.VideoCapture(0)
    #pwm = GPIO.PWM(18, 50)
    #pwm.start(8)
    #pwm.on()
    #while(True):
    # Capture frame-by-frame
    #ret, frame = cap.read()
    #print camera.awb_gains
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #gray = frame
    # The integral image lets each quadrant's brightness sum be read in O(1)
    # from four corner lookups.
    integral_table = cv2.integral(frame)
    image_y = int(frame.shape[0])
    image_x = int(frame.shape[1])
    # print image_x
    # print (integral_table[image_y][image_x] + integral_table[0][0] - integral_table[0][image_x] - integral_table[image_y][0])
    #avg_value = integral_table[image_y][image_x][0] / (image_y*image_x)
    # Per-quadrant mean brightness (blue channel, index [0]).
    # NOTE(review): some indices use bare image_y/2 and image_x/2 -- this
    # relies on Python 2 integer division (consistent with the cv2.cv
    # import); under Python 3 these would be float indices and fail.
    #upper right quadrant
    avg_value_1 = (integral_table[0][int(image_x/2)][0] + integral_table[int(image_y/2)][image_x][0] - integral_table[int(image_y/2)][int(image_x/2)][0] - integral_table[0][image_x][0]) / (image_y*image_x / 4.0)
    avg_value_2 = (integral_table[image_y/2][int(image_x/2)][0] + integral_table[0][0][0] - integral_table[int(image_y/2)][0][0] - integral_table[0][image_x/2][0]) / (image_y*image_x / 4.0)
    avg_value_3 = (integral_table[int(image_y)][int(image_x/2)][0] + integral_table[int(image_y/2)][0][0] - integral_table[int(image_y/2)][int(image_x/2)][0] - integral_table[image_y][0][0]) / (image_y*image_x / 4.0)
    avg_value_4 = (integral_table[image_y][int(image_x)][0] + integral_table[int(image_y/2)][int(image_x/2)][0] - integral_table[int(image_y)][int(image_x/2)][0] - integral_table[int(image_y/2)][image_x][0]) / (image_y*image_x / 4.0)
    # Rank the quadrants by brightness; the brightest one wins.
    quadrant_intensity = [(avg_value_1, 1), (avg_value_2, 2), (avg_value_3, 3), (avg_value_4, 4)]
    quadrant_intensity.sort(key = lambda x:int(x[0]), reverse=True)
    #print quadrant_intensity
    #print (avg_value_1)
    quadrant_no = quadrant_intensity[0][1]
    #print 'Quadrant ' + str(quadrant_no) + ' is the most intense'
    #print quadrant_intensity[quadrant_no-1][0] * 100/255
    #pwm.ChangeDutyCycle(int(avg_value_1 * 100/255))
    # Geometric center of the winning quadrant.
    quadrant_center =(int(image_x/4) + int(image_x/2 * (quadrant_no == 1 or quadrant_no == 4) ) ,int(image_y/4) + int(image_y/2 * (quadrant_no > 2)) )
    #print 'Quadrant center is at ' + str(quadrant_center)
    cgray = cv2.medianBlur(gray, 5)
    #cv2.circle(cgray, quadrant_center, 10, (255,255,255), -1)
    cv2.circle(frame, quadrant_center, 10, (255,255,255), -1)
    #cv2.imshow('frame',frame)
    #encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
    #ret = cv2.imencode('.jpg', cgray, buf)#, encode_param)
    # Persist the annotated frame; the upload calls below are disabled.
    cv2.imwrite("temp.jpg", frame)
    with open("temp.jpg", "rb") as content:
        #jpeg_im = content.read()
        files = {'media': content}
        #brightness = {"brightness": quadrant_intensity[quadrant_no-1][0] * 100/255}
        averagemask = {"brightness": averagemask}
        #url = 'http://10.42.0.1:5000/upload_data'
        #url2 = 'http://10.42.0.1:5000/brightness'
        #url = 'http://citronnade.mooo.com/upload'
        #url2 = 'http://citronnade.mooo.com/brightness'
        #requests.post(url, files=files)
        #requests.post(url2, data=averagemask)
    #key = cv2.waitKey(30) & 0xFF
    time.sleep(0.02)
    # clear the stream in preparation for the next frame
    raw_capture.truncate(0)
    #if the `q` key was pressed, break from the loop
    #if key == ord("q"):
    #    break
    #if cv2.waitKey(1) & 0xFF == ord('q'):
    #    break
# When everything done, release the capture
# NOTE(review): capture_continuous yields indefinitely and no break is
# active above, so this cleanup is likely only reached via an exception.
#cap.release()
cv2.destroyAllWindows()
#pwm.stop()
GPIO.cleanup()
#f.close()
| |
"""
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Return the duration *delta* in fractional seconds.

    Args:
        t_start: unused; retained so existing call sites keep working.
        delta: a ``datetime.timedelta`` holding the measured duration.

    Returns:
        float: elapsed time in seconds.  Uses ``timedelta.total_seconds()``,
        which also includes the ``days`` component that the previous
        hand-rolled ``seconds + microseconds`` formula silently dropped.
    """
    del t_start  # unused, kept for interface compatibility
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Run ``sampling(n_population, n_samples)`` once and time it.

    Garbage is collected beforehand so collection pauses are not charged
    to the sampling routine.  Returns the elapsed wall-clock seconds.
    """
    gc.collect()
    started = datetime.now()
    sampling(n_population, n_samples)
    elapsed = datetime.now() - started
    return compute_time(started, elapsed)
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Bench results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure()
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('time [s]')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| |
"""
kombu.pidbox
===============
Generic process mailbox.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
from copy import copy
from itertools import count
from .entity import Exchange, Queue
from .messaging import Consumer, Producer
from .utils import kwdict, uuid
__all__ = ["Node", "Mailbox"]
class Node(object):
    """One process-mailbox node.

    A node consumes command messages addressed to its hostname (or
    broadcast to all nodes), dispatches them to registered handler
    functions, and optionally publishes the handler's return value as a
    reply.
    """

    #: hostname of the node.
    hostname = None

    #: the :class:`Mailbox` this is a node for.
    mailbox = None

    #: map of method name/handlers.
    handlers = None

    #: current context (passed on to handlers)
    state = None

    #: current channel.
    channel = None

    def __init__(self, hostname, state=None, channel=None, handlers=None,
                 mailbox=None):
        self.channel = channel
        self.mailbox = mailbox
        self.hostname = hostname
        self.state = state
        if handlers is None:
            handlers = {}
        self.handlers = handlers

    def Consumer(self, channel=None, **options):
        """Create a Consumer bound to this node's broadcast queue.

        Messages are auto-acknowledged (``no_ack=True``) unless
        overridden in *options*.
        """
        options.setdefault("no_ack", True)
        return Consumer(channel or self.channel,
                        [self.mailbox.get_queue(self.hostname)],
                        **options)

    def handler(self, fun):
        """Decorator: register *fun* as a command handler under its name."""
        self.handlers[fun.__name__] = fun
        return fun

    def listen(self, channel=None, callback=None):
        """Start consuming command messages; return the started consumer."""
        callback = callback or self.handle_message
        consumer = self.Consumer(channel=channel,
                                 callbacks=[callback or self.handle_message])
        consumer.consume()
        return consumer

    def dispatch_from_message(self, message):
        """Dispatch a raw command *message* if it is addressed to this node.

        Messages carrying a ``destination`` list that does not include
        this hostname are ignored (returns None).
        """
        message = dict(message)
        method = message["method"]
        destination = message.get("destination")
        reply_to = message.get("reply_to")
        arguments = message.get("arguments")
        if not destination or self.hostname in destination:
            return self.dispatch(method, arguments, reply_to)

    def dispatch(self, method, arguments=None, reply_to=None):
        """Call the handler for *method*, replying with the result if asked.

        Handler errors (other than SystemExit) are captured and turned
        into an ``{"error": repr(exc)}`` reply instead of propagating.
        """
        arguments = arguments or {}
        handle = reply_to and self.handle_call or self.handle_cast
        try:
            reply = handle(method, kwdict(arguments))
        except SystemExit:
            raise
        except Exception, exc:
            reply = {"error": repr(exc)}
        if reply_to:
            self.reply({self.hostname: reply},
                       exchange=reply_to["exchange"],
                       routing_key=reply_to["routing_key"])
        return reply

    def handle(self, method, arguments={}):
        # The mutable default is safe here: arguments is only unpacked,
        # never mutated.
        return self.handlers[method](self.state, **arguments)

    def handle_call(self, method, arguments):
        """Handle a command that expects a reply."""
        return self.handle(method, arguments)

    def handle_cast(self, method, arguments):
        """Handle a fire-and-forget command."""
        return self.handle(method, arguments)

    def handle_message(self, body, message):
        """Default consumer callback: dispatch the decoded message body."""
        return self.dispatch_from_message(body)

    def reply(self, data, exchange, routing_key, **kwargs):
        """Publish *data* to the caller's reply exchange."""
        self.mailbox._publish_reply(data, exchange, routing_key,
                                    channel=self.channel)
class Mailbox(object):
    """Broadcast/direct command mailbox for a set of :class:`Node`\\ s.

    Publishes command messages to a shared exchange that every
    participating node consumes from, and collects replies on a
    per-request reply queue keyed by a unique ticket.
    """
    node_cls = Node

    #: Format of the command exchange name.
    exchange_fmt = "%s.pidbox"

    #: Format of the reply exchange name.
    reply_exchange_fmt = "reply.%s.pidbox"

    #: Name of application.
    namespace = None

    #: Connection (if bound).
    connection = None

    #: Exchange type (usually direct, or fanout for broadcast).
    type = "direct"

    #: mailbox exchange (init by constructor).
    exchange = None

    #: exchange to send replies to.
    reply_exchange = None

    def __init__(self, namespace, type="direct", connection=None):
        self.namespace = namespace
        self.connection = connection
        self.type = type
        self.exchange = self._get_exchange(self.namespace, self.type)
        self.reply_exchange = self._get_reply_exchange(self.namespace)

    def __call__(self, connection):
        """Return a shallow copy of this mailbox bound to *connection*."""
        bound = copy(self)
        bound.connection = connection
        return bound

    def Node(self, hostname=None, state=None, channel=None, handlers=None):
        """Create a node for this mailbox (defaults to this host's name)."""
        hostname = hostname or socket.gethostname()
        return self.node_cls(hostname, state, channel, handlers, mailbox=self)

    def call(self, destination, command, kwargs={}, timeout=None,
             callback=None, channel=None):
        """Send *command* to *destination* nodes and wait for replies.

        Note: the ``kwargs={}`` default is safe because the dict is only
        read, never mutated.
        """
        return self._broadcast(command, kwargs, destination,
                               reply=True, timeout=timeout,
                               callback=callback,
                               channel=channel)

    def cast(self, destination, command, kwargs={}):
        """Send *command* to *destination* nodes without waiting for replies."""
        return self._broadcast(command, kwargs, destination, reply=False)

    def abcast(self, command, kwargs={}):
        """Broadcast *command* to all nodes, fire-and-forget."""
        return self._broadcast(command, kwargs, reply=False)

    def multi_call(self, command, kwargs={}, timeout=1,
                   limit=None, callback=None, channel=None):
        """Broadcast *command* and collect up to *limit* replies."""
        return self._broadcast(command, kwargs, reply=True,
                               timeout=timeout, limit=limit,
                               callback=callback,
                               channel=channel)

    def get_reply_queue(self, ticket):
        """Return the transient auto-delete reply queue for *ticket*."""
        return Queue("%s.%s" % (ticket, self.reply_exchange.name),
                     exchange=self.reply_exchange,
                     routing_key=ticket,
                     durable=False,
                     auto_delete=True)

    def get_queue(self, hostname):
        """Return the command queue a node on *hostname* consumes from."""
        return Queue("%s.%s.pidbox" % (hostname, self.namespace),
                     exchange=self.exchange,
                     durable=False,
                     auto_delete=True)

    def _publish_reply(self, reply, exchange, routing_key, channel=None):
        """Publish *reply* to the caller's reply exchange.

        Creates (and afterwards closes) a channel unless one is given.
        """
        chan = channel or self.connection.channel()
        try:
            exchange = Exchange(exchange, exchange_type="direct",
                                delivery_mode="transient",
                                durable=False,
                                auto_delete=True)
            producer = Producer(chan, exchange=exchange,
                                auto_declare=True)
            producer.publish(reply, routing_key=routing_key)
        finally:
            # Only close the channel if we created it ourselves.
            channel or chan.close()

    def _publish(self, type, arguments, destination=None, reply_ticket=None,
                 channel=None):
        """Publish a command message to the mailbox exchange.

        When *reply_ticket* is set, a ``reply_to`` section is embedded so
        handlers know where to send their result.
        """
        message = {"method": type,
                   "arguments": arguments,
                   "destination": destination}
        if reply_ticket:
            message["reply_to"] = {"exchange": self.reply_exchange.name,
                                   "routing_key": reply_ticket}
        chan = channel or self.connection.channel()
        producer = Producer(chan, exchange=self.exchange)
        try:
            producer.publish(message)
        finally:
            channel or chan.close()

    def _broadcast(self, command, arguments=None, destination=None,
                   reply=False, timeout=1, limit=None, callback=None,
                   channel=None):
        """Publish *command*, optionally declaring a reply queue and
        collecting the replies.
        """
        arguments = arguments or {}
        reply_ticket = reply and uuid() or None

        if destination is not None and \
                not isinstance(destination, (list, tuple)):
            raise ValueError("destination must be a list/tuple not %s" % (
                type(destination)))

        # Set reply limit to number of destinations (if specified)
        if limit is None and destination:
            limit = destination and len(destination) or None

        chan = channel or self.connection.channel()
        try:
            if reply_ticket:
                # The reply queue must exist before the command goes out.
                self.get_reply_queue(reply_ticket)(chan).declare()

            self._publish(command, arguments, destination=destination,
                          reply_ticket=reply_ticket,
                          channel=chan)

            if reply_ticket:
                return self._collect(reply_ticket, limit=limit,
                                     timeout=timeout,
                                     callback=callback,
                                     channel=chan)
        finally:
            channel or chan.close()

    def _collect(self, ticket, limit=None, timeout=1,
                 callback=None, channel=None):
        """Drain replies for *ticket* until *limit* replies or *timeout*.

        Returns the list of reply bodies; *callback* (if given) is also
        invoked with each body as it arrives.
        """
        chan = channel or self.connection.channel()
        queue = self.get_reply_queue(ticket)
        # BUGFIX: consume on the channel actually in use.  Previously the
        # possibly-None *channel* argument was passed to Consumer instead
        # of *chan*, breaking collection when no channel was supplied.
        consumer = Consumer(chan, [queue], no_ack=True)
        responses = []

        def on_message(body, message):
            if callback:
                callback(body)
            responses.append(body)

        try:
            consumer.register_callback(on_message)
            consumer.consume()
            for i in limit and range(limit) or count():
                try:
                    self.connection.drain_events(timeout=timeout)
                except socket.timeout:
                    break
            chan.after_reply_message_received(queue.name)
            return responses
        finally:
            channel or chan.close()

    def _get_exchange(self, namespace, type):
        """Build the (transient, auto-delete) command exchange."""
        return Exchange(self.exchange_fmt % namespace,
                        type=type,
                        durable=False,
                        auto_delete=True,
                        delivery_mode="transient")

    def _get_reply_exchange(self, namespace):
        """Build the (transient, auto-delete) reply exchange."""
        return Exchange(self.reply_exchange_fmt % namespace,
                        type="direct",
                        durable=False,
                        auto_delete=True,
                        delivery_mode="transient")
| |
def option_price_call_american_binomial(S, K, r, sigma, t, steps):
    """Price an American call with a Cox-Ross-Rubinstein binomial tree.

    Converted to Python from "Financial Numerical Recipes in C" by
    Bernt Arne Odegaard (http://finance.bi.no/~bernt/gcc_prog/index.html).

    @param S: spot (underlying) price
    @param K: strike (exercise) price
    @param r: interest rate
    @param sigma: volatility
    @param t: time to maturity
    @param steps: number of steps in the binomial tree
    @return: Option price
    """
    growth = exp(r * (t / steps))          # per-step capitalization factor
    discount = 1.0 / growth
    up = exp(sigma * sqrt(t / steps))      # up move factor
    down = 1.0 / up
    uu = up * up
    q_up = (growth - down) / (up - down)   # risk-neutral up probability
    q_down = 1.0 - q_up

    # Terminal underlying prices, lowest node first.
    prices = array.array('d', (0 for _ in range(steps + 1)))
    prices[0] = S * pow(down, steps)
    for node in range(1, steps + 1):
        prices[node] = uu * prices[node - 1]

    # Call payoffs at maturity.
    values = array.array('d', (max(0.0, prices[node] - K)
                               for node in range(steps + 1)))

    # Roll back through the tree, checking early exercise at each node.
    for step in range(steps - 1, -1, -1):
        for node in range(step + 1):
            values[node] = (q_up * values[node + 1] + q_down * values[node]) * discount
            prices[node] = down * prices[node + 1]
            values[node] = max(values[node], prices[node] - K)
    return values[0]
def option_price_put_american_binomial(S, K, r, sigma, t, steps):
    """Price an American put with a Cox-Ross-Rubinstein binomial tree.

    Converted to Python from "Financial Numerical Recipes in C" by
    Bernt Arne Odegaard (http://finance.bi.no/~bernt/gcc_prog/index.html).

    @param S: spot (underlying) price
    @param K: strike (exercise) price
    @param r: interest rate
    @param sigma: volatility
    @param t: time to maturity
    @param steps: number of steps in the binomial tree
    @return: Option price
    """
    growth = exp(r * (t / steps))          # per-step capitalization factor
    discount = 1.0 / growth
    up = exp(sigma * sqrt(t / steps))      # up move factor
    down = 1.0 / up
    uu = up * up
    q_up = (growth - down) / (up - down)   # risk-neutral up probability
    q_down = 1.0 - q_up

    # Terminal underlying prices, lowest node first.
    prices = array.array('d', (0 for _ in range(steps + 1)))
    prices[0] = S * pow(down, steps)
    for node in range(1, steps + 1):
        prices[node] = uu * prices[node - 1]

    # Put payoffs at maturity.
    values = array.array('d', (max(0.0, K - prices[node])
                               for node in range(steps + 1)))

    # Roll back through the tree, checking early exercise at each node.
    for step in range(steps - 1, -1, -1):
        for node in range(step + 1):
            values[node] = (q_up * values[node + 1] + q_down * values[node]) * discount
            prices[node] = down * prices[node + 1]
            values[node] = max(values[node], K - prices[node])
    return values[0]
def option_price_call_american_discrete_dividends_binomial(S, K, r, sigma, t, steps, dividend_times, dividend_amounts):
    """American Option (Call) for dividends with specific (discrete) dollar amounts
    using binomial approximations

    Converted to Python from "Financial Numerical Recipes in C" by:
    Bernt Arne Odegaard
    http://finance.bi.no/~bernt/gcc_prog/index.html

    @param S: spot (underlying) price
    @param K: strike (exercise) price,
    @param r: interest rate
    @param sigma: volatility
    @param t: time to maturity
    @param steps: Number of steps in binomial tree
    @param dividend_times: Array of dividend times. (Ex: [0.25, 0.75] for 1/4 and 3/4 of a year)
    @param dividend_amounts: Array of dividend amounts for the 'dividend_times'
    @return: Option price
    """
    # With fixed dollar dividends the tree does not recombine, so a new tree
    # is started at each ex-dividend date: at every node on that date the
    # value of keeping the option alive is priced recursively with one
    # dividend fewer, then compared against immediate exercise.
    no_dividends = len(dividend_times)
    if (no_dividends==0):
        return option_price_call_american_binomial(S,K,r,sigma,t,steps) # just do regular
    steps_before_dividend = (int)(dividend_times[0]/t*steps)
    R = exp(r*(t/steps))          # per-step capitalization factor
    Rinv = 1.0/R                  # per-step discount factor
    u = exp(sigma*sqrt(t/steps))  # up move factor
    d = 1.0/u
    pUp = (R-d)/(u-d)             # risk-neutral up probability
    pDown = 1.0 - pUp
    dividend_amount = dividend_amounts[0]
    tmp_dividend_times = array.array('d', (0 for i in range(0,no_dividends-1))) # temporaries with
    tmp_dividend_amounts = array.array('d', (0 for i in range(0,no_dividends-1))) # one less dividend
    for i in xrange(0, no_dividends-1):
        tmp_dividend_amounts[i] = dividend_amounts[i+1]
        tmp_dividend_times[i] = dividend_times[i+1] - dividend_times[0]
    # Tree only spans the period up to the first ex-dividend date.
    prices = array.array('d', (0 for i in range(0,steps_before_dividend+1)))
    call_values = array.array('d', (0 for i in range(0,steps_before_dividend+1)))
    prices[0] = S*pow(d, steps_before_dividend)  # lowest node at the ex-dividend date
    for i in xrange(1, steps_before_dividend+1):
        prices[i] = u*u*prices[i-1]
    for i in xrange(0, steps_before_dividend+1):
        # Value of keeping the option alive: priced recursively with one
        # dividend fewer, on the post-dividend stock price.
        value_alive = option_price_call_american_discrete_dividends_binomial(prices[i]-dividend_amount,K, r, sigma,
                t-dividend_times[0],  # time after first dividend
                steps-steps_before_dividend,
                tmp_dividend_times,
                tmp_dividend_amounts)
        call_values[i] = max(value_alive,(prices[i]-K)) # compare to exercising now
    for step in xrange(steps_before_dividend-1, -1, -1):
        for i in xrange(0, step+1):
            prices[i] = d*prices[i+1]
            call_values[i] = (pDown*call_values[i]+pUp*call_values[i+1])*Rinv
            call_values[i] = max(call_values[i], prices[i]-K)  # check for early exercise
    return call_values[0]
def option_price_put_american_discrete_dividends_binomial(S, K, r, sigma, t, steps, dividend_times, dividend_amounts):
    """American Option (Put) for dividends with specific (discrete) dollar amounts
    using binomial approximations.

    Converted to Python from "Financial Numerical Recipes in C" by:
    Bernt Arne Odegaard
    http://finance.bi.no/~bernt/gcc_prog/index.html

    @param S: spot (underlying) price
    @param K: strike (exercise) price,
    @param r: interest rate
    @param sigma: volatility
    @param t: time to maturity
    @param steps: Number of steps in binomial tree
    @param dividend_times: Array of dividend times. (Ex: [0.25, 0.75] for 1/4 and 3/4 of a year)
    @param dividend_amounts: Array of dividend amounts for the 'dividend_times'
    @return: Option price
    """
    # given an amount of dividend, the binomial tree does not recombine, have to
    # start a new tree at each ex-dividend date.
    # do this recursively, at each ex dividend date, at each step, put the
    # binomial formula starting at that point to calculate the value of the live
    # option, and compare that to the value of exercising now.
    no_dividends = len(dividend_times)
    if (no_dividends == 0): # just take the regular binomial
        return option_price_put_american_binomial(S,K,r,sigma,t,steps)
    steps_before_dividend = (int)(dividend_times[0]/t*steps);
    R = exp(r*(t/steps))          # per-step capitalization factor
    Rinv = 1.0/R                  # per-step discount factor
    u = exp(sigma*sqrt(t/steps))  # up move factor
    uu= u*u
    d = 1.0/u
    pUp = (R-d)/(u-d)             # risk-neutral up probability
    pDown = 1.0 - pUp
    dividend_amount = dividend_amounts[0]
    tmp_dividend_times = array.array('d', (0 for i in range(0,no_dividends-1))) # temporaries with
    tmp_dividend_amounts = array.array('d', (0 for i in range(0,no_dividends-1))) # one less dividend
    for i in xrange(0, no_dividends-1):
        tmp_dividend_amounts[i] = dividend_amounts[i+1]
        tmp_dividend_times[i]= dividend_times[i+1] - dividend_times[0]
    # Tree only spans the period up to the first ex-dividend date.
    prices = array.array('d', (0 for i in range(0,steps_before_dividend+1)))
    put_values = array.array('d', (0 for i in range(0,steps_before_dividend+1)))
    prices[0] = S*pow(d, steps_before_dividend)
    for i in xrange(1, steps_before_dividend+1):
        prices[i] = uu*prices[i-1]
    for i in xrange(0, steps_before_dividend+1):
        value_alive = option_price_put_american_discrete_dividends_binomial(
            prices[i]-dividend_amount, K, r, sigma,
            t-dividend_times[0],  # time after first dividend
            steps-steps_before_dividend,
            tmp_dividend_times, tmp_dividend_amounts)
        # what is the value of keeping the option alive? Found recursively,
        # with one less dividend, the stock price is current value
        # less the dividend.
        put_values[i] = max(value_alive,(K-prices[i])) # compare to exercising now
    for step in xrange(steps_before_dividend-1, -1, -1):
        for i in xrange(0, step+1):
            prices[i] = d*prices[i+1]
            put_values[i] = (pDown*put_values[i]+pUp*put_values[i+1])*Rinv
            put_values[i] = max(put_values[i], K-prices[i]) # check for exercise
    return put_values[0]
def option_price_call_american_proportional_dividends_binomial(S, K, r, sigma,
        time, no_steps, dividend_times, dividend_yields):
    """American Option (Call) with proportional dividend payments
    using binomial approximations.

    Converted to Python from "Financial Numerical Recipes in C" by:
    Bernt Arne Odegaard
    http://finance.bi.no/~bernt/gcc_prog/index.html

    @param S: spot (underlying) price
    @param K: strike (exercise) price,
    @param r: interest rate
    @param sigma: volatility
    @param time: time to maturity
    @param no_steps: Number of steps in binomial tree
    @param dividend_times: Array of dividend times. (Ex: [0.25, 0.75] for 1/4 and 3/4 of a year)
    @param dividend_yields: Array of dividend yields for the 'dividend_times'
    @return: Option price
    """
    # With proportional (yield) dividends the tree recombines.  Note that the
    # last dividend date should be before the expiry date; problems arise if
    # a dividend falls on a terminal node.
    no_dividends = len(dividend_times)
    if no_dividends == 0:
        return option_price_call_american_binomial(S, K, r, sigma, time, no_steps)  # price w/o dividends
    delta_t = time / no_steps
    R = exp(r * delta_t)                # per-step capitalization factor
    Rinv = 1.0 / R                      # per-step discount factor
    u = exp(sigma * sqrt(delta_t))      # up move factor
    uu = u * u
    d = 1.0 / u
    pUp = (R - d) / (u - d)             # risk-neutral up probability
    pDown = 1.0 - pUp
    # Tree steps at which each dividend is paid.
    dividend_steps = [int(dividend_times[i] / time * no_steps)
                      for i in range(no_dividends)]
    prices = array.array('d', (0 for i in range(0, no_steps + 1)))
    call_prices = array.array('d', (0 for i in range(0, no_steps + 1)))
    # Lowest terminal price, adjusted downward for every dividend payout.
    prices[0] = S * pow(d, no_steps)
    for i in range(0, no_dividends):
        prices[0] *= (1.0 - dividend_yields[i])
    for i in range(1, no_steps + 1):
        prices[i] = uu * prices[i - 1]
    # Terminal payoffs.  BUGFIX: start at node 0 -- the original started at
    # node 1, leaving the lowest terminal node's payoff at zero.
    for i in range(0, no_steps + 1):
        call_prices[i] = max(0.0, (prices[i] - K))
    for step in range(no_steps - 1, -1, -1):
        for i in range(0, no_dividends):  # check whether dividend paid
            if step == dividend_steps[i]:
                # Undo the dividend adjustment for nodes alive at this step.
                for j in range(0, step + 2):
                    prices[j] *= (1.0 / (1.0 - dividend_yields[i]))
        for i in range(0, step + 1):
            call_prices[i] = (pDown * call_prices[i] + pUp * call_prices[i + 1]) * Rinv
            prices[i] = d * prices[i + 1]
            call_prices[i] = max(call_prices[i], prices[i] - K)  # check for exercise
    return call_prices[0]
def option_price_put_american_proportional_dividends_binomial(S, K, r, sigma,
        time, no_steps, dividend_times, dividend_yields):
    """American Option (Put) with proportional dividend payments
    using binomial approximations.

    Converted to Python from "Financial Numerical Recipes in C" by:
    Bernt Arne Odegaard
    http://finance.bi.no/~bernt/gcc_prog/index.html

    @param S: spot (underlying) price
    @param K: strike (exercise) price,
    @param r: interest rate
    @param sigma: volatility
    @param time: time to maturity
    @param no_steps: Number of steps in binomial tree
    @param dividend_times: Array of dividend times. (Ex: [0.25, 0.75] for 1/4 and 3/4 of a year)
    @param dividend_yields: Array of dividend yields for the 'dividend_times'
    @return: Option price
    """
    # When one assumes a proportional dividend yield the binomial tree
    # recombines.  The last dividend date should be before the expiry date.
    no_dividends = len(dividend_times)
    if no_dividends == 0:  # just take the regular binomial
        return option_price_put_american_binomial(S, K, r, sigma, time, no_steps)
    R = exp(r * (time / no_steps))          # per-step capitalization factor
    Rinv = 1.0 / R                          # per-step discount factor
    u = exp(sigma * sqrt(time / no_steps))  # up move factor
    uu = u * u
    d = 1.0 / u
    pUp = (R - d) / (u - d)                 # risk-neutral up probability
    pDown = 1.0 - pUp
    # Tree steps at which each dividend is paid.
    dividend_steps = [int(dividend_times[i] / time * no_steps)
                      for i in range(no_dividends)]
    prices = array.array('d', (0 for i in range(0, no_steps + 1)))
    put_prices = array.array('d', (0 for i in range(0, no_steps + 1)))
    # Lowest terminal price, adjusted downward for every dividend payout.
    prices[0] = S * pow(d, no_steps)
    for i in range(0, no_dividends):
        prices[0] *= (1.0 - dividend_yields[i])
    for i in range(1, no_steps + 1):
        prices[i] = uu * prices[i - 1]  # terminal tree nodes
    # Terminal payoffs.  BUGFIX: start at node 0 -- the original started at
    # node 1, which for a put skipped the deepest in-the-money node and
    # left its payoff at zero, materially underpricing the option.
    for i in range(0, no_steps + 1):
        put_prices[i] = max(0.0, (K - prices[i]))
    for step in range(no_steps - 1, -1, -1):
        for i in range(0, no_dividends):  # check whether dividend paid
            if step == dividend_steps[i]:
                # Undo the dividend adjustment for nodes alive at this step.
                for j in range(0, step + 2):
                    prices[j] *= (1.0 / (1.0 - dividend_yields[i]))
        for i in range(0, step + 1):
            prices[i] = d * prices[i + 1]
            put_prices[i] = (pDown * put_prices[i] + pUp * put_prices[i + 1]) * Rinv
            put_prices[i] = max(put_prices[i], K - prices[i])  # check for exercise
    return put_prices[0]
| |
import subprocess
import tempfile
import shutil
#import fdfgen
import re
import string
import codecs
import logging
import uuid
from xfdfgen import Xfdf
import yaml
import PyPDF2 as pypdf
#import pypdftk
from PIL import Image
from docassemble.base.error import DAError
from docassemble.base.pdfa import pdf_to_pdfa
from docassemble.base.logger import logmessage
from docassemble.base.functions import word
from docassemble.base.config import daconfig
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import resolve1, PDFObjRef
from pdfminer.pdfpage import PDFPage
logging.getLogger('pdfminer').setLevel(logging.ERROR)
PDFTK_PATH = 'pdftk'
QPDF_PATH = 'qpdf'
def set_pdftk_path(path):
    """Set the path of the pdftk executable used throughout this module."""
    global PDFTK_PATH
    PDFTK_PATH = path
def set_qpdf_path(path):
    """Set the path of the qpdf executable used throughout this module."""
    global QPDF_PATH
    QPDF_PATH = path
def read_fields(pdffile):
    """Extract the fillable form fields of a PDF file.

    Walks the document's AcroForm field tree and returns a list of
    (name, default, pageno, rect, field_type, export_value) tuples,
    sorted by page number and position on the page (see fieldsorter()).

    :param pdffile: path of the PDF file to inspect
    :return: sorted list of field tuples, or [] if there is no AcroForm
    """
    outfields = []
    # Keep the file open for the whole traversal: pdfminer resolves
    # indirect objects lazily from the underlying stream.  (The original
    # code never closed this handle.)
    with open(pdffile, 'rb') as fp:
        id_to_page = {}
        parser = PDFParser(fp)
        doc = PDFDocument(parser)
        # Map internal page object ids to 1-based page numbers.
        pageno = 1
        for page in PDFPage.create_pages(doc):
            id_to_page[page.pageid] = pageno
            pageno += 1
        if 'AcroForm' not in doc.catalog:
            return []
        fields = resolve1(doc.catalog['AcroForm'])['Fields']
        recursively_add_fields(fields, id_to_page, outfields)
    return sorted(outfields, key=fieldsorter)
def fieldsorter(x):
    """Sort key for a field tuple: page number, then top-to-bottom, then left-to-right.

    x[2] is the page number and x[3] the field's rectangle; the y
    coordinate is negated so fields nearer the top of the page sort first.
    """
    rect = x[3]
    if rect and isinstance(rect, list):
        return (x[2], -1 * rect[1], rect[0])
    return (x[2], 0, 0)
def recursively_add_fields(fields, id_to_page, outfields, prefix=''):
    """Walk a PDF AcroForm field tree, appending one tuple per terminal field.

    Each appended tuple is (name, default, pageno, rect, field_type,
    export_value), matching what read_fields() returns.  Fields nested
    via /Kids produce dotted names such as 'parent.child'.

    :param fields: list of field objects (or a PDFObjRef resolving to one)
    :param id_to_page: mapping of page object ids to 1-based page numbers
    :param outfields: result list, mutated in place
    :param prefix: dotted name prefix accumulated from parent fields
    """
    if isinstance(fields, PDFObjRef):
        fields = resolve1(fields)
    for i in fields:
        field = resolve1(i)
        if isinstance(field, PDFObjRef):
            field = resolve1(field)
        try:
            # /T name, /V value, /Rect position, /P owning page, /FT field type
            name, value, rect, page, field_type = field.get('T'), field.get('V'), field.get('Rect'), field.get('P'), field.get('FT')
        except:
            logmessage("Skipping field " + repr(field))
            continue
        # The rectangle may itself be indirect, or contain indirect entries.
        if isinstance(rect, PDFObjRef):
            rect = resolve1(rect)
        if isinstance(rect, list):
            new_list = []
            for item in rect:
                if isinstance(item, PDFObjRef):
                    new_list.append(resolve1(item))
                else:
                    new_list.append(item)
            rect = new_list
        else:
            rect = []
        # Normalize name and value to clean strings (strips BOM/NUL bytes).
        if name is not None:
            if not isinstance(name, bytes):
                name = bytes(str(name), encoding='utf-8')
            name = remove_nonprintable_bytes_limited(name)
        if value is not None:
            if not isinstance(value, bytes):
                value = bytes(str(value), encoding='utf-8')
            value = remove_nonprintable_bytes_limited(value)
        #logmessage("name is " + repr(name) + " and FT is |" + repr(str(field_type)) + "| and value is " + repr(value))
        # Translate the owning page object into a 1-based page number.
        if page is not None and hasattr(page, 'objid'):
            try:
                pageno = id_to_page[page.objid]
            except:
                pageno = 1
        else:
            pageno = 1
        export_value = None
        if str(field_type) in ('/Btn', "/'Btn'"):
            # Checkbox/button: find the "on" state name from the
            # appearance dictionary; anything that is not an off state.
            export_value = 'Yes'
            try:
                for key in list(field['AP']['N'].keys()):
                    if key in ('Off', 'off', 'No', 'no'):
                        continue
                    export_value = key
                    break
            except:
                pass
            if value == '/Yes':
                default = export_value
            else:
                default = "No"
        elif str(field_type) in ('/Sig', "/'Sig'"):
            # Signature fields default to the user's signature variable.
            default = '${ user.signature }'
        else:
            if value is not None:
                #for val in value:
                #    logmessage("Got a " + str(ord(val)))
                #logmessage(repr(value.decode('utf8')))
                #default = re.sub(r'^\xc3\xbe\xc3\xbf', '', value)
                default = value
                if not default:
                    default = word("something")
            else:
                default = word("something")
        kids = field.get('Kids')
        if kids:
            # Non-terminal field: recurse, extending the dotted prefix.
            if name is None:
                recursively_add_fields(kids, id_to_page, outfields, prefix=prefix)
            else:
                if prefix == '':
                    recursively_add_fields(kids, id_to_page, outfields, prefix=name)
                else:
                    recursively_add_fields(kids, id_to_page, outfields, prefix=prefix + '.' + name)
        else:
            # Terminal field: emit under its fully-qualified name.
            if prefix != '' and name is not None:
                outfields.append((prefix + '.' + name, default, pageno, rect, field_type, export_value))
            elif prefix == '':
                outfields.append((name, default, pageno, rect, field_type, export_value))
            else:
                outfields.append((prefix, default, pageno, rect, field_type, export_value))
def read_fields_pdftk(pdffile):
    """List (name, default) pairs for the form fields of a PDF, using pdftk.

    Runs ``pdftk <file> dump_data_fields`` and parses the YAML-like
    output.  Returns None when pdftk produces no output at all.
    """
    output = subprocess.check_output([PDFTK_PATH, pdffile, 'dump_data_fields']).decode()
    if not output:
        return None
    results = []
    for field in yaml.load_all(output, Loader=yaml.FullLoader):
        # Buttons (checkboxes) default to "No"; everything else gets a
        # generic placeholder word.
        if field.get('FieldType') == 'Button':
            the_default = "No"
        else:
            the_default = word("something")
        if 'FieldName' in field:
            results.append((field['FieldName'], the_default))
    return results
pdf_parts = ['/AcroForm', '/Metadata', '/OCProperties', '/StructTreeRoot', '/OpenAction', '/AA', '/MarkInfo', '/Lang']
def recursive_get_pages(indirect_obj, result):
    """Collect page indirect objects depth-first into *result*.

    Appends the indirect reference itself (not the resolved page) for
    every node whose /Type is /Page, then descends through /Kids.
    """
    node = indirect_obj.getObject()
    if '/Type' in node and node['/Type'] == '/Page':
        result.append(indirect_obj)
    if '/Kids' in node:
        for child in node['/Kids']:
            recursive_get_pages(child, result)
def get_page_hash(obj):
    """Map each page's object id number to its 1-based page index.

    :param obj: a PDF trailer dictionary (with /Root//Pages)
    :return: dict of {idnum: page index}
    """
    page_list = []
    recursive_get_pages(obj['/Root']['/Pages'], page_list)
    return {item.idnum: position
            for position, item in enumerate(page_list, start=1)}
def recursive_add_bookmark(reader, writer, outlines, parent=None):
    """Copy a reader's outline (bookmark) tree into a writer.

    *outlines* is the nested list returned by PdfFileReader.getOutlines();
    a sub-list holds the children of the bookmark that precedes it.
    Relies on reader.idnum_to_page and writer.page_list having been set
    up by the caller before this is invoked.
    """
    #logmessage("recursive_add_bookmark")
    cur_bm = None
    for destination in outlines:
        if isinstance(destination, list):
            # Nested list = children of the most recently added bookmark.
            #logmessage("Going into subbookmark")
            recursive_add_bookmark(reader, writer, destination, parent=cur_bm)
        else:
            # Skip destinations that do not resolve to a page we copied.
            #logmessage("page is " + str(destination.page))
            if isinstance(destination.page, pypdf.generic.NullObject):
                #logmessage("continue 1")
                continue
            if not isinstance(destination.page, pypdf.generic.IndirectObject):
                #logmessage("continue 2")
                continue
            if destination.page.idnum not in reader.idnum_to_page:
                #logmessage("continue 3")
                continue
            if reader.idnum_to_page[destination.page.idnum] > len(writer.page_list):
                #logmessage("continue 4")
                continue
            destination_page = writer.page_list[reader.idnum_to_page[destination.page.idnum] - 1]
            # Recreate the bookmark with the same fit type; each fit type
            # takes a different set of coordinate arguments.
            if destination.typ in ('/FitH', '/FitBH'):
                cur_bm = writer.addBookmark(destination.title, destination_page, parent, None, False, False, destination.typ, destination.top)
            elif destination.typ in ('/FitV', '/FitBV'):
                cur_bm = writer.addBookmark(destination.title, destination_page, parent, None, False, False, destination.typ, destination.left)
            elif destination.typ == '/FitR':
                cur_bm = writer.addBookmark(destination.title, destination_page, parent, None, False, False, destination.typ, destination.left, destination.bottom, destination.right, destination.top)
            elif destination.typ == '/XYZ':
                cur_bm = writer.addBookmark(destination.title, destination_page, parent, None, False, False, destination.typ, destination.left, destination.top, destination.zoom)
            else:
                cur_bm = writer.addBookmark(destination.title, destination_page, parent, None, False, False, destination.typ)
            #logmessage("Added bookmark " + destination.title)
def safe_pypdf_reader(filename):
    """Open *filename* with PyPDF2, repairing it with qpdf if unreadable.

    The file handle is intentionally left open: PyPDF2 reads page data
    lazily from the underlying stream.  When repair is needed, the
    rewritten copy goes to a persistent (delete=False) temporary file.

    :raises DAError: if qpdf cannot rewrite the file.
    """
    try:
        return pypdf.PdfFileReader(open(filename, 'rb'), overwriteWarnings=False)
    except pypdf.utils.PdfReadError:
        # PyPDF2 choked on the file; ask qpdf to rewrite it into a
        # temporary file and retry with that.
        new_filename = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
        qpdf_subprocess_arguments = [QPDF_PATH, filename, new_filename.name]
        try:
            result = subprocess.run(qpdf_subprocess_arguments, timeout=60, check=False).returncode
        except subprocess.TimeoutExpired:
            result = 1
            logmessage("fill_template: call to qpdf took too long")
        if result != 0:
            logmessage("Failed to convert PDF template " + str(filename))
            raise DAError("Call to qpdf failed for template " + str(filename) + " where arguments were " + " ".join(qpdf_subprocess_arguments))
        return pypdf.PdfFileReader(open(new_filename.name, 'rb'), overwriteWarnings=False)
def fill_template(template, data_strings=None, data_names=None, hidden=None, readonly=None, images=None, pdf_url=None, editable=True, pdfa=False, password=None, template_password=None, default_export_value=None):
    """Fill the form fields of a PDF template and return the output file path.

    :param template: path of the PDF template with fillable fields
    :param data_strings: list of (field name, value) pairs to fill in
    :param data_names: accepted for interface compatibility; not used here
    :param hidden: accepted for interface compatibility; not used here
    :param readonly: accepted for interface compatibility; not used here
    :param images: list of (field name, file_info) pairs; each image is
        stamped over the named signature field's rectangle
    :param pdf_url: URL recorded in the XFDF file (defaults to 'file.pdf')
    :param editable: if True the filled fields stay editable; otherwise
        the output is flattened
    :param pdfa: if True, convert the result to PDF/A
    :param password: optional password spec (see get_passwords()) for
        encrypting the result
    :param template_password: password for decrypting the template itself
    :param default_export_value: fallback "on" value for checkbox fields
    :return: path of the filled-in temporary PDF file
    :raises DAError: if the template has no fields or an external tool fails
    """
    if data_strings is None:
        data_strings = []
    if data_names is None:
        data_names = []
    if hidden is None:
        hidden = []
    if readonly is None:
        readonly = []
    if images is None:
        images = []
    if pdf_url is None:
        pdf_url = 'file.pdf'
    if not pdf_url.endswith('.pdf'):
        pdf_url += '.pdf'
    the_fields = read_fields(template)
    if len(the_fields) == 0:
        raise DAError("PDF template has no fields in it.")
    # Determine each checkbox's export ("on") value so that boolean-ish
    # inputs can be translated to what the PDF actually expects.
    export_values = {}
    for field, default, pageno, rect, field_type, export_value in the_fields:
        field_type = re.sub(r'[^/A-Za-z]', '', str(field_type))
        if field_type in ('/Btn', "/'Btn'"):
            export_values[field] = export_value or default_export_value or 'Yes'
    if len(export_values) > 0:
        # Normalize supplied checkbox values to the export value (or its
        # matching "off" counterpart).
        new_data_strings = []
        for key, val in data_strings:
            if key in export_values:
                if str(val) in ('Yes', 'yes', 'True', 'true', 'On', 'on', export_values[key]):
                    val = export_values[key]
                else:
                    if export_values[key] == 'On':
                        val = 'Off'
                    elif export_values[key] == 'on':
                        val = 'off'
                    elif export_values[key] == 'yes':
                        val = 'no'
                    else:
                        val = 'No'
            new_data_strings.append((key, val))
        data_strings = new_data_strings
    # Write the field data to an XFDF file for pdftk's fill_form.
    data_dict = {}
    for key, val in data_strings:
        data_dict[key] = val
    fdf = Xfdf(pdf_url, data_dict)
    #fdf = fdfgen.forge_fdf(pdf_url, data_strings, data_names, hidden, readonly)
    fdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".xfdf", delete=False)
    #fdf_file.write(fdf)
    fdf_file.close()
    fdf.write_xfdf(fdf_file.name)
    # if False:
    #     fdf_dict = {}
    #     for key, val in data_strings:
    #         fdf_dict[key] = val
    #     xfdf_temp_filename = pypdftk.gen_xfdf(fdf_dict)
    #     xfdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=\
    #         ".xfdf", delete=False)
    #     shutil.copyfile(xfdf_temp_filename, xfdf_file.name)
    pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
    # Decrypt the template first if a template password was supplied.
    if template_password is not None:
        template_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
        qpdf_subprocess_arguments = [QPDF_PATH, '--decrypt', '--password=' + template_password, template, template_file.name]
        try:
            result = subprocess.run(qpdf_subprocess_arguments, timeout=60, check=False).returncode
        except subprocess.TimeoutExpired:
            result = 1
            logmessage("fill_template: call to qpdf took too long")
        if result != 0:
            logmessage("Failed to decrypt PDF template " + str(template))
            raise DAError("Call to qpdf failed for template " + str(template) + " where arguments were " + " ".join(qpdf_subprocess_arguments))
        template = template_file.name
    # Fill the form with pdftk; keep appearances if the output must stay
    # editable or images still need to be stamped on afterwards.
    subprocess_arguments = [PDFTK_PATH, template, 'fill_form', fdf_file.name, 'output', pdf_file.name]
    #logmessage("Arguments are " + str(subprocess_arguments))
    if editable or len(images) > 0:
        subprocess_arguments.append('need_appearances')
    else:
        subprocess_arguments.append('flatten')
    try:
        result = subprocess.run(subprocess_arguments, timeout=600, check=False).returncode
    except subprocess.TimeoutExpired:
        result = 1
        logmessage("fill_template: call to pdftk fill_form took too long")
    if result != 0:
        logmessage("Failed to fill PDF form " + str(template))
        raise DAError("Call to pdftk failed for template " + str(template) + " where arguments were " + " ".join(subprocess_arguments))
    if len(images) > 0:
        # Stamp each image over its signature field: trim it, scale it to
        # the field rectangle with ImageMagick, and merge the resulting
        # one-page overlay PDF onto the right page.
        fields = {}
        for field, default, pageno, rect, field_type, export_value in the_fields:
            if str(field_type) in ('/Sig', "/'Sig'"):
                fields[field] = {'pageno': pageno, 'rect': rect}
        image_todo = []
        for field, file_info in images:
            if field not in fields:
                logmessage("field name " + str(field) + " not found in PDF file")
                continue
            #logmessage("Need to put image on page " + str(fields[field]['pageno']))
            temp_png = tempfile.NamedTemporaryFile(mode="wb", suffix=".png")
            args = [daconfig.get('imagemagick', 'convert'), file_info['fullpath'], "-trim", "+repage", "+profile", '*', '-density', '0', temp_png.name]
            try:
                result = subprocess.run(args, timeout=60, check=False).returncode
            except subprocess.TimeoutExpired:
                logmessage("fill_template: convert took too long")
                result = 1
            if result == 1:
                logmessage("failed to trim file: " + " ".join(args))
                continue
            # Compute a density that makes the image fit the field rect.
            im = Image.open(temp_png.name)
            width, height = im.size
            xone, yone, xtwo, ytwo = fields[field]['rect']
            dppx = width/(xtwo-xone)
            dppy = height/(ytwo-yone)
            if dppx > dppy:
                dpp = dppx
            else:
                dpp = dppy
            extent_x, extent_y = xone*dpp+width, yone*dpp+height
            overlay_pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
            args = [daconfig.get('imagemagick', 'convert'), temp_png.name, "-background", "none", "-density", str(int(dpp*72)), "-gravity", "NorthEast", "-extent", str(int(extent_x)) + 'x' + str(int(extent_y)), overlay_pdf_file.name]
            try:
                result = subprocess.run(args, timeout=60, check=False).returncode
            except subprocess.TimeoutExpired:
                result = 1
                logmessage("fill_template: call to convert took too long")
            if result == 1:
                logmessage("failed to make overlay: " + " ".join(args))
                continue
            image_todo.append({'overlay_file': overlay_pdf_file.name, 'pageno': fields[field]['pageno']})
        if len(image_todo) > 0:
            # Rebuild the PDF with the overlays merged in, preserving the
            # catalog entries listed in pdf_parts and the bookmark tree.
            new_pdf_file = tempfile.NamedTemporaryFile(mode="wb", suffix=".pdf")
            original = safe_pypdf_reader(pdf_file.name)
            original.idnum_to_page = get_page_hash(original.trailer)
            catalog = original.trailer["/Root"]
            writer = DAPdfFileWriter()
            tree = {}
            for part in pdf_parts:
                if part in catalog:
                    tree[part] = catalog[part]
            for i in range(original.getNumPages()):
                for item in image_todo:
                    if (item['pageno'] - 1) == i:
                        page = original.getPage(i)
                        foreground_file = safe_pypdf_reader(item['overlay_file'])
                        foreground_page = foreground_file.getPage(0)
                        page.mergePage(foreground_page)
            for i in range(original.getNumPages()):
                newpage = original.getPage(i)
                writer.addPage(newpage)
            for key, val in tree.items():
                writer._root_object.update({pypdf.generic.NameObject(key): val})
            writer.page_list = []
            recursive_get_pages(writer._root_object['/Pages'], writer.page_list)
            recursive_add_bookmark(original, writer, original.getOutlines())
            with open(new_pdf_file.name, "wb") as outFile:
                writer.write(outFile)
            shutil.copyfile(new_pdf_file.name, pdf_file.name)
    # Post-processing: flattening, PDF/A conversion, JavaScript/calculation
    # replication (which also encrypts), or plain encryption.
    if (not editable) and len(images) > 0:
        flatten_pdf(pdf_file.name)
    if pdfa:
        pdf_to_pdfa(pdf_file.name)
    if editable:
        replicate_js_and_calculations(template, pdf_file.name, password)
    elif password:
        pdf_encrypt(pdf_file.name, password)
    return pdf_file.name
def get_passwords(password):
    """Normalize a password specification into (owner_password, user_password).

    Accepts a scalar (used for both passwords), a two-element list of
    [owner, user], or a dict with 'owner' and 'user' keys (each
    defaulting to 'password').  Returns (None, None) when *password* is
    None.

    :raises DAError: if the specification is of an unsupported type.
    """
    if password is None:
        return (None, None)
    if isinstance(password, (str, bool, int, float)):
        as_text = str(password).strip()
        return (as_text, as_text)
    if isinstance(password, list):
        return (str(password[0]).strip(), str(password[1]).strip())
    if isinstance(password, dict):
        return (str(password.get('owner', 'password')).strip(),
                str(password.get('user', 'password')).strip())
    raise DAError("get_passwords: invalid password")
def pdf_encrypt(filename, password):
    """Encrypt a PDF in place with the given password specification.

    *password* may be a scalar, an [owner, user] list, or a dict (see
    get_passwords()).  Printing remains allowed in the encrypted file.

    :raises DAError: if pdftk fails.
    """
    #logmessage("pdf_encrypt: running; password is " + repr(password))
    (owner_password, user_password) = get_passwords(password)
    outfile = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".pdf", delete=False)
    # Use the configurable PDFTK_PATH (see set_pdftk_path()), consistent
    # with the rest of this module, instead of assuming 'pdftk' is on PATH.
    if owner_password == user_password:
        commands = [PDFTK_PATH, filename, 'output', outfile.name, 'user_pw', user_password, 'allow', 'printing']
    else:
        commands = [PDFTK_PATH, filename, 'output', outfile.name, 'owner_pw', owner_password, 'user_pw', user_password, 'allow', 'printing']
    try:
        output = subprocess.check_output(commands, stderr=subprocess.STDOUT).decode()
    except subprocess.CalledProcessError as err:
        # err.output is bytes; decode it so the message concatenation below
        # does not itself raise a TypeError.
        output = err.output.decode()
        raise DAError("pdf_encrypt: error running pdftk. " + output)
    #logmessage(' '.join(commands))
    #logmessage(output)
    shutil.move(outfile.name, filename)
class DAPdfFileWriter(pypdf.PdfFileWriter):
    """PdfFileWriter subclass with helpers for field lookup and bookmarks."""
    def DAGetFields(self, tree=None, results=None):
        """Return a mapping of field names to field objects in this writer.

        Walks the AcroForm field tree (creating an empty /AcroForm if one
        does not exist) and collects every field keyed by its /TM mapping
        name, or its /T partial name when no /TM is present.
        """
        if results is None:
            results = {}
        if tree is None:
            if '/AcroForm' not in self._root_object:
                self._root_object.update({pypdf.generic.NameObject('/AcroForm'): pypdf.generic.DictionaryObject()})
            tree = self._root_object['/AcroForm']
        if isinstance(tree, pypdf.generic.IndirectObject):
            the_tree = tree.getObject()
        else:
            the_tree = tree
        self.DABuildField(tree, results=results)
        if "/Fields" in the_tree:
            fields = the_tree["/Fields"]
            for f in fields:
                self.DABuildField(f, results)
        return results
    def DABuildField(self, f, results):
        """Record field *f* in *results* under its /TM (or /T) name.

        Recurses into the field's /Kids first; fields with neither /TM
        nor /T are skipped.
        """
        if isinstance(f, pypdf.generic.IndirectObject):
            field = f.getObject()
        else:
            field = f
        self.DACheckKids(field, results=results)
        try:
            key = field["/TM"]
        except KeyError:
            try:
                key = field["/T"]
            except KeyError:
                return
        results[key] = f
    def DACheckKids(self, tree, results):
        """Recurse into each child of *tree*'s /Kids, if any."""
        if "/Kids" in tree:
            for kid in tree["/Kids"]:
                self.DAGetFields(tree=kid, results=results)
    def addBookmark(self, title, pagenum, parent=None, color=None, bold=False, italic=False, fit='/Fit', *args):
        """
        Add a bookmark to this PDF file.
        :param str title: Title to use for this bookmark.
        :param int pagenum: Page number this bookmark will point to.
        :param parent: A reference to a parent bookmark to create nested
            bookmarks.
        :param tuple color: Color of the bookmark as a red, green, blue tuple
            from 0.0 to 1.0
        :param bool bold: Bookmark is bold
        :param bool italic: Bookmark is italic
        :param str fit: The fit of the destination page. See
            :meth:`addLink()<addLink>` for details.
        """
        # Build the /GoTo action pointing at the destination page; the
        # extra *args are the fit-specific coordinates/zoom.
        action = pypdf.generic.DictionaryObject()
        zoomArgs = []
        for a in args:
            if a is not None and a.__class__.__name__ != 'NullObject':
                zoomArgs.append(pypdf.generic.NumberObject(a))
            else:
                zoomArgs.append(pypdf.generic.NullObject())
        # A random name keeps destination names unique within the file.
        dest = pypdf.generic.Destination(pypdf.generic.NameObject("/"+str(uuid.uuid4())), pagenum, pypdf.generic.NameObject(fit), *zoomArgs)
        destArray = dest.getDestArray()
        action.update({
            pypdf.generic.NameObject('/D') : destArray,
            pypdf.generic.NameObject('/S') : pypdf.generic.NameObject('/GoTo')
        })
        actionRef = self._addObject(action)
        outlineRef = self.getOutlineRoot()
        if parent is None:
            parent = outlineRef
        bookmark = pypdf.generic.TreeObject()
        bookmark.update({
            pypdf.generic.NameObject('/A'): actionRef,
            pypdf.generic.NameObject('/Title'): pypdf.generic.createStringObject(title),
        })
        if color is not None:
            bookmark.update({pypdf.generic.NameObject('/C'): pypdf.generic.ArrayObject([pypdf.generic.FloatObject(c) for c in color])})
        # /F flags: bit 1 = italic, bit 2 = bold.
        the_format = 0
        if italic:
            the_format += 1
        if bold:
            the_format += 2
        if the_format:
            bookmark.update({pypdf.generic.NameObject('/F'): pypdf.generic.NumberObject(the_format)})
        bookmarkRef = self._addObject(bookmark)
        parent = parent.getObject()
        parent.addChild(bookmarkRef, self)
        return bookmarkRef
def remove_nonprintable(text):
    """Return *text* with every character not in string.printable removed."""
    # str.join over a generator is O(n); the original built the result
    # with repeated += concatenation, which is quadratic.
    return ''.join(char for char in text if char in string.printable)
def remove_nonprintable_bytes(byte_list):
    """Return a UTF-8 bytearray of *byte_list* with non-printable characters removed.

    Accepts either a str (delegated to remove_nonprintable()) or an
    iterable of ints such as bytes/bytearray.
    """
    if isinstance(byte_list, str):
        return bytearray(remove_nonprintable(byte_list), 'utf-8')
    # join instead of the original quadratic += concatenation.
    kept = ''.join(chr(the_int) for the_int in byte_list
                   if chr(the_int) in string.printable)
    return bytearray(kept, 'utf-8')
def remove_nonprintable_bytes_limited(byte_list):
    """Decode a PDF text string to str, dropping a UTF-16BE BOM and NUL bytes.

    PDF text strings are often UTF-16BE: stripping the leading
    0xFE 0xFF byte-order mark and every zero byte leaves a latin-1
    payload, which is then decoded to str.
    """
    if len(byte_list) >= 2 and byte_list[0] == 0xFE and byte_list[1] == 0xFF:
        byte_list = byte_list[2:]
    payload = bytes(b for b in byte_list if b > 0)
    return codecs.decode(payload, 'latin1')
def remove_nonprintable_limited(text):
    """Strip a leading UTF-16BE BOM marker and embedded NULs from *text* (a str).

    Bug fix: the original passed the resulting str to
    codecs.decode(..., 'latin1'), which raises TypeError in Python 3
    ("decoding str is not supported").  After the substitutions the text
    is already a str, so it is returned directly.
    """
    text = re.sub(r'^\xfe\xff', '', text)
    return re.sub(r'\x00', '', text)
def replicate_js_and_calculations(template_filename, original_filename, password):
    """Copy JavaScript and field-calculation order from a template into a filled PDF.

    pdftk's fill_form drops the template's /CO (calculation order) entry
    and document-level JavaScript; this re-reads them from the template
    and writes them into *original_filename* in place.  If *password* is
    given, the result is encrypted as well.

    :param template_filename: path of the original PDF template
    :param original_filename: path of the filled-in PDF to fix up in place
    :param password: optional password spec (see get_passwords()), or None
    """
    #logmessage("replicate_js_and_calculations where template_filename is " + template_filename + " and original_filename is " + original_filename + " and password is " + repr(password))
    template = safe_pypdf_reader(template_filename)
    # Collect the names of fields listed in the template's calculation order.
    co_field_names = []
    if '/AcroForm' in template.trailer['/Root']:
        #logmessage("Found AcroForm")
        acroform = template.trailer['/Root']['/AcroForm'].getObject()
        if '/CO' in acroform:
            #logmessage("Found CO in AcroForm")
            for f in acroform['/CO']:
                field = f.getObject()
                if '/TM' in field:
                    name = field['/TM']
                elif '/T' in field:
                    name = field['/T']
                else:
                    continue
                #logmessage("Found CO name " + str(name))
                co_field_names.append(name)
    # Collect document-level JavaScript from the /Names name tree; the
    # /Names array alternates name, object, name, object, ...
    js_to_write = []
    if '/Names' in template.trailer['/Root'] and '/JavaScript' in template.trailer['/Root']['/Names']:
        #logmessage("Found name in root and javascript in names")
        js_names = template.trailer['/Root']['/Names']['/JavaScript'].getObject()
        if '/Names' in js_names:
            #logmessage("Found names in javascript")
            js_list = js_names['/Names']
            while len(js_list) > 0:
                name = js_list.pop(0)
                obj = js_list.pop(0)
                js_obj = obj.getObject()
                if '/S' in js_obj and js_obj['/S'] == '/JavaScript' and '/JS' in js_obj:
                    if isinstance(js_obj['/JS'], (pypdf.generic.ByteStringObject, pypdf.generic.TextStringObject)):
                        js_to_write.append((name, remove_nonprintable_bytes(js_obj['/JS'])))
                    elif isinstance(js_obj['/JS'], (pypdf.generic.EncodedStreamObject, pypdf.generic.DecodedStreamObject)):
                        js_to_write.append((name, remove_nonprintable_bytes(js_obj['/JS'].getData())))
    if len(js_to_write) == 0 and len(co_field_names) == 0:
        # Nothing to replicate; optionally encrypt and stop.
        #logmessage("Nothing to do here")
        if password:
            pdf_encrypt(original_filename, password)
        return
    original = safe_pypdf_reader(original_filename)
    #logmessage("Opening " + original_filename)
    writer = DAPdfFileWriter()
    writer.cloneReaderDocumentRoot(original)
    if len(co_field_names) > 0:
        # Rebuild /AcroForm//CO pointing at this writer's field objects.
        #logmessage("Cloning CO")
        fields = writer.DAGetFields()
        co = []
        for field_name in co_field_names:
            if field_name in fields:
                co.append(fields[field_name])
        #writer._root_object['/AcroForm'][pypdf.generic.NameObject("/CO")] = pypdf.generic.ArrayObject(co)
        if '/AcroForm' not in writer._root_object:
            writer._root_object.update({pypdf.generic.NameObject('/AcroForm'): pypdf.generic.DictionaryObject()})
        writer._root_object['/AcroForm'].update({
            pypdf.generic.NameObject("/CO"): pypdf.generic.ArrayObject(co)
        })
    if len(js_to_write) > 0:
        # Recreate the /Names//JavaScript name tree with /GoTo-style
        # /JavaScript actions holding each script as a stream object.
        #logmessage("Cloning JS")
        name_array = []
        for js_string_name, js_text in js_to_write:
            js_object = pypdf.generic.DecodedStreamObject()
            js_object.setData(js_text)
            js = pypdf.generic.DictionaryObject()
            js.update({
                pypdf.generic.NameObject("/Type"): pypdf.generic.NameObject("/Action"),
                pypdf.generic.NameObject("/S"): pypdf.generic.NameObject("/JavaScript"),
                pypdf.generic.NameObject("/JS"): js_object
            })
            js_indirect_object = writer._addObject(js)
            name_array.append(pypdf.generic.createStringObject(js_string_name))
            name_array.append(js_indirect_object)
        js_name_tree = pypdf.generic.DictionaryObject()
        js_name_tree.update({
            pypdf.generic.NameObject("/JavaScript"): pypdf.generic.DictionaryObject({
                pypdf.generic.NameObject("/Names"): pypdf.generic.ArrayObject(name_array)
            })
        })
        writer._addObject(js_name_tree)
        writer._root_object.update({
            pypdf.generic.NameObject("/Names"): js_name_tree
        })
    if password is not None:
        (owner_password, user_password) = get_passwords(password)
        if owner_password == user_password:
            #logmessage("Password for encryption is " + str(user_password))
            writer.encrypt(str(user_password))
        else:
            #logmessage("Passwords for encryption are " + str(user_password) + " and " + str(owner_password))
            writer.encrypt(str(user_password), owner_pwd=str(owner_password))
    # Write to a temporary file, then atomically replace the original.
    outfile = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
    with open(outfile.name, 'wb') as fp:
        writer.write(fp)
    shutil.move(outfile.name, original_filename)
def flatten_pdf(filename):
    """Flatten the form fields of a PDF in place using pdftk.

    :raises DAError: if pdftk fails or times out.
    """
    flattened = tempfile.NamedTemporaryFile(prefix="datemp", suffix=".pdf", delete=False)
    pdftk_args = [PDFTK_PATH, filename, 'output', flattened.name, 'flatten']
    #logmessage("Arguments are " + str(pdftk_args))
    try:
        returncode = subprocess.run(pdftk_args, timeout=60, check=False).returncode
    except subprocess.TimeoutExpired:
        logmessage("flatten_pdf: call to pdftk took too long")
        returncode = 1
    if returncode:
        logmessage("Failed to flatten PDF form")
        raise DAError("Call to pdftk failed for template where arguments were " + " ".join(pdftk_args))
    shutil.move(flattened.name, filename)
def overlay_pdf_multi(main_file, logo_file, out_file):
    """Stamp each page of *logo_file* onto the corresponding page of *main_file*.

    Uses pdftk's multistamp operation; the combined file is written to
    *out_file*.

    :raises DAError: if pdftk fails or times out.
    """
    pdftk_args = [PDFTK_PATH, main_file, 'multistamp', logo_file, 'output', out_file]
    try:
        returncode = subprocess.run(pdftk_args, timeout=60, check=False).returncode
    except subprocess.TimeoutExpired:
        logmessage("overlay_pdf_multi: call to pdftk took too long")
        returncode = 1
    if returncode:
        logmessage("Failed to overlay PDF")
        raise DAError("Call to pdftk failed for overlay where arguments were " + " ".join(pdftk_args))
def overlay_pdf(main_file, logo_file, out_file, first_page=None, last_page=None, logo_page=None, only=None):
    """Stamp one page of *logo_file* onto a range of pages of *main_file*.

    first_page/last_page bound the stamped range (1-based, clamped to
    the document); logo_page selects which page of the logo PDF to use;
    only='even'/'odd' restricts stamping to even/odd page numbers.  The
    combined document is written to *out_file*.
    """
    main_pdf = safe_pypdf_reader(main_file)
    logo_pdf = safe_pypdf_reader(logo_file)
    output_pdf = pypdf.PdfFileWriter()
    total_pages = main_pdf.getNumPages()
    # Clamp the requested range to [1, total_pages].
    if first_page is None or first_page < 1:
        first_page = 1
    first_page = min(first_page, total_pages)
    if last_page is None or last_page < 1:
        last_page = total_pages
    last_page = max(last_page, first_page)
    if logo_page is None or logo_page < 1:
        logo_page = 1
    logo_page = min(logo_page, logo_pdf.getNumPages())
    stamp = logo_pdf.getPage(logo_page - 1)
    for index in range(first_page - 1, last_page):
        page_number = index + 1  # 1-based page number
        if only == 'even' and page_number % 2 != 0:
            continue
        if only == 'odd' and page_number % 2 == 0:
            continue
        main_pdf.getPage(index).mergePage(stamp)
    # Emit every page of the (now partially stamped) main document.
    for index in range(total_pages):
        output_pdf.addPage(main_pdf.getPage(index))
    with open(out_file, 'wb') as fp:
        output_pdf.write(fp)
def apply_qpdf(filename):
    """Repair a PDF in place with qpdf if PyPDF2 cannot read it.

    If the file already parses, it is left untouched.  Otherwise qpdf
    rewrites it to a temporary file, which (after a successful re-parse)
    replaces the original.

    :raises DAError: if the file cannot be repaired.
    """
    try:
        # Close the handle once validation succeeds; the reader is discarded.
        with open(filename, 'rb') as fp:
            pypdf.PdfFileReader(fp, overwriteWarnings=False)
        return
    except pypdf.utils.PdfReadError:
        pass
    try:
        new_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
        qpdf_subprocess_arguments = [QPDF_PATH, filename, new_file.name]
        try:
            result = subprocess.run(qpdf_subprocess_arguments, timeout=60, check=False).returncode
        except subprocess.TimeoutExpired:
            result = 1
            logmessage("apply_qpdf: call to qpdf took too long")
        if result != 0:
            logmessage("Failed to convert PDF template " + str(filename))
            logmessage("Call to qpdf failed for template " + str(filename) + " where arguments were " + " ".join(qpdf_subprocess_arguments))
            raise Exception("qpdf error")
        # Confirm the repaired copy actually parses before adopting it.
        with open(new_file.name, 'rb') as fp:
            pypdf.PdfFileReader(fp, overwriteWarnings=False)
    except Exception as err:
        # Chain the cause instead of the original bare `except:` which
        # silently discarded it.
        raise DAError("Could not fix PDF") from err
    shutil.copyfile(new_file.name, filename)
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
from django.contrib.admin.models import CHANGE, LogEntry
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import clear_url_caches, reverse, resolve
from django.test.utils import override_settings
from django.utils import six
from django.utils.timezone import now
from cms.api import create_page, create_title
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.appresolver import applications_page_check, clear_app_resolvers, get_app_patterns
from cms.cms_toolbars import PlaceholderToolbar
from cms.models import Title, Page
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import CMSTestCase, ClearURLs
from cms.tests.test_menu_utils import DumbPageLanguageUrl
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from cms.utils.urlutils import admin_reverse
from menus.utils import DefaultLanguageChanger
APP_NAME = 'SampleApp'
NS_APP_NAME = 'NamespacedApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_apps"
class ApphooksTestCase(ClearURLs, CMSTestCase):
    def setUp(self):
        """Reset URL resolvers, caches, and apphooks so each test starts clean."""
        clear_app_resolvers()
        clear_url_caches()
        # Drop the sample apphook module so it gets re-imported fresh.
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
        self.apphook_clear()
    def tearDown(self):
        """Undo apphook/URLconf state so later tests are unaffected."""
        clear_app_resolvers()
        clear_url_caches()
        # Drop the sample apphook module so the next import starts fresh.
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
        self.apphook_clear()
def reload_urls(self):
from django.conf import settings
url_modules = [
'cms.urls',
# TODO: Add here intermediary modules which may
# include() the 'cms.urls' if it isn't included
# directly in the root urlconf.
# '...',
'cms.test_utils.project.second_cms_urls_for_apphook_tests',
'cms.test_utils.project.urls_for_apphook_tests',
settings.ROOT_URLCONF,
]
clear_app_resolvers()
clear_url_caches()
for module in url_modules:
if module in sys.modules:
del sys.modules[module]
    def _fake_logentry(self, instance_id, user, text, model=Page):
        """Create an admin CHANGE LogEntry for *instance_id* and record it in the session.

        Mimics what the admin does after an edit: writes the log entry,
        then stores its pk under 'cms_log_latest' in the test client's
        session.
        """
        LogEntry.objects.log_action(
            user_id=user.id,
            content_type_id=ContentType.objects.get_for_model(model).pk,
            object_id=instance_id,
            object_repr=text,
            action_flag=CHANGE,
        )
        # Fetch the entry just created and expose it via the session.
        entry = LogEntry.objects.filter(user=user, action_flag__in=(CHANGE,))[0]
        session = self.client.session
        session['cms_log_latest'] = entry.pk
        session.save()
    def create_base_structure(self, apphook, title_langs, namespace=None):
        """Build home -> child -> apphooked grandchild pages in 'en' and 'de'.

        The grandchild page carries *apphook* (optionally under
        *namespace*).  Returns the grandchild's public Title for
        *title_langs* (a single Title when a string is given, otherwise
        a list of Titles in the same order as the language codes).
        """
        self.apphook_clear()
        superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        self.superuser = superuser
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser, published=True)
        create_title('de', page.get_title(), page)
        page.publish('de')
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, published=True, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_page.publish('de')
        child_child_page = create_page("child_child_page", "nav_playground.html",
                                       "en", created_by=superuser, published=True, parent=child_page, apphook=apphook,
                                       apphook_namespace=namespace)
        create_title("de", child_child_page.get_title(), child_child_page)
        child_child_page.publish('de')
        # publisher_public is set to draft on publish, issue with onetoone reverse
        child_child_page = self.reload(child_child_page)
        if isinstance(title_langs, six.string_types):
            titles = child_child_page.publisher_public.get_title_obj(title_langs)
        else:
            titles = [child_child_page.publisher_public.get_title_obj(l) for l in title_langs]
        self.reload_urls()
        return titles
@override_settings(CMS_APPHOOKS=['%s.%s' % (APP_MODULE, APP_NAME)])
def test_explicit_apphooks(self):
"""
Test explicit apphook loading with the CMS_APPHOOKS setting.
"""
self.apphook_clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 1)
self.assertEqual(app_names, [APP_NAME])
self.apphook_clear()
@override_settings(
INSTALLED_APPS=['cms.test_utils.project.sampleapp'],
ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests',
)
def test_implicit_apphooks(self):
"""
Test implicit apphook loading with INSTALLED_APPS cms_apps.py
"""
self.apphook_clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 6)
self.assertIn(NS_APP_NAME, app_names)
self.assertIn(APP_NAME, app_names)
self.apphook_clear()
    def test_apphook_on_root(self):
        """An apphook on the root page serves the app; pages without one are unaffected."""
        self.apphook_clear()
        superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("apphooked-page", "nav_playground.html", "en",
                           created_by=superuser, published=True, apphook="SampleApp")
        blank_page = create_page("not-apphooked-page", "nav_playground.html", "en",
                                 created_by=superuser, published=True, apphook="", slug='blankapp')
        english_title = page.title_set.all()[0]
        self.assertEqual(english_title.language, 'en')
        create_title("de", "aphooked-page-de", page)
        self.assertTrue(page.publish('en'))
        self.assertTrue(page.publish('de'))
        self.assertTrue(blank_page.publish('en'))
        with force_language("en"):
            # Root URL renders the sample app's template, not the page's.
            response = self.client.get(self.get_pages_root())
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, '<--noplaceholder-->')
            # The non-apphooked sibling still renders its CMS template.
            response = self.client.get('/en/blankapp/')
            self.assertTemplateUsed(response, 'nav_playground.html')
        self.apphook_clear()
@override_settings(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests')
def test_apphook_on_root_reverse(self):
self.apphook_clear()
superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
page = create_page("apphooked-page", "nav_playground.html", "en",
created_by=superuser, published=True, apphook="SampleApp")
create_title("de", "aphooked-page-de", page)
self.assertTrue(page.publish('de'))
self.assertTrue(page.publish('en'))
self.reload_urls()
self.assertFalse(reverse('sample-settings').startswith('//'))
self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_get_page_for_apphook(self):
        """applications_page_check() resolves an apphook URL to its page per language."""
        en_title, de_title = self.create_base_structure(APP_NAME, ['en', 'de'])
        with force_language("en"):
            path = reverse('sample-settings')
        request = self.get_request(path)
        request.LANGUAGE_CODE = 'en'
        attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
        self.assertEqual(attached_to_page.pk, en_title.page.pk)
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'sampleapp/home.html')
        self.assertContains(response, en_title.title)
        # Repeat for German: the same view name must resolve to the
        # German title's page.
        with force_language("de"):
            path = reverse('sample-settings')
        request = self.get_request(path)
        request.LANGUAGE_CODE = 'de'
        attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash and language prefix
        self.assertEqual(attached_to_page.pk, de_title.page.pk)
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'sampleapp/home.html')
        self.assertContains(response, de_title.title)
        self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_apphook_permissions(self):
        """Enabling login_required on the apphooked page turns a 200 response
        for an anonymous client into a 302 redirect (to the login view).
        """
        en_title, de_title = self.create_base_structure(APP_NAME, ['en', 'de'])
        with force_language("en"):
            path = reverse('sample-settings')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            # Flip the flag on the public page and republish so the change is live.
            page = en_title.page.publisher_public
            page.login_required = True
            page.save()
            page.publish('en')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 302)
        self.apphook_clear()
@override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
def test_apphook_permissions_preserves_view_name(self):
self.create_base_structure(APP_NAME, ['en', 'de'])
view_names = (
('sample-settings', 'sample_view'),
('sample-class-view', 'ClassView'),
('sample-class-based-view', 'ClassBasedView'),
)
with force_language("en"):
for url_name, view_name in view_names:
path = reverse(url_name)
match = resolve(path)
self.assertEqual(match.func.__name__, view_name)
    def test_apphooks_with_excluded_permissions(self):
        """Urls under the apphook's 'excluded' namespace stay reachable (200)
        after the page becomes login_required, while the 'not_excluded' ones
        redirect anonymous clients (302).
        """
        # NOTE(review): unlike the sibling tests this one has no
        # @override_settings(ROOT_URLCONF=...) decorator — it relies on the
        # urlconf state left by create_base_structure(); confirm intentional.
        en_title = self.create_base_structure('SampleAppWithExcludedPermissions', 'en')
        with force_language("en"):
            excluded_path = reverse('excluded:example')
            not_excluded_path = reverse('not_excluded:example')
            page = en_title.page.publisher_public
            page.login_required = True
            page.save()
            page.publish('en')
            excluded_response = self.client.get(excluded_path)
            not_excluded_response = self.client.get(not_excluded_path)
            self.assertEqual(excluded_response.status_code, 200)
            self.assertEqual(not_excluded_response.status_code, 302)
        self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.urls_3')
    def test_get_page_for_apphook_on_preview_or_edit(self):
        """In edit mode an apphook url must resolve to the public page, and an
        anonymous-ineligible edit request carries a '?redirect=' marker.
        """
        # Some custom user models use the email address as username.
        if get_user_model().USERNAME_FIELD == 'email':
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin@admin.com')
        else:
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser, published=True, apphook=APP_NAME)
        create_title('de', page.get_title(), page)
        page.publish('en')
        page.publish('de')
        page.save()
        public_page = page.get_public_object()
        with self.login_user_context(superuser):
            with force_language("en"):
                path = reverse('sample-settings')
                request = self.get_request(path + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
                request.LANGUAGE_CODE = 'en'
                attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
                # NOTE(review): attached_to_page is unused in this branch — only
                # the redirect marker is asserted; the 'de' branch below does
                # assert the resolved page pk. Consider asserting here as well.
                response = self.client.get(path+"?edit")
                self.assertContains(response, '?redirect=')
            with force_language("de"):
                path = reverse('sample-settings')
                request = self.get_request(path + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
                request.LANGUAGE_CODE = 'de'
                attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
                self.assertEqual(attached_to_page.pk, public_page.pk)
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_get_root_page_for_apphook_with_instance_namespace(self):
        """The application namespace and the instance namespace must reverse to
        the same root url, and that url resolves back to the hooked page.
        """
        en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
        self.reload_urls()
        with force_language("en"):
            # These reverses only assert that the namespaces are registered;
            # a failure raises NoReverseMatch.
            reverse("example_app:example")
            reverse("example1:example")
            reverse("example2:example")
            path = reverse('namespaced_app_ns:sample-root')
            path_instance = reverse('instance_ns:sample-root')
            self.assertEqual(path, path_instance)
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
        self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_get_child_page_for_apphook_with_instance_namespace(self):
        """A child url reverses identically via the application namespace, the
        instance namespace, and current_app resolution, and resolves back to
        the hooked page.
        """
        en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
        with force_language("en"):
            path = reverse('namespaced_app_ns:sample-settings')
            path_instance1 = reverse('instance_ns:sample-settings')
            path_instance2 = reverse('namespaced_app_ns:sample-settings', current_app='instance_ns')
            self.assertEqual(path, path_instance1)
            self.assertEqual(path, path_instance2)
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page_id)
        self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_get_sub_page_for_apphook_with_implicit_current_app(self):
        """With the instance namespace equal to the application namespace, the
        current-app view resolves to the hooked page and renders that
        namespace in its output.
        """
        en_title = self.create_base_structure(NS_APP_NAME, 'en', 'namespaced_app_ns')
        with force_language("en"):
            path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/app.html')
            self.assertContains(response, 'namespaced_app_ns')
            self.assertContains(response, path)
        self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_default_language_changer_with_implicit_current_app(self):
        """DefaultLanguageChanger maps an apphook url onto its counterpart in
        another language, including the translated url slug.
        """
        self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'namespaced_app_ns')
        self.reload_urls()
        with force_language("en"):
            path = reverse('namespaced_app_ns:translated-url')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            url = DefaultLanguageChanger(request)('en')
            self.assertEqual(url, path)
            url = DefaultLanguageChanger(request)('de')
            # path[3:] drops the '/en' prefix; the German url uses the
            # translated slug 'Seite' in place of 'page'.
            self.assertEqual(url, '/de%s' % path[3:].replace('/page', '/Seite'))
        self.apphook_clear()
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_get_i18n_apphook_with_explicit_current_app(self):
        """Two instances of the same namespaced apphook must both stay
        reversible in every language after a full urlconf reload.

        All the reverse() calls below only assert that no NoReverseMatch is
        raised.
        """
        titles = self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'instance_1')
        public_de_title = titles[1]
        de_title = Title.objects.get(page=public_de_title.page.publisher_draft, language="de")
        de_title.slug = "de"
        de_title.save()
        de_title.page.publish('de')
        page2 = create_page("page2", "nav_playground.html",
                            "en", created_by=self.superuser, published=True, parent=de_title.page.parent,
                            apphook=NS_APP_NAME,
                            apphook_namespace="instance_2")
        create_title("de", "de_title", page2, slug="slug")
        page2.publish('de')
        # Force a clean re-import of the app's urlconf so both apphook
        # instances are picked up by the resolver.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
        with force_language("de"):
            reverse('namespaced_app_ns:current-app', current_app="instance_1")
            reverse('namespaced_app_ns:current-app', current_app="instance_2")
            reverse('namespaced_app_ns:current-app')
        with force_language("en"):
            reverse('namespaced_app_ns:current-app', current_app="instance_1")
            reverse('namespaced_app_ns:current-app', current_app="instance_2")
            reverse('namespaced_app_ns:current-app')
@override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
def test_apphook_include_extra_parameters(self):
self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'instance_1')
with force_language("en"):
path = reverse('namespaced_app_ns:extra_second')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, 'someopts')
    @override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
    def test_get_sub_page_for_apphook_with_explicit_current_app(self):
        """With an explicit instance namespace ('instance_ns'), the current-app
        view resolves to the hooked page and renders that instance namespace.
        """
        en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
        with force_language("en"):
            path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/app.html')
            self.assertContains(response, 'instance_ns')
            self.assertContains(response, path)
        self.apphook_clear()
@override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
def test_include_urlconf(self):
self.create_base_structure(APP_NAME, 'en')
path = reverse('extra_second')
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test included urlconf")
path = reverse('extra_first')
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test urlconf")
with force_language("de"):
path = reverse('extra_first')
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test urlconf")
with force_language("de"):
path = reverse('extra_second')
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test included urlconf")
self.apphook_clear()
    @override_settings(CMS_PERMISSION=False, ROOT_URLCONF='cms.test_utils.project.urls_2')
    def test_apphook_breaking_under_home_with_new_path_caching(self):
        """Regression: a descendant whose slug contains the home page's slug
        must not corrupt the cached path of an apphooked page below it.
        """
        home = create_page("home", "nav_playground.html", "en", published=True)
        child = create_page("child", "nav_playground.html", "en", published=True, parent=home)
        # not-home is what breaks stuff, because it contains the slug of the home page
        not_home = create_page("not-home", "nav_playground.html", "en", published=True, parent=child)
        create_page("subchild", "nav_playground.html", "en", published=True, parent=not_home, apphook='SampleApp')
        with force_language("en"):
            self.reload_urls()
            urlpatterns = get_app_patterns()
            resolver = urlpatterns[0]
            url = resolver.reverse('sample-root')
            # The home slug must be stripped; all other ancestors remain.
            self.assertEqual(url, 'child/not-home/subchild/')
@override_settings(ROOT_URLCONF='cms.test_utils.project.urls')
def test_apphook_urlpattern_order(self):
# this one includes the actual cms.urls, so it can be tested if
# they are loaded in the correct order (the cms page pattern must be last)
# (the other testcases replicate the inclusion code and thus don't test this)
self.create_base_structure(APP_NAME, 'en')
path = reverse('extra_second')
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/extra.html')
self.assertContains(response, "test included urlconf")
@override_settings(ROOT_URLCONF='cms.test_utils.project.urls')
def test_apphooks_receive_url_params(self):
# make sure that urlparams actually reach the apphook views
self.create_base_structure(APP_NAME, 'en')
path = reverse('sample-params', kwargs=dict(my_params='is-my-param-really-in-the-context-QUESTIONMARK'))
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, 'my_params: is-my-param-really-in-the-context-QUESTIONMARK')
@override_settings(ROOT_URLCONF='cms.test_utils.project.third_urls_for_apphook_tests')
def test_multiple_apphooks(self):
# test for #1538
self.apphook_clear()
superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
create_page("home", "nav_playground.html", "en", created_by=superuser, published=True, )
create_page("apphook1-page", "nav_playground.html", "en",
created_by=superuser, published=True, apphook="SampleApp")
create_page("apphook2-page", "nav_playground.html", "en",
created_by=superuser, published=True, apphook="SampleApp2")
reverse('sample-root')
reverse('sample2-root')
self.apphook_clear()
    def test_apphook_pool_register_returns_apphook(self):
        """apphook_pool.register must return the class it was given (so it is
        usable as a decorator), on both the normal and the short-circuit path.
        """
        @apphook_pool.register
        class TestApp(CMSApp):
            name = "Test App"
        self.assertIsNotNone(TestApp)
        # Now test the quick return codepath, when apphooks is not empty
        apphook_pool.apphooks.append("foo")
        @apphook_pool.register
        class TestApp2(CMSApp):
            name = "Test App 2"
        self.assertIsNotNone(TestApp2)
@override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
def test_toolbar_current_app_namespace(self):
self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
with force_language("en"):
path = reverse('namespaced_app_ns:sample-settings')
request = self.get_request(path)
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].is_current_app)
self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].is_current_app)
# Testing a decorated view
with force_language("en"):
path = reverse('namespaced_app_ns:sample-exempt')
request = self.get_request(path)
toolbar = CMSToolbar(request)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].app_path,
'cms.test_utils.project.sampleapp')
self.assertTrue(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].is_current_app)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].app_path,
'cms.test_utils.project.sampleapp')
self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].is_current_app)
@override_settings(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests')
def test_toolbar_current_app_apphook_with_implicit_current_app(self):
self.create_base_structure(NS_APP_NAME, 'en', 'namespaced_app_ns')
with force_language("en"):
path = reverse('namespaced_app_ns:current-app')
request = self.get_request(path)
toolbar = CMSToolbar(request)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].app_path,
'cms.test_utils.project.sampleapp')
self.assertTrue(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].is_current_app)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].app_path,
'cms.test_utils.project.sampleapp')
self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].is_current_app)
@override_settings(ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls')
def test_toolbar_no_namespace(self):
# Test with a basic application with no defined app_name and no namespace
self.create_base_structure(APP_NAME, 'en')
path = reverse('detail', kwargs={'id': 20})
request = self.get_request(path)
toolbar = CMSToolbar(request)
self.assertFalse(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].is_current_app)
self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].is_current_app)
self.assertTrue(toolbar.toolbars['cms.test_utils.project.placeholderapp.cms_toolbars.Example1Toolbar'].is_current_app)
@override_settings(ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls')
def test_toolbar_multiple_supported_apps(self):
# Test with a basic application with no defined app_name and no namespace
self.create_base_structure(APP_NAME, 'en')
path = reverse('detail', kwargs={'id': 20})
request = self.get_request(path)
toolbar = CMSToolbar(request)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].app_path,
'cms.test_utils.project.placeholderapp')
self.assertFalse(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbars.CategoryToolbar'].is_current_app)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].app_path,
'cms.test_utils.project.placeholderapp')
self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyTitleExtensionToolbar'].is_current_app)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyPageExtensionToolbar'].app_path,
'cms.test_utils.project.placeholderapp')
self.assertTrue(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbars.MyPageExtensionToolbar'].is_current_app)
self.assertEqual(toolbar.toolbars['cms.test_utils.project.placeholderapp.cms_toolbars.Example1Toolbar'].app_path,
'cms.test_utils.project.placeholderapp')
self.assertTrue(toolbar.toolbars['cms.test_utils.project.placeholderapp.cms_toolbars.Example1Toolbar'].is_current_app)
    @override_settings(
        CMS_APPHOOKS=['cms.test_utils.project.placeholderapp.cms_apps.Example1App'],
        ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls',
    )
    def atest_toolbar_staff(self):
        # Test that the toolbar contains edit mode switcher if placeholders are available
        # NOTE(review): the 'a' prefix keeps this test from being collected by
        # the test runner (apparently disabled on purpose — confirm). Also the
        # inner self.settings() below duplicates the @override_settings
        # decorator above.
        apphooks = (
            'cms.test_utils.project.placeholderapp.cms_apps.Example1App',
        )
        with self.settings(CMS_APPHOOKS=apphooks, ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls'):
            self.create_base_structure('Example1App', 'en')
            ex1 = Example1.objects.create(char_1='1', char_2='2', char_3='3', char_4='4', date_field=now())
            path = reverse('example_detail', kwargs={'pk': ex1.pk})
            # Superuser: the structure-mode switcher is shown (one right item).
            self.user = self._create_user('admin_staff', True, True)
            with self.login_user_context(self.user):
                response = self.client.get(path+"?edit")
                toolbar = CMSToolbar(response.context['request'])
                toolbar.populate()
                placeholder_toolbar = PlaceholderToolbar(response.context['request'], toolbar, True, path)
                placeholder_toolbar.populate()
                placeholder_toolbar.init_placeholders_from_request()
                placeholder_toolbar.add_structure_mode()
                self.assertEqual(len(placeholder_toolbar.toolbar.get_right_items()), 1)
            # Staff user without model permissions: no switcher.
            self.user = self._create_user('staff', True, False)
            with self.login_user_context(self.user):
                response = self.client.get(path+"?edit")
                # Re-fetch the user so permission caches are not stale.
                response.context['request'].user = get_user_model().objects.get(pk=self.user.pk)
                toolbar = CMSToolbar(response.context['request'])
                toolbar.populate()
                placeholder_toolbar = PlaceholderToolbar(response.context['request'], toolbar, True, path)
                placeholder_toolbar.populate()
                placeholder_toolbar.init_placeholders_from_request()
                placeholder_toolbar.add_structure_mode()
                self.assertEqual(len(placeholder_toolbar.toolbar.get_right_items()), 0)
            # change_example1 alone is still not enough for the switcher.
            self.user.user_permissions.add(Permission.objects.get(codename='change_example1'))
            with self.login_user_context(self.user):
                response = self.client.get(path+"?edit")
                response.context['request'].user = get_user_model().objects.get(pk=self.user.pk)
                toolbar = CMSToolbar(response.context['request'])
                toolbar.populate()
                placeholder_toolbar = PlaceholderToolbar(response.context['request'], toolbar, True, path)
                placeholder_toolbar.populate()
                placeholder_toolbar.init_placeholders_from_request()
                placeholder_toolbar.add_structure_mode()
                self.assertEqual(len(placeholder_toolbar.toolbar.get_right_items()), 0)
                # Adding use_structure finally enables the switcher.
                permission = Permission.objects.get(codename='use_structure')
                self.user.user_permissions.add(permission)
                response.context['request'].user = get_user_model().objects.get(pk=self.user.pk)
                placeholder_toolbar = PlaceholderToolbar(response.context['request'], toolbar, True, path)
                placeholder_toolbar.populate()
                placeholder_toolbar.init_placeholders_from_request()
                placeholder_toolbar.add_structure_mode()
                self.assertEqual(len(placeholder_toolbar.toolbar.get_right_items()), 1)
        self.user = None
    def test_page_edit_redirect_models(self):
        """The cms_page_resolve admin endpoint returns the absolute url of a
        non-page (apphooked model) object, given its pk and model label.
        """
        apphooks = (
            'cms.test_utils.project.placeholderapp.cms_apps.Example1App',
        )
        ex1 = Example1.objects.create(char_1="char_1", char_2="char_2",
                                      char_3="char_3", char_4="char_4")
        with self.settings(CMS_APPHOOKS=apphooks, ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls'):
            self.create_base_structure('Example1App', 'en')
            url = admin_reverse('cms_page_resolve')
            self.user = self._create_user('admin_staff', True, True)
            with self.login_user_context(self.user):
                # parameters - non page object
                response = self.client.post(url, {'pk': ex1.pk, 'model': 'placeholderapp.example1'})
                self.assertEqual(response.content.decode('utf-8'), ex1.get_absolute_url())
    def test_nested_apphooks_urls(self):
        # A child apphook page mounted under a parent apphook page: each url
        # must be answered by its own app's view, not swallowed by the parent.
        with self.settings(ROOT_URLCONF='cms.test_utils.project.urls'):
            self.apphook_clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            create_page("home", "nav_playground.html", "en", created_by=superuser, published=True, )
            parent_page = create_page("parent-apphook-page", "nav_playground.html", "en",
                                      created_by=superuser, published=True, apphook="ParentApp")
            create_page("child-apphook-page", "nav_playground.html", "en", parent=parent_page,
                        created_by=superuser, published=True, apphook="ChildApp")
            parent_app_path = reverse('parentapp_view', kwargs={'path': 'parent/path/'})
            child_app_path = reverse('childapp_view', kwargs={'path': 'child-path/'})
            # Ensure the page structure is ok before getting responses
            self.assertEqual(parent_app_path, '/en/parent-apphook-page/parent/path/')
            self.assertEqual(child_app_path, '/en/parent-apphook-page/child-apphook-page/child-path/')
            # Get responses for both paths and ensure that the right view will answer
            response = self.client.get(parent_app_path)
            self.assertContains(response, 'parent app content', status_code=200)
            response = self.client.get(child_app_path)
            self.assertContains(response, 'child app content', status_code=200)
            self.apphook_clear()
class ApphooksPageLanguageUrlTestCase(ClearURLs, CMSTestCase):
    """Tests the page-language-url template tag behaviour on apphooked pages."""
    def setUp(self):
        # Start each test from a clean resolver/url-cache state and force the
        # sample app module to be re-imported on next use.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
    def tearDown(self):
        # Mirror image of setUp, plus clearing the apphook pool so state does
        # not leak into other test cases.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.apphook_clear()
    def reload_urls(self):
        # Drop the cached url modules so the next resolve re-imports them
        # with the current settings.
        from django.conf import settings
        url_modules = [
            'cms.urls',
            'cms.test_utils.project.second_cms_urls_for_apphook_tests',
            settings.ROOT_URLCONF,
        ]
        clear_app_resolvers()
        clear_url_caches()
        for module in url_modules:
            if module in sys.modules:
                del sys.modules[module]
    def test_page_language_url_for_apphook(self):
        """For a url below an apphook, switching language keeps the app-url
        tail and swaps only the translated page slugs.
        """
        self.apphook_clear()
        superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser)
        create_title('de', page.get_title(), page)
        page.publish('en')
        page.publish('de')
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_page.publish('en')
        child_page.publish('de')
        child_child_page = create_page("child_child_page", "nav_playground.html",
                                       "en", created_by=superuser, parent=child_page, apphook='SampleApp')
        create_title("de", '%s_de' % child_child_page.get_title(), child_child_page)
        child_child_page.publish('en')
        child_child_page.publish('de')
        # publisher_public is set to draft on publish, issue with one to one reverse
        child_child_page = self.reload(child_child_page)
        with force_language("en"):
            path = reverse('extra_first')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            request.current_page = child_child_page
            fake_context = {'request': request}
            tag = DumbPageLanguageUrl()
            output = tag.get_context(fake_context, 'en')
            url = output['content']
            self.assertEqual(url, '/en/child_page/child_child_page/extra_1/')
            output = tag.get_context(fake_context, 'de')
            url = output['content']
            # note the extra "_de" from the translated title slug
            self.assertEqual(url, '/de/child_page/child_child_page_de/extra_1/')
            # 'fr' has no translation, so the untranslated slug is kept
            output = tag.get_context(fake_context, 'fr')
            url = output['content']
            self.assertEqual(url, '/fr/child_page/child_child_page/extra_1/')
        self.apphook_clear()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training as train
class MemoryOptimizerSwapTest(test.TestCase):
  """Tests the Grappler memory optimizer."""
  def testNoSwapping(self):
    """Make sure the graph is preserved when there is nothing to swap."""
    a = variables.VariableV1(10, name='a')
    b = variables.VariableV1(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    nodes = [node.name for node in mg.graph_def.node]
    # MANUAL memory optimization with no '_swap_to_host' annotations present
    # must leave the graph untouched (pruning/folding disabled so nothing
    # else rewrites the graph either).
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
    self.assertEqual(len(graph.node), graph_size)
    self.assertItemsEqual([node.name for node in graph.node], nodes)
  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = variables.VariableV1(10, name='a')
    b = variables.VariableV1(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    # Annotate input 0 of 'd' for host swapping (private API: sets the node
    # attr the MANUAL memory optimizer looks for).
    d.op._set_attr('_swap_to_host', attr_value_pb2.AttrValue(i=0))
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL,
        min_graph_nodes=-1)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
    # Exactly one swap-out/swap-in pair should have been inserted.
    self.assertEqual(len(graph.node), graph_size + 2)
    self.assertTrue(
        set([node.name for node in graph.node]) > set(
            ['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0']))
    # Verify the rewiring: b/read -> swap_out -> swap_in -> d's input 0,
    # with a control dependency keeping the swap-in ordered after b/read.
    for node in graph.node:
      if node.name == 'swap_in_d_0':
        self.assertEqual('swap_out_d_0', node.input[0])
        self.assertEqual('^b/read', node.input[1])
      elif node.name == 'swap_out_d_0':
        self.assertEqual('b/read', node.input[0])
      elif node.name == 'd':
        self.assertEqual('swap_in_d_0', node.input[0])
        self.assertEqual('c', node.input[1])
class MemoryOptimizerRecomputeTest(test.TestCase):
"""Tests the Python interface to recomputation rewrites.
See core/grappler/optimizers/memory_optimizer_test.cc for functional tests.
"""
  def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
    """A simple layered graph with conv, an intermediate op, and a ReLU.

    Returns a tuple (metagraph, init_op_name, train_op_name, loss_name) for
    use with _RunMetaGraphWithConfig.
    """
    graph = ops.Graph()
    with graph.as_default():
      # Fixed seed so the loss value is reproducible across runs.
      random_seed.set_random_seed(1)
      current_activation = variable_scope.get_variable(
          name='start', shape=[batch_size, image_dim, image_dim, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(10):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          # Cheap elementwise op between conv and relu — a candidate for
          # recomputation by the memory optimizer.
          current_activation = 2. * after_conv
          current_activation = nn.relu(current_activation)
      loss = math_ops.reduce_mean(current_activation)
      # Building the optimizer under a caller-chosen name scope lets tests
      # exercise non-default gradient name prefixes.
      with ops.name_scope(optimizer_scope_name):
        optimizer = train.AdamOptimizer(0.001)
        train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
      metagraph = train.export_meta_graph()
    return (metagraph, init_op.name, train_op.name, loss.name)
  def testRewritingDefaultGradientNames(self):
    """Tests that rewriting occurs with default gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph()
    # All other optimizers are switched off so only the recomputation
    # heuristics can change the graph.
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            min_graph_nodes=-1,
            memory_optimization=rewriter_config_pb2.RewriterConfig.
            RECOMPUTATION_HEURISTICS), original_metagraph)
    # Recomputation adds nodes; the original must have none of the
    # 'Recomputed/' copies while the rewritten graph has two per layer.
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20, # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))
  def testRewritingNameScopedGradientNames(self):
    """Tests that rewriting occurs with non-standard gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph(
        optimizer_scope_name='optimizer')
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            min_graph_nodes=-1,
            memory_optimization=rewriter_config_pb2.RewriterConfig.
            RECOMPUTATION_HEURISTICS,
            # Checks that name scope "gradients/" also match sub-scope.
            memory_optimizer_target_node_name_scope='gradients/'),
        original_metagraph)
    # Same expectations as the default-name test: recomputation nodes are
    # added only to the rewritten graph, two per layer.
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20, # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))
def testRewritingNameScopedGradientNamesScope(self):
"""Tests that rewriting occurs with non-standard gradient names."""
(original_metagraph, _, _,
_) = self._GetMetaGraph(optimizer_scope_name='foo/bar')
rewritten_graph_def = tf_optimizer.OptimizeGraph(
rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
memory_optimization=rewriter_config_pb2.RewriterConfig.
RECOMPUTATION_HEURISTICS,
# This should not match anything.
memory_optimizer_target_node_name_scope='r/gradients/'),
original_metagraph)
self.assertEqual(
len(rewritten_graph_def.node), len(original_metagraph.graph_def.node))
self.assertEqual(0,
len([
node for node in original_metagraph.graph_def.node
if 'Recomputed/' in node.name
]))
self.assertEqual(0,
len([
node for node in rewritten_graph_def.node
if 'Recomputed/' in node.name
]))
def _GetMemoryOptimizerSessionConfig(self):
rewrite_options = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
return config_pb2.ConfigProto(graph_options=graph_options)
def _RunMetaGraphWithConfig(
self, config, metagraph, init_op_name, train_op_name, loss_op_name):
graph = ops.Graph()
with graph.as_default():
train.import_meta_graph(metagraph)
init_op = graph.get_operation_by_name(init_op_name)
train_op = graph.get_operation_by_name(train_op_name)
loss_op = graph.get_tensor_by_name(loss_op_name)
with session.Session(config=config, graph=graph) as sess:
sess.run(init_op)
sess.run(train_op)
sess.run(train_op)
return sess.run(loss_op)
def testRecomputationRewritingNoErrors(self):
"""Tests that graph output is not significantly different with rewriting."""
(original_metagraph, init_op_name, train_op_name, loss_op_name
) = self._GetMetaGraph()
original_loss = self._RunMetaGraphWithConfig(
config=config_pb2.ConfigProto(),
metagraph=original_metagraph,
init_op_name=init_op_name,
train_op_name=train_op_name,
loss_op_name=loss_op_name)
memory_optimized_loss = self._RunMetaGraphWithConfig(
config=self._GetMemoryOptimizerSessionConfig(),
metagraph=original_metagraph,
init_op_name=init_op_name,
train_op_name=train_op_name,
loss_op_name=loss_op_name)
self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-2)
  def _annotated_graph(self):
    """Builds a small conv net whose ops carry manual `_recompute_hint` attrs.

    Returns:
      A (graph, init_op, train_op) tuple. Ops annotated with
      `_recompute_hint=1` are recomputation candidates for the MANUAL memory
      optimizer; a hint of 0 explicitly opts an op out.
    """
    graph = ops.Graph()
    with graph.as_default():
      # Fixed seed so variable initialization (and thus the loss) is
      # deterministic across runs.
      random_seed.set_random_seed(2)
      current_activation = variable_scope.get_variable(
          name='start', shape=[1, 2, 2, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(3):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          current_activation = 2. * after_conv
          # _set_attr is a private Operation API; recompute hints are plumbed
          # through raw node attributes.
          current_activation.op._set_attr(
              '_recompute_hint',
              # The value of the attribute does not matter; just that the key
              # exists in the op's attributes.
              attr_value_pb2.AttrValue(i=1))
          current_activation += 5.
          current_activation.op._set_attr(
              '_recompute_hint', attr_value_pb2.AttrValue(i=0))
          current_activation = nn.relu(current_activation)
          current_activation.op._set_attr(
              '_recompute_hint', attr_value_pb2.AttrValue(i=1))
      loss = math_ops.reduce_mean(current_activation)
      optimizer = train.AdamOptimizer(0.001)
      train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
    return graph, init_op, train_op
def testHintNoMetaGraph(self):
# Closer to expected usage, but does not check that a re-write actually
# happens; see testHintDoesRewrite.
graph, init_op, train_op = self._annotated_graph()
with graph.as_default():
manual_memory_config = rewriter_config_pb2.RewriterConfig(
memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
graph_options = config_pb2.GraphOptions(
rewrite_options=manual_memory_config)
session_config = config_pb2.ConfigProto(graph_options=graph_options)
with session.Session(config=session_config) as sess:
sess.run(init_op)
sess.run(train_op)
def testHintDoesRewrite(self):
graph = self._annotated_graph()[0]
with graph.as_default():
metagraph = train.export_meta_graph()
self.assertEqual(
0,
len([node for node in metagraph.graph_def.node
if 'Recomputed/' in node.name]))
rewritten_graph_def = tf_optimizer.OptimizeGraph(
rewriter_config_pb2.RewriterConfig(
min_graph_nodes=-1,
memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL),
metagraph)
self.assertEqual(
9,
len([node for node in rewritten_graph_def.node
if 'Recomputed/' in node.name]))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
import pytest
import re
from spacy.lang.en import English
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span
pattern1 = [{"ORTH": "A"}, {"ORTH": "A", "OP": "*"}]
pattern2 = [{"ORTH": "A", "OP": "*"}, {"ORTH": "A"}]
pattern3 = [{"ORTH": "A"}, {"ORTH": "A"}]
pattern4 = [{"ORTH": "B"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
pattern5 = [{"ORTH": "B", "OP": "*"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
re_pattern1 = "AA*"
re_pattern2 = "A*A"
re_pattern3 = "AA"
re_pattern4 = "BA*B"
re_pattern5 = "B*A*B"
longest1 = "A A A A A"
longest2 = "A A A A A"
longest3 = "A A"
longest4 = "B A A A A A B" # "FIRST" would be "B B"
longest5 = "B B A A A A A B"
@pytest.fixture
def text():
    # Raw character string the docs are built from; each character becomes
    # one token (see the `doc` fixture below).
    return "(BBAAAAAB)."
@pytest.fixture
def doc(en_tokenizer, text):
    # Space-join the characters so the tokenizer yields one token per char.
    doc = en_tokenizer(" ".join(text))
    return doc
@pytest.mark.parametrize(
    "pattern,re_pattern",
    [
        (pattern1, re_pattern1),
        (pattern2, re_pattern2),
        (pattern3, re_pattern3),
        (pattern4, re_pattern4),
        (pattern5, re_pattern5),
    ],
)
def test_greedy_matching_first(doc, text, pattern, re_pattern):
    """Test that the greedy matching behavior "FIRST" is consistent with
    other re implementations."""
    matcher = Matcher(doc.vocab)
    matcher.add(re_pattern, [pattern], greedy="FIRST")
    re_spans = [match.span() for match in re.finditer(re_pattern, text)]
    for token_match, (re_start, re_end) in zip(matcher(doc), re_spans):
        _, start, end = token_match
        # Compare the matched strings, not the exact positions.
        assert doc[start:end].text == doc[re_start:re_end].text
@pytest.mark.parametrize(
    "pattern,longest",
    [
        (pattern1, longest1),
        (pattern2, longest2),
        (pattern3, longest3),
        (pattern4, longest4),
        (pattern5, longest5),
    ],
)
def test_greedy_matching_longest(doc, text, pattern, longest):
    """Test the "LONGEST" greedy matching behavior"""
    matcher = Matcher(doc.vocab)
    matcher.add("RULE", [pattern], greedy="LONGEST")
    # Every reported span must be the expected longest match.
    for _, start, end in matcher(doc):
        assert doc[start:end].text == longest
def test_greedy_matching_longest_first(en_tokenizer):
    """Test that "LONGEST" matching prefers the first of two equally long matches"""
    doc = en_tokenizer(" ".join("CCC"))
    matcher = Matcher(doc.vocab)
    matcher.add("RULE", [[{"ORTH": "C"}, {"ORTH": "C"}]], greedy="LONGEST")
    matches = matcher(doc)
    # Candidates 0-2 and 1-3 tie on length; the earlier one must win.
    assert len(matches) == 1
    _, start, end = matches[0]
    assert (start, end) == (0, 2)
def test_invalid_greediness(doc, text):
    """An unknown greediness value must be rejected at add() time."""
    matcher = Matcher(doc.vocab)
    # Only "FIRST" and "LONGEST" are valid greedy values.
    with pytest.raises(ValueError):
        matcher.add("RULE", [pattern1], greedy="GREEDY")
@pytest.mark.parametrize(
    "pattern,re_pattern",
    [
        (pattern1, re_pattern1),
        (pattern2, re_pattern2),
        (pattern3, re_pattern3),
        (pattern4, re_pattern4),
        (pattern5, re_pattern5),
    ],
)
def test_match_consuming(doc, text, pattern, re_pattern):
    """Test that matcher.__call__ consumes tokens on a match similar to
    re.findall."""
    matcher = Matcher(doc.vocab)
    matcher.add(re_pattern, [pattern], greedy="FIRST")
    # The number of token matches must mirror the number of regex matches.
    re_match_count = sum(1 for _ in re.finditer(re_pattern, text))
    assert len(matcher(doc)) == re_match_count
def test_operator_combos(en_vocab):
    """Exercise '+' operator combinations against short character strings."""
    cases = [
        ("aaab", "a a a b", True),
        ("aaab", "a+ b", True),
        ("aaab", "a+ a+ b", True),
        ("aaab", "a+ a+ a b", True),
        ("aaab", "a+ a+ a+ b", True),
        ("aaab", "a+ a a b", True),
        ("aaab", "a+ a a", True),
        ("aaab", "a+", True),
        ("aaa", "a+ b", False),
        ("aaa", "a+ a+ b", False),
        ("aaa", "a+ a+ a+ b", False),
        ("aaa", "a+ a b", False),
        ("aaa", "a+ a a b", False),
        ("aaab", "a+ a a", True),
        ("aaab", "a+", True),
        ("aaab", "a+ a b", True),
    ]
    for string, pattern_str, should_match in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        # "x+" becomes {"ORTH": "x", "OP": "+"}; a bare "x" becomes {"ORTH": "x"}.
        pattern = [
            {"ORTH": part[0], "OP": "+"} if part.endswith("+") else {"ORTH": part}
            for part in pattern_str.split()
        ]
        matcher.add("PATTERN", [pattern])
        matches = matcher(doc)
        if should_match:
            assert matches, (string, pattern_str)
        else:
            assert not matches, (string, pattern_str)
def test_matcher_end_zero_plus(en_vocab):
    """Test matcher works when patterns end with * operator. (issue 1450)"""
    matcher = Matcher(en_vocab)
    matcher.add("TSTEND", [[{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]])

    def match_count(string):
        # Build a doc from whitespace-separated words and count matches.
        return len(matcher(Doc(matcher.vocab, words=string.split())))

    assert match_count("a") == 1
    assert match_count("a b") == 2
    assert match_count("a c") == 1
    assert match_count("a b c") == 2
    assert match_count("a b b c") == 3
    assert match_count("a b b") == 3
def test_matcher_sets_return_correct_tokens(en_vocab):
    """Each IN-set pattern should yield exactly the token it names."""
    matcher = Matcher(en_vocab)
    patterns = [[{"LOWER": {"IN": [word]}}] for word in ("zero", "one", "two")]
    matcher.add("TEST", patterns)
    doc = Doc(en_vocab, words="zero one two three".split())
    texts = [Span(doc, start, end, label=label).text
             for label, start, end in matcher(doc)]
    assert texts == ["zero", "one", "two"]
def test_matcher_remove():
    """A rule can be removed once; removing it twice raises ValueError."""
    nlp = English()
    matcher = Matcher(nlp.vocab)
    text = "This is a test case."
    pattern = [{"ORTH": "test"}, {"OP": "?"}]
    assert len(matcher) == 0
    matcher.add("Rule", [pattern])
    assert "Rule" in matcher
    # The rule should produce two matches.
    assert len(matcher(nlp(text))) == 2
    # After removal, the rule no longer matches anything.
    matcher.remove("Rule")
    assert len(matcher(nlp(text))) == 0
    # Removing again should throw an error.
    with pytest.raises(ValueError):
        matcher.remove("Rule")
| |
# -*- coding: utf-8 -*-
"""
nexmo sms gateway backend. (https://www.nexmo.com/)
Author: Alican Toprak (a.toprak@northernbitcoin.com)
~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from django.conf import settings
import requests
from .base import BaseSmsBackend
logger = logging.getLogger("nexmo")

# json.JSONDecodeError only exists on Python 3.5+; on older versions the
# json module raises plain ValueError (its base class) instead.
try:
    from json import JSONDecodeError
except ImportError:
    JSONDecodeError = ValueError
class Error(Exception):
    """Base class for all Nexmo backend errors."""

    pass


class ClientError(Error):
    """Errors caused by the request itself (bad params, credentials, ...)."""

    pass


class ServerError(Error):
    """Errors raised by the Nexmo platform itself."""

    pass


class AuthenticationError(ClientError):
    """Invalid or disabled api_key / api_secret."""

    pass
NEXMO_API_URL = "https://rest.nexmo.com/sms/json"
# NOTE(review): the setting names below are reused from a Twilio-style
# backend (ACCOUNT_SID / AUTH_TOKEN) but actually hold the Nexmo
# api_key / api_secret here - confirm this is intentional before renaming.
NEXMO_API_KEY = getattr(settings, "SENDSMS_ACCOUNT_SID", "")
NEXMO_API_SECRET = getattr(settings, "SENDSMS_AUTH_TOKEN", "")

# Nexmo SMS API status code -> [short title, human-readable explanation].
# Status 0 (success) is intentionally absent; see SmsBackend.parse().
nexmo_error_codes = {
    1: [
        "Throttled",
        "You have exceeded the submission capacity allowed on this account. Please wait and retry.",
    ],
    2: [
        "Missing params",
        "Your request is incomplete and missing some mandatory parameters.",
    ],
    3: ["Invalid params", "The value of one or more parameters is invalid."],
    4: [
        "Invalid credentials",
        "The api_key / api_secret you supplied is either invalid or disabled.",
    ],
    5: [
        "Internal error",
        "There was an error processing your request in the Platform.",
    ],
    6: [
        "Invalid message",
        "The Platform was unable to process your request. For example, due to an unrecognised prefix for the phone number.",
    ],
    7: [
        "Number barred",
        "The number you are trying to submit to is blacklisted and may not receive messages.",
    ],
    8: [
        "Partner account barred",
        "The api_key you supplied is for an account that has been barred from submitting messages.",
    ],
    9: [
        "Partner quota exceeded",
        "Your pre-paid account does not have sufficient credit to process this message.",
    ],
    11: [
        "Account not enabled for REST",
        "This account is not provisioned for REST submission, you should use SMPP instead.",
    ],
    12: [
        "Message too long",
        "The length of udh and body was greater than 140 octets for a binary type SMS request.",
    ],
    13: [
        "Communication Failed",
        "Message was not submitted because there was a communication failure.",
    ],
    14: [
        "Invalid Signature",
        "Message was not submitted due to a verification failure in the submitted signature.",
    ],
    15: [
        "Illegal Sender Address - rejected",
        "Due to local regulations, the SenderID you set in from in the request was not accepted. Please check the Global messaging section.",
    ],
    16: ["Invalid TTL", "The value of ttl in your request was invalid."],
    19: [
        "Facility not allowed",
        "Your request makes use of a facility that is not enabled on your account.",
    ],
    20: [
        "Invalid Message class",
        "The value of message-class in your request was out of range. See https://en.wikipedia.org/wiki/Data_Coding_Scheme.",
    ],
    23: [
        "Bad callback :: Missing Protocol",
        "You did not include https in the URL you set in callback.",
    ],
    29: [
        "Non White-listed Destination",
        "The phone number you set in to is not in your pre-approved destination list. To send messages to this phone number, add it using Dashboard.",
    ],
    34: [
        "Invalid or Missing Msisdn Param",
        "The phone number you supplied in the to parameter of your request was either missing or invalid.",
    ],
}
class SmsBackend(BaseSmsBackend):
    """SMS backend that submits messages through the Nexmo REST API."""

    def get_api_key(self):
        """Return the configured Nexmo api_key."""
        return NEXMO_API_KEY

    def get_api_secret(self):
        """Return the configured Nexmo api_secret."""
        return NEXMO_API_SECRET

    def _parse_response(self, response):
        """
        Parse a raw ``key=value`` response body into a python
        dictionary object.

        :param str response: http response body
        :returns: response dict
        :rtype: dict
        """
        response_dict = {}
        for line in response.splitlines():
            # Bug fix: split the current line, not the whole response, so
            # each key/value pair is parsed instead of the first pair being
            # repeated for every line.
            key, value = line.split("=", 1)
            response_dict[key] = value
        return response_dict

    def parse(self, host, response):
        """
        Interpret a Nexmo API response.

        :param str host: API endpoint the request was sent to (unused).
        :param response: ``requests.Response`` of the submission request.
        :returns: (success, response) tuple.
        :raises Error: on a non-200 response when not failing silently.
        :raises ClientError: on a Nexmo error status when not failing silently.
        """
        if not response.status_code == 200:
            if self.fail_silently:
                logger.warning("Error: %s %r", response.status_code, response.content)
                # Bug fix: always return a (success, response) pair so that
                # callers (see _send/send_messages) can unpack consistently.
                return False, response
            # Bug fix: format the message; Exception("fmt", a, b) does not
            # interpolate %-style arguments.
            raise Error("Error: %s %r" % (response.status_code, response.content))
        status_code = int(response.json().get("messages")[0].get("status"))
        if status_code == 0:
            return True, response
        # Robustness: unknown status codes no longer crash on error_type[0].
        error_type = nexmo_error_codes.get(
            status_code, ["Unknown error", "Unrecognised Nexmo status code."])
        if self.fail_silently:
            logger.warning("Error: %s %r", response.status_code, response.content)
            # Bug fix: return the response object, not the `requests` module.
            return False, response
        raise ClientError(
            "Error Code {status_code}: {text}: {meaning}".format(
                status_code=status_code, text=error_type[0], meaning=error_type[1]
            )
        )

    def _send(self, message):
        """
        A helper method that does the actual sending.

        :param SmsMessage message: SmsMessage class instance.
        :returns: (success, response) tuple
        :rtype: tuple
        """
        params = {
            "from": message.from_phone,
            "to": ",".join(message.to),
            "text": message.body,
            "api_key": self.get_api_key(),
            "api_secret": self.get_api_secret(),
        }
        # Bug fix: removed a stray print(params) that leaked the api_secret
        # to stdout; the payload is only emitted at DEBUG level.
        logger.debug("POST to %r with body: %r", NEXMO_API_URL, params)
        return self.parse(NEXMO_API_URL, requests.post(NEXMO_API_URL, data=params))

    def send_messages(self, messages):
        """
        Send messages.

        :param list messages: List of SmsMessage instances.
        :returns: number of messages sent successfully.
        :rtype: int
        """
        counter = 0
        for message in messages:
            sent, _ = self._send(message)
            if sent:
                counter += 1
        return counter
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, no-member, len-as-condition
import re
from functools import partial
from rebulk.pattern import FunctionalPattern, StringPattern, RePattern
from ..rebulk import Rebulk
from ..validators import chars_surround
def test_chain_close():
    """close() must return the owning Rebulk and register one pattern."""
    rebulk = Rebulk()
    assert rebulk.chain().close() == rebulk
    assert len(rebulk.effective_patterns()) == 1
def test_build_chain():
    """Each chain part records its pattern type and repeater bounds."""
    rebulk = Rebulk()

    def digit(input_string):
        i = input_string.find("1849")
        if i > -1:
            return i, i + len("1849")

    ret = rebulk.chain() \
        .functional(digit) \
        .string("test").repeater(2) \
        .string("x").repeater('{1,3}') \
        .string("optional").repeater('?') \
        .regex("f?x").repeater('+') \
        .close()
    assert ret == rebulk
    assert len(rebulk.effective_patterns()) == 1
    chain = rebulk.effective_patterns()[0]
    # (pattern type, repeater_start, repeater_end) for each declared part.
    expected_parts = [
        (FunctionalPattern, 1, 1),
        (StringPattern, 2, 2),
        (StringPattern, 1, 3),
        (StringPattern, 0, 1),
        (RePattern, 1, None),
    ]
    assert len(chain.parts) == len(expected_parts)
    for part, (pattern_type, start, end) in zip(chain.parts, expected_parts):
        assert isinstance(part.pattern, pattern_type)
        assert part.repeater_start == start
        assert part.repeater_end == end
def test_chain_defaults():
rebulk = Rebulk()
rebulk.defaults(validator=lambda x: x.value.startswith('t'), ignore_names=['testIgnore'], children=True)
rebulk.chain() \
.regex("(?P<test>test)") \
.regex(" ").repeater("*") \
.regex("(?P<best>best)") \
.regex(" ").repeater("*") \
.regex("(?P<testIgnore>testIgnore)")
matches = rebulk.matches("test best testIgnore")
assert len(matches) == 1
assert matches[0].name == "test"
def test_chain_with_validators():
def chain_validator(match):
return match.value.startswith('t') and match.value.endswith('t')
def default_validator(match):
return match.value.startswith('t') and match.value.endswith('g')
def custom_validator(match):
return match.value.startswith('b') and match.value.endswith('t')
rebulk = Rebulk()
rebulk.defaults(children=True, validator=default_validator)
rebulk.chain(validate_all=True, validator={'__parent__': chain_validator}) \
.regex("(?P<test>testing)", validator=default_validator).repeater("+") \
.regex(" ").repeater("+") \
.regex("(?P<best>best)", validator=custom_validator).repeater("+")
matches = rebulk.matches("some testing best end")
assert len(matches) == 2
assert matches[0].name == "test"
assert matches[1].name == "best"
def test_matches_docs():
    """Matches-to-dict example used in the documentation."""
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE) \
        .defaults(children=True, formatter={'episode': int, 'version': int})
    rebulk.chain() \
        .regex(r'e(?P<episode>\d{1,4})').repeater(1) \
        .regex(r'v(?P<version>\d+)').repeater('?') \
        .regex(r'[ex-](?P<episode>\d{1,4})').repeater('*') \
        .close()  # .repeater(1) could be omitted as it's the default behavior
    result = rebulk.matches("This is E14v2-15-16-17").to_dict()  # converts matches to dict
    assert 'episode' in result
    assert result['episode'] == [14, 15, 16, 17]
    assert 'version' in result
    assert result['version'] == 2
def test_matches():
    """Chains with hidden parts expose only the functional/named children."""
    rebulk = Rebulk()

    def digit(input_string):
        i = input_string.find("1849")
        if i > -1:
            return i, i + len("1849")

    chain = rebulk.chain() \
        .functional(digit) \
        .string("test").hidden().repeater(2) \
        .string("x").hidden().repeater('{1,3}') \
        .string("optional").hidden().repeater('?') \
        .regex("f.?x", name='result').repeater('+') \
        .close()

    matches = chain.matches("1849testtestxxfixfux_foxabc1849testtestxoptionalfoxabc")
    assert len(matches) == 2
    first, second = matches[0].children, matches[1].children
    assert (first[0].value, first[1].value, first[2].value) == ('1849', 'fix', 'fux')
    assert (second[0].value, second[1].value) == ('1849', 'fox')

    # None of these strings satisfies the full chain.
    for rejected in ("_1850testtestxoptionalfoxabc",
                     "_1849testtesttesttestxoptionalfoxabc",
                     "_1849testtestxxxxoptionalfoxabc",
                     "_1849testtestoptionalfoxabc",
                     "_1849testtestxoptionalabc"):
        assert len(chain.matches(rejected)) == 0

    matches = chain.matches("_1849testtestxoptionalfaxabc")
    assert len(matches) == 1
    children = matches[0].children
    assert (children[0].value, children[1].value) == ('1849', 'fax')
def test_matches_2():
    """The episode/version chain yields matches in order of appearance."""
    rebulk = Rebulk() \
        .regex_defaults(flags=re.IGNORECASE) \
        .defaults(children=True, formatter={'episode': int, 'version': int}) \
        .chain() \
        .regex(r'e(?P<episode>\d{1,4})') \
        .regex(r'v(?P<version>\d+)').repeater('?') \
        .regex(r'[ex-](?P<episode>\d{1,4})').repeater('*') \
        .close()
    matches = rebulk.matches("This is E14v2-15E16x17")
    expected = [('episode', 14), ('version', 2), ('episode', 15),
                ('episode', 16), ('episode', 17)]
    assert len(matches) == len(expected)
    for match, (name, value) in zip(matches, expected):
        assert match.name == name
        assert match.value == value
def test_matches_3():
    """Season/episode chains with a conflict solver and private separators."""
    alt_dash = (r'@', r'[\W_]') # abbreviation
    match_names = ['season', 'episode']
    other_names = ['screen_size', 'video_codec', 'audio_codec', 'audio_channels', 'container', 'date']
    rebulk = Rebulk()
    # Season/episode matches win conflicts against the technical properties
    # listed in other_names; everything else uses the default resolution.
    rebulk.defaults(formatter={'season': int, 'episode': int},
                    tags=['SxxExx'],
                    abbreviations=[alt_dash],
                    private_names=['episodeSeparator', 'seasonSeparator'],
                    children=True,
                    private_parent=True,
                    conflict_solver=lambda match, other: match
                    if match.name in match_names and other.name in other_names
                    else '__default__')
    # Three alternative chains: NxN, SxxExx, and an Sxx season-list form.
    rebulk.chain() \
        .defaults(children=True, private_parent=True) \
        .regex(r'(?P<season>\d+)@?x@?(?P<episode>\d+)') \
        .regex(r'(?P<episodeSeparator>x|-|\+|&)(?P<episode>\d+)').repeater('*') \
        .close() \
        .chain() \
        .defaults(children=True, private_parent=True) \
        .regex(r'S(?P<season>\d+)@?(?:xE|Ex|E|x)@?(?P<episode>\d+)') \
        .regex(r'(?:(?P<episodeSeparator>xE|Ex|E|x|-|\+|&)(?P<episode>\d+))').repeater('*') \
        .close() \
        .chain() \
        .defaults(children=True, private_parent=True) \
        .regex(r'S(?P<season>\d+)') \
        .regex(r'(?P<seasonSeparator>S|-|\+|&)(?P<season>\d+)').repeater('*')
    matches = rebulk.matches("test-01x02-03")
    assert len(matches) == 3
    assert matches[0].name == 'season'
    assert matches[0].value == 1
    assert matches[1].name == 'episode'
    assert matches[1].value == 2
    assert matches[2].name == 'episode'
    assert matches[2].value == 3
    matches = rebulk.matches("test-S01E02-03")
    assert len(matches) == 3
    assert matches[0].name == 'season'
    assert matches[0].value == 1
    assert matches[1].name == 'episode'
    assert matches[1].value == 2
    assert matches[2].name == 'episode'
    assert matches[2].value == 3
    matches = rebulk.matches("test-S01-02-03-04")
    assert len(matches) == 4
    assert matches[0].name == 'season'
    assert matches[0].value == 1
    assert matches[1].name == 'season'
    assert matches[1].value == 2
    assert matches[2].name == 'season'
    assert matches[2].value == 3
    assert matches[3].name == 'season'
    assert matches[3].value == 4
def test_matches_4():
    """Parent validation with seps_surround; values formatted to int."""
    seps_surround = partial(chars_surround, " ")
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(validate_all=True, children=True)
    rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], private_parent=True)
    rebulk.chain(validator={'__parent__': seps_surround}, formatter={'episode': int, 'version': int}) \
        .defaults(formatter={'episode': int, 'version': int}) \
        .regex(r'e(?P<episode>\d{1,4})') \
        .regex(r'v(?P<version>\d+)').repeater('?') \
        .regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('*')
    matches = rebulk.matches("Some Series E01E02E03")
    assert [match.value for match in matches] == [1, 2, 3]
def test_matches_5():
    """A {2,3} repeater bounds how many extra episodes may chain."""
    seps_surround = partial(chars_surround, " ")
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.chain(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True,
                 validator={'__parent__': seps_surround}, children=True, private_parent=True,
                 formatter={'episode': int, 'version': int}) \
        .defaults(children=True, private_parent=True) \
        .regex(r'e(?P<episode>\d{1,4})') \
        .regex(r'v(?P<version>\d+)').repeater('?') \
        .regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('{2,3}')
    for input_string, expected_count in (
            ("Some Series E01E02E03", 3),
            ("Some Series E01E02", 0),
            # Parent can't be validated, so no results at all.
            ("Some Series E01E02E03E04E05E06", 0)):
        assert len(rebulk.matches(input_string)) == expected_count
def test_matches_6():
    """Without a parent validator the six-episode form still yields matches."""
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True,
                    validator=None, children=True, private_parent=True)
    rebulk.chain(formatter={'episode': int, 'version': int}) \
        .defaults(children=True, private_parent=True) \
        .regex(r'e(?P<episode>\d{1,4})') \
        .regex(r'v(?P<version>\d+)').repeater('?') \
        .regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('{2,3}')
    for input_string, expected_count in (
            ("Some Series E01E02E03", 3),
            ("Some Series E01E02", 0),
            # No validator on parent, so it should give 4 episodes.
            ("Some Series E01E02E03E04E05E06", 4)):
        assert len(rebulk.matches(input_string)) == expected_count
def test_matches_7():
    """Season-list chaining across ' .-/' separators.

    NOTE(review): the `matches[i].value = n` lines below are plain
    assignments, not assertions - they look like they were meant to be
    `assert matches[i].value == n`. This chain declares no int formatter,
    so the actual match values are unverified here; confirm the expected
    values before converting these to asserts.
    """
    seps_surround = partial(chars_surround, ' .-/')
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(children=True, private_parent=True)
    rebulk.chain(). \
        regex(r'S(?P<season>\d+)', validate_all=True, validator={'__parent__': seps_surround}). \
        regex(r'[ -](?P<season>\d+)', validator=seps_surround).repeater('*')
    matches = rebulk.matches("Some S01")
    assert len(matches) == 1
    matches[0].value = 1
    matches = rebulk.matches("Some S01-02")
    assert len(matches) == 2
    matches[0].value = 1
    matches[1].value = 2
    matches = rebulk.matches("programs4/Some S01-02")
    assert len(matches) == 2
    matches[0].value = 1
    matches[1].value = 2
    matches = rebulk.matches("programs4/SomeS01middle.S02-03.andS04here")
    assert len(matches) == 2
    matches[0].value = 2
    matches[1].value = 3
    matches = rebulk.matches("Some 02.and.S04-05.here")
    assert len(matches) == 2
    matches[0].value = 4
    matches[1].value = 5
def test_chain_breaker():
    """chain_breaker stops chaining once consecutive seasons jump by > 10."""

    def chain_breaker(matches):
        # Break the chain when the last two season numbers differ by > 10.
        seasons = matches.named('season')
        if len(seasons) > 1:
            if seasons[-1].value - seasons[-2].value > 10:
                return True
        return False

    seps_surround = partial(chars_surround, ' .-/')
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(children=True, private_parent=True, formatter={'season': int})
    rebulk.chain(chain_breaker=chain_breaker). \
        regex(r'S(?P<season>\d+)', validate_all=True, validator={'__parent__': seps_surround}). \
        regex(r'[ -](?P<season>\d+)', validator=seps_surround).repeater('*')
    matches = rebulk.matches("Some S01-02-03-50-51")
    assert len(matches) == 3
    # Bug fix: these were assignments (`matches[0].value = 1`), which
    # asserted nothing; assert the int-formatted season values instead.
    assert [match.value for match in matches] == [1, 2, 3]
def test_chain_breaker_defaults():
    """A chain_breaker supplied via defaults() applies to the chain."""

    def chain_breaker(matches):
        # Break the chain when the last two season numbers differ by > 10.
        seasons = matches.named('season')
        if len(seasons) > 1:
            if seasons[-1].value - seasons[-2].value > 10:
                return True
        return False

    seps_surround = partial(chars_surround, ' .-/')
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(chain_breaker=chain_breaker, children=True, private_parent=True, formatter={'season': int})
    rebulk.chain(). \
        regex(r'S(?P<season>\d+)', validate_all=True, validator={'__parent__': seps_surround}). \
        regex(r'[ -](?P<season>\d+)', validator=seps_surround).repeater('*')
    matches = rebulk.matches("Some S01-02-03-50-51")
    assert len(matches) == 3
    # Bug fix: these were assignments (`matches[0].value = 1`), which
    # asserted nothing; assert the int-formatted season values instead.
    assert [match.value for match in matches] == [1, 2, 3]
def test_chain_breaker_defaults2():
    """A chain_breaker supplied via chain_defaults() applies to the chain."""

    def chain_breaker(matches):
        # Break the chain when the last two season numbers differ by > 10.
        seasons = matches.named('season')
        if len(seasons) > 1:
            if seasons[-1].value - seasons[-2].value > 10:
                return True
        return False

    seps_surround = partial(chars_surround, ' .-/')
    rebulk = Rebulk()
    rebulk.regex_defaults(flags=re.IGNORECASE)
    rebulk.chain_defaults(chain_breaker=chain_breaker)
    rebulk.defaults(children=True, private_parent=True, formatter={'season': int})
    rebulk.chain(). \
        regex(r'S(?P<season>\d+)', validate_all=True, validator={'__parent__': seps_surround}). \
        regex(r'[ -](?P<season>\d+)', validator=seps_surround).repeater('*')
    matches = rebulk.matches("Some S01-02-03-50-51")
    assert len(matches) == 3
    # Bug fix: these were assignments (`matches[0].value = 1`), which
    # asserted nothing; assert the int-formatted season values instead.
    assert [match.value for match in matches] == [1, 2, 3]
| |
"""This example shows how to browse the object tree and enlarge tables.
Before to run this program you need to execute first tutorial1-1.py
that create the tutorial1.h5 file needed here.
"""
import tables as tb
print()
print('-**-**-**-**- open the previous tutorial file -**-**-**-**-**-')
# Reopen the file in append mode
h5file = tb.open_file("tutorial1.h5", "a")
# Print the object tree created from this filename
print("Object tree from filename:", h5file.filename)
print(h5file)
print()
print('-**-**-**-**-**-**- traverse tree methods -**-**-**-**-**-**-**-')
# List all the nodes (Group and Leaf objects) on tree
print(h5file)
# List all the nodes (using File iterator) on tree
print("Nodes in file:")
for node in h5file:
print(node)
print()
# Now, only list all the groups on tree
print("Groups in file:")
for group in h5file.walk_groups():
print(group)
print()
# List only the arrays hanging from /
print("Arrays in file (I):")
for group in h5file.walk_groups("/"):
for array in h5file.list_nodes(group, classname='Array'):
print(array)
# This do the same result
print("Arrays in file (II):")
for array in h5file.walk_nodes("/", "Array"):
print(array)
print()
# And finally, list only leafs on /detector group (there should be one!)
print("Leafs in group '/detector' (I):")
for leaf in h5file.list_nodes("/detector", 'Leaf'):
print(leaf)
# Other way using iterators and natural naming
print("Leafs in group '/detector' (II):")
for leaf in h5file.root.detector._f_walknodes('Leaf'):
print(leaf)
print()
print('-**-**-**-**-**-**- setting/getting object attributes -**-**--**-**-')
# Get a pointer to the '/detector/readout' node.
table = h5file.root.detector.readout
# Attach a string (date) attribute to it.
table.attrs.gath_date = "Wed, 06/12/2003 18:33"
# Attach a floating point attribute and its scale.
table.attrs.temperature = 18.4
table.attrs.temp_scale = "Celsius"
# Get a pointer to the '/detector' group.
detector = h5file.root.detector
# Attach a general Python object to the parent (/detector) group.
detector._v_attrs.stuff = [5, (2.3, 4.5), "Integer and tuple"]
# Now, read the attributes back.
print("gath_date attribute of /detector/readout:", table.attrs.gath_date)
print("temperature attribute of /detector/readout:", table.attrs.temperature)
print("temp_scale attribute of /detector/readout:", table.attrs.temp_scale)
print("stuff attribute in /detector:", detector._v_attrs.stuff)
print()
# Delete permanently the attribute gath_date of /detector/readout.
print("Deleting /detector/readout gath_date attribute")
del table.attrs.gath_date
# Print a representation of all attributes of the readout table.
print("AttributeSet instance in /detector/table:", repr(table.attrs))
# List only the user attributes.
print("List of user attributes in /detector/table:", table.attrs._f_list())
# List the system attributes (NOTE: the printed label says "user" here).
print("List of user attributes in /detector/table:",
      table.attrs._f_list("sys"))
print()
# Rename a user attribute in place.
print("renaming 'temp_scale' attribute to 'tempScale'")
table.attrs._f_rename("temp_scale", "tempScale")
print(table.attrs._f_list())
# Renaming a system attribute is forbidden; PyTables raises an exception.
try:
    table.attrs._f_rename("VERSION", "version")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; the rename failure here is expected and simply reported.
    print("You can not rename a VERSION attribute: it is read only!.")
print()
print('-**-**-**-**-**-**- getting object metadata -**-**-**-**-**-**-')
# Get a pointer to the '/detector/readout' table.
table = h5file.root.detector.readout
# Print general metadata of the table.
print("Object:", table)
print("Table name:", table.name)
print("Table title:", table.title)
print("Number of rows in table:", table.nrows)
print("Table variable names with their type and shape:")
for name in table.colnames:
    print(name, ':= {}, {}'.format(table.coldtypes[name],
                                   table.coldtypes[name].shape))
print()
# Get the 'pressure' array node hanging from '/columns'.
pressureObject = h5file.get_node("/columns", "pressure")
# Get some metadata on this object.
print("Info on the object:", repr(pressureObject))
print(" shape: ==>", pressureObject.shape)
print(" title: ==>", pressureObject.title)
print(" atom: ==>", pressureObject.atom)
print()
print('-**-**-**-**-**- reading actual data from arrays -**-**-**-**-**-**-')
# Read the 'pressure' actual data.
pressureArray = pressureObject.read()
print(repr(pressureArray))
# Check the kind of object we have created (it should be a numpy array).
print("pressureArray is an object of type:", type(pressureArray))
# Read the 'name' Array actual data.
nameArray = h5file.root.columns.name.read()
# Check the kind of object we have created (it should be a numpy array).
print("nameArray is an object of type:", type(nameArray))
print()
# Print the data for both arrays, paired element by element.
print("Data on arrays nameArray and pressureArray:")
for i in range(pressureObject.shape[0]):
    print(nameArray[i], "-->", pressureArray[i])
print()
print('-**-**-**-**-**- reading actual data from tables -**-**-**-**-**-**-')
# Create a shortcut to the table object.
table = h5file.root.detector.readout
# Read the 'energy' column of '/detector/readout'.
print("Column 'energy' of '/detector/readout':\n", table.cols.energy)
print()
# Read the 3rd row of '/detector/readout'.
print("Third row of '/detector/readout':\n", table[2])
print()
# Read the rows from 3 to 9 of '/detector/readout'.
print("Rows from 3 to 9 of '/detector/readout':\n", table[2:9])
print()
print('-**-**-**-**- append records to existing table -**-**-**-**-**-')
# Get the Row buffer used to append new records.
table = h5file.root.detector.readout
particle = table.row
# Append 5 new particles to the table (indices 10..14).
for i in range(10, 15):
    particle['name'] = 'Particle: %6d' % (i)
    particle['TDCcount'] = i % 256
    particle['ADCcount'] = (i * 256) % (1 << 16)
    particle['grid_i'] = i
    particle['grid_j'] = 10 - i
    particle['pressure'] = float(i * i)
    particle['energy'] = float(particle['pressure'] ** 4)
    particle['idnumber'] = i * (2 ** 34)  # deliberately larger than 32 bits
    particle.append()
# Flush the table's I/O buffers so the new rows become visible.
table.flush()
# Print the data using the table iterator:
for r in table:
    print("%-16s | %11.1f | %11.4g | %6d | %6d | %8d |" %
          (r['name'], r['pressure'], r['energy'], r['grid_i'], r['grid_j'],
           r['TDCcount']))
print()
print("Total number of entries in resulting table:", table.nrows)
print()
print('-**-**-**-**- modify records of a table -**-**-**-**-**-')
# Single cells
print("First row of readout table.")
print("Before modif-->", table[0])
table.cols.TDCcount[0] = 1
print("After modifying first row of TDCcount-->", table[0])
table.cols.energy[0] = 2
print("After modifying first row of energy-->", table[0])
# Column slices
table.cols.TDCcount[2:5] = [2, 3, 4]
# Fixed: the message previously said "ADCcount" but the code modifies
# the TDCcount column.
print("After modifying slice [2:5] of TDCcount-->", table[0:5])
table.cols.energy[1:9:3] = [2, 3, 4]
print("After modifying slice [1:9:3] of energy-->", table[0:9])
# Modifying complete rows starting at row 1, every 3rd row.
table.modify_rows(start=1, step=3,
                  rows=[(1, 2, 3.0, 4, 5, 6, 'Particle: None', 8.0),
                        (2, 4, 6.0, 8, 10, 12, 'Particle: None*2', 16.0)])
print("After modifying the complete third row-->", table[0:5])
# Modifying columns inside table iterators (requires Row.update()).
for row in table.where('TDCcount <= 2'):
    row['energy'] = row['TDCcount'] * 2
    row.update()
print("After modifying energy column (where TDCcount <=2)-->", table[0:4])
print()
print('-**-**-**-**- modify elements of an array -**-**-**-**-**-')
print("pressure array")
pressureObject = h5file.root.columns.pressure
print("Before modif-->", pressureObject[:])
# Single element assignment.
pressureObject[0] = 2
print("First modif-->", pressureObject[:])
# Contiguous slice assignment.
pressureObject[1:3] = [2.1, 3.5]
print("Second modif-->", pressureObject[:])
# Strided slice assignment (every other element).
pressureObject[::2] = [1, 2]
print("Third modif-->", pressureObject[:])
print("name array")
nameObject = h5file.root.columns.name
print("Before modif-->", nameObject[:])
nameObject[0] = ['Particle: None']
print("First modif-->", nameObject[:])
nameObject[1:3] = ['Particle: 0', 'Particle: 1']
print("Second modif-->", nameObject[:])
nameObject[::2] = ['Particle: -3', 'Particle: -5']
print("Third modif-->", nameObject[:])
print()
print('-**-**-**-**- remove records from a table -**-**-**-**-**-')
# Delete some rows on the Table (yes, rows can be removed!).
table.remove_rows(5, 10)
# Print some table columns, for comparison with array data.
print("Some columns in final table:")
print()
# Print the headers.
print("%-16s | %11s | %11s | %6s | %6s | %8s |" %
      ('name', 'pressure', 'energy', 'grid_i', 'grid_j',
       'TDCcount'))
print("%-16s + %11s + %11s + %6s + %6s + %8s +" %
      ('-' * 16, '-' * 11, '-' * 11, '-' * 6, '-' * 6, '-' * 8))
# Print the data using the table iterator:
for r in table.iterrows():
    print("%-16s | %11.1f | %11.4g | %6d | %6d | %8d |" %
          (r['name'], r['pressure'], r['energy'], r['grid_i'], r['grid_j'],
           r['TDCcount']))
print()
print("Total number of entries in final table:", table.nrows)
# Close the file, flushing any pending changes.
h5file.close()
| |
"""
Stack-In-A-WSGI: stackinawsgi.session.session.Session testing
"""
import unittest
from threading import Lock
import uuid
import ddt
import mock
from stackinabox.stack import StackInABox
from stackinabox.services.hello import HelloService
from stackinawsgi.exceptions import (
InvalidSessionId,
InvalidServiceList,
NoServicesProvided
)
from stackinawsgi.session.session import Session
@ddt.ddt
class TestSessionSession(unittest.TestCase):
"""
Test the interaction of StackInAWSGI's Session object
"""
def setUp(self):
"""
configure env for the test
"""
self.session_id = str(uuid.uuid4())
self.services = [HelloService]
def tearDown(self):
"""
clean up after the test
"""
pass
def test_contstruction_invalid_session_id(self):
"""
Test construction of a session object with an invalid session id
"""
with self.assertRaises(InvalidSessionId):
Session(None, [HelloService])
@ddt.unpack
@ddt.data(
([], NoServicesProvided),
(None, InvalidServiceList),
)
def test_construction_with_invalid_services(self, invalid_service_value,
expected_exception):
"""
test construction of a session object with a series of invalid values
"""
with self.assertRaises(expected_exception):
Session('invalid-services-list-testing', invalid_service_value)
def test_construction(self):
"""
Test successful construction and init_services()
"""
session = Session(self.session_id, self.services)
self.assertEqual(self.session_id, session.session_id)
self.assertEqual(self.services, session.services)
self.assertTrue(isinstance(session.lock, type(Lock())))
self.assertTrue(isinstance(session.stack, StackInABox))
self.assertEqual(session.session_id, session.stack.base_url)
self.assertEqual(len(self.services), len(session.stack.services))
self.assertEqual(0, session.access_count)
self.assertEqual(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
tuple_services = tuple(self.services)
for _, v in session.stack.services.items():
_, svc = v
self.assertIsInstance(svc, tuple_services)
def test_base_url(self):
"""
Test Base URL
"""
def validate(s, b):
self.assertEqual(0, s.access_count)
self.assertEqual(s.created_at, s.last_accessed_at)
self.assertEqual(
b,
s.base_url
)
self.assertEqual(
b,
s.stack.base_url
)
session = Session(self.session_id, self.services)
validate(session, self.session_id)
new_base_url = 'howdy-from-the-test'
session.base_url = new_base_url
validate(session, new_base_url)
def test_reset(self):
"""
test resetting
"""
def get_service_instance_info(s):
list_ids = {}
for k, v in s.stack.services.items():
m, svc = v
list_ids[(k, m)] = id(svc)
return list_ids
session = Session(self.session_id, self.services)
self.assertEqual(0, session.access_count)
self.assertEqual(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
original_session_ids = get_service_instance_info(session)
session.reset()
self.assertEqual(1, session.access_count)
self.assertLess(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
new_session_ids = get_service_instance_info(session)
for k, v in original_session_ids.items():
self.assertIn(k, new_session_ids)
self.assertNotEqual(v, new_session_ids[k])
def test_call(self):
"""
test calling into the session
"""
result = (200, {}, "we're all good")
mock_session_stack_call = mock.Mock()
mock_session_stack_call.return_value = result
session = Session(self.session_id, self.services)
session.stack.call = mock_session_stack_call
self.assertEqual(0, session.access_count)
self.assertEqual(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
self.assertEqual(mock_session_stack_call.call_count, 0)
self.assertEqual(session.call('hello', 'world'), result)
self.assertTrue(mock_session_stack_call.called)
self.assertEqual(mock_session_stack_call.call_count, 1)
self.assertEqual(
mock_session_stack_call.call_args,
(('hello', 'world'),)
)
self.assertEqual(1, session.access_count)
self.assertLess(session.created_at, session.last_accessed_at)
self.assertIn(result[0], session.status_tracker)
self.assertEqual(1, session.status_tracker[result[0]])
def test_try_handle_route(self):
"""
test calling the session request handler
"""
result = (200, {}, "we're all good")
mock_session_try_handle_route = mock.Mock()
mock_session_try_handle_route.return_value = result
session = Session(self.session_id, self.services)
session.stack.try_handle_route = mock_session_try_handle_route
self.assertEqual(0, session.access_count)
self.assertEqual(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
self.assertEqual(mock_session_try_handle_route.call_count, 0)
self.assertEqual(session.try_handle_route('hello', 'world'), result)
self.assertTrue(mock_session_try_handle_route.called)
self.assertEqual(mock_session_try_handle_route.call_count, 1)
self.assertEqual(
mock_session_try_handle_route.call_args,
(('hello', 'world'),)
)
self.assertEqual(1, session.access_count)
self.assertLess(session.created_at, session.last_accessed_at)
self.assertIn(result[0], session.status_tracker)
self.assertEqual(1, session.status_tracker[result[0]])
def test_request(self):
"""
test calling the session request handler
"""
result = (200, {}, "we're all good")
mock_session_request = mock.Mock()
mock_session_request.return_value = result
session = Session(self.session_id, self.services)
session.stack.request = mock_session_request
self.assertEqual(0, session.access_count)
self.assertEqual(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
self.assertEqual(mock_session_request.call_count, 0)
self.assertEqual(session.request('hello', 'world'), result)
self.assertTrue(mock_session_request.called)
self.assertEqual(mock_session_request.call_count, 1)
self.assertEqual(
mock_session_request.call_args,
(('hello', 'world'),)
)
self.assertEqual(1, session.access_count)
self.assertLess(session.created_at, session.last_accessed_at)
self.assertIn(result[0], session.status_tracker)
self.assertEqual(1, session.status_tracker[result[0]])
def test_sub_request(self):
"""
test calling the session sub_request handler
"""
result = (200, {}, "we're all good")
mock_session_sub_request = mock.Mock()
mock_session_sub_request.return_value = result
session = Session(self.session_id, self.services)
session.stack.sub_request = mock_session_sub_request
self.assertEqual(0, session.access_count)
self.assertEqual(session.created_at, session.last_accessed_at)
self.assertEqual(0, len(session.status_tracker))
self.assertEqual(mock_session_sub_request.call_count, 0)
self.assertEqual(session.sub_request('hello', 'world'), result)
self.assertTrue(mock_session_sub_request.called)
self.assertEqual(mock_session_sub_request.call_count, 1)
self.assertEqual(
mock_session_sub_request.call_args,
(('hello', 'world'),)
)
self.assertEqual(1, session.access_count)
self.assertLess(session.created_at, session.last_accessed_at)
self.assertIn(result[0], session.status_tracker)
self.assertEqual(1, session.status_tracker[result[0]])
| |
import inspect
import types as python_types
import warnings
from ..engine.topology import Layer, InputSpec
from .. import backend as K
from ..utils.generic_utils import func_dump, func_load
from .. import regularizers
from .. import constraints
from .. import activations
from .. import initializers
class Merge(Layer):
    """A `Merge` layer can be used to merge a list of tensors
    into a single tensor, following some merge `mode`.
    # Example
    ```python
    model1 = Sequential()
    model1.add(Dense(32, input_dim=32))
    model2 = Sequential()
    model2.add(Dense(32, input_dim=32))
    merged_model = Sequential()
    merged_model.add(Merge([model1, model2], mode='concat', concat_axis=1))
    ```
    # Arguments
        layers: Can be a list of Keras tensors or
            a list of layer instances. Must be more
            than one layer/tensor.
        mode: String or lambda/function. If string, must be one
            of: 'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'.
            If lambda/function, it should take as input a list of tensors
            and return a single tensor.
        concat_axis: Integer, axis to use in mode `concat`.
        dot_axes: Integer or tuple of integers,
            axes to use in mode `dot` or `cos`.
        output_shape: Either a shape tuple (tuple of integers),
            or a lambda/function
            to compute `output_shape`
            (only if merge mode is a lambda/function).
            If the argument is a tuple,
            it should be expected output shape, *not* including the batch size
            (same convention as the `input_shape` argument in layers).
            If the argument is callable,
            it should take as input a list of shape tuples
            (1:1 mapping to input tensors)
            and return a single shape tuple, including the
            batch size (same convention as the
            `compute_output_shape` method of layers).
        node_indices: Optional list of integers containing
            the output node index for each input layer
            (in case some input layers have multiple output nodes).
            will default to an array of 0s if not provided.
        tensor_indices: Optional list of indices of output tensors
            to consider for merging
            (in case some input layer node returns multiple tensors).
        output_mask: Mask or lambda/function to compute the output mask (only
            if merge mode is a lambda/function). If the latter case, it should
            take as input a list of masks and return a single mask.
    """

    def __init__(self, layers=None, mode='sum', concat_axis=-1,
                 dot_axes=-1, output_shape=None, output_mask=None,
                 arguments=None, node_indices=None, tensor_indices=None,
                 name=None):
        """Store the merge configuration; if `layers` is given, validate
        them and immediately wire this layer into the graph by calling
        itself on the layers' output tensors (legacy Sequential usage)."""
        warnings.warn('The `Merge` layer is deprecated '
                      'and will be removed after 08/2017. '
                      'Use instead layers from `keras.layers.merge`, '
                      'e.g. `add`, `concatenate`, etc.', stacklevel=2)
        self.layers = layers
        self.mode = mode
        self.concat_axis = concat_axis
        self.dot_axes = dot_axes
        self._output_shape = output_shape
        self.node_indices = node_indices
        self._output_mask = output_mask
        self.arguments = arguments if arguments else {}
        # NOTE: base Layer.__init__ is deliberately NOT called; the
        # bookkeeping attributes it would create are set up by hand below.
        self._initial_weights = None
        self._updates = []
        self._losses = []
        self._per_input_updates = {}
        self._per_input_losses = {}
        # Layer parameters.
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.supports_masking = True
        self.uses_learning_phase = False
        self.input_spec = None  # Compatible with anything.
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name
        if layers:
            # This exists for backwards compatibility.
            # equivalent to:
            # merge = Merge(layers=None)
            # output = merge([input_tensor_1, input_tensor_2])
            if not node_indices:
                # By default we connect to
                # the 1st output stream in the input layer.
                node_indices = [0 for _ in range(len(layers))]
            if not tensor_indices:
                tensor_indices = [0 for _ in range(len(layers))]
            self._arguments_validation(layers, mode,
                                       concat_axis, dot_axes,
                                       node_indices, tensor_indices)
            self.built = True
            input_tensors = []
            input_masks = []
            for i, layer in enumerate(layers):
                node_index = node_indices[i]
                tensor_index = tensor_indices[i]
                inbound_node = layer.inbound_nodes[node_index]
                input_tensors.append(inbound_node.output_tensors[tensor_index])
                input_masks.append(inbound_node.output_masks[tensor_index])
            # Calling self() creates the graph node connecting the inputs.
            self(input_tensors, mask=input_masks)
        else:
            self.built = False

    def _arguments_validation(self, layers, mode, concat_axis, dot_axes,
                              node_indices, tensor_indices):
        """Validates user-passed arguments and raises exceptions
        as appropriate.
        """
        if not callable(mode):
            if mode not in {'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'}:
                raise ValueError('Invalid merge mode: ' + str(mode))
        if not isinstance(layers, (list, tuple)) or len(layers) < 2:
            raise TypeError('A Merge should only be applied to a list of '
                            'layers with at least 2 elements. Found: ' +
                            str(layers))
        if tensor_indices is None:
            tensor_indices = [None for _ in range(len(layers))]
        # Collect each input layer's output shape for compatibility checks.
        input_shapes = []
        for i, layer in enumerate(layers):
            layer_output_shape = layer.get_output_shape_at(node_indices[i])
            if isinstance(layer_output_shape, list):
                # Case: the layer has multiple output tensors
                # and we only need a specific one.
                layer_output_shape = layer_output_shape[tensor_indices[i]]
            input_shapes.append(layer_output_shape)
        if mode in {'sum', 'mul', 'ave', 'cos', 'max'}:
            # Element-wise modes need identical shapes across all inputs.
            input_shapes_set = set(input_shapes)
            if len(input_shapes_set) > 1:
                raise ValueError('Only layers of same output shape can '
                                 'be merged using ' + mode + ' mode. ' +
                                 'Layer shapes: %s' % input_shapes)
        if mode in {'cos', 'dot'}:
            if len(layers) > 2:
                raise ValueError(mode + ' merge takes exactly 2 layers')
            shape1 = input_shapes[0]
            shape2 = input_shapes[1]
            n1 = len(shape1)
            n2 = len(shape2)
            if isinstance(dot_axes, int):
                # Normalize a scalar dot_axes into a two-element list
                # (negative values are wrapped per input rank).
                if dot_axes < 0:
                    self.dot_axes = [dot_axes % n1, dot_axes % n2]
                else:
                    self.dot_axes = [dot_axes, ] * 2
            if not isinstance(self.dot_axes, (list, tuple)):
                raise TypeError('Invalid type for dot_axes - '
                                'should be a list.')
            if len(self.dot_axes) != 2:
                raise ValueError('Invalid format for dot_axes - '
                                 'should contain two elements.')
            if not isinstance(self.dot_axes[0], int) or not isinstance(self.dot_axes[1], int):
                raise ValueError('Invalid format for dot_axes - '
                                 'list elements should be "int".')
            if shape1[self.dot_axes[0]] != shape2[self.dot_axes[1]]:
                raise ValueError('Dimension incompatibility using dot mode: '
                                 '%s != %s. ' % (shape1[self.dot_axes[0]], shape2[self.dot_axes[1]]) +
                                 'Layer shapes: %s, %s' % (shape1, shape2))
        elif mode == 'concat':
            # All shapes must match once the concat axis is removed.
            # NOTE(review): this reads self.concat_axis (set in __init__)
            # rather than the local concat_axis argument.
            reduced_inputs_shapes = [list(shape) for shape in input_shapes]
            shape_set = set()
            for i in range(len(reduced_inputs_shapes)):
                del reduced_inputs_shapes[i][self.concat_axis]
                shape_set.add(tuple(reduced_inputs_shapes[i]))
            if len(shape_set) > 1:
                raise ValueError('"concat" mode can only merge '
                                 'layers with matching '
                                 'output shapes except for the concat axis. '
                                 'Layer shapes: %s' % (input_shapes))

    def call(self, inputs, mask=None):
        """Merge the list of input tensors according to `self.mode`.

        # Raises
            TypeError: if `inputs` is not a list of at least 2 tensors.
            ValueError: if `self.mode` is not a known merge mode.
        """
        if not isinstance(inputs, list) or len(inputs) <= 1:
            raise TypeError('Merge must be called on a list of tensors '
                            '(at least 2). Got: ' + str(inputs))
        # Case: "mode" is a lambda or function.
        if callable(self.mode):
            arguments = self.arguments
            arg_spec = inspect.getargspec(self.mode)
            if 'mask' in arg_spec.args:
                # Only forward the mask if the user function accepts it.
                arguments['mask'] = mask
            return self.mode(inputs, **arguments)
        if self.mode == 'sum' or self.mode == 'ave':
            s = inputs[0]
            for i in range(1, len(inputs)):
                s += inputs[i]
            if self.mode == 'ave':
                s /= len(inputs)
            return s
        elif self.mode == 'concat':
            return K.concatenate(inputs, axis=self.concat_axis)
        elif self.mode == 'mul':
            s = inputs[0]
            for i in range(1, len(inputs)):
                s *= inputs[i]
            return s
        elif self.mode == 'max':
            s = inputs[0]
            for i in range(1, len(inputs)):
                s = K.maximum(s, inputs[i])
            return s
        elif self.mode == 'dot':
            l1 = inputs[0]
            l2 = inputs[1]
            output = K.batch_dot(l1, l2, self.dot_axes)
            return output
        elif self.mode == 'cos':
            # Cosine similarity: normalized batched dot product, clamped
            # away from a zero denominator with K.epsilon().
            l1 = inputs[0]
            l2 = inputs[1]
            denominator = K.sqrt(K.batch_dot(l1, l1, self.dot_axes) *
                                 K.batch_dot(l2, l2, self.dot_axes))
            denominator = K.maximum(denominator, K.epsilon())
            output = K.batch_dot(l1, l2, self.dot_axes) / denominator
            output = K.expand_dims(output, 1)
            return output
        else:
            raise ValueError('Unknown merge mode.')

    def compute_output_shape(self, input_shape):
        """Compute the merged output shape from the list of input shapes."""
        # Must have multiple input shape tuples.
        assert isinstance(input_shape, list)
        # Case: callable self._output_shape.
        if callable(self.mode):
            if callable(self._output_shape):
                output_shape = self._output_shape(input_shape)
                return output_shape
            elif self._output_shape is not None:
                return (input_shape[0][0],) + tuple(self._output_shape)
            else:
                raise ValueError('The Merge layer ' + self.name +
                                 ' has a callable `mode` argument, '
                                 'and we cannot infer its output shape '
                                 'because no `output_shape` '
                                 'argument was provided. '
                                 'Make sure to pass a shape tuple '
                                 '(or callable) '
                                 '`output_shape` to Merge.')
        # Pre-defined merge modes.
        input_shapes = input_shape
        if self.mode in ['sum', 'mul', 'ave', 'max']:
            # All tuples in input_shapes should be the same.
            return input_shapes[0]
        elif self.mode == 'concat':
            # Sum the sizes along the concat axis; None propagates.
            output_shape = list(input_shapes[0])
            for shape in input_shapes[1:]:
                if output_shape[self.concat_axis] is None or shape[self.concat_axis] is None:
                    output_shape[self.concat_axis] = None
                    break
                output_shape[self.concat_axis] += shape[self.concat_axis]
            return tuple(output_shape)
        elif self.mode in ['dot', 'cos']:
            # Drop the contracted axes (and the batch dim of the second
            # input), then pad to rank 2 if the result became a scalar.
            shape1 = list(input_shapes[0])
            shape2 = list(input_shapes[1])
            shape1.pop(self.dot_axes[0])
            shape2.pop(self.dot_axes[1])
            shape2.pop(0)
            output_shape = shape1 + shape2
            if len(output_shape) == 1:
                output_shape += [1]
            return tuple(output_shape)

    def compute_mask(self, inputs, mask=None):
        """Compute the output mask for the merged tensor (or None)."""
        if mask is None or all([m is None for m in mask]):
            return None
        assert hasattr(mask, '__len__') and len(mask) == len(inputs)
        if self.mode in ['sum', 'mul', 'ave', 'max']:
            # Element-wise modes: output is masked where ALL inputs are.
            masks = [K.expand_dims(m, 0) for m in mask if m is not None]
            return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
        elif self.mode == 'concat':
            # Make a list of masks while making sure
            # the dimensionality of each mask
            # is the same as the corresponding input.
            masks = []
            for input_i, mask_i in zip(inputs, mask):
                if mask_i is None:
                    # Input is unmasked. Append all 1s to masks,
                    # but cast it to bool first
                    masks.append(K.cast(K.ones_like(input_i), 'bool'))
                elif K.ndim(mask_i) < K.ndim(input_i):
                    # Mask is smaller than the input, expand it
                    masks.append(K.expand_dims(mask_i))
                else:
                    masks.append(mask_i)
            concatenated = K.concatenate(masks, axis=self.concat_axis)
            return K.all(concatenated, axis=-1, keepdims=False)
        elif self.mode in ['cos', 'dot']:
            return None
        elif callable(self.mode):
            if callable(self._output_mask):
                return self._output_mask(mask)
            else:
                return self._output_mask
        else:
            # This should have been caught earlier.
            raise ValueError('Invalid merge mode: {}'.format(self.mode))

    def get_config(self):
        """Serialize the layer config; callables are dumped with a
        companion `*_type` tag ('lambda'/'function'/'raw') so that
        `from_config` can reconstruct them."""
        if isinstance(self.mode, python_types.LambdaType):
            mode = func_dump(self.mode)
            mode_type = 'lambda'
        elif callable(self.mode):
            mode = self.mode.__name__
            mode_type = 'function'
        else:
            mode = self.mode
            mode_type = 'raw'
        if isinstance(self._output_shape, python_types.LambdaType):
            output_shape = func_dump(self._output_shape)
            output_shape_type = 'lambda'
        elif callable(self._output_shape):
            output_shape = self._output_shape.__name__
            output_shape_type = 'function'
        else:
            output_shape = self._output_shape
            output_shape_type = 'raw'
        if isinstance(self._output_mask, python_types.LambdaType):
            output_mask = func_dump(self._output_mask)
            output_mask_type = 'lambda'
        elif callable(self._output_mask):
            output_mask = self._output_mask.__name__
            output_mask_type = 'function'
        else:
            output_mask = self._output_mask
            output_mask_type = 'raw'
        return {'name': self.name,
                'mode': mode,
                'mode_type': mode_type,
                'concat_axis': self.concat_axis,
                'dot_axes': self.dot_axes,
                'output_shape': output_shape,
                'output_shape_type': output_shape_type,
                'output_mask': output_mask,
                'output_mask_type': output_mask_type,
                'arguments': self.arguments}

    @classmethod
    def from_config(cls, config):
        """Rebuild a Merge layer from `get_config` output, restoring
        'function' entries from globals() and 'lambda' entries via
        `func_load`."""
        config = config.copy()
        mode_type = config.pop('mode_type')
        if mode_type == 'function':
            mode = globals()[config['mode']]
        elif mode_type == 'lambda':
            mode = func_load(config['mode'], globs=globals())
        else:
            mode = config['mode']
        output_shape_type = config.pop('output_shape_type', None)
        if output_shape_type == 'function':
            output_shape = globals()[config['output_shape']]
        elif output_shape_type == 'lambda':
            output_shape = func_load(config['output_shape'],
                                     globs=globals())
        else:
            output_shape = config.get('output_shape')
        output_mask_type = config.pop('output_mask_type', None)
        if output_mask_type == 'function':
            output_mask = globals()[config['output_mask']]
        elif output_mask_type == 'lambda':
            output_mask = func_load(config['output_mask'],
                                    globs=globals())
        else:
            output_mask = config.get('output_mask')
        config['mode'] = mode
        config['output_shape'] = output_shape
        config['output_mask'] = output_mask
        return super(Merge, cls).from_config(config)
def merge(inputs, mode='sum', concat_axis=-1,
          dot_axes=-1, output_shape=None, output_mask=None,
          arguments=None, name=None):
    """Functional merge, to apply to Keras tensors (NOT layers).

    Returns a single Keras tensor combining `inputs` according to `mode`.

    # Example

    ```python
    tensor_a = Input(shape=(32,))
    tensor_b = Input(shape=(32,))
    merged_tensor = merge([tensor_a, tensor_b], mode='concat', concat_axis=1)
    ```

    # Arguments
        mode: merge mode name ('sum', 'mul', 'concat', 'ave', 'cos',
            'dot', 'max') or a lambda/function mapping a list of tensors
            to a single tensor.
        concat_axis: integer axis used by the 'concat' mode.
        dot_axes: integer or tuple of integers; axes used by the
            'dot' / 'cos' modes.
        output_shape: shape tuple, or callable computing the output shape
            (only meaningful when `mode` is a callable); a callable takes
            a list of shape tuples and returns one shape tuple including
            the batch size.
        output_mask: mask or callable computing the output mask
            (only meaningful when `mode` is a callable).
        arguments: extra keyword arguments passed to a callable `mode`.
        name: optional name for the underlying `Merge` layer.
    """
    warnings.warn('The `merge` function is deprecated '
                  'and will be removed after 08/2017. '
                  'Use instead layers from `keras.layers.merge`, '
                  'e.g. `add`, `concatenate`, etc.', stacklevel=2)
    # Only tensors created by Keras carry a `_keras_history`; with it we
    # can wire the Merge layer straight into the existing graph.
    all_keras_tensors = all(hasattr(t, '_keras_history') for t in inputs)
    if all_keras_tensors:
        input_layers = []
        node_indices = []
        tensor_indices = []
        for t in inputs:
            origin_layer, node_index, tensor_index = t._keras_history
            input_layers.append(origin_layer)
            node_indices.append(node_index)
            tensor_indices.append(tensor_index)
        merge_layer = Merge(input_layers, mode=mode,
                            concat_axis=concat_axis,
                            dot_axes=dot_axes,
                            output_shape=output_shape,
                            output_mask=output_mask,
                            arguments=arguments,
                            node_indices=node_indices,
                            tensor_indices=tensor_indices,
                            name=name)
        # The Merge constructor already connected the node; return its
        # single output tensor.
        return merge_layer.inbound_nodes[0].output_tensors[0]
    # Fall back to building an unconnected layer and calling it.
    merge_layer = Merge(mode=mode,
                        concat_axis=concat_axis,
                        dot_axes=dot_axes,
                        output_shape=output_shape,
                        output_mask=output_mask,
                        arguments=arguments,
                        name=name)
    return merge_layer(inputs)
class MaxoutDense(Layer):
    """A dense maxout layer.

    A `MaxoutDense` layer takes the element-wise maximum of
    `nb_feature` `Dense(input_dim, output_dim)` linear layers.
    This allows the layer to learn a convex,
    piecewise linear activation function over the inputs.

    Note that this is a *linear* layer;
    if you wish to apply activation function
    (you shouldn't need to --they are universal function approximators),
    an `Activation` layer must be added after.

    # Arguments
        output_dim: int > 0.
        nb_feature: number of Dense layers to use internally.
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and (output_dim,) for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.

    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.

    # Output shape
        2D tensor with shape: `(nb_samples, output_dim)`.

    # References
        - [Maxout Networks](http://arxiv.org/abs/1302.4389)
    """

    def __init__(self, output_dim,
                 nb_feature=4,
                 init='glorot_uniform',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        warnings.warn('The `MaxoutDense` layer is deprecated '
                      'and will be removed after 06/2017.')
        self.output_dim = output_dim
        self.nb_feature = nb_feature
        # `get` accepts either an identifier string or an already-built
        # object/callable for each of these.
        self.init = initializers.get(init)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        # Deferred until build(), when the weight tensors exist.
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(MaxoutDense, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))
        # One (input_dim, output_dim) kernel per maxout feature, stacked
        # along the leading axis. NOTE: weight creation order (W then b)
        # defines the ordering expected by set_weights/`weights` arg.
        self.W = self.add_weight((self.nb_feature, input_dim, self.output_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            # One bias vector per maxout feature.
            self.b = self.add_weight((self.nb_feature, self.output_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.output_dim)

    def call(self, x):
        # no activation, this layer is only linear.
        # K.dot of (nb_samples, input_dim) with (nb_feature, input_dim,
        # output_dim) yields (nb_samples, nb_feature, output_dim).
        output = K.dot(x, self.W)
        if self.bias:
            output += self.b
        # Maxout: element-wise maximum over the nb_feature linear maps.
        output = K.max(output, axis=1)
        return output

    def get_config(self):
        # Serialize constructor arguments so the layer can be recreated
        # via `from_config`; merged with the base Layer config.
        config = {'output_dim': self.output_dim,
                  'init': initializers.serialize(self.init),
                  'nb_feature': self.nb_feature,
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(MaxoutDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Highway(Layer):
    """Densely connected highway network.

    Highway layers are a natural extension of LSTMs to feedforward networks.

    # Arguments
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: a(x) = x).
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and (output_dim,) for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.

    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.

    # Output shape
        2D tensor with shape: `(nb_samples, input_dim)`.

    # References
        - [Highway Networks](http://arxiv.org/abs/1505.00387v2)
    """

    def __init__(self,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        warnings.warn('The `Highway` layer is deprecated '
                      'and will be removed after 06/2017.')
        # `transform_bias` was a constructor argument in older versions;
        # accept and discard it so old configs still deserialize.
        if 'transform_bias' in kwargs:
            kwargs.pop('transform_bias')
            warnings.warn('`transform_bias` argument is deprecated and '
                          'has been removed.')
        self.init = initializers.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        # Deferred until build(), when the weight tensors exist.
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(Highway, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))
        # Main transform weights; regularizer/constraint apply to W only.
        self.W = self.add_weight((input_dim, input_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        # Carry-gate weights (no regularizer/constraint in this layer).
        self.W_carry = self.add_weight((input_dim, input_dim),
                                       initializer=self.init,
                                       name='W_carry')
        if self.bias:
            self.b = self.add_weight((input_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
            # Carry-gate bias initialized to ones.
            self.b_carry = self.add_weight((input_dim,),
                                           initializer='one',
                                           name='b_carry')
        else:
            # NOTE: when bias=False, self.b is never assigned; call()
            # only reads self.b inside `if self.bias`, so this is safe.
            self.b_carry = None
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, x):
        # Transform gate: sigmoid(x . W_carry + b_carry).
        y = K.dot(x, self.W_carry)
        if self.bias:
            y += self.b_carry
        transform_weight = activations.sigmoid(y)
        # Main transform: activation(x . W + b).
        y = K.dot(x, self.W)
        if self.bias:
            y += self.b
        act = self.activation(y)
        act *= transform_weight
        # Highway mix: gate * transform(x) + (1 - gate) * x (carry path).
        output = act + (1 - transform_weight) * x
        return output

    def get_config(self):
        # Serialize constructor arguments so the layer can be recreated
        # via `from_config`; merged with the base Layer config.
        config = {'init': initializers.serialize(self.init),
                  'activation': activations.serialize(self.activation),
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(Highway, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def AtrousConvolution1D(*args, **kwargs):
    """Deprecated factory for dilated 1D convolutions.

    Translates the legacy `atrous_rate` keyword argument into the
    `dilation_rate` argument understood by `Conv1D`, emits a
    deprecation warning, and forwards all other arguments unchanged.

    # Returns
        A `Conv1D` layer instance with the requested dilation rate.
    """
    from ..layers import Conv1D
    # Map the legacy keyword onto the modern one; default is no dilation.
    kwargs['dilation_rate'] = kwargs.pop('atrous_rate', 1)
    warnings.warn('The `AtrousConvolution1D` layer '
                  ' has been deprecated. Use instead '
                  'the `Conv1D` layer with the `dilation_rate` '
                  'argument.')
    return Conv1D(*args, **kwargs)
def AtrousConvolution2D(*args, **kwargs):
    """Deprecated factory for dilated 2D convolutions.

    Translates the legacy `atrous_rate` keyword argument into the
    `dilation_rate` argument understood by `Conv2D`, emits a
    deprecation warning, and forwards all other arguments unchanged.

    # Returns
        A `Conv2D` layer instance with the requested dilation rate.
    """
    from ..layers import Conv2D
    # Map the legacy keyword onto the modern one; default is no dilation.
    kwargs['dilation_rate'] = kwargs.pop('atrous_rate', 1)
    warnings.warn('The `AtrousConvolution2D` layer '
                  ' has been deprecated. Use instead '
                  'the `Conv2D` layer with the `dilation_rate` '
                  'argument.')
    return Conv2D(*args, **kwargs)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the pipeline options module."""
# pytype: skip-file
import json
import logging
import unittest
import hamcrest as hc
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import ProfilingOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
class PipelineOptionsTest(unittest.TestCase):
  """Tests for PipelineOptions flag parsing, views, and serialization."""

  def setUp(self):
    # Reset runtime options to avoid side-effects caused by other tests.
    # Note that is_accessible assertions require runtime_options to
    # be uninitialized.
    RuntimeValueProvider.set_runtime_options(None)

  def tearDown(self):
    # Reset runtime options to avoid side-effects in other tests.
    RuntimeValueProvider.set_runtime_options(None)

  # Table of {flags, expected parsed values, expected display data} shared
  # by the parameterized test_* methods below.
  TEST_CASES = [
      {
          'flags': ['--num_workers', '5'],
          'expected': {
              'num_workers': 5,
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': None
          },
          'display_data': [DisplayDataItemMatcher('num_workers', 5)]
      },
      {
          'flags': ['--direct_num_workers', '5'],
          'expected': {
              'direct_num_workers': 5,
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': None
          },
          'display_data': [DisplayDataItemMatcher('direct_num_workers', 5)]
      },
      {
          'flags': ['--direct_running_mode', 'multi_threading'],
          'expected': {
              'direct_running_mode': 'multi_threading',
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': None
          },
          'display_data': [
              DisplayDataItemMatcher('direct_running_mode', 'multi_threading')
          ]
      },
      {
          'flags': ['--direct_running_mode', 'multi_processing'],
          'expected': {
              'direct_running_mode': 'multi_processing',
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': None
          },
          'display_data': [
              DisplayDataItemMatcher('direct_running_mode', 'multi_processing')
          ]
      },
      {
          # Trailing positional 'ignored' exercises non-flag argument handling.
          'flags': [
              '--profile_cpu', '--profile_location', 'gs://bucket/', 'ignored'
          ],
          'expected': {
              'profile_cpu': True,
              'profile_location': 'gs://bucket/',
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': None
          },
          'display_data': [
              DisplayDataItemMatcher('profile_cpu', True),
              DisplayDataItemMatcher('profile_location', 'gs://bucket/')
          ]
      },
      {
          'flags': ['--num_workers', '5', '--mock_flag'],
          'expected': {
              'num_workers': 5,
              'mock_flag': True,
              'mock_option': None,
              'mock_multi_option': None
          },
          'display_data': [
              DisplayDataItemMatcher('num_workers', 5),
              DisplayDataItemMatcher('mock_flag', True)
          ]
      },
      {
          'flags': ['--mock_option', 'abc'],
          'expected': {
              'mock_flag': False,
              'mock_option': 'abc',
              'mock_multi_option': None
          },
          'display_data': [DisplayDataItemMatcher('mock_option', 'abc')]
      },
      {
          # Leading/trailing whitespace in option values must be preserved.
          'flags': ['--mock_option', ' abc def '],
          'expected': {
              'mock_flag': False,
              'mock_option': ' abc def ',
              'mock_multi_option': None
          },
          'display_data': [DisplayDataItemMatcher('mock_option', ' abc def ')]
      },
      {
          'flags': ['--mock_option= abc xyz '],
          'expected': {
              'mock_flag': False,
              'mock_option': ' abc xyz ',
              'mock_multi_option': None
          },
          'display_data': [DisplayDataItemMatcher('mock_option', ' abc xyz ')]
      },
      {
          # Spaces inside a GCS path plus repeated multi-options.
          'flags': [
              '--mock_option=gs://my bucket/my folder/my file',
              '--mock_multi_option=op1',
              '--mock_multi_option=op2'
          ],
          'expected': {
              'mock_flag': False,
              'mock_option': 'gs://my bucket/my folder/my file',
              'mock_multi_option': ['op1', 'op2']
          },
          'display_data': [
              DisplayDataItemMatcher(
                  'mock_option', 'gs://my bucket/my folder/my file'),
              DisplayDataItemMatcher('mock_multi_option', ['op1', 'op2'])
          ]
      },
      {
          'flags': ['--mock_multi_option=op1', '--mock_multi_option=op2'],
          'expected': {
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': ['op1', 'op2']
          },
          'display_data': [
              DisplayDataItemMatcher('mock_multi_option', ['op1', 'op2'])
          ]
      },
      {
          # JSON-typed option parsed via json.loads (see MockOptions below).
          'flags': ['--mock_json_option={"11a": 0, "37a": 1}'],
          'expected': {
              'mock_flag': False,
              'mock_option': None,
              'mock_multi_option': None,
              'mock_json_option': {
                  '11a': 0, '37a': 1
              },
          },
          'display_data': [
              DisplayDataItemMatcher('mock_json_option', {
                  '11a': 0, '37a': 1
              })
          ]
      },
  ]

  # Used for testing newly added flags.
  class MockOptions(PipelineOptions):
    @classmethod
    def _add_argparse_args(cls, parser):
      parser.add_argument('--mock_flag', action='store_true', help='mock flag')
      parser.add_argument('--mock_option', help='mock option')
      parser.add_argument(
          '--mock_multi_option', action='append', help='mock multi option')
      parser.add_argument('--option with space', help='mock option with space')
      parser.add_argument('--mock_json_option', type=json.loads, default={})

  # Use with MockOptions in test cases where multiple option classes are needed.
  class FakeOptions(PipelineOptions):
    @classmethod
    def _add_argparse_args(cls, parser):
      parser.add_argument('--fake_flag', action='store_true', help='fake flag')
      parser.add_argument('--fake_option', help='fake option')
      parser.add_argument(
          '--fake_multi_option', action='append', help='fake multi option')

  @unittest.skip("TODO(BEAM-12515): Flaky test.")
  def test_display_data(self):
    # Display data derived from parsed options should match the table.
    for case in PipelineOptionsTest.TEST_CASES:
      options = PipelineOptions(flags=case['flags'])
      dd = DisplayData.create_from(options)
      hc.assert_that(dd.items, hc.contains_inanyorder(*case['display_data']))

  def test_get_all_options_subclass(self):
    # Parsing through a subclass still exposes all options.
    for case in PipelineOptionsTest.TEST_CASES:
      options = PipelineOptionsTest.MockOptions(flags=case['flags'])
      self.assertDictContainsSubset(case['expected'], options.get_all_options())
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_flag,
          case['expected']['mock_flag'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_option,
          case['expected']['mock_option'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_multi_option,
          case['expected']['mock_multi_option'])

  def test_get_all_options(self):
    for case in PipelineOptionsTest.TEST_CASES:
      options = PipelineOptions(flags=case['flags'])
      self.assertDictContainsSubset(case['expected'], options.get_all_options())
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_flag,
          case['expected']['mock_flag'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_option,
          case['expected']['mock_option'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_multi_option,
          case['expected']['mock_multi_option'])

  # NOTE(review): method name contains a typo ("sublcalsses" should be
  # "subclasses"); left unchanged here to preserve the test identifier.
  def test_sublcalsses_of_pipeline_options_can_be_instantiated(self):
    for case in PipelineOptionsTest.TEST_CASES:
      mock_options = PipelineOptionsTest.MockOptions(flags=case['flags'])
      self.assertEqual(mock_options.mock_flag, case['expected']['mock_flag'])
      self.assertEqual(
          mock_options.mock_option, case['expected']['mock_option'])
      self.assertEqual(
          mock_options.mock_multi_option, case['expected']['mock_multi_option'])

  def test_views_can_be_constructed_from_pipeline_option_subclasses(self):
    for case in PipelineOptionsTest.TEST_CASES:
      fake_options = PipelineOptionsTest.FakeOptions(flags=case['flags'])
      mock_options = fake_options.view_as(PipelineOptionsTest.MockOptions)
      self.assertEqual(mock_options.mock_flag, case['expected']['mock_flag'])
      self.assertEqual(
          mock_options.mock_option, case['expected']['mock_option'])
      self.assertEqual(
          mock_options.mock_multi_option, case['expected']['mock_multi_option'])

  def test_views_do_not_expose_options_defined_by_other_views(self):
    flags = ['--mock_option=mock_value', '--fake_option=fake_value']
    options = PipelineOptions(flags)
    assert options.view_as(
        PipelineOptionsTest.MockOptions).mock_option == 'mock_value'
    assert options.view_as(
        PipelineOptionsTest.FakeOptions).fake_option == 'fake_value'
    assert options.view_as(PipelineOptionsTest.MockOptions).view_as(
        PipelineOptionsTest.FakeOptions).fake_option == 'fake_value'
    # Accessing an attribute that belongs to a different view must raise.
    self.assertRaises(
        AttributeError,
        lambda: options.view_as(PipelineOptionsTest.MockOptions).fake_option)
    self.assertRaises(
        AttributeError,
        lambda: options.view_as(PipelineOptionsTest.MockOptions).view_as(
            PipelineOptionsTest.FakeOptions).view_as(
                PipelineOptionsTest.MockOptions).fake_option)

  def test_from_dictionary(self):
    # Round-trip: parse flags -> dict -> PipelineOptions.from_dictionary.
    # NOTE(review): only the first assertion checks `options_from_dict`;
    # the remaining ones re-check the original `options` object — likely
    # intended to use `options_from_dict` as well. Verify and fix.
    for case in PipelineOptionsTest.TEST_CASES:
      options = PipelineOptions(flags=case['flags'])
      all_options_dict = options.get_all_options()
      options_from_dict = PipelineOptions.from_dictionary(all_options_dict)
      self.assertEqual(
          options_from_dict.view_as(PipelineOptionsTest.MockOptions).mock_flag,
          case['expected']['mock_flag'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_option,
          case['expected']['mock_option'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_multi_option,
          case['expected']['mock_multi_option'])
      self.assertEqual(
          options.view_as(PipelineOptionsTest.MockOptions).mock_json_option,
          case['expected'].get('mock_json_option', {}))

  def test_option_with_space(self):
    # Option names containing spaces survive parsing and dict round-trip.
    options = PipelineOptions(flags=['--option with space= value with space'])
    self.assertEqual(
        getattr(
            options.view_as(PipelineOptionsTest.MockOptions),
            'option with space'),
        ' value with space')
    options_from_dict = PipelineOptions.from_dictionary(
        options.get_all_options())
    self.assertEqual(
        getattr(
            options_from_dict.view_as(PipelineOptionsTest.MockOptions),
            'option with space'),
        ' value with space')

  # The following test_retain_unknown_options_* methods cover how flags not
  # declared by any options class are captured when
  # retain_unknown_options=True.
  def test_retain_unknown_options_binary_store_string(self):
    options = PipelineOptions(['--unknown_option', 'some_value'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['unknown_option'], 'some_value')

  def test_retain_unknown_options_binary_equals_store_string(self):
    options = PipelineOptions(['--unknown_option=some_value'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['unknown_option'], 'some_value')

  def test_retain_unknown_options_binary_multi_equals_store_string(self):
    # Only the first '=' separates name from value; later '=' are literal.
    options = PipelineOptions(['--unknown_option=expr = "2 + 2 = 5"'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['unknown_option'], 'expr = "2 + 2 = 5"')

  def test_retain_unknown_options_binary_single_dash_store_string(self):
    options = PipelineOptions(['-i', 'some_value'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['i'], 'some_value')

  def test_retain_unknown_options_unary_store_true(self):
    # A bare unknown flag with no value is stored as True.
    options = PipelineOptions(['--unknown_option'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['unknown_option'], True)

  def test_retain_unknown_options_consecutive_unary_store_true(self):
    options = PipelineOptions(['--option_foo', '--option_bar'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['option_foo'], True)
    self.assertEqual(result['option_bar'], True)

  def test_retain_unknown_options_unary_single_dash_store_true(self):
    options = PipelineOptions(['-i'])
    result = options.get_all_options(retain_unknown_options=True)
    self.assertEqual(result['i'], True)

  def test_retain_unknown_options_unary_missing_prefix(self):
    # Arguments without a dash prefix cannot be retained; argparse exits.
    options = PipelineOptions(['bad_option'])
    with self.assertRaises(SystemExit):
      options.get_all_options(retain_unknown_options=True)

  def test_override_options(self):
    # Setting an attribute on a view mutates the shared underlying options.
    base_flags = ['--num_workers', '5']
    options = PipelineOptions(base_flags)
    self.assertEqual(options.get_all_options()['num_workers'], 5)
    self.assertEqual(options.get_all_options()['mock_flag'], False)
    options.view_as(PipelineOptionsTest.MockOptions).mock_flag = True
    self.assertEqual(options.get_all_options()['num_workers'], 5)
    self.assertTrue(options.get_all_options()['mock_flag'])

  def test_override_init_options(self):
    # Keyword arguments to the constructor override parsed defaults.
    base_flags = ['--num_workers', '5']
    options = PipelineOptions(base_flags, mock_flag=True)
    self.assertEqual(options.get_all_options()['num_workers'], 5)
    self.assertEqual(options.get_all_options()['mock_flag'], True)

  def test_invalid_override_init_options(self):
    # Unrecognized constructor kwargs are silently ignored.
    base_flags = ['--num_workers', '5']
    options = PipelineOptions(base_flags, mock_invalid_flag=True)
    self.assertEqual(options.get_all_options()['num_workers'], 5)
    self.assertEqual(options.get_all_options()['mock_flag'], False)

  def test_experiments(self):
    # Both the singular and plural flag spellings accumulate experiments.
    options = PipelineOptions(['--experiment', 'abc', '--experiment', 'def'])
    self.assertEqual(
        sorted(options.get_all_options()['experiments']), ['abc', 'def'])
    options = PipelineOptions(['--experiments', 'abc', '--experiments', 'def'])
    self.assertEqual(
        sorted(options.get_all_options()['experiments']), ['abc', 'def'])
    options = PipelineOptions(flags=[''])
    self.assertEqual(options.get_all_options()['experiments'], None)

  def test_worker_options(self):
    # Both the plain and 'worker_'-prefixed aliases populate WorkerOptions.
    options = PipelineOptions(['--machine_type', 'abc', '--disk_type', 'def'])
    worker_options = options.view_as(WorkerOptions)
    self.assertEqual(worker_options.machine_type, 'abc')
    self.assertEqual(worker_options.disk_type, 'def')
    options = PipelineOptions(
        ['--worker_machine_type', 'abc', '--worker_disk_type', 'def'])
    worker_options = options.view_as(WorkerOptions)
    self.assertEqual(worker_options.machine_type, 'abc')
    self.assertEqual(worker_options.disk_type, 'def')

  def test_option_modifications_are_shared_between_views(self):
    pipeline_options = PipelineOptions([
        '--mock_option',
        'value',
        '--mock_flag',
        '--mock_multi_option',
        'value1',
        '--mock_multi_option',
        'value2',
    ])
    mock_options = PipelineOptionsTest.MockOptions([
        '--mock_option',
        'value',
        '--mock_flag',
        '--mock_multi_option',
        'value1',
        '--mock_multi_option',
        'value2',
    ])
    for options in [pipeline_options, mock_options]:
      # Mutations through one view must be visible through every other
      # view of the same options, including views created afterwards.
      view1 = options.view_as(PipelineOptionsTest.MockOptions)
      view2 = options.view_as(PipelineOptionsTest.MockOptions)
      view1.mock_option = 'new_value'
      view1.mock_flag = False
      view1.mock_multi_option.append('value3')
      view3 = options.view_as(PipelineOptionsTest.MockOptions)
      view4 = view1.view_as(PipelineOptionsTest.MockOptions)
      view5 = options.view_as(TypeOptions).view_as(
          PipelineOptionsTest.MockOptions)
      for view in [view1, view2, view3, view4, view5]:
        self.assertEqual('new_value', view.mock_option)
        self.assertFalse(view.mock_flag)
        self.assertEqual(['value1', 'value2', 'value3'], view.mock_multi_option)

  def test_uninitialized_option_modifications_are_shared_between_views(self):
    # Same sharing semantics when no flags were supplied at construction.
    options = PipelineOptions([])
    view1 = options.view_as(PipelineOptionsTest.MockOptions)
    view2 = options.view_as(PipelineOptionsTest.MockOptions)
    view1.mock_option = 'some_value'
    view1.mock_flag = False
    view1.mock_multi_option = ['value1', 'value2']
    view3 = options.view_as(PipelineOptionsTest.MockOptions)
    view4 = view1.view_as(PipelineOptionsTest.MockOptions)
    view5 = options.view_as(TypeOptions).view_as(
        PipelineOptionsTest.MockOptions)
    for view in [view1, view2, view3, view4, view5]:
      self.assertEqual('some_value', view.mock_option)
      self.assertFalse(view.mock_flag)
      self.assertEqual(['value1', 'value2'], view.mock_multi_option)

  def test_extra_package(self):
    # Singular and plural spellings of --extra_package(s) accumulate.
    options = PipelineOptions([
        '--extra_package',
        'abc',
        '--extra_packages',
        'def',
        '--extra_packages',
        'ghi'
    ])
    self.assertEqual(
        sorted(options.get_all_options()['extra_packages']),
        ['abc', 'def', 'ghi'])
    options = PipelineOptions(flags=[''])
    self.assertEqual(options.get_all_options()['extra_packages'], None)

  def test_dataflow_job_file(self):
    options = PipelineOptions(['--dataflow_job_file', 'abc'])
    self.assertEqual(options.get_all_options()['dataflow_job_file'], 'abc')
    options = PipelineOptions(flags=[''])
    self.assertEqual(options.get_all_options()['dataflow_job_file'], None)

  def test_template_location(self):
    options = PipelineOptions(['--template_location', 'abc'])
    self.assertEqual(options.get_all_options()['template_location'], 'abc')
    options = PipelineOptions(flags=[''])
    self.assertEqual(options.get_all_options()['template_location'], None)

  def test_redefine_options(self):
    # Defining two identically-named options subclasses with identical
    # arguments must not break subsequent parsing.
    class TestRedefinedOptions(PipelineOptions):  # pylint: disable=unused-variable
      @classmethod
      def _add_argparse_args(cls, parser):
        parser.add_argument('--redefined_flag', action='store_true')

    class TestRedefinedOptions(PipelineOptions):
      @classmethod
      def _add_argparse_args(cls, parser):
        parser.add_argument('--redefined_flag', action='store_true')

    options = PipelineOptions(['--redefined_flag'])
    self.assertTrue(options.get_all_options()['redefined_flag'])

  # TODO(BEAM-1319): Require unique names only within a test.
  # For now, <file name acronym>_vp_arg<number> will be the convention
  # to name value-provider arguments in tests, as opposed to
  # <file name acronym>_non_vp_arg<number> for non-value-provider arguments.
  # The number will grow per file as tests are added.
  def test_value_provider_options(self):
    class UserOptions(PipelineOptions):
      @classmethod
      def _add_argparse_args(cls, parser):
        parser.add_value_provider_argument(
            '--pot_vp_arg1', help='This flag is a value provider')

        parser.add_value_provider_argument('--pot_vp_arg2', default=1, type=int)

        parser.add_argument('--pot_non_vp_arg1', default=1, type=int)

    # Provide values: if not provided, the option becomes of the type runtime vp
    options = UserOptions(['--pot_vp_arg1', 'hello'])
    self.assertIsInstance(options.pot_vp_arg1, StaticValueProvider)
    self.assertIsInstance(options.pot_vp_arg2, RuntimeValueProvider)
    self.assertIsInstance(options.pot_non_vp_arg1, int)

    # Values can be overwritten
    options = UserOptions(
        pot_vp_arg1=5,
        pot_vp_arg2=StaticValueProvider(value_type=str, value='bye'),
        pot_non_vp_arg1=RuntimeValueProvider(
            option_name='foo', value_type=int, default_value=10))
    self.assertEqual(options.pot_vp_arg1, 5)
    self.assertTrue(
        options.pot_vp_arg2.is_accessible(),
        '%s is not accessible' % options.pot_vp_arg2)
    self.assertEqual(options.pot_vp_arg2.get(), 'bye')
    self.assertFalse(options.pot_non_vp_arg1.is_accessible())

    with self.assertRaises(RuntimeError):
      options.pot_non_vp_arg1.get()

  # Converts extra arguments to list value.
  def test_extra_args(self):
    options = PipelineOptions([
        '--extra_arg',
        'val1',
        '--extra_arg',
        'val2',
        '--extra_arg=val3',
        '--unknown_arg',
        'val4'
    ])

    def add_extra_options(parser):
      parser.add_argument("--extra_arg", action='append')

    self.assertEqual(
        options.get_all_options(
            add_extra_args_fn=add_extra_options)['extra_arg'],
        ['val1', 'val2', 'val3'])

  # The argparse package by default tries to autocomplete option names. This
  # results in an "ambiguous option" error from argparse when an unknown option
  # matching multiple known ones are used. This tests that we suppress this
  # error.
  def test_unknown_option_prefix(self):
    # Test that the "ambiguous option" error is suppressed.
    options = PipelineOptions(['--profi', 'val1'])
    options.view_as(ProfilingOptions)

    # Test that valid errors are not suppressed.
    with self.assertRaises(SystemExit):
      # Invalid option choice.
      options = PipelineOptions(['--type_check_strictness', 'blahblah'])
      options.view_as(TypeOptions)

  def test_add_experiment(self):
    options = PipelineOptions([])
    options.view_as(DebugOptions).add_experiment('new_experiment')
    self.assertEqual(['new_experiment'],
                     options.view_as(DebugOptions).experiments)

  def test_add_experiment_preserves_existing_experiments(self):
    options = PipelineOptions(['--experiment=existing_experiment'])
    options.view_as(DebugOptions).add_experiment('new_experiment')
    self.assertEqual(['existing_experiment', 'new_experiment'],
                     options.view_as(DebugOptions).experiments)

  def test_lookup_experiments(self):
    # lookup_experiment supports bare flags (-> True), key=value pairs,
    # and values that themselves contain '=' characters.
    options = PipelineOptions([
        '--experiment=existing_experiment',
        '--experiment',
        'key=value',
        '--experiment',
        'master_key=k1=v1,k2=v2',
    ])
    debug_options = options.view_as(DebugOptions)
    self.assertEqual(
        'default_value',
        debug_options.lookup_experiment('nonexistent', 'default_value'))
    self.assertEqual(
        'value', debug_options.lookup_experiment('key', 'default_value'))
    self.assertEqual(
        'k1=v1,k2=v2', debug_options.lookup_experiment('master_key'))
    self.assertEqual(
        True, debug_options.lookup_experiment('existing_experiment'))

  def test_transform_name_mapping(self):
    # The mapping is supplied as a JSON string on the command line.
    options = PipelineOptions(['--transform_name_mapping={\"from\":\"to\"}'])
    mapping = options.view_as(GoogleCloudOptions).transform_name_mapping
    self.assertEqual(mapping['from'], 'to')

  def test_dataflow_service_options(self):
    # Singular and plural spellings of --dataflow_service_option(s)
    # accumulate into the same list.
    options = PipelineOptions([
        '--dataflow_service_option',
        'whizz=bang',
        '--dataflow_service_option',
        'beep=boop'
    ])
    self.assertEqual(
        sorted(options.get_all_options()['dataflow_service_options']),
        ['beep=boop', 'whizz=bang'])

    options = PipelineOptions([
        '--dataflow_service_options',
        'whizz=bang',
        '--dataflow_service_options',
        'beep=boop'
    ])
    self.assertEqual(
        sorted(options.get_all_options()['dataflow_service_options']),
        ['beep=boop', 'whizz=bang'])

    options = PipelineOptions(flags=[''])
    self.assertEqual(
        options.get_all_options()['dataflow_service_options'], None)
if __name__ == '__main__':
  # Surface INFO-level logs from the options machinery while running tests.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| |
from typing import Optional
from lxml import etree
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
ExpectationConfiguration,
)
from great_expectations.expectations.metrics.import_manager import F, sparktypes
from great_expectations.expectations.metrics.map_metric import (
ColumnMapMetricProvider,
column_condition_partial,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
try:
import sqlalchemy as sa
except ImportError:
pass
class ColumnValuesMatchXmlSchema(ColumnMapMetricProvider):
    """Map metric provider: for each value in a column, whether it parses
    as XML and validates against a caller-supplied XML schema.

    The schema arrives as the string value key ``xml_schema``; it is
    compiled once per batch and then applied per value. Schema compile
    errors and per-value parse errors propagate to the caller unchanged
    (the previous implementation wrapped these calls in try/except blocks
    that only re-raised, which was a no-op and has been removed).
    """

    condition_metric_name = "column_values.match_xml_schema"
    condition_value_keys = ("xml_schema",)

    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, xml_schema, format, **kwargs):
        # `format` is unused here; presumably kept for signature parity
        # with how the decorator invokes this method — TODO confirm.
        # Compile the schema once for the whole column.
        xmlschema_doc = etree.fromstring(xml_schema)
        xmlschema = etree.XMLSchema(xmlschema_doc)

        def matches_xml_schema(val):
            # Calling an lxml XMLSchema object validates the document and
            # returns a bool. NOTE(review): unlike the Spark path below,
            # None values are not guarded here and will raise on parse.
            xml_doc = etree.fromstring(val)
            return xmlschema(xml_doc)

        return column.map(matches_xml_schema)

    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, xml_schema, **kwargs):
        # Compile the schema once; the compiled object is captured by the
        # UDF closure below.
        xmlschema_doc = etree.fromstring(xml_schema)
        xmlschema = etree.XMLSchema(xmlschema_doc)

        def matches_xml_schema(val):
            # Null cells can occur in Spark columns; treat them as
            # non-matching rather than letting the parser raise.
            if val is None:
                return False
            xml_doc = etree.fromstring(val)
            return xmlschema(xml_doc)

        matches_xml_schema_udf = F.udf(matches_xml_schema, sparktypes.BooleanType())

        return matches_xml_schema_udf(column)
class ExpectColumnValuesToMatchXmlSchema(ColumnMapExpectation):
    """Expect column entries to be XML documents matching a given [XMLSchema](https://en.wikipedia.org/wiki/XML_schema).
    expect_column_values_to_match_xml_schema is a \
    :func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
    .column_map_expectation>`.
    Args:
        column (str): \
            The column name.
        xml_schema (str): \
            The XMLSchema name.
    Keyword Args:
        mostly (None or a float between 0 and 1): \
            Return `"success": True` if at least mostly fraction of values match the expectation. \
            For more detail, see :ref:`mostly`.
    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.
    Returns:
        An ExpectationSuiteValidationResult
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    See Also:
        :func:`expect_column_values_to_be_xml_parseable \
        <great_expectations.execution_engine.execution_engine.ExecutionEngine
        .expect_column_values_to_be_xml_parseable>`
        The `XMLSchema docs <https://www.w3.org/XML/Schema>`_.
    """

    # These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
    examples = [{"data": {}, "tests": []}]
    # This dictionary contains metadata for display in the public gallery
    library_metadata = {
        "maturity": "experimental",  # "experimental", "beta", or "production"
        "tags": ["xml", "glam"],
        "contributors": ["@mielvds"],
        "package": "experimental_expectations",
        "requirements": ["lxml"],
    }
    map_metric = "column_values.match_xml_schema"
    success_keys = (
        "xml_schema",
        "mostly",
    )
    default_kwarg_values = {
        "row_condition": None,
        "condition_parser": None,  # we expect this to be explicitly set whenever a row_condition is passed
        "mostly": 1,
        "result_format": "BASIC",
        "include_config": True,
        "catch_exceptions": True,
    }

    def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
        """Validate the expectation configuration; base-class checks only."""
        super().validate_configuration(configuration)
        return True

    @classmethod
    @renderer(renderer_type="renderer.prescriptive")
    @render_evaluation_parameter_string
    def _prescriptive_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs
    ):
        """Render a human-readable sentence describing this expectation."""
        runtime_configuration = runtime_configuration or {}
        include_column_name = runtime_configuration.get("include_column_name", True)
        include_column_name = (
            include_column_name if include_column_name is not None else True
        )
        params = substitute_none_for_missing(
            configuration.kwargs,
            ["column", "mostly", "xml_schema", "row_condition", "condition_parser"],
        )
        if not params.get("xml_schema"):
            template_str = "values must match a XML Schema but none was specified."
        else:
            xml_schema = params.get("xml_schema")
            # BUG FIX: the schema arrives as a *string*, but etree.tostring()
            # expects an Element and returns *bytes*, so the original
            # "<pre>" + etree.tostring(...) raised TypeError.  Parse and
            # re-serialize to keep the pretty-printing intent, falling back
            # to the raw string for schemas lxml cannot round-trip.
            try:
                formatted = etree.tostring(
                    etree.fromstring(xml_schema), pretty_print=True
                ).decode("utf-8")
            except (etree.XMLSyntaxError, ValueError, TypeError):
                formatted = str(xml_schema)
            params["formatted_xml"] = "<pre>" + formatted + "</pre>"
            if params["mostly"] is not None:
                params["mostly_pct"] = num_to_str(
                    params["mostly"] * 100, precision=15, no_scientific=True
                )
                template_str = "values must match the following XML Schema, at least $mostly_pct % of the time: $formatted_xml"
            else:
                template_str = (
                    "values must match the following XML Schema: $formatted_xml"
                )
        if include_column_name:
            template_str = "$column " + template_str
        if params["row_condition"] is not None:
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(params["row_condition"])
            template_str = conditional_template_str + ", then " + template_str
            params.update(conditional_params)
        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": {"params": {"formatted_xml": {"classes": []}}},
                    },
                }
            )
        ]
# When run as a script, print the gallery diagnostic checklist for this
# Expectation (great_expectations' self-check harness for custom expectations).
if __name__ == "__main__":
    ExpectColumnValuesToMatchXmlSchema().print_diagnostic_checklist()
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``reach`` field to the ``Race`` model.

    ``reach`` is a PositiveSmallIntegerField defaulting to 5 (feet), matching
    the existing ``space`` field's convention on the same model.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'Race.reach'
        db.add_column('dnd_race', 'reach', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5), keep_default=False)

    def backwards(self, orm):
        """Revert the migration."""
        # Deleting field 'Race.reach'
        db.delete_column('dnd_race', 'reach')

    # Frozen ORM snapshot auto-generated by South at migration-creation time.
    # Do not edit by hand; it reflects the model state *after* forwards().
    models = {
        'dnd.characterclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'CharacterClass'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.characterclassvariant': {
            'Meta': {'unique_together': "(('character_class', 'rulebook'),)", 'object_name': 'CharacterClassVariant'},
            'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'class_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'class_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'skill_points': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'dnd.dndedition': {
            'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
            'core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
        },
        'dnd.domain': {
            'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.feat': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
            'benefit': ('django.db.models.fields.TextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
            'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'})
        },
        'dnd.featcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.featrequiresfeat': {
            'Meta': {'object_name': 'FeatRequiresFeat'},
            'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
            'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
        },
        'dnd.featrequiresskill': {
            'Meta': {'object_name': 'FeatRequiresSkill'},
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
        },
        'dnd.featspecialfeatprerequisite': {
            'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
            'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
        },
        'dnd.race': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Race'},
            'base_land_speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '30'}),
            'cha': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'con': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'dex': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'int': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
            'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            'str': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'wis': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        'dnd.racefavoredcharacterclass': {
            'Meta': {'object_name': 'RaceFavoredCharacterClass'},
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favored_classes'", 'to': "orm['dnd.Race']"})
        },
        'dnd.racesize': {
            'Meta': {'ordering': "['order']", 'object_name': 'RaceSize'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
        },
        'dnd.rulebook': {
            'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
            'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
        },
        'dnd.skill': {
            'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
            'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'required_by_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'through': "orm['dnd.FeatRequiresSkill']", 'symmetrical': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
            'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'dnd.specialfeatprerequisite': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'dnd.spell': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
            'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
            'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
            'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'dnd.spellclasslevel': {
            'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
        },
        'dnd.spelldescriptor': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.spelldomainlevel': {
            'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
        },
        'dnd.spellschool': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.spellsubschool': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.textfeatprerequisite': {
            'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        }
    }

    # Apps whose models are fully described by the snapshot above.
    complete_apps = ['dnd']
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for CorrelationCholesky bijector."""
import itertools
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.distributions import beta
from tensorflow_probability.python.distributions import cholesky_lkj
from tensorflow_probability.python.distributions import lkj
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.mcmc import hmc
from tensorflow_probability.python.mcmc import sample
from tensorflow_probability.python.mcmc import transformed_kernel
# Bijector for converting the image of CorrelationCholesky bijector to
# unconstrained space; by only considering the strictly lower triangular entries
# of the output matrices.
class OutputToUnconstrained(tfb.Bijector):
  """Maps CorrelationCholesky outputs to an unconstrained vector.

  Forward keeps only the strictly-lower-triangular entries of the input
  matrices (packed via the inverse of FillTriangular); the Jacobian of that
  selection is trivially the identity on the retained entries.
  """

  def __init__(self, name='output_to_unconstrained'):
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      super(OutputToUnconstrained, self).__init__(
          validate_args=True,
          forward_min_event_ndims=2,
          inverse_min_event_ndims=1,
          parameters=parameters,
          name=name)

  def _forward(self, x):
    matrix = tf.convert_to_tensor(x)
    dim = matrix.shape[-1]
    # Dropping the first row and the last column, then masking to the lower
    # band, leaves exactly the strictly-lower-triangular entries.
    trimmed = matrix[..., 1:, :-1]
    strict_lower = tf.linalg.band_part(trimmed, num_lower=dim - 1, num_upper=0)
    return tfb.FillTriangular().inverse(strict_lower)

  def _inverse_log_det_jacobian(self, y):
    # Entry selection has unit Jacobian: log|det J| == 0 for every batch member.
    return tf.zeros_like(y[..., 0])
@test_util.test_all_tf_execution_regimes
class CorrelationCholeskyBijectorTest(test_util.TestCase):
  """Tests the correctness of the CorrelationCholesky bijector."""

  def testBijector(self):
    """Round-trips a single unconstrained vector and checks hand-computed
    forward/inverse values and log-det-Jacobians for a 4x4 output."""
    x = np.float32(np.array([7., -5., 5., 1., 2., -2.]))
    y = np.float32(
        np.array([[1., 0., 0., 0.], [0.707107, 0.707107, 0., 0.],
                  [-0.666667, 0.666667, 0.333333, 0.], [0.5, -0.5, 0.7, 0.1]]))
    b = tfb.CorrelationCholesky()
    y_ = self.evaluate(b.forward(x))
    self.assertAllClose(y, y_, atol=1e-5, rtol=1e-5)
    x_ = self.evaluate(b.inverse(y))
    self.assertAllClose(x, x_, atol=1e-5, rtol=1e-5)
    # Hand-derived closed form for this input; ILDJ must be the exact negation.
    expected_fldj = -0.5 * np.sum([3, 4, 5] * np.log([2, 9, 100]))
    fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
    self.assertAllClose(expected_fldj, fldj)
    ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
    self.assertAllClose(-expected_fldj, ildj)

  def testBijectorBatch(self):
    """Same checks as testBijector but with a batch of two inputs."""
    x = np.float32([[7., -5., 5., 1., 2., -2.], [1., 3., -5., 1., -4., 8.]])
    y = np.float32([
        [[1., 0., 0., 0.], [0.707107, 0.707107, 0., 0.],
         [-0.666667, 0.666667, 0.333333, 0.], [0.5, -0.5, 0.7, 0.1]],
        [[1., 0., 0., 0.], [0.707107, 0.707107, 0., 0.],
         [0.888889, -0.444444, 0.111111, 0.],
         [-0.833333, 0.5, 0.166667, 0.166667]],
    ])
    b = tfb.CorrelationCholesky()
    y_ = self.evaluate(b.forward(x))
    self.assertAllClose(y, y_, atol=1e-5, rtol=1e-5)
    x_ = self.evaluate(b.inverse(y))
    self.assertAllClose(x, x_, atol=1e-5, rtol=1e-5)
    # Per-batch-member log-det-Jacobians (reduced over the event axis).
    expected_fldj = -0.5 * np.sum(
        [3, 4, 5] * np.log([[2, 9, 100], [2, 81, 36]]), axis=-1)
    fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
    self.assertAllClose(expected_fldj, fldj)
    ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
    self.assertAllClose(-expected_fldj, ildj)

  def testShape(self):
    """Checks static shapes, event-shape inference, and their tensor variants:
    a length-6 vector event maps to a 4x4 matrix event."""
    x_shape = tf.TensorShape([5, 4, 6])
    y_shape = tf.TensorShape([5, 4, 4, 4])
    b = tfb.CorrelationCholesky(validate_args=True)
    x = tf.ones(shape=x_shape, dtype=tf.float32)
    y_ = b.forward(x)
    self.assertAllEqual(
        tensorshape_util.as_list(y_.shape), tensorshape_util.as_list(y_shape))
    x_ = b.inverse(y_)
    self.assertAllEqual(
        tensorshape_util.as_list(x_.shape), tensorshape_util.as_list(x_shape))
    y_shape_ = b.forward_event_shape(x_shape)
    self.assertAllEqual(
        tensorshape_util.as_list(y_shape_), tensorshape_util.as_list(y_shape))
    x_shape_ = b.inverse_event_shape(y_shape)
    self.assertAllEqual(
        tensorshape_util.as_list(x_shape_), tensorshape_util.as_list(x_shape))
    y_shape_tensor = self.evaluate(
        b.forward_event_shape_tensor(tensorshape_util.as_list(x_shape)))
    self.assertAllEqual(y_shape_tensor, tensorshape_util.as_list(y_shape))
    x_shape_tensor = self.evaluate(
        b.inverse_event_shape_tensor(tensorshape_util.as_list(y_shape)))
    self.assertAllEqual(x_shape_tensor, tensorshape_util.as_list(x_shape))

  def testShapeError(self):
    """Invalid event shapes (non-triangular length, non-square matrix) must
    raise both statically and at graph execution time."""
    b = tfb.FillTriangular(validate_args=True)
    x_shape_bad = tf.TensorShape([5, 4, 7])
    with self.assertRaisesRegexp(ValueError, 'is not a triangular number'):
      b.forward_event_shape(x_shape_bad)
    with self.assertRaisesOpError('is not a triangular number'):
      self.evaluate(
          b.forward_event_shape_tensor(tensorshape_util.as_list(x_shape_bad)))
    y_shape_bad = tf.TensorShape([5, 4, 4, 3])
    with self.assertRaisesRegexp(ValueError, 'Matrix must be square'):
      b.inverse_event_shape(y_shape_bad)
    with self.assertRaisesOpError('Matrix must be square'):
      self.evaluate(
          b.inverse_event_shape_tensor(tensorshape_util.as_list(y_shape_bad)))

  @test_util.test_graph_mode_only
  @test_util.numpy_disable_gradient_test('HMC')
  def testSampleMarginals(self):
    # Verify that the marginals of the LKJ distribution are distributed
    # according to a (scaled) Beta distribution. The LKJ distributed samples are
    # obtained by sampling a CholeskyLKJ distribution using HMC and the
    # CorrelationCholesky bijector.
    dim = 4
    concentration = np.array(2.5, dtype=np.float64)
    beta_concentration = np.array(.5 * dim + concentration - 1, np.float64)
    beta_dist = beta.Beta(
        concentration0=beta_concentration, concentration1=beta_concentration)
    inner_kernel = hmc.HamiltonianMonteCarlo(
        target_log_prob_fn=cholesky_lkj.CholeskyLKJ(
            dimension=dim, concentration=concentration).log_prob,
        num_leapfrog_steps=3,
        step_size=0.3)
    kernel = transformed_kernel.TransformedTransitionKernel(
        inner_kernel=inner_kernel, bijector=tfb.CorrelationCholesky())
    num_chains = 10
    num_total_samples = 30000
    # Make sure that we have enough samples to catch a wrong sampler to within
    # a small enough discrepancy.
    self.assertLess(
        self.evaluate(
            st.min_num_samples_for_dkwm_cdf_test(
                discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
        num_total_samples)

    @tf.function  # Ensure that MCMC sampling is done efficiently.
    def sample_mcmc_chain():
      return sample.sample_chain(
          num_results=num_total_samples // num_chains,
          num_burnin_steps=1000,
          current_state=tf.eye(dim, batch_shape=[num_chains], dtype=tf.float64),
          trace_fn=lambda _, pkr: pkr.inner_results.is_accepted,
          kernel=kernel,
          seed=test_util.test_seed())

    # Draw samples from the HMC chains.
    chol_lkj_samples, is_accepted = self.evaluate(sample_mcmc_chain())
    # Ensure that the per-chain acceptance rate is high enough.
    self.assertAllGreater(np.mean(is_accepted, axis=0), 0.8)
    # Transform from Cholesky LKJ samples to LKJ samples.
    lkj_samples = tf.matmul(chol_lkj_samples, chol_lkj_samples, adjoint_b=True)
    lkj_samples = tf.reshape(lkj_samples, shape=[num_total_samples, dim, dim])
    # Only look at the entries strictly below the diagonal which is achieved by
    # the OutputToUnconstrained bijector. Also scale the marginals from the
    # range [-1,1] to [0,1].
    scaled_lkj_samples = .5 * (OutputToUnconstrained().forward(lkj_samples) + 1)
    # Each of the off-diagonal marginals should be distributed according to a
    # Beta distribution.
    for i in range(dim * (dim - 1) // 2):
      self.evaluate(
          st.assert_true_cdf_equal_by_dkwm(
              scaled_lkj_samples[..., i],
              cdf=beta_dist.cdf,
              false_fail_rate=1e-9))

  def testTheoreticalFldj(self):
    """Compares the analytic forward log-det-Jacobian against a numerically
    computed one on a grid of inputs."""
    bijector = tfb.CorrelationCholesky()
    x = np.linspace(-50, 50, num=30).reshape(5, 6).astype(np.float64)
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=2,
        rtol=1e-5)
    fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
    fldj_theoretical = bijector_test_util.get_fldj_theoretical(
        bijector,
        x,
        event_ndims=1,
        inverse_event_ndims=2,
        output_to_unconstrained=OutputToUnconstrained())
    self.assertAllClose(
        self.evaluate(fldj_theoretical),
        self.evaluate(fldj),
        atol=1e-5,
        rtol=1e-5)

  def testBijectorWithVariables(self):
    """Exercises the bijector when inputs and event_ndims are tf.Variables."""
    x_ = np.array([1.], dtype=np.float32)
    y_ = np.array([[1., 0.], [0.707107, 0.707107]], dtype=np.float32)
    x = tf.Variable(x_, dtype=tf.float32)
    y = tf.Variable(y_, dtype=tf.float32)
    forward_event_ndims = tf.Variable(1, dtype=tf.int32)
    inverse_event_ndims = tf.Variable(2, dtype=tf.int32)
    self.evaluate([
        v.initializer for v in (x, y, forward_event_ndims, inverse_event_ndims)
    ])
    bijector = tfb.CorrelationCholesky()
    self.assertAllClose(
        y_, self.evaluate(bijector.forward(x)), atol=1e-5, rtol=1e-5)
    self.assertAllClose(
        x_, self.evaluate(bijector.inverse(y)), atol=1e-5, rtol=1e-5)
    fldj = bijector.forward_log_det_jacobian(x, event_ndims=forward_event_ndims)
    self.assertAllClose(-3 * 0.5 * np.log(2), self.evaluate(fldj))
    ildj = bijector.inverse_log_det_jacobian(y, event_ndims=inverse_event_ndims)
    self.assertAllClose(3 * 0.5 * np.log(2), ildj)

  @parameterized.parameters(itertools.product([2, 3, 4, 5, 6, 7], [1., 2., 3.]))
  def testBijectiveWithLKJSamples(self, dimension, concentration):
    """Checks bijectivity on Cholesky-factor samples drawn from LKJ across a
    sweep of dimensions and concentrations."""
    bijector = tfb.CorrelationCholesky()
    lkj_dist = lkj.LKJ(
        dimension=dimension,
        concentration=np.float64(concentration),
        input_output_cholesky=True)
    batch_size = 10
    y = self.evaluate(
        lkj_dist.sample([batch_size], seed=test_util.test_seed()))
    x = self.evaluate(bijector.inverse(y))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=2,
        rtol=1e-5)

  @parameterized.parameters(itertools.product([2, 3, 4, 5, 6, 7], [1., 2., 3.]))
  @test_util.numpy_disable_gradient_test
  def testJacobianWithLKJSamples(self, dimension, concentration):
    """Checks the analytic FLDJ against the numerical one on LKJ Cholesky
    samples across a sweep of dimensions and concentrations."""
    bijector = tfb.CorrelationCholesky()
    lkj_dist = lkj.LKJ(
        dimension=dimension,
        concentration=np.float64(concentration),
        input_output_cholesky=True)
    batch_size = 10
    y = self.evaluate(lkj_dist.sample([batch_size], seed=test_util.test_seed()))
    x = self.evaluate(bijector.inverse(y))
    fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
    fldj_theoretical = bijector_test_util.get_fldj_theoretical(
        bijector,
        x,
        event_ndims=1,
        inverse_event_ndims=2,
        output_to_unconstrained=OutputToUnconstrained())
    self.assertAllClose(
        self.evaluate(fldj_theoretical),
        self.evaluate(fldj),
        atol=1e-5,
        rtol=1e-5)
# Standard TFP test entry point: runs the test suite when executed directly.
if __name__ == '__main__':
  test_util.main()
| |
import unittest
import numpy
import pytest
from chainerx_tests import array_utils
import chainer.testing
import chainerx
import chainerx.testing
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices', [
    # empty indexing
    ((), ()),
    ((3,), ()),
    ((2, 2, 2), ()),
    # integer indexing - non-tuple indexing
    ((3,), 0),
    ((3,), 1),
    ((3,), 2),
    ((3,), -1),
    ((2, 3), 0),
    ((2, 3), 1),
    ((2, 3), numpy.int8(-1)),
    ((2, 3), numpy.int32(0)),
    ((2, 3), numpy.uint64(1)),
    # integer indexing - tuple indexing
    ((3,), (0,)),
    ((3,), (1,)),
    ((3,), (2,)),
    ((3,), (-1,)),
    ((2, 3), (0,)),
    ((2, 3), (1,)),
    ((2, 3), (0, 0)),
    ((2, 3), (1, 1)),
    ((2, 3, 4), (0, -2, 3)),
    ((2, 3, 4), (1, 0)),
    # slice indexing - non-tuple indexing
    ((3,), slice(None)),
    ((3,), slice(2)),
    ((3,), slice(0, 3)),
    ((3,), slice(0, 2)),
    ((3,), slice(1, 3)),
    ((3,), slice(0, 0)),
    ((3,), slice(0, 1)),
    ((3,), slice(2, 0, -1)),
    ((3,), slice(-2, -1)),
    ((3,), slice(2, None, -1)),
    ((3,), slice(None, 0, 1)),
    ((3,), slice(None, -1, -1)),
    ((3,), slice(None, -2, -1)),
    ((6,), slice(0, 6, 2)),
    ((6,), slice(1, 6, 2)),
    ((6,), slice(5, None, -2)),
    # slice indexing - tuple indexing
    ((3,), (slice(None),)),
    ((3,), (slice(2),)),
    ((3,), (slice(0, 3),)),
    ((3,), (slice(0, 2),)),
    ((3,), (slice(1, 3),)),
    ((3,), (slice(0, 0),)),
    ((3,), (slice(0, 1),)),
    ((3,), (slice(2, 0, -1),)),
    ((3,), (slice(-2, -1),)),
    ((3,), (slice(2, None, -1),)),
    ((3,), (slice(None, 0, 1),)),
    ((3,), (slice(None, -1, -1),)),
    ((3,), (slice(None, -2, -1),)),
    ((6,), (slice(0, 6, 2),)),
    ((6,), (slice(1, 6, 2),)),
    ((6,), (slice(5, None, -2),)),
    # out-of-range / empty slices
    ((6,), (slice(50, 1, -1),)),
    ((6,), (slice(3, 3, 1),)),
    ((6,), (slice(3, 3, -2),)),
    ((6,), (slice(50, 50, 1),)),
    ((6,), (slice(50, 50, -2),)),
    ((6,), (slice(-50, -50, 1),)),
    ((6,), (slice(-50, -50, -2),)),
    ((2, 3), (slice(None), slice(None))),
    ((2, 3), (slice(1), slice(2))),
    ((2, 3), (slice(0, 2), slice(0, 3))),
    ((2, 3), (slice(0, 2), slice(0, -1))),
    ((2, 3), (slice(0, None, -1), slice(2, 3))),
    ((2, 3), (slice(0, None, None), slice(-2, 0, -1))),
    ((2, 3), (slice(1, 2), slice(0, 2))),
    ((2, 3), (slice(-2, None, -1), slice(0, 3))),
    ((2, 3), (slice(-2, None, -1), slice(-3, None, -1))),
    ((2, 3), (slice(-2, None, -1), slice(None, None, -2))),
    ((2, 3), (slice(1, 2), slice(None, None, 1))),
    ((2, 3), (slice(1, 2), slice(None, None, 2))),
    ((2, 3, 4), (slice(1), slice(-2, 3), slice(1, None, -1))),
    # newaxis indexing - non-tuple indexing
    ((), chainerx.newaxis),
    ((3,), chainerx.newaxis),
    # newaxis indexing - tuple indexing
    ((), (chainerx.newaxis,)),
    ((3,), (chainerx.newaxis,)),
    ((2, 3), (chainerx.newaxis, chainerx.newaxis)),
    # mixed indexing - tuple indexing
    ((2, 3), (0, slice(1, 3))),
    ((4, 3), (slice(1, 3), 1)),
    ((2, 3, 4), (1, slice(2,), slice(1, 3))),
    ((2, 3), (1, chainerx.newaxis, slice(1, 3))),
    ((2, 3, 4), (slice(0, 1), slice(1, 2), slice(1, 3), chainerx.newaxis)),
    ((2, 3, 4), (slice(0, 1), slice(1, 2), chainerx.newaxis, slice(1, 3))),
    ((2, 3, 4), (slice(0, 1), chainerx.newaxis, slice(1, 2), slice(1, 3))),
    ((2, 3, 4), (chainerx.newaxis, slice(0, 1), slice(1, 2), slice(1, 3))),
    ((2, 3, 4),
     (1, slice(2,), chainerx.newaxis, slice(1, 3), chainerx.newaxis)),
])
class TestGetitem(op_utils.NumpyOpTest):
    """Checks `chainerx` basic indexing agrees with numpy's `__getitem__`."""

    # TODO(niboshi): Remove this
    check_numpy_strides_compliance = False

    def generate_inputs(self):
        # A single random float32 array of the parameterized shape.
        x = numpy.random.uniform(-1, 1, self.shape).astype('float32')
        return x,

    def forward_xp(self, inputs, xp):
        # Same indexing expression is applied under numpy and chainerx.
        x, = inputs
        y = x[self.indices]
        return y,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_getitem_zero_sized_offsets(device):
    """Slicing an already-empty view must not advance its offset."""
    base = chainerx.arange(6)
    empty = base[3:3]
    # Pre-conditions: the slice is empty but carries a nonzero offset.
    assert empty.size == 0
    assert empty.offset == 12
    # Re-slicing the empty view keeps both size and offset unchanged.
    resliced = empty[2:]
    assert resliced.size == 0
    assert resliced.offset == empty.offset
@op_utils.op_test(['native:0', 'cuda:0'])
# TODO(hvy): Add cases where axis=None, when supported.
@chainer.testing.parameterize_pytest('shape,indices,axis', [
    # Valid parameters
    ((3,), [0], 0),
    ((3,), [1], 0),
    ((2, 3), [0], 0),
    ((2, 3), [0], 1),
    ((2, 3), [0], -1),
    ((2, 3), [1], 0),
    ((2, 3), [0, -1], 0),
    ((2, 3), [1, 0], 0),
    ((2, 3), [1, 2], 1),
    ((2, 3), [2, 1], 1),
    ((2, 3), [[0], [1]], 0),
    # Invalid: Axis out of bounds
    ((2, 3), [0], 2),
    ((2, 3), [0], -3),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
@chainer.testing.parameterize_pytest(
    'indices_type', ['list', 'numpy', 'xp'])
# TODO(niboshi): indices_dtype is ignored if indices_type == 'list', which is
# wasteful.
@chainer.testing.parameterize_pytest(
    'indices_dtype', chainerx.testing.integral_dtypes)
class TestTake(op_utils.NumpyOpTest):
    """Checks `take` (module function and method) against numpy."""

    check_numpy_strides_compliance = False
    # Out-of-bounds axes are expected to raise on both backends.
    forward_accept_errors = (chainerx.DimensionError, numpy.AxisError)

    def setup(self):
        # Casting a negative index to an unsigned dtype would wrap around,
        # turning the case into an unrelated out-of-bounds test; skip it.
        if (numpy.dtype(self.indices_dtype).kind == 'u'
                and (numpy.array(self.indices, 'int64') < 0).any()):
            raise unittest.SkipTest(
                'Indices underflows and index out of bounds cannot be tested.')

    def generate_inputs(self):
        a = numpy.random.uniform(-1, 1, self.shape).astype('float32')
        return a,

    def forward_xp(self, inputs, xp):
        indices = self.indices
        axis = self.axis
        indices_type = self.indices_type
        a, = inputs
        assert isinstance(indices, list)
        # Convert the index list into the parameterized container/dtype.
        if indices_type == 'list':
            pass
        elif indices_type == 'numpy':
            indices = numpy.array(indices).astype(self.indices_dtype)
        elif indices_type == 'xp':
            indices = xp.array(indices).astype(self.indices_dtype)
        else:
            assert False, indices_type
        if self.is_module:
            b = xp.take(a, indices, axis)
        else:
            b = a.take(indices, axis)
        return b,
def _random_condition(shape, dtype):
    """Return a `dtype` array mixing zero (falsy) and nonzero (truthy)
    elements, for use as a `where` condition.
    """
    size = int(numpy.prod(shape))
    # randint's upper bound is exclusive, so it must be 2 to produce both
    # 0 and 1; with (0, 1) every mask entry was 0 and the condition was
    # always all-False, so `where` never selected from both operands.
    mask = numpy.random.randint(0, 2, size).astype('bool_').reshape(shape)
    pos = array_utils.uniform(shape, dtype)
    pos[numpy.logical_not(pos)] = True  # All elements are True
    # Zero out the masked-off entries; unmasked entries stay truthy.
    return pos * mask
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'cond_shape,in_shapes': [
            # Same Shapes
            ((2, 3), ((2, 3), (2, 3))),
            # Broadcast Shapes
            ((2, 3), ((1, 3), (1, 3))),
            ((2, 3), ((2, 1), (1, 3))),
            ((2, 3), ((2, 3), (1, 3))),
            ((4, 5), ((3, 4, 1), (1, 5))),
            ((1, 4, 5), ((3, 4, 1), (3, 1, 5))),
        ],
        'cond_dtype': ['bool_'],
        'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
    })
    # Dtype combinations
    + chainer.testing.product({
        'cond_shape,in_shapes': [((2, 3), ((2, 3), (2, 3)))],
        'cond_dtype': chainerx.testing.all_dtypes,
        'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
    })
))
class TestWhere(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
    """Checks `where(condition, x, y)` with two array operands."""

    check_numpy_strides_compliance = False
    dodge_nondifferentiable = True
    input_lhs = 'random'
    input_rhs = 'random'

    def generate_inputs(self):
        # Generate a fresh condition alongside the base-class operands.
        self.condition = _random_condition(self.cond_shape, self.cond_dtype)
        return super().generate_inputs()

    def func(self, xp, x, y):
        condition = xp.array(self.condition)
        return xp.where(condition, x, y)
@chainerx.testing.numpy_chainerx_array_equal(
    accept_error=(
        chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('cond_shape,x_shape,y_shape', [
    ((2, 3), (3, 4), (2, 3)),
    ((2, 3), (2, 3), (3, 4)),
    ((2, 3), (1, 3), (2, 4))
])
def test_where_invalid_shapes(xp, cond_shape, x_shape, y_shape):
    """`where` with non-broadcastable operand shapes raises on both backends."""
    lhs = array_utils.create_dummy_ndarray(xp, x_shape, 'float32')
    rhs = array_utils.create_dummy_ndarray(xp, y_shape, 'float32')
    cond = array_utils.create_dummy_ndarray(xp, cond_shape, 'float32')
    return xp.where(cond, lhs, rhs)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
    # Special shapes
    chainer.testing.product({
        'cond_shape,shape': math_utils.shapes_combination_inplace_binary,
        'cond_dtype': ['bool_'],
        'in_dtypes,scalar_type,out_dtype': (
            dtype_utils.result_dtypes_array_scalar),
        'is_scalar_rhs': [True, False],
    })
    # Dtype combinations
    + chainer.testing.product({
        'cond_shape,shape': [((2, 3), (2, 3))],
        'cond_dtype': chainerx.testing.all_dtypes,
        'in_dtypes,scalar_type,out_dtype': (
            dtype_utils.result_dtypes_array_scalar),
        'is_scalar_rhs': [True, False],
    })
))
class TestWhereScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    """Checks `where` with one array operand and one Python scalar."""

    check_numpy_strides_compliance = False
    input = 'random'
    scalar_value = 3

    def generate_inputs(self):
        # Generate a fresh condition alongside the base-class operand.
        self.condition = _random_condition(self.cond_shape, self.cond_dtype)
        return super().generate_inputs()

    def func_scalar(self, xp, a, scalar):
        condition = xp.array(self.condition)
        # Exercise the scalar on both the true and the false branch.
        if self.is_scalar_rhs:
            return xp.where(condition, a, scalar)
        else:
            return xp.where(condition, scalar, a)
# ((x_type, y_type), expected chainerx result dtype) combinations for
# `where` called with two Python scalars.
_in_out_dtypes_where_scalar = [
    ((bool, bool), 'bool_'),
    ((bool, int), 'int32'),
    ((bool, float), 'float32'),
    ((int, bool), 'int32'),
    ((int, int), 'int32'),
    ((int, float), 'float32'),
    ((float, bool), 'float32'),
    ((float, int), 'float32'),
    ((float, float), 'float32'),
]
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('cond_shape', [(2, 3)])
@pytest.mark.parametrize('cond_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('in_types,out_dtype', _in_out_dtypes_where_scalar)
def test_where_scalar_scalar(xp, cond_shape, cond_dtype, in_types, out_dtype):
    """Checks `where(condition, scalar, scalar)` result values and dtype."""
    cond = xp.array(_random_condition(cond_shape, cond_dtype))
    x_type, y_type = in_types
    x = x_type(0)
    y = y_type(2)
    out = xp.where(cond, x, y)
    # Cast the numpy result so both backends are compared at out_dtype.
    return dtype_utils.cast_if_numpy_array(xp, out, out_dtype)
| |
import usb
import logging
################
# IBUDDY class
################
class BuddyDevice:
    """Driver for an i-Buddy USB figurine.

    Commands are accumulated as *disabled* bits in `finalMess` (a cleared
    bit enables a feature, a set bit disables it) and the whole byte is
    sent to the device with `pumpMessage`.
    """

    # USB control-transfer payloads: setup packet and the fixed message
    # header the device expects before the command byte.
    SETUP = (0x22, 0x09, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00)
    MESS = (0x55, 0x53, 0x42, 0x43, 0x00, 0x40, 0x02)

    LEFT = 0
    RIGHT = 1
    UP = 0
    DOWN = 1

    finalMess = 0xFF  # all bits set == every feature off
    battery = 0
    product = 0

    def __init__(self, battery, buddy_product):
        """Open the `battery`-th device matching USB product `buddy_product`."""
        try:
            self.dev = UsbDevice(0x1130, buddy_product, battery)
            self.dev.open()
            self.dev.handle.reset()
            self.resetMessage()
            self.pumpMessage()
            self.battery = battery
            self.product = buddy_product
        except NoBuddyException:
            # Re-raise the original exception (with its traceback) instead
            # of constructing a fresh, message-less one; the old
            # `except NoBuddyException, e:` form was also Python-2-only.
            raise

    # Commands are sent as disabled bits
    def setReverseBitValue(self, num, value):
        """Set logical bit `num` to `value` (stored inverted in finalMess)."""
        if value == 1:
            # Enable: clear the bit.
            self.finalMess = self.finalMess & (0xFF - (1 << num))
        elif value == 0:
            # Disable: set the bit.
            self.finalMess = self.finalMess | (1 << num)

    def getReverseBitValue(self, num):
        """Return True when logical bit `num` is enabled (i.e. cleared)."""
        return not ((self.finalMess >> num) & 1)

    def setHeadColor(self, red, green, blue):
        """Set the head RGB LED components (each 0 or 1)."""
        self.setReverseBitValue(4, red)
        self.setReverseBitValue(5, green)
        self.setReverseBitValue(6, blue)

    def setHeart(self, status):
        """Turn the heart LED on (truthy) or off."""
        self.setReverseBitValue(7, status)

    def pumpMessage(self):
        """Send the accumulated command byte to the device."""
        self.send(self.finalMess)

    def resetMessage(self):
        """Clear all pending commands (everything off)."""
        self.finalMess = 0xFF

    def flick(self, direction):
        """Flick the body toward LEFT or RIGHT."""
        if direction == self.RIGHT:
            self.setReverseBitValue(1, 1)
            self.setReverseBitValue(0, 0)
        elif direction == self.LEFT:
            self.setReverseBitValue(1, 0)
            self.setReverseBitValue(0, 1)

    def wing(self, direction):
        """Move the wings UP or DOWN."""
        if direction == self.UP:
            self.setReverseBitValue(3, 1)
            self.setReverseBitValue(2, 0)
        elif direction == self.DOWN:
            self.setReverseBitValue(3, 0)
            self.setReverseBitValue(2, 1)

    def getColors(self):
        """Return the (red, green, blue) head LED states as booleans."""
        return (self.getReverseBitValue(4), self.getReverseBitValue(5),
                self.getReverseBitValue(6))

    def getHeart(self):
        return self.getReverseBitValue(7)

    def getWing(self):
        return self.getReverseBitValue(2)

    def getDirection(self):
        return self.getReverseBitValue(1)

    def send(self, inp):
        """Write command byte `inp`; on USB error, reconnect and retry init."""
        try:
            self.dev.handle.controlMsg(0x21, 0x09, self.SETUP, 0x02, 0x01)
            self.dev.handle.controlMsg(0x21, 0x09, self.MESS + (inp,), 0x02, 0x01)
        except usb.USBError:
            # Device dropped off the bus: reopen and reinitialize.
            self.__init__(self.battery, self.product)
#####################
# USB class
######################
class UsbDevice:
    """Thin wrapper around a pyusb 0.x device.

    Scans all busses for the (vendor_id, product_id) pair, skipping the
    first `skip` matches so that several identical devices can be
    addressed individually.
    """

    def __init__(self, vendor_id, product_id, skip):
        """Locate the device; raises NoBuddyException when not found."""
        busses = usb.busses()
        self._log = logging.getLogger(self.__class__.__name__)
        self.handle = None
        count = 0
        for bus in busses:
            devices = bus.devices
            for dev in devices:
                if dev.idVendor == vendor_id and dev.idProduct == product_id:
                    if count == skip:
                        self._log.info("ibuddy found! vend: %s prod: %s",
                                       dev.idVendor, dev.idProduct)
                        self.dev = dev
                        self.conf = self.dev.configurations[0]
                        self.intf = self.conf.interfaces[0][0]
                        self.endpoints = []
                        for endpoint in self.intf.endpoints:
                            self.endpoints.append(endpoint)
                        self._log.info("endpoint")
                        return
                    count = count + 1
        raise NoBuddyException()

    def open(self):
        """Open the device, detach kernel HID drivers and claim interface 0."""
        self.handle = self.dev.open()
        # We need to detach HID interface. Catch only USB errors: the old
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        try:
            self.handle.detachKernelDriver(0)
            self.handle.detachKernelDriver(1)
        except usb.USBError:
            # Typically means no kernel driver was attached; safe to go on.
            self._log.info("Could not detach kernel driver")
        self.handle.setConfiguration(self.conf)
        self.handle.claimInterface(self.intf)
        self.handle.setAltInterface(self.intf)
class NoBuddyException(Exception):
    """Raised when no matching i-Buddy device can be found on the USB bus."""
if __name__ == "__main__":
    # Running Test: cycle the heart LED, head colours, flicks and wing
    # motions twice, then restore the idle state.
    from time import sleep
    b = BuddyDevice(0, 5)
    # Each step: (heart on/off or None, (r, g, b) or None,
    #             ("flick"/"wing", direction) or None, delay seconds).
    demo_steps = [
        (True, None, None, 0.1),
        (False, None, None, 0.2),
        (True, (1, 1, 0), ("flick", b.RIGHT), 0.2),
        (False, (0, 1, 1), ("flick", b.LEFT), 0.4),
        (True, (1, 1, 1), ("wing", b.UP), 0.1),
        (None, (1, 0, 0), ("wing", b.DOWN), 0.1),
        (None, (0, 1, 0), ("wing", b.UP), 0.1),
        (False, (0, 0, 1), ("wing", b.DOWN), 0.2),
        (True, (1, 0, 1), None, 0.2),
        (False, None, None, 0.4),
    ]
    for _ in range(2):
        for heart, color, motion, delay in demo_steps:
            if heart is not None:
                b.setHeart(heart)
            if color is not None:
                b.setHeadColor(*color)
            if motion is not None:
                kind, direction = motion
                if kind == "flick":
                    b.flick(direction)
                else:
                    b.wing(direction)
            b.pumpMessage()
            sleep(delay)
    b.resetMessage()
    b.pumpMessage()
| |
"""High level parallel SNP and indel calling using multiple variant callers.
"""
import os
import collections
import copy
import pprint
import toolz as tz
from bcbio import bam, utils
from bcbio.cwl import cwlutils
from bcbio.distributed.split import (grouped_parallel_split_combine, parallel_split_combine)
from bcbio.distributed import multi as dmulti
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import region as pregion
from bcbio.variation import (gatk, gatkfilter, germline, multi,
phasing, ploidy, vcfutils, vfilter)
# ## Variant filtration -- shared functionality
def variant_filtration(call_file, ref_file, vrn_files, data, items):
    """Filter variant calls using Variant Quality Score Recalibration.

    Newer GATK with Haplotype calling has combined SNP/indel filtering.
    """
    caller = data["config"]["algorithm"].get("variantcaller")
    if "gvcf" not in dd.get_tools_on(data):
        call_file = ploidy.filter_vcf_by_sex(call_file, items)
    if caller == "freebayes":
        return vfilter.freebayes(call_file, ref_file, vrn_files, data)
    if caller == "platypus":
        return vfilter.platypus(call_file, data)
    if caller == "samtools":
        return vfilter.samtools(call_file, data)
    if caller in ("gatk", "gatk-haplotype", "haplotyper"):
        return gatkfilter.run(call_file, ref_file, vrn_files, data)
    # no additional filtration for callers that filter as part of call process
    return call_file
# ## High level functionality to run genotyping in parallel
def get_variantcaller(data, key="variantcaller", default=None, require_bam=True):
    """Retrieve the configured caller(s) from config['algorithm'][key].

    Returns None when an alignment BAM is required but missing.
    """
    if require_bam and not data.get("align_bam"):
        return None
    return tz.get_in(["config", "algorithm", key], data, default)
def combine_multiple_callers(samples):
    """Collapse together variant calls from multiple approaches into single data item with `variants`.

    Groups samples by (batch, work BAM), builds one `variants` entry per
    caller/jointcaller, and returns one representative item per group.
    """
    by_bam = collections.OrderedDict()
    for data in (x[0] for x in samples):
        work_bam = tz.get_in(("combine", "work_bam", "out"), data, data.get("align_bam"))
        jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
        variantcaller = get_variantcaller(data)
        key = (multi.get_batch_for_key(data), work_bam)
        if key not in by_bam:
            by_bam[key] = []
        by_bam[key].append((variantcaller, jointcaller, data))
    out = []
    for callgroup in by_bam.values():
        ready_calls = []
        for variantcaller, jointcaller, data in callgroup:
            if variantcaller:
                cur = data.get("vrn_file_plus", {})
                # With a joint caller present, the per-sample call is the
                # original (pre-joint) file and batch/validate info moves
                # to the joint entry below.
                cur.update({"variantcaller": variantcaller,
                            "vrn_file": data.get("vrn_file_orig") if jointcaller else data.get("vrn_file"),
                            "vrn_file_batch": data.get("vrn_file_batch") if not jointcaller else None,
                            "vrn_stats": data.get("vrn_stats"),
                            "validate": data.get("validate") if not jointcaller else None})
                if jointcaller:
                    cur["population"] = False
                ready_calls.append(cur)
            if jointcaller:
                ready_calls.append({"variantcaller": jointcaller,
                                    "vrn_file": data.get("vrn_file"),
                                    "vrn_file_batch": data.get("vrn_file_batch"),
                                    "validate": data.get("validate"),
                                    "do_upload": False})
            if not jointcaller and not variantcaller:
                # Externally pre-called input with no configured caller.
                ready_calls.append({"variantcaller": "precalled",
                                    "vrn_file": data.get("vrn_file"),
                                    "validate": data.get("validate"),
                                    "do_upload": False})
        final = callgroup[0][-1]
        def orig_variantcaller_order(x):
            # Restore the user-specified caller ordering; jointcallers
            # fall through to the jointcaller list.
            try:
                return final["config"]["algorithm"]["orig_variantcaller"].index(x["variantcaller"])
            except ValueError:
                return final["config"]["algorithm"]["orig_jointcaller"].index(x["variantcaller"])
        if len(ready_calls) > 1 and "orig_variantcaller" in final["config"]["algorithm"]:
            final["variants"] = sorted(ready_calls, key=orig_variantcaller_order)
            # Restore the original (list-valued) configuration keys.
            final["config"]["algorithm"]["variantcaller"] = final["config"]["algorithm"].pop("orig_variantcaller")
            if "orig_jointcaller" in final["config"]["algorithm"]:
                final["config"]["algorithm"]["jointcaller"] = final["config"]["algorithm"].pop("orig_jointcaller")
        else:
            final["variants"] = ready_calls
        # Remove intermediate bookkeeping now folded into `variants`.
        final.pop("vrn_file_batch", None)
        final.pop("vrn_file_orig", None)
        final.pop("vrn_file_plus", None)
        final.pop("vrn_stats", None)
        out.append([final])
    return out
def _split_by_ready_regions(ext, file_key, dir_ext_fn):
    """Organize splits based on regions generated by parallel_prep_region.

    Sort splits so largest regions analyzed first, avoiding potentially lagging runs
    at end.

    Returns a closure suitable for split/combine parallelization: given a
    data item it yields (combined output file, per-region work items).
    `file_key` is currently unused by the inner worker.
    """
    def _sort_by_size(region_w_bams):
        # Sort key: region length in bases.
        region, _ = region_w_bams
        _, start, end = region
        return end - start
    def _assign_bams_to_regions(data):
        """Ensure BAMs aligned with input regions, either global or individual.
        """
        for i, region in enumerate(data["region"]):
            work_bams = []
            for xs in data["region_bams"]:
                if len(xs) == 1:
                    # One shared BAM for all regions.
                    work_bams.append(xs[0])
                else:
                    # One BAM per region; pick the matching one.
                    work_bams.append(xs[i])
            for work_bam in work_bams:
                assert os.path.exists(work_bam), work_bam
            yield region, work_bams
    def _do_work(data):
        # Build the combined output path plus one output path per region;
        # items without a "region" key have no splitting work.
        if "region" in data:
            name = data["group"][0] if "group" in data else data["description"]
            out_dir = os.path.join(data["dirs"]["work"], dir_ext_fn(data))
            out_file = os.path.join(out_dir, "%s%s" % (name, ext))
            assert isinstance(data["region"], (list, tuple))
            out_parts = []
            for r, work_bams in sorted(_assign_bams_to_regions(data), key=_sort_by_size, reverse=True):
                out_region_dir = os.path.join(out_dir, r[0])
                out_region_file = os.path.join(out_region_dir,
                                               "%s-%s%s" % (name, pregion.to_safestr(r), ext))
                out_parts.append((r, work_bams, out_region_file))
            return out_file, out_parts
        else:
            return None, []
    return _do_work
def _collapse_by_bam_variantcaller(samples):
    """Collapse regions to a single representative by BAM input, variant caller and batch.
    """
    groups = collections.OrderedDict()
    for data in (sample[0] for sample in samples):
        bam_key = utils.get_in(data, ("combine", "work_bam", "out"),
                               data.get("align_bam"))
        variantcaller = get_variantcaller(data)
        if isinstance(bam_key, list):
            # Lists are unhashable; normalize to a tuple for keying.
            bam_key = tuple(bam_key)
        group_key = (multi.get_batch_for_key(data), bam_key, variantcaller)
        groups.setdefault(group_key, []).append(data)
    out = []
    for members in groups.values():
        representative = members[0]
        representative.pop("region", None)
        region_bams = representative.pop("region_bams", None)
        if region_bams and len(region_bams[0]) > 1:
            representative.pop("work_bam", None)
        out.append([representative])
    return out
def _dup_samples_by_variantcaller(samples, require_bam=True):
    """Prepare samples by variant callers, duplicating any with multiple callers.

    Returns (to_process, extras): items with a configured caller (one copy
    per caller) and items with none, respectively — both as [data] lists.
    """
    samples = [utils.to_single_data(x) for x in samples]
    samples = germline.split_somatic(samples)
    to_process = []
    extras = []
    for data in samples:
        added = False
        for add in handle_multiple_callers(data, "variantcaller", require_bam=require_bam):
            added = True
            to_process.append([add])
        if not added:
            # No caller configured: treat any existing VCF as pre-called.
            data = _handle_precalled(data)
            extras.append([data])
    return to_process, extras
def parallel_variantcall_region(samples, run_parallel):
    """Perform variant calling and post-analysis on samples by region.

    Splits each sample/caller combination by prepared regions, runs
    `variantcall_sample` in parallel, concatenates the per-region VCFs and
    collapses the results back to one item per BAM/caller/batch.
    """
    to_process, extras = _dup_samples_by_variantcaller(samples)
    split_fn = _split_by_ready_regions(".vcf.gz", "work_bam", get_variantcaller)
    samples = _collapse_by_bam_variantcaller(
        grouped_parallel_split_combine(to_process, split_fn,
                                       multi.group_batches, run_parallel,
                                       "variantcall_sample", "concat_variant_files",
                                       "vrn_file", ["region", "sam_ref", "config"]))
    return extras + samples
def vc_output_record(samples):
    """Prepare output record from variant calling to feed into downstream analysis.

    Prep work handles reformatting so we return generated dictionaries.

    For any shared keys that are calculated only once for a batch, like variant calls
    for the batch, we assign to every sample.
    """
    shared_keys = [["vrn_file"], ["validate", "summary"],
                   ["validate", "tp"], ["validate", "fp"], ["validate", "fn"]]
    raw = cwlutils.samples_to_records([utils.to_single_data(x) for x in samples])
    shared = {}
    for key in shared_keys:
        # Collect the unique non-empty value for this key across samples;
        # more than one distinct value indicates inconsistent batch state.
        cur = list(set([x for x in [tz.get_in(key, d) for d in raw] if x]))
        if len(cur) > 0:
            assert len(cur) == 1, (key, cur)
            shared[tuple(key)] = cur[0]
        else:
            shared[tuple(key)] = None
    out = []
    for d in raw:
        for key, val in shared.items():
            # The lambda is applied immediately by update_in, so capturing
            # `val` from the loop is safe here.
            d = tz.update_in(d, key, lambda x: val)
        out.append([d])
    return out
def is_joint(data):
    """Truthy when the sample is configured for gVCF or joint calling."""
    tools_on = dd.get_tools_on(data)
    return "gvcf" in tools_on or dd.get_jointcaller(data)
def batch_for_variantcall(samples):
    """Prepare a set of samples for parallel variant calling.

    CWL input target that groups samples into batches and variant callers
    for parallel processing.

    If doing joint calling, with `tools_on: [gvcf]`, split the sample into
    individuals instead of combining into a batch.
    """
    to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False)
    batch_groups = collections.defaultdict(list)
    to_process = [utils.to_single_data(x) for x in to_process]
    for data in cwlutils.samples_to_records(to_process):
        vc = get_variantcaller(data, require_bam=False)
        batches = dd.get_batches(data) or dd.get_sample_name(data)
        if not isinstance(batches, (list, tuple)):
            batches = [batches]
        for b in batches:
            # deepish_copy: a sample may belong to several batches.
            batch_groups[(b, vc)].append(utils.deepish_copy(data))
    batches = []
    for cur_group in batch_groups.values():
        joint_calling = any([is_joint(d) for d in cur_group])
        if joint_calling:
            # Joint calling runs each sample individually first.
            for d in cur_group:
                batches.append([d])
        else:
            batches.append(cur_group)
    return batches + extras
def _handle_precalled(data):
    """Copy in external pre-called variants fed into analysis.

    Normalizes a user-supplied VCF into the work directory under a
    predictable `<sample>-precalled<ext>` name and updates `vrn_file`.
    """
    if data.get("vrn_file"):
        vrn_file = data["vrn_file"]
        if isinstance(vrn_file, (list, tuple)):
            # Only a single pre-called file per sample is supported.
            assert len(vrn_file) == 1
            vrn_file = vrn_file[0]
        precalled_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "precalled"))
        ext = utils.splitext_plus(vrn_file)[-1]
        orig_file = os.path.abspath(vrn_file)
        our_vrn_file = os.path.join(precalled_dir, "%s-precalled%s" % (dd.get_sample_name(data), ext))
        # copy_plus also brings along associated files (presumably index
        # files like .tbi) — confirm against utils.copy_plus.
        utils.copy_plus(orig_file, our_vrn_file)
        data["vrn_file"] = our_vrn_file
    return data
def handle_multiple_callers(data, key, default=None, require_bam=True):
    """Split samples that potentially require multiple variant calling approaches.

    Returns one copy of `data` per configured caller; the original
    (possibly list-valued) configuration is preserved under `orig_<key>`.
    """
    callers = get_variantcaller(data, key, default, require_bam=require_bam)
    # NOTE(review): basestring is Python-2-only; this module predates py3.
    if isinstance(callers, basestring):
        return [data]
    elif not callers:
        return []
    else:
        out = []
        for caller in callers:
            base = copy.deepcopy(data)
            if not base["config"]["algorithm"].get("orig_%s" % key):
                base["config"]["algorithm"]["orig_%s" % key] = \
                    base["config"]["algorithm"][key]
            base["config"]["algorithm"][key] = caller
            # if splitting by variant caller, also split by jointcaller
            if key == "variantcaller":
                jcallers = get_variantcaller(data, "jointcaller", [])
                if isinstance(jcallers, basestring):
                    jcallers = [jcallers]
                if jcallers:
                    base["config"]["algorithm"]["orig_jointcaller"] = jcallers
                    # Pair each caller with its matching jointcaller
                    # (jointcaller names start with the caller name).
                    jcallers = [x for x in jcallers if x.startswith(caller)]
                    if jcallers:
                        base["config"]["algorithm"]["jointcaller"] = jcallers[0]
                    else:
                        base["config"]["algorithm"]["jointcaller"] = False
            out.append(base)
        return out
# Callers whose runner handles a whole multi-region block itself (see
# variantcall_batch_region); others are parallelized per region locally.
# gatk-haplotype supported but waiting for bug fixes in Spark implementation
SUPPORT_MULTICORE = ["strelka2", "haplotyper", "tnhaplotyper", "tnscope"]
def get_variantcallers():
    """Return the mapping of variant caller name -> runner function.

    The caller modules are imported inside the function, presumably to
    avoid import cycles at module load — confirm before moving to top level.
    """
    from bcbio.variation import (freebayes, cortex, samtools, varscan, mutect, mutect2,
                                 platypus, scalpel, sentieon, strelka2, vardict, qsnp)
    return {"gatk": gatk.unified_genotyper,
            "gatk-haplotype": gatk.haplotype_caller,
            "mutect2": mutect2.mutect2_caller,
            "freebayes": freebayes.run_freebayes,
            "cortex": cortex.run_cortex,
            "samtools": samtools.run_samtools,
            "varscan": varscan.run_varscan,
            "mutect": mutect.mutect_caller,
            "platypus": platypus.run,
            "scalpel": scalpel.run_scalpel,
            "strelka2": strelka2.run,
            # All vardict flavors share one entry point.
            "vardict": vardict.run_vardict,
            "vardict-java": vardict.run_vardict,
            "vardict-perl": vardict.run_vardict,
            "haplotyper": sentieon.run_haplotyper,
            "tnhaplotyper": sentieon.run_tnhaplotyper,
            "tnscope": sentieon.run_tnscope,
            "qsnp": qsnp.run_qsnp}
def variantcall_sample(data, region=None, align_bams=None, out_file=None):
    """Parallel entry point for doing genotyping of a region of a sample.

    Runs the configured caller for one region, optionally applies GATK
    read-backed phasing, and symlinks the result to `out_file`.
    """
    # NOTE(review): when out_file is None the branch is entered but
    # os.path.dirname(None) would fail — callers presumably always pass a
    # path; confirm before relying on the None case.
    if out_file is None or not os.path.exists(out_file) or not os.path.lexists(out_file):
        utils.safe_makedir(os.path.dirname(out_file))
        ref_file = dd.get_ref_file(data)
        config = data["config"]
        caller_fns = get_variantcallers()
        caller_fn = caller_fns[config["algorithm"].get("variantcaller")]
        if len(align_bams) == 1:
            items = [data]
        else:
            # Multi-sample batch: recover the original per-sample items.
            items = multi.get_orig_items(data)
            assert len(items) == len(align_bams)
        assoc_files = tz.get_in(("genome_resources", "variation"), data, {})
        if not assoc_files: assoc_files = {}
        for bam_file in align_bams:
            bam.index(bam_file, data["config"], check_timestamp=False)
        do_phasing = data["config"]["algorithm"].get("phasing", False)
        # With phasing enabled, call into an intermediate file first.
        call_file = "%s-unphased%s" % utils.splitext_plus(out_file) if do_phasing else out_file
        call_file = caller_fn(align_bams, items, ref_file, assoc_files, region, call_file)
        if do_phasing == "gatk":
            call_file = phasing.read_backed_phasing(call_file, align_bams, ref_file, region, config)
            utils.symlink_plus(call_file, out_file)
    if region:
        data["region"] = region
    data["vrn_file"] = out_file
    return [data]
def concat_batch_variantcalls(items, region_block=True, skip_jointcheck=False):
    """CWL entry point: combine variant calls from regions into single VCF.

    `region_block` selects between block-of-regions inputs
    (`region_block`) and flat per-region inputs (`region`).
    """
    items = [utils.to_single_data(x) for x in items]
    batch_name = _get_batch_name(items, skip_jointcheck)
    variantcaller = _get_batch_variantcaller(items)
    out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, "%s.vcf.gz" % (batch_name))
    utils.safe_makedir(os.path.dirname(out_file))
    if region_block:
        # First region of each block stands in for the block's ordering.
        regions = [_region_to_coords(rs[0]) for rs in items[0]["region_block"]]
    else:
        regions = [_region_to_coords(r) for r in items[0]["region"]]
    vrn_file_regions = items[0]["vrn_file_region"]
    out_file = vcfutils.concat_variant_files(vrn_file_regions, out_file, regions,
                                             dd.get_ref_file(items[0]), items[0]["config"])
    return {"vrn_file": out_file}
def _region_to_coords(region):
"""Split GATK region specification (chr1:1-10) into a tuple of chrom, start, end
"""
chrom, coords = region.split(":")
start, end = coords.split("-")
return (chrom, int(start), int(end))
def _get_batch_name(items, skip_jointcheck=False):
    """Retrieve the shared batch name for a group of items.

    Picks the batch name shared by the most items; for joint calling the
    per-sample name is used instead so each sample stays separate.
    """
    batch_names = collections.defaultdict(int)
    has_joint = any([is_joint(d) for d in items])
    for data in items:
        if has_joint and not skip_jointcheck:
            batches = dd.get_sample_name(data)
        else:
            batches = dd.get_batches(data) or dd.get_sample_name(data)
        if not isinstance(batches, (list, tuple)):
            batches = [batches]
        for b in batches:
            batch_names[b] += 1
    # Most frequent name wins; ties resolve by insertion order (stable sort).
    return sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0]
def _get_batch_variantcaller(items):
    """Return the single variant caller configured across all batch items."""
    callers = [vc for vc in {get_variantcaller(data) for data in items} if vc]
    assert len(callers) == 1, "%s\n%s" % (callers, pprint.pformat(items))
    return callers[0]
def variantcall_batch_region(items):
    """CWL entry point: variant call a batch of samples in a block of regions.

    Callers in SUPPORT_MULTICORE consume the whole region block in one
    invocation; others are fanned out per region on local cores.
    """
    items = [utils.to_single_data(x) for x in items]
    align_bams = [dd.get_align_bam(x) for x in items]
    variantcaller = _get_batch_variantcaller(items)
    # All items in a batch must agree on the region block.
    region_blocks = list(set([tuple(x.get("region_block")) for x in items if "region_block" in x]))
    assert len(region_blocks) == 1, region_blocks
    region_block = region_blocks[0]
    caller_fn = get_variantcallers()[variantcaller]
    assoc_files = tz.get_in(("genome_resources", "variation"), items[0], {})
    # Output path is derived from the block's first region.
    region = _region_to_coords(region_block[0])
    chrom, start, end = region
    region_str = "_".join(str(x) for x in region)
    batch_name = _get_batch_name(items)
    out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom,
                            "%s-%s-block.vcf.gz" % (batch_name, region_str))
    utils.safe_makedir(os.path.dirname(out_file))
    if variantcaller in SUPPORT_MULTICORE:
        call_file = caller_fn(align_bams, items, dd.get_ref_file(items[0]), assoc_files,
                              [_region_to_coords(r) for r in region_block], out_file)
    else:
        call_file = _run_variantcall_batch_multicore(items, region_block, out_file)
    return {"vrn_file_region": call_file, "region_block": region_block}
def _run_variantcall_batch_multicore(items, regions, final_file):
    """Run variant calling on a batch of items using multiple cores.

    Fans the region block out to one local job per region via
    `variantcall_sample`, then concatenates the results into `final_file`.
    """
    batch_name = _get_batch_name(items)
    variantcaller = _get_batch_variantcaller(items)
    work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items]
    def split_fn(data):
        # One (region, bams, output file) work unit per region string.
        out = []
        for region in regions:
            region = _region_to_coords(region)
            chrom, start, end = region
            region_str = "_".join(str(x) for x in region)
            out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom,
                                    "%s-%s.vcf.gz" % (batch_name, region_str))
            out.append((region, work_bams, out_file))
        return final_file, out
    # Local multiprocessing: one single-core job per available core.
    parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1}
    run_parallel = dmulti.runner(parallel, items[0]["config"])
    to_run = copy.deepcopy(items[0])
    to_run["sam_ref"] = dd.get_ref_file(to_run)
    to_run["group_orig"] = items
    parallel_split_combine([[to_run]], split_fn, run_parallel,
                           "variantcall_sample", "concat_variant_files",
                           "vrn_file", ["region", "sam_ref", "config"])
    return final_file
| |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
    """Data descriptor for a SparsePanel axis.

    Setting the axis validates it, pushes the new index onto every
    contained SparseDataFrame, and caches it on the owner instance.
    """

    def __init__(self, cache_field, frame_attr):
        # Attribute name used to cache the index on the owning panel.
        self.cache_field = cache_field
        # Attribute set on each contained frame (presumably 'index' or
        # 'columns' — the descriptor instantiations are outside this view).
        self.frame_attr = frame_attr

    def __get__(self, obj, type=None):
        return getattr(obj, self.cache_field, None)

    def __set__(self, obj, value):
        value = _ensure_index(value)
        if isinstance(value, MultiIndex):
            raise NotImplementedError("value cannot be a MultiIndex")
        # Propagate the new axis to each sparse frame before caching it.
        for v in compat.itervalues(obj._frames):
            setattr(v, self.frame_attr, value)
        setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
    """
    Sparse version of Panel

    Parameters
    ----------
    frames : dict of DataFrame objects
    items : array-like
    major_axis : array-like
    minor_axis : array-like
    default_kind : {'block', 'integer'}, default 'block'
        Default sparse kind for converting Series to SparseSeries. Will not
        override SparseSeries passed into constructor
    default_fill_value : float
        Default fill_value for converting Series to SparseSeries. Will not
        override SparseSeries passed in

    Notes
    -----
    """
    ndim = 3
    _typ = 'panel'
    _subtyp = 'sparse_panel'

    def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None,
                 default_fill_value=np.nan, default_kind='block',
                 copy=False):
        if frames is None:
            frames = {}

        # an ndarray input is interpreted as one 2-D slab per item
        if isinstance(frames, np.ndarray):
            new_frames = {}
            for item, vals in zip(items, frames):
                new_frames[item] = \
                    SparseDataFrame(vals, index=major_axis,
                                    columns=minor_axis,
                                    default_fill_value=default_fill_value,
                                    default_kind=default_kind)
            frames = new_frames

        if not isinstance(frames, dict):
            raise TypeError('input must be a dict, a %r was passed' %
                            type(frames).__name__)

        self.default_fill_value = fill_value = default_fill_value
        self.default_kind = kind = default_kind

        # pre-filter, if necessary
        if items is None:
            items = Index(sorted(frames.keys()))
        items = _ensure_index(items)

        # coerce every frame to SparseDataFrame and conform all to the
        # combined major/minor axes
        (clean_frames,
         major_axis,
         minor_axis) = _convert_frames(frames, major_axis,
                                       minor_axis, kind=kind,
                                       fill_value=fill_value)

        self._frames = clean_frames

        # do we want to fill missing ones?
        for item in items:
            if item not in clean_frames:
                raise ValueError('column %r not found in data' % item)

        self._items = items
        self.major_axis = major_axis
        self.minor_axis = minor_axis

    def _consolidate_inplace(self):  # pragma: no cover
        # do nothing when DataFrame calls this method
        pass

    def __array_wrap__(self, result):
        # rebuild a SparsePanel around the ndarray produced by a ufunc
        return SparsePanel(result, items=self.items,
                           major_axis=self.major_axis,
                           minor_axis=self.minor_axis,
                           default_kind=self.default_kind,
                           default_fill_value=self.default_fill_value)

    @classmethod
    def from_dict(cls, data):
        """
        Analogous to Panel.from_dict
        """
        return SparsePanel(data)

    def to_dense(self):
        """
        Convert SparsePanel to (dense) Panel

        Returns
        -------
        dense : Panel
        """
        return Panel(self.values, self.items, self.major_axis,
                     self.minor_axis)

    def as_matrix(self):
        # alias for the dense 3-D ndarray of values
        return self.values

    @property
    def values(self):
        # return dense values
        return np.array([self._frames[item].values
                         for item in self.items])

    # need a special property for items to make the field assignable
    _items = None

    def _get_items(self):
        return self._items

    def _set_items(self, new_items):
        new_items = _ensure_index(new_items)
        if isinstance(new_items, MultiIndex):
            # BUGFIX: message previously read "itemps"
            raise NotImplementedError("items cannot be a MultiIndex")

        # need to create new frames dict: relabel positionally
        old_frame_dict = self._frames
        old_items = self._items
        self._frames = dict((new_k, old_frame_dict[old_k])
                            for new_k, old_k in zip(new_items, old_items))
        self._items = new_items

    items = property(fget=_get_items, fset=_set_items)

    # DataFrame's index
    major_axis = SparsePanelAxis('_major_axis', 'index')

    # DataFrame's columns / "items"
    minor_axis = SparsePanelAxis('_minor_axis', 'columns')

    def _ixs(self, i, axis=0):
        """
        for compat as we don't support Block Manager here
        i : int, slice, or sequence of integers
        axis : int
        """
        key = self._get_axis(axis)[i]

        # xs cannot handle a non-scalar key, so just reindex here
        if com.is_list_like(key):
            return self.reindex(**{self._get_axis_name(axis): key})
        return self.xs(key, axis=axis)

    def _slice(self, slobj, axis=0, kind=None):
        """
        for compat as we don't support Block Manager here
        """
        axis = self._get_axis_name(axis)
        index = self._get_axis(axis)
        return self.reindex(**{axis: index[slobj]})

    def _get_item_cache(self, key):
        return self._frames[key]

    def __setitem__(self, key, value):
        if isinstance(value, DataFrame):
            # conform to this panel's axes, then sparsify if needed
            value = value.reindex(index=self.major_axis,
                                  columns=self.minor_axis)
            if not isinstance(value, SparseDataFrame):
                value = value.to_sparse(fill_value=self.default_fill_value,
                                        kind=self.default_kind)
        else:
            raise ValueError('only DataFrame objects can be set currently')

        self._frames[key] = value

        if key not in self.items:
            self._items = Index(list(self.items) + [key])

    def set_value(self, item, major, minor, value):
        """
        Quickly set single value at (item, major, minor) location

        Parameters
        ----------
        item : item label (panel item)
        major : major axis label (panel item row)
        minor : minor axis label (panel item column)
        value : scalar

        Notes
        -----
        This method *always* returns a new object. It is not particularly
        efficient but is provided for API compatibility with Panel

        Returns
        -------
        panel : SparsePanel
        """
        # round-trip through a dense Panel; cheap only for small panels
        dense = self.to_dense().set_value(item, major, minor, value)
        return dense.to_sparse(kind=self.default_kind,
                               fill_value=self.default_fill_value)

    def __delitem__(self, key):
        loc = self.items.get_loc(key)
        indices = lrange(loc) + lrange(loc + 1, len(self.items))
        del self._frames[key]
        self._items = self._items.take(indices)

    def __getstate__(self):
        # pickling
        return (self._frames, com._pickle_array(self.items),
                com._pickle_array(self.major_axis),
                com._pickle_array(self.minor_axis),
                self.default_fill_value, self.default_kind)

    def __setstate__(self, state):
        frames, items, major, minor, fv, kind = state

        self.default_fill_value = fv
        self.default_kind = kind

        self._items = _ensure_index(com._unpickle_array(items))
        self._major_axis = _ensure_index(com._unpickle_array(major))
        self._minor_axis = _ensure_index(com._unpickle_array(minor))
        self._frames = frames

    def copy(self, deep=True):
        """
        Make a copy of the sparse panel

        Returns
        -------
        copy : SparsePanel
        """
        d = self._construct_axes_dict()
        if deep:
            new_data = dict((k, v.copy(deep=True))
                            for k, v in compat.iteritems(self._frames))
            d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
        else:
            new_data = self._frames.copy()
        d['default_fill_value'] = self.default_fill_value
        d['default_kind'] = self.default_kind
        return SparsePanel(new_data, **d)

    def to_frame(self, filter_observations=True):
        """
        Convert SparsePanel to (dense) DataFrame

        Returns
        -------
        frame : DataFrame
        """
        if not filter_observations:
            raise TypeError('filter_observations=False not supported for '
                            'SparsePanel.to_long')

        I, N, K = self.shape
        counts = np.zeros(N * K, dtype=int)

        d_values = {}
        d_indexer = {}

        for item in self.items:
            frame = self[item]

            values, major, minor = _stack_sparse_info(frame)

            # values are stacked column-major
            indexer = minor * N + major
            counts.put(indexer, counts.take(indexer) + 1)  # cuteness

            d_values[item] = values
            d_indexer[item] = indexer

        # have full set of observations for each item
        mask = counts == I

        # for each item, take mask values at index locations for those sparse
        # values, and use that to select values
        values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
                                  for item in self.items])

        inds, = mask.nonzero()

        # still column major
        major_labels = inds % N
        minor_labels = inds // N

        index = MultiIndex(levels=[self.major_axis, self.minor_axis],
                           labels=[major_labels, minor_labels],
                           verify_integrity=False)

        df = DataFrame(values, index=index, columns=self.items)
        return df.sortlevel(level=0)

    to_long = deprecate('to_long', to_frame)
    toLong = deprecate('toLong', to_frame)

    def reindex(self, major=None, items=None, minor=None, major_axis=None,
                minor_axis=None, copy=False):
        """
        Conform / reshape panel axis labels to new input labels

        Parameters
        ----------
        major : array-like, default None
        items : array-like, default None
        minor : array-like, default None
        copy : boolean, default False
            Copy underlying SparseDataFrame objects

        Returns
        -------
        reindexed : SparsePanel
        """
        major = com._mut_exclusive(major=major, major_axis=major_axis)
        minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)

        if com._all_none(items, major, minor):
            raise ValueError('Must specify at least one axis')

        major = self.major_axis if major is None else major
        minor = self.minor_axis if minor is None else minor

        if items is not None:
            new_frames = {}
            for item in items:
                if item in self._frames:
                    new_frames[item] = self._frames[item]
                else:
                    raise NotImplementedError('Reindexing with new items not yet '
                                              'supported')
        else:
            new_frames = self._frames

        if copy:
            new_frames = dict((k, v.copy())
                              for k, v in compat.iteritems(new_frames))

        return SparsePanel(new_frames, items=items,
                           major_axis=major,
                           minor_axis=minor,
                           default_fill_value=self.default_fill_value,
                           default_kind=self.default_kind)

    def _combine(self, other, func, axis=0):
        if isinstance(other, DataFrame):
            return self._combineFrame(other, func, axis=axis)
        elif isinstance(other, Panel):
            return self._combinePanel(other, func)
        elif np.isscalar(other):
            new_frames = dict((k, func(v, other))
                              for k, v in compat.iteritems(self))
            return self._new_like(new_frames)
        # BUGFIX: previously fell through and returned None for unsupported
        # operand types, producing confusing downstream errors
        raise NotImplementedError(str(type(other)) +
                                  ' is not supported in combine operation with ' +
                                  str(type(self)))

    def _combineFrame(self, other, func, axis=0):
        index, columns = self._get_plane_axes(axis)
        axis = self._get_axis_number(axis)

        other = other.reindex(index=index, columns=columns)

        # align the dense values along the chosen axis, apply, swap back
        if axis == 0:
            new_values = func(self.values, other.values)
        elif axis == 1:
            new_values = func(self.values.swapaxes(0, 1), other.values.T)
            new_values = new_values.swapaxes(0, 1)
        elif axis == 2:
            new_values = func(self.values.swapaxes(0, 2), other.values)
            new_values = new_values.swapaxes(0, 2)

        # TODO: make faster!
        new_frames = {}
        for item, item_slice in zip(self.items, new_values):
            old_frame = self[item]
            ofv = old_frame.default_fill_value
            ok = old_frame.default_kind
            new_frames[item] = SparseDataFrame(item_slice,
                                               index=self.major_axis,
                                               columns=self.minor_axis,
                                               default_fill_value=ofv,
                                               default_kind=ok)

        return self._new_like(new_frames)

    def _new_like(self, new_frames):
        # construct a panel with the same axes/defaults but new frames
        return SparsePanel(new_frames, self.items, self.major_axis,
                           self.minor_axis,
                           default_fill_value=self.default_fill_value,
                           default_kind=self.default_kind)

    def _combinePanel(self, other, func):
        items = self.items.union(other.items)
        major = self.major_axis.union(other.major_axis)
        minor = self.minor_axis.union(other.minor_axis)

        # could check that everything's the same size, but forget it
        this = self.reindex(items=items, major=major, minor=minor)
        other = other.reindex(items=items, major=major, minor=minor)

        new_frames = {}
        for item in items:
            new_frames[item] = func(this[item], other[item])

        if not isinstance(other, SparsePanel):
            new_default_fill = self.default_fill_value
        else:
            # maybe unnecessary
            new_default_fill = func(self.default_fill_value,
                                    other.default_fill_value)

        return SparsePanel(new_frames, items, major, minor,
                           default_fill_value=new_default_fill,
                           default_kind=self.default_kind)

    def major_xs(self, key):
        """
        Return slice of panel along major axis

        Parameters
        ----------
        key : object
            Major axis label

        Returns
        -------
        y : DataFrame
            index -> minor axis, columns -> items
        """
        slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self))
        return DataFrame(slices, index=self.minor_axis, columns=self.items)

    def minor_xs(self, key):
        """
        Return slice of panel along minor axis

        Parameters
        ----------
        key : object
            Minor axis label

        Returns
        -------
        y : SparseDataFrame
            index -> major axis, columns -> items
        """
        slices = dict((k, v[key]) for k, v in compat.iteritems(self))
        return SparseDataFrame(slices, index=self.major_axis,
                               columns=self.items,
                               default_fill_value=self.default_fill_value,
                               default_kind=self.default_kind)

    # TODO: allow SparsePanel to work with flex arithmetic.
    # pow and mod only work for scalars for now

    def pow(self, val, *args, **kwargs):
        """wrapper around `__pow__` (only works for scalar values)"""
        return self.__pow__(val)

    def mod(self, val, *args, **kwargs):
        """wrapper around `__mod__` (only works for scalar values)"""
        return self.__mod__(val)
# Sparse objects opt out of numexpr
# Attach the aggregate and special (dunder) arithmetic methods to the class;
# use_numexpr=False because sparse structures bypass the numexpr fast path.
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, **ops.panel_special_funcs)
# Backwards-compatible alias for the class's former name
SparseWidePanel = SparsePanel
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
    """Coerce a dict of frames to SparseDataFrames sharing common axes.

    Any non-sparse frame is converted with the given kind/fill_value; when
    index or columns are not supplied they are derived by combining the
    axes of all frames. Returns (converted dict, index, columns).
    """
    from pandas.core.panel import _get_combined_index

    converted = {}
    for key, frame in compat.iteritems(frames):
        if not isinstance(frame, SparseDataFrame):
            frame = SparseDataFrame(frame, default_kind=kind,
                                    default_fill_value=fill_value)
        converted[key] = frame

    # derive missing axes from the union of the frames' axes
    if index is None:
        index = _get_combined_index([f.index for f in converted.values()])
    if columns is None:
        columns = _get_combined_index([f.columns for f in converted.values()])

    index = _ensure_index(index)
    columns = _ensure_index(columns)

    # conform every frame to the shared axes
    for key, frame in compat.iteritems(converted):
        if not (frame.index.equals(index) and frame.columns.equals(columns)):
            converted[key] = frame.reindex(index=index, columns=columns)

    return converted, index, columns
def _stack_sparse_info(frame):
    """Flatten a SparseDataFrame column-major into sparse values plus
    parallel (major, minor) label arrays.

    Requires every column to use a NaN fill value; raises TypeError
    otherwise.
    """
    npoints = [s.sp_index.npoints for _, s in compat.iteritems(frame)]

    # each column contributes a run of identical minor labels
    minor_labels = np.repeat(np.arange(len(frame.columns)), npoints)

    index_chunks = []
    value_chunks = []
    for col in frame.columns:
        series = frame[col]
        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')

        index_chunks.append(series.sp_index.to_int_index().indices)
        value_chunks.append(series.sp_values)

    major_labels = np.concatenate(index_chunks)
    sparse_values = np.concatenate(value_chunks)

    return sparse_values, major_labels, minor_labels
| |
"""
Module for adaptive stochastic descent optimization
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
# required to make json saving work in Python 2/3
try:
    to_unicode = unicode  # Python 2: use the native unicode type
except NameError:
    to_unicode = str  # Python 3: str is already unicode
import imp
import json
import logging
import datetime
import os
import signal
import glob
from copy import copy
from random import Random
from time import sleep, time
from itertools import product
from subprocess import Popen, PIPE
import importlib, types
import numpy.random as nr
import numpy as np
from neuron import h
from netpyne import sim,specs
from .utils import createFolder
from .utils import bashTemplate
from .utils import dcp, sigfig
pc = h.ParallelContext() # use bulletin board master/slave
def asd(function, xPop, saveFile=None, args=None, stepsize=0.1, sinc=2, sdec=2, pinc=2, pdec=2,
        pinitial=None, sinitial=None, xmin=None, xmax=None, maxiters=None, maxtime=None,
        abstol=1e-6, reltol=1e-3, stalliters=None, stoppingfunc=None, randseed=None,
        label=None, maxFitness=None, verbose=2, **kwargs):
    """
    Adaptive stochastic descent (ASD) optimization over a population of
    candidate parameter vectors.

    Each iteration, every candidate picks one parameter direction at random
    (weighted by per-direction probabilities), takes a step, and the whole
    new population is evaluated in one call to ``function``. Successful
    steps increase that direction's probability and step size; failures
    decrease them.

    Parameters
    ----------
    function : callable
        Objective; called as ``function(xPop, args)`` and must return one
        fitness value per candidate (lower is better).
    xPop : list of array-like
        Initial parameter vectors, one per population member.
    saveFile : str or None
        If given, path where parameter/fitness history is saved each
        iteration via ``sim.saveJSON``.
    args : dict or None
        Extra arguments forwarded to ``function``.
    stepsize : float
        Initial step size (relative to the starting parameter values).
    sinc, sdec : float
        Factors by which step sizes grow after success / shrink after
        failure (values < 1 are reset to 2 with a warning).
    pinc, pdec : float
        Factors by which selection probabilities grow/shrink (same reset).
    pinitial, sinitial : array-like or None
        Optional initial probabilities / step sizes, length 2*nparams
        (one up and one down entry per parameter).
    xmin, xmax : array-like or None
        Per-parameter lower/upper bounds (default unbounded).
    maxiters : int or None
        Iteration cap (default 1000).
    maxtime : float or None
        Wall-clock cap in seconds (default 3600).
    abstol, reltol : float
        Minimum absolute/relative improvement over the last ``stalliters``
        iterations before stopping.
    stalliters : int or None
        Window for the stall checks (default 10*nparams).
    stoppingfunc : callable or None
        Extra stopping condition checked each iteration.
    randseed : int or None
        Seed for numpy's RNG; RNG state is left untouched when None.
    label : str or None
        Prefix for progress messages.
    maxFitness : float or None
        Sentinel fitness marking a failed evaluation (e.g. preempted HPC
        job); such candidates are re-run with the same parameters.
    verbose : int
        Verbosity level, 0-4.

    Returns
    -------
    dict
        Keys ``x`` (final parameter vectors), ``fval`` (final fitnesses),
        ``exitreason`` and ``details`` (per-candidate fvals, xvals,
        probabilities, stepsizes).
    """
    if randseed is not None:
        nr.seed(int(randseed)) # Don't reset it if not supplied
        if verbose >= 3: print('ASD: Launching with random seed is %i; sample: %f' % (randseed, nr.random()))

    def consistentshape(userinput, origshape=False):
        """
        Make sure inputs have the right shape and data type.
        """
        output = np.reshape(np.array(userinput, dtype='float'), -1)
        if origshape: return output, np.shape(userinput)
        else: return output

    # Handle inputs and set defaults
    if maxtime is None: maxtime = 3600
    if maxiters is None: maxiters = 1000
    maxrangeiters = 100 # Number of times to try generating a new parameter
    popsize = len(xPop)
    # NOTE(review): only the LAST candidate's x/origshape/nparams survive
    # this loop -- assumes every candidate has the same shape; verify.
    for x in xPop:
        x, origshape = consistentshape(x, origshape=True) # Turn it into a vector but keep the original shape (not necessarily class, though)
        nparams = len(x) # Number of parameters
        if not nparams:
            errormsg = 'ASD: The length of the input vector cannot be zero'
            raise Exception(errormsg)
    if sinc<1:
        print('ASD: sinc cannot be less than 1; resetting to 2'); sinc = 2
    if sdec<1:
        print('ASD: sdec cannot be less than 1; resetting to 2'); sdec = 2
    if pinc<1:
        print('ASD: pinc cannot be less than 1; resetting to 2')
        pinc = 2
    if pdec<1:
        print('ASD: pdec cannot be less than 1; resetting to 2')
        pdec = 2

    # Set initial parameter selection probabilities -- uniform by default
    if pinitial is None: probabilities = np.ones(2 * nparams)
    else: probabilities = consistentshape(pinitial)
    if not sum(probabilities):
        errormsg = 'ASD: The sum of input probabilities cannot be zero'
        raise Exception(errormsg)
    probabilitiesPop = [dcp(probabilities) for i in range(popsize)] # create independent probabilities for each population individual

    # Handle step sizes
    if sinitial is None:
        stepsizes = abs(stepsize * x)
        stepsizes = np.concatenate((stepsizes, stepsizes)) # need to duplicate since two for each parameter
    else:
        stepsizes = consistentshape(sinitial)
    stepsizesPop = [dcp(stepsizes) for i in range(popsize)] # create independent step sizes for each population individual

    # Handle x limits
    xmin = np.zeros(nparams) - np.inf if xmin is None else consistentshape(xmin)
    xmax = np.zeros(nparams) + np.inf if xmax is None else consistentshape(xmax)

    # Final input checking
    for x in xPop:
        if sum(np.isnan(x)):
            errormsg = 'ASD: At least one value in the vector of starting points is NaN:\n%s' % x
            raise Exception(errormsg)
    if label is None: label = ''
    if stalliters is None: stalliters = 10 * nparams # By default, try 10 times per parameter on average
    stalliters = int(stalliters)
    maxiters = int(maxiters)

    # Initialization
    for stepsizes in stepsizesPop:
        if all(stepsizes == 0): stepsizes += stepsize # Handle the case where all step sizes are 0
        if any(stepsizes == 0): stepsizes[stepsizes == 0] = np.mean(stepsizes[stepsizes != 0]) # Replace step sizes of zeros with the mean of non-zero entries
    if args is None: args = {} # Reset if no function arguments supplied

    # evaluate initial values
    fvalPop = function(xPop, args)
    fvalorigPop = [float(fval) for fval in fvalPop]
    fvaloldPop = [float(fval) for fval in fvalPop]
    fvalnewPop = [float(fval) for fval in fvalPop]
    xorigPop = [dcp(x) for x in xPop] # Keep the original x, just in case

    # Initialize history
    abserrorhistoryPop = [dcp(np.zeros(stalliters)) for i in range(popsize)] # Store previous error changes
    relerrorhistoryPop = [dcp(np.zeros(stalliters)) for i in range(popsize)] # Store previous error changes
    fvalsPop = [dcp(np.zeros(maxiters + 1)) for i in range(popsize)] # Store all objective function values
    allstepsPop = [dcp(np.zeros((maxiters + 1, nparams))) for i in range(popsize)] # Store all parameters
    for fvals, fvalorig in zip(fvalsPop, fvalorigPop):
        fvals[0] = fvalorig # Store initial function output
    for allsteps, xorig in zip(allstepsPop, xorigPop):
        allsteps[0, :] = xorig # Store initial input vector

    # Loop
    count = 0 # Keep track of how many iterations have occurred
    start = time() # Keep track of when we begin looping
    offset = ' ' * 4 # Offset the print statements
    exitreason = 'Unknown exit reason' # Catch everything else
    while True:
        count += 1 # Increment the count
        xnewPop = []
        # Phase 1: propose one new parameter set per candidate
        for icand, (x, fval, fvalnew, probabilities, stepsizes) in enumerate(zip(xPop, fvalPop, fvalnewPop, probabilitiesPop, stepsizesPop)):
            if verbose == 1: print(offset + label + 'Iteration %i; elapsed %0.1f s; objective: %0.3e' % (count, time() - start, fval)) # For more verbose, use other print statement below
            if verbose >= 4: print('\n\n Count=%i \n x=%s \n probabilities=%s \n stepsizes=%s' % (count, x, probabilities, stepsizes))

            if fvalnew == maxFitness:
                print('Note: rerunning candidate %i since it did not complete in previous iteration ...\n' % (icand))
                xnew = dcp(x) # if maxFitness means error evaluating function (eg. preempted job on HPC) so rerun same param set
                xnewPop.append(xnew)
            else:
                # Calculate next parameters
                probabilities = probabilities / sum(probabilities) # Normalize probabilities
                cumprobs = np.cumsum(probabilities) # Calculate the cumulative distribution
                inrange = False
                for r in range(maxrangeiters): # Try to find parameters within range
                    choice = np.flatnonzero(cumprobs > nr.random())[0] # Choose a parameter and upper/lower at random
                    par = np.mod(choice, nparams) # Which parameter was chosen
                    pm = np.floor((choice) / nparams) # Plus or minus
                    newval = x[par] + ((-1)**pm) * stepsizes[choice] # Calculate the new vector
                    if newval<xmin[par]: newval = xmin[par] # Reset to the lower limit
                    if newval>xmax[par]: newval = xmax[par] # Reset to the upper limit
                    inrange = (newval != x[par])
                    if verbose >= 4: print(offset*2 + 'count=%i r=%s, choice=%s, par=%s, x[par]=%s, pm=%s, step=%s, newval=%s, xmin=%s, xmax=%s, inrange=%s' % (count, r, choice, par, x[par], (-1)**pm, stepsizes[choice], newval, xmin[par], xmax[par], inrange))
                    if inrange: # Proceed as long as they're not equal
                        break
                if not inrange: # Treat it as a failure if a value in range can't be found
                    probabilities[choice] = probabilities[choice] / pdec
                    stepsizes[choice] = stepsizes[choice] / sdec

                # Calculate the new value
                xnew = dcp(x) # Initialize the new parameter set
                xnew[par] = newval # Update the new parameter set
                xnewPop.append(xnew)

            # update pop variables
            xPop[icand], fvalPop[icand], probabilitiesPop[icand], stepsizesPop[icand] = x, fval, probabilities, stepsizes

        # Phase 2: evaluate the whole proposed population in one call
        fvalnewPop = function(xnewPop, args) # Calculate the objective function for the new parameter sets
        print('\n')

        # Phase 3: accept/reject per candidate and adapt probabilities/steps.
        # NOTE(review): 'choice', 'par' and 'pm' leak from the LAST iteration
        # of the proposal loop above, so the probability/step-size updates
        # below index the last candidate's chosen direction for EVERY
        # candidate -- looks like a bug when popsize > 1; verify.
        # NOTE(review): 'allsteps' is not part of this zip either, so it is
        # stale from the initialization loop (last candidate's array) and is
        # stored into allstepsPop[icand] for all candidates; verify.
        for icand, (x, xnew, fval, fvalorig, fvalnew, fvalold, fvals, probabilities, stepsizes, abserrorhistory, relerrorhistory) in \
                enumerate(zip(xPop, xnewPop, fvalPop, fvalorigPop, fvalnewPop, fvaloldPop, fvalsPop, probabilitiesPop, stepsizesPop, abserrorhistoryPop, relerrorhistoryPop)):

            if fvalnew == -1:
                ratio = 1
                abserrorhistory[np.mod(count, stalliters)] = 0
                relerrorhistory[np.mod(count, stalliters)] = 0
                fvalold = float(fval)
                flag = '--' # Marks no change
            else:
                eps = 1e-12 # Small value to avoid divide-by-zero errors
                try:
                    if abs(fvalnew)<eps and abs(fval)<eps: ratio = 1 # They're both zero: set the ratio to 1
                    elif abs(fvalnew)<eps: ratio = 1.0/eps # Only the denominator is zero: reset to the maximum ratio
                    else: ratio = fval / float(fvalnew) # The normal situation: calculate the real ratio
                except:
                    ratio = 1.0
                abserrorhistory[np.mod(count, stalliters)] = max(0, fval-fvalnew) # Keep track of improvements in the error
                relerrorhistory[np.mod(count, stalliters)] = max(0, ratio-1.0) # Keep track of improvements in the error
                if verbose >= 3: print(offset + 'candidate %d, step=%i choice=%s, par=%s, pm=%s, origval=%s, newval=%s' % (icand, count, choice, par, pm, x[par], xnew[par]))

                # Check if this step was an improvement
                fvalold = float(fval) # Store old fval
                if fvalnew < fvalold: # New parameter set is better than previous one
                    probabilities[choice] = probabilities[choice] * pinc # Increase probability of picking this parameter again
                    stepsizes[choice] = stepsizes[choice] * sinc # Increase size of step for next time
                    x = dcp(xnew) # Reset current parameters
                    fval = float(fvalnew) # Reset current error
                    flag = '++' # Marks an improvement
                else: # New parameter set is the same or worse than the previous one
                    probabilities[choice] = probabilities[choice] / pdec # Decrease probability of picking this parameter again
                    stepsizes[choice] = stepsizes[choice] / sdec # Decrease size of step for next time
                    flag = '--' # Marks no change
                if np.isnan(fvalnew):
                    if verbose >= 1: print('ASD: Warning, objective function returned NaN')
            if verbose >= 2: print(offset + label + 'candidate %d, step %i (%0.1f s) %s (orig: %s | best:%s | new:%s | diff:%s)' % ((icand, count, time() - start, flag) + sigfig([fvalorig, fvalold, fvalnew, fvalnew - fvalold])))

            # Store output information
            fvals[count] = float(fval) # Store objective function evaluations
            allsteps[count,:] = dcp(x) # Store parameters
            xPop[icand], xnewPop[icand], fvalPop[icand], fvalorigPop[icand], fvalnewPop[icand], fvaloldPop[icand], fvalsPop[icand], probabilitiesPop[icand], stepsizesPop[icand], abserrorhistoryPop[icand], relerrorhistoryPop[icand], allstepsPop[icand] = x, xnew, fval, fvalorig, fvalnew, fvalold, fvals, probabilities, stepsizes, abserrorhistory, relerrorhistory, allsteps

        print('\n')
        if saveFile:
            sim.saveJSON(saveFile, {'x': allstepsPop, 'fvals': fvalsPop})
        sleep(1)

        # Stopping criteria
        if count >= maxiters: # Stop if the iteration limit is exceeded
            exitreason = 'Maximum iterations reached'
            break
        if (time() - start) > maxtime:
            exitreason = 'Time limit reached (%s > %s)' % sigfig([(time()-start), maxtime])
            break
        if (count > stalliters) and (np.mean([np.mean(abs(x)) for x in abserrorhistoryPop]) < abstol): # Stop if improvement is too small
            exitreason = 'Absolute improvement too small (%s < %s)' % sigfig([np.mean([np.mean(x) for x in abserrorhistoryPop]), abstol])
            break
        # NOTE(review): the message below uses 'relerrorhistory' (last loop
        # iteration only) and np.mean, while the condition uses
        # relerrorhistoryPop and sum -- message is inconsistent; verify.
        if (count > stalliters) and (np.mean([sum(x) for x in relerrorhistoryPop]) < reltol): # Stop if improvement is too small
            exitreason = 'Relative improvement too small (%s < %s)' % sigfig([np.mean([np.mean(x) for x in relerrorhistory]), reltol])
            break
        if stoppingfunc and stoppingfunc():
            exitreason = 'Stopping function called'
            break

    # Return
    if verbose >= 2:
        print('\n=== %s %s (steps: %i) ===' % (label, exitreason, count))
        for icand, fvals in enumerate(fvalsPop):
            print(' == candidate: %d | orig: %s | best: %s | ratio: %s ==' % ((icand,) + sigfig([fvals[0], fvals[-1], fvals[-1] / fvals[0]])))

    output = {}
    output['x'] = [np.reshape(x, origshape) for x in xPop] # Parameters
    output['fval'] = [fvals[count] for fvals in fvalsPop]
    output['exitreason'] = exitreason
    output['details'] = {}
    output['details']['fvals'] = [fvals[:count+1] for fvals in fvalsPop] # Function evaluations
    output['details']['xvals'] = [allsteps[:count+1, :] for allsteps in allstepsPop]
    output['details']['probabilities'] = probabilitiesPop
    output['details']['stepsizes'] = stepsizesPop
    return output # Return parameter vector as well as details about run
# -------------------------------------------------------------------------------
# Adaptive Stochastic Descent (ASD) optimization
# -------------------------------------------------------------------------------
# func needs to be outside of class
def runASDJob(script, cfgSavePath, netParamsSavePath, simDataPath):
    """
    Launch one simulation as a detached child process (bulletin-board task).

    Runs ``nrniv <script> simConfig=<cfgSavePath> netParams=<netParamsSavePath>``
    in its own session (``os.setsid``), redirecting stdout/stderr to
    ``<simDataPath>.run`` and ``<simDataPath>.err``, and appends the child's
    PID to ``./pids.pid``. Does not wait for the child to finish.

    Parameters
    ----------
    script : str
        Path to the simulation init script passed to nrniv.
    cfgSavePath : str
        Path to the saved simConfig JSON.
    netParamsSavePath : str
        Path to the saved netParams file.
    simDataPath : str
        Base path for this job's .run/.err output files.
    """
    import os
    print('\nJob in rank id: ',pc.id())
    command = 'nrniv %s simConfig=%s netParams=%s' % (script, cfgSavePath, netParamsSavePath)
    print(command)
    # The 'with' closes our copies of the files, but the child has already
    # inherited the descriptors, so its redirection keeps working.
    with open(simDataPath+'.run', 'w') as outf, open(simDataPath+'.err', 'w') as errf:
        pid = Popen(command.split(' '), stdout=outf, stderr=errf, preexec_fn=os.setsid).pid
    # record the PID so stray jobs can be killed later
    with open('./pids.pid', 'a') as file:
        file.write(str(pid) + ' ')
def asdOptim(self, pc):
"""
Function for/to <short description of `netpyne.batch.asd_parallel.asdOptim`>
Parameters
----------
self : <type>
<Short description of self>
**Default:** *required*
pc : <type>
<Short description of pc>
**Default:** *required*
"""
import sys
# -------------------------------------------------------------------------------
# ASD optimization: Parallel evaluation
# -------------------------------------------------------------------------------
def evaluator(candidates, args):
import os
global ngen
ngen += 1
total_jobs = 0
# options slurm, mpi
type = args.get('type', 'mpi_direct')
# paths to required scripts
script = args.get('script', 'init.py')
netParamsSavePath = args.get('netParamsSavePath')
genFolderPath = self.saveFolder + '/gen_' + str(ngen)
# mpi command setup
nodes = args.get('nodes', 1)
paramLabels = args.get('paramLabels', [])
coresPerNode = args.get('coresPerNode', 1)
mpiCommand = args.get('mpiCommand', 'ibrun')
numproc = nodes*coresPerNode
# slurm setup
custom = args.get('custom', '')
folder = args.get('folder', '.')
email = args.get('email', 'a@b.c')
walltime = args.get('walltime', '00:01:00')
reservation = args.get('reservation', None)
allocation = args.get('allocation', 'csd403') # NSG account
# fitness function
fitnessFunc = args.get('fitnessFunc')
fitnessFuncArgs = args.get('fitnessFuncArgs')
maxFitness = args.get('maxFitness')
# read params or set defaults
sleepInterval = args.get('sleepInterval', 0.2)
# create folder if it does not exist
createFolder(genFolderPath)
# remember pids and jobids in a list
pids = []
jobids = {}
# create a job for each candidate
for candidate_index, candidate in enumerate(candidates):
# required for slurm
sleep(sleepInterval)
# name and path
jobName = "gen_" + str(ngen) + "_cand_" + str(candidate_index)
jobPath = genFolderPath + '/' + jobName
# set initial cfg initCfg
if len(self.initCfg) > 0:
for paramLabel, paramVal in self.initCfg.items():
self.setCfgNestedParam(paramLabel, paramVal)
# modify cfg instance with candidate values
print(paramLabels, candidate)
for label, value in zip(paramLabels, candidate):
print('set %s=%s' % (label, value))
self.setCfgNestedParam(label, value)
#self.setCfgNestedParam("filename", jobPath)
self.cfg.simLabel = jobName
self.cfg.saveFolder = genFolderPath
# save cfg instance to file
cfgSavePath = jobPath + '_cfg.json'
self.cfg.save(cfgSavePath)
if type=='mpi_bulletin':
# ----------------------------------------------------------------------
# MPI master-slaves
# ----------------------------------------------------------------------
pc.submit(runASDJob, script, cfgSavePath, netParamsSavePath, jobPath)
print('-'*80)
else:
# ----------------------------------------------------------------------
# MPI job commnand
# ----------------------------------------------------------------------
command = '%s -n %d nrniv -python -mpi %s simConfig=%s netParams=%s ' % (mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)
# ----------------------------------------------------------------------
# run on local machine with <nodes*coresPerNode> cores
# ----------------------------------------------------------------------
if type=='mpi_direct':
executer = '/bin/bash'
jobString = bashTemplate('mpi_direct') %(custom, folder, command)
# ----------------------------------------------------------------------
# run on HPC through slurm
# ----------------------------------------------------------------------
elif type=='hpc_slurm':
executer = 'sbatch'
res = '#SBATCH --res=%s' % (reservation) if reservation else ''
jobString = bashTemplate('hpc_slurm') % (jobName, allocation, walltime, nodes, coresPerNode, jobPath, jobPath, email, res, custom, folder, command)
# ----------------------------------------------------------------------
# run on HPC through PBS
# ----------------------------------------------------------------------
elif type=='hpc_torque':
executer = 'qsub'
queueName = args.get('queueName', 'default')
nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
jobString = bashTemplate('hpc_torque') % (jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)
# ----------------------------------------------------------------------
# save job and run
# ----------------------------------------------------------------------
print('Submitting job ', jobName)
print(jobString)
print('-'*80)
# save file
batchfile = '%s.sbatch' % (jobPath)
with open(batchfile, 'w') as text_file:
text_file.write("%s" % jobString)
if type == 'mpi_direct':
with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
else:
with open(jobPath+'.jobid', 'w') as outf, open(jobPath+'.err', 'w') as errf:
pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
#proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
sleep(0.1)
#read = proc.stdout.read()
if type == 'mpi_direct':
with open('./pids.pid', 'a') as file:
file.write(str(pids))
else:
with open(jobPath+'.jobid', 'r') as outf:
read=outf.readline()
print(read)
if len(read) > 0:
jobid = int(read.split()[-1])
jobids[candidate_index] = jobid
print('jobids', jobids)
total_jobs += 1
sleep(0.1)
# ----------------------------------------------------------------------
# gather data and compute fitness
# ----------------------------------------------------------------------
if type == 'mpi_bulletin':
# wait for pc bulletin board jobs to finish
try:
while pc.working():
sleep(1)
#pc.done()
except:
pass
num_iters = 0
jobs_completed = 0
fitness = [None for cand in candidates]
# print outfilestem
print("Waiting for jobs from generation %d/%d ..." %(ngen, args.get('maxiters')))
# print "PID's: %r" %(pids)
# start fitness calculation
while jobs_completed < total_jobs:
unfinished = [i for i, x in enumerate(fitness) if x is None ]
for candidate_index in unfinished:
try: # load simData and evaluate fitness
jobNamePath = genFolderPath + "/gen_" + str(ngen) + "_cand_" + str(candidate_index)
if os.path.isfile(jobNamePath+'.json'):
with open('%s.json'% (jobNamePath)) as file:
simData = json.load(file)['simData']
fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
jobs_completed += 1
print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
except Exception as e:
# print
err = "There was an exception evaluating candidate %d:"%(candidate_index)
print(("%s \n %s"%(err,e)))
#pass
#print 'Error evaluating fitness of candidate %d'%(candidate_index)
num_iters += 1
print('completed: %d' %(jobs_completed))
if num_iters >= args.get('maxiter_wait', 5000):
print("Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished)))
for canditade_index in unfinished:
fitness[canditade_index] = maxFitness # rerun those that didn't complete;
jobs_completed += 1
try:
if 'scancelUser' in kwargs:
os.system('scancel -u %s'%(kwargs['scancelUser']))
else:
os.system('scancel %d' % (jobids[candidate_index])) # terminate unfinished job (resubmitted jobs not terminated!)
except:
pass
sleep(args.get('time_sleep', 1))
# kill all processes
if type == 'mpi_bulletin':
try:
with open("./pids.pid", 'r') as file: # read pids for mpi_bulletin
pids = [int(i) for i in file.read().split(' ')[:-1]]
with open("./pids.pid", 'w') as file: # delete content
pass
for pid in pids:
try:
os.killpg(os.getpgid(pid), signal.SIGTERM)
except:
pass
except:
pass
elif type == 'mpi_direct':
import psutil
PROCNAME = "nrniv"
for proc in psutil.process_iter():
# check whether the process name matches
try:
if proc.name() == PROCNAME:
proc.kill()
except:
pass
# don't want to to this for hpcs since jobs are running on compute nodes not master
print("-" * 80)
print(" Completed a generation ")
print("-" * 80)
return fitness # single candidate for now
        # -------------------------------------------------------------------------------
        # ASD optimization: Main code
        # -------------------------------------------------------------------------------
        import os
        # create main sim directory and save scripts
        self.saveScripts()
        # generation counter shared with evaluator() via a module-level global
        global ngen
        ngen = -1
        # gather **kwargs
        kwargs = {}
        ''' allowed kwargs:
        stepsize 0.1 Initial step size as a fraction of each parameter
        sinc 2 Step size learning rate (increase)
        sdec 2 Step size learning rate (decrease)
        pinc 2 Parameter selection learning rate (increase)
        pdec 2 Parameter selection learning rate (decrease)
        pinitial None Set initial parameter selection probabilities
        sinitial None Set initial step sizes; if empty, calculated from stepsize instead
        xmin None Min value allowed for each parameter
        xmax None Max value allowed for each parameter
        maxiters 1000 Maximum number of iterations (1 iteration = 1 function evaluation)
        maxtime 3600 Maximum time allowed, in seconds
        abstol 1e-6 Minimum absolute change in objective function
        reltol 1e-3 Minimum relative change in objective function
        stalliters 10*n Number of iterations over which to calculate TolFun (n = number of parameters)
        stoppingfunc None External method that can be used to stop the calculation from the outside.
        randseed None The random seed to use
        verbose 2 How much information to print during the run
        label None A label to use to annotate the output
        '''
        # per-parameter search bounds: params[i]['values'] = [min, max, (optional initial list)]
        kwargs['xmin'] = [x['values'][0] for x in self.params]
        kwargs['xmax'] = [x['values'][1] for x in self.params]
        # 3rd value is list with initial values
        if len(self.params[0]['values']) > 2 and isinstance(self.params[0]['values'][2], list):
            popsize = len(self.params[0]['values'][2])
            x0 = []
            for i in range(popsize):
                x0.append([x['values'][2][i] for x in self.params])
        # if no 3rd value, calculate random values
        else:
            popsize = self.optimCfg.get('popsize', 1)
            x0 = []
            for p in range(popsize):
                x0.append([np.random.uniform(x['values'][0], x['values'][1]) for x in self.params])
        if 'args' not in kwargs: kwargs['args'] = {}
        kwargs['args']['cfg'] = self.cfg # include here args/params to pass to evaluator function
        kwargs['args']['paramLabels'] = [x['label'] for x in self.params]
        kwargs['args']['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
        kwargs['args']['maxiters'] = self.optimCfg['maxiters'] if 'maxiters' in self.optimCfg else 1000
        kwargs['args']['fitnessFunc'] = self.optimCfg['fitnessFunc']
        kwargs['args']['fitnessFuncArgs'] = self.optimCfg['fitnessFuncArgs']
        kwargs['args']['maxiter_wait'] = self.optimCfg['maxiter_wait']
        kwargs['args']['time_sleep'] = self.optimCfg['time_sleep']
        kwargs['args']['popsize'] = popsize
        kwargs['args']['maxFitness'] = self.optimCfg.get('maxFitness', 1000)
        # optimCfg entries become top-level asd() kwargs; runCfg entries are
        # forwarded to the evaluator through kwargs['args']
        for key, value in self.optimCfg.items():
            kwargs[key] = value
        for key, value in self.runCfg.items():
            kwargs['args'][key] = value
        # if using pc bulletin board, initialize all workers
        if self.runCfg.get('type', None) == 'mpi_bulletin':
            for iworker in range(int(pc.nhost())):
                pc.runworker()
        # -------------------------------------------------------------------------------
        # Run algorithm
        # -------------------------------------------------------------------------------
        saveFile = '%s/%s_temp_output.json' % (self.saveFolder, self.batchLabel)
        output = asd(evaluator, x0, saveFile, **kwargs)
        # print best and finish
        # NOTE(review): output['fval'] is assumed to be an array-like of fitness
        # values with output['x'] the matching solutions — confirm against asd()
        bestFval = np.min(output['fval'])
        bestX = output['x'][np.argmin(output['fval'])]
        print('\nBest Solution with fitness = %.4g: \n' % (bestFval), bestX)
        print("-" * 80)
        print("   Completed adaptive stochasitc parameter optimization   ")
        print("-" * 80)
        sim.saveJSON('%s/%s_output.json' % (self.saveFolder, self.batchLabel), output)
        #sleep(1)
        sys.exit()
# NOTE(review): removed stray '|' artifact (was a syntax error between file sections)
# NOTE(review): removed unresolved git merge-conflict markers ('<<<<<<< HEAD' x2);
# the three duplicate copies of xml.sax.saxutils below must be deduplicated when
# the conflict is properly resolved
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
    """Return *s* with every key of *d* replaced by its corresponding value."""
    for old, new in d.items():
        s = s.replace(old, new)
    return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # BUGFIX: the entity replacement strings were destroyed by HTML-entity
    # decoding during extraction (e.g. '"&"' for '"&amp;"'), turning every
    # replace() into a no-op; restored the correct XML entities.
    # must do ampersand first, so it doesn't re-escape the other entities
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # BUGFIX: the entity strings were destroyed by HTML-entity decoding during
    # extraction (e.g. '"<"' for '"&lt;"'); restored the correct XML entities.
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last, so earlier replacements can't create new entities
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # copy first so neither the caller's dict nor the shared default is mutated
    entities = entities.copy()
    # BUGFIX: restored the character references (destroyed by HTML-entity
    # decoding during extraction); \n/\r/\t must become &#10;/&#13;/&#9; inside
    # an attribute or XML attribute-value normalization turns them into spaces
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # contains both quote kinds: double-quote and escape the '"'
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def _gettextwriter(out, encoding):
    """Return a text-mode writer wrapping *out*.

    Accepts None (-> sys.stdout), an existing text/codecs writer (returned
    as-is), or a binary writer / any object with a write method (wrapped in
    a TextIOWrapper using *encoding*).
    """
    if out is None:
        import sys
        return sys.stdout
    if isinstance(out, io.TextIOBase):
        # use a text writer as is
        return out
    if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
        # use a codecs stream writer as is
        return out
    # wrap a binary writer with TextIOWrapper
    if isinstance(out, io.RawIOBase):
        # Keep the original file open when the TextIOWrapper is
        # destroyed
        class _wrapper:
            # lie about the class so TextIOWrapper treats the proxy like the
            # original stream; all other attribute access is delegated
            __class__ = out.__class__
            def __getattr__(self, name):
                return getattr(out, name)
        buffer = _wrapper()
        # no-op close: closing the TextIOWrapper must not close *out*
        buffer.close = lambda: None
    else:
        # This is to handle passed objects that aren't in the
        # IOBase hierarchy, but just have a write method
        buffer = io.BufferedIOBase()
        buffer.writable = lambda: True
        buffer.write = out.write
        try:
            # TextIOWrapper uses this methods to determine
            # if BOM (for UTF-16, etc) should be added
            buffer.seekable = out.seekable
            buffer.tell = out.tell
        except AttributeError:
            pass
    return io.TextIOWrapper(buffer, encoding=encoding,
                            errors='xmlcharrefreplace',
                            newline='\n',
                            write_through=True)
class XMLGenerator(handler.ContentHandler):
    """SAX ContentHandler that writes the received parse events back out
    as an XML document on *out*.

    With short_empty_elements=True, elements with no content are emitted
    as '<tag/>' instead of '<tag></tag>' (tracked via the
    _pending_start_element flag).
    """

    def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
        handler.ContentHandler.__init__(self)
        out = _gettextwriter(out, encoding)
        self._write = out.write
        self._flush = out.flush
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []  # (prefix, uri) pairs not yet written out
        self._encoding = encoding
        self._short_empty_elements = short_empty_elements
        # True while a start tag is open and still awaiting '>' or '/>'
        self._pending_start_element = False
    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        if name[0]:
            # Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
            # bound by definition to http://www.w3.org/XML/1998/namespace.  It
            # does not need to be declared and will not usually be found in
            # self._current_context.
            if 'http://www.w3.org/XML/1998/namespace' == name[0]:
                return 'xml:' + name[1]
            # The name is in a non-empty namespace
            prefix = self._current_context[name[0]]
            if prefix:
                # If it is not the default namespace, prepend the prefix
                return prefix + ":" + name[1]
        # Return the unqualified name
        return name[1]
    def _finish_pending_start_element(self,endElement=False):
        # close a still-open start tag with '>' before emitting content
        # (endElement parameter is unused here — kept for interface stability)
        if self._pending_start_element:
            self._write('>')
            self._pending_start_element = False

    # ContentHandler methods

    def startDocument(self):
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)
    def endDocument(self):
        self._flush()
    def startPrefixMapping(self, prefix, uri):
        # push a copy of the current uri->prefix mapping so it can be restored
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))
    def endPrefixMapping(self, prefix):
        # restore and pop the previous namespace context
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]
    def startElement(self, name, attrs):
        self._finish_pending_start_element()
        self._write('<' + name)
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (name, quoteattr(value)))
        if self._short_empty_elements:
            # defer '>' so an immediately-following endElement can emit '/>'
            self._pending_start_element = True
        else:
            self._write(">")
    def endElement(self, name):
        if self._pending_start_element:
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % name)
    def startElementNS(self, name, qname, attrs):
        self._finish_pending_start_element()
        self._write('<' + self._qname(name))
        # emit xmlns declarations gathered by startPrefixMapping
        for prefix, uri in self._undeclared_ns_maps:
            if prefix:
                self._write(' xmlns:%s="%s"' % (prefix, uri))
            else:
                self._write(' xmlns="%s"' % uri)
        self._undeclared_ns_maps = []
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")
    def endElementNS(self, name, qname):
        if self._pending_start_element:
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % self._qname(name))
    def characters(self, content):
        if content:
            self._finish_pending_start_element()
            if not isinstance(content, str):
                content = str(content, self._encoding)
            self._write(escape(content))
    def ignorableWhitespace(self, content):
        if content:
            self._finish_pending_start_element()
            if not isinstance(content, str):
                content = str(content, self._encoding)
            self._write(content)
    def processingInstruction(self, target, data):
        self._finish_pending_start_element()
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        # *parent* is the upstream XMLReader this filter delegates to;
        # it may also be set later via setParent()
        xmlreader.XMLReader.__init__(self)
        self._parent = parent

    # ErrorHandler methods — forward to the registered error handler

    def error(self, exception):
        self._err_handler.error(exception)
    def fatalError(self, exception):
        self._err_handler.fatalError(exception)
    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods — forward to the registered content handler

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)
    def startDocument(self):
        self._cont_handler.startDocument()
    def endDocument(self):
        self._cont_handler.endDocument()
    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)
    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)
    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)
    def endElement(self, name):
        self._cont_handler.endElement(name)
    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)
    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)
    def characters(self, content):
        self._cont_handler.characters(content)
    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)
    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)
    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods — forward to the registered DTD handler

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)
    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods — install this filter as the parent's handlers,
    # then delegate parsing/configuration to the parent

    def parse(self, source):
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)
    def setLocale(self, locale):
        self._parent.setLocale(locale)
    def getFeature(self, name):
        return self._parent.getFeature(name)
    def setFeature(self, name, state):
        self._parent.setFeature(name, state)
    def getProperty(self, name):
        return self._parent.getProperty(name)
    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        return self._parent
    def setParent(self, parent):
        self._parent = parent
# --- Utility functions

def prepare_input_source(source, base=""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading."""
    if isinstance(source, str):
        # a plain string is treated as a system id (filename or URL)
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        # a file-like object becomes the byte stream of a new InputSource
        f = source
        source = xmlreader.InputSource()
        source.setByteStream(f)
        if hasattr(f, "name") and isinstance(f.name, str):
            source.setSystemId(f.name)

    if source.getByteStream() is None:
        # no stream yet: resolve the system id against *base* as either a
        # local file or a URL, and open it
        sysid = source.getSystemId()
        basehead = os.path.dirname(os.path.normpath(base))
        sysidfilename = os.path.join(basehead, sysid)
        if os.path.isfile(sysidfilename):
            source.setSystemId(sysidfilename)
            f = open(sysidfilename, "rb")
        else:
            source.setSystemId(urllib.parse.urljoin(base, sysid))
            f = urllib.request.urlopen(source.getSystemId())

        source.setByteStream(f)

    return source
# NOTE(review): removed merge-conflict separator '======='; a duplicate copy of
# xml.sax.saxutils follows and should be removed when the conflict is resolved
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # BUGFIX: the entity replacement strings were destroyed by HTML-entity
    # decoding during extraction, turning every replace() into a no-op;
    # restored the correct XML entities.
    # must do ampersand first, so it doesn't re-escape the other entities
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # BUGFIX: the entity strings were destroyed by HTML-entity decoding during
    # extraction; restored the correct XML entities.
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last, so earlier replacements can't create new entities
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # copy first so neither the caller's dict nor the shared default is mutated
    entities = entities.copy()
    # BUGFIX: restored the character references (destroyed by HTML-entity
    # decoding during extraction); \n/\r/\t must become &#10;/&#13;/&#9; inside
    # an attribute or XML attribute-value normalization turns them into spaces
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # contains both quote kinds: double-quote and escape the '"'
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
# use a codecs stream writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name") and isinstance(f.name, str):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
# NOTE(review): removed merge-conflict markers ('>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453'
# and '======='); a third duplicate copy of xml.sax.saxutils follows and should be
# removed when the conflict is resolved
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # BUGFIX: the entity replacement strings were destroyed by HTML-entity
    # decoding during extraction, turning every replace() into a no-op;
    # restored the correct XML entities.
    # must do ampersand first, so it doesn't re-escape the other entities
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # BUGFIX: the entity strings were destroyed by HTML-entity decoding during
    # extraction; restored the correct XML entities.
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    if entities:
        data = __dict_replace(data, entities)
    # must do ampersand last, so earlier replacements can't create new entities
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # copy first so neither the caller's dict nor the shared default is mutated
    entities = entities.copy()
    # BUGFIX: restored the character references (destroyed by HTML-entity
    # decoding during extraction); \n/\r/\t must become &#10;/&#13;/&#9; inside
    # an attribute or XML attribute-value normalization turns them into spaces
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # contains both quote kinds: double-quote and escape the '"'
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
# use a codecs stream writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
    """SAX ContentHandler that writes the event stream back out as XML."""

    def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
        handler.ContentHandler.__init__(self)
        writer = _gettextwriter(out, encoding)
        self._write = writer.write
        self._flush = writer.flush
        # Namespace bookkeeping: a stack of uri->prefix mappings, plus the
        # prefixes declared since the last startElementNS call.
        self._ns_contexts = [{}]
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []
        self._encoding = encoding
        self._short_empty_elements = short_empty_elements
        self._pending_start_element = False

    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        uri, localname = name
        if not uri:
            return localname
        # Per http://www.w3.org/XML/1998/namespace, the 'xml' prefix is
        # bound by definition to that namespace; it never needs declaring
        # and will not usually be found in self._current_context.
        if uri == 'http://www.w3.org/XML/1998/namespace':
            return 'xml:' + localname
        # Non-empty namespace: qualify unless it is the default namespace
        # (empty prefix).
        prefix = self._current_context[uri]
        return prefix + ":" + localname if prefix else localname

    def _finish_pending_start_element(self,endElement=False):
        # Close a start tag left open by short_empty_elements handling.
        if not self._pending_start_element:
            return
        self._write('>')
        self._pending_start_element = False

    # ContentHandler methods

    def startDocument(self):
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def endDocument(self):
        self._flush()

    def startPrefixMapping(self, prefix, uri):
        # Push a copy of the current context, then record the new binding.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        # Restore the mapping that was in effect before the push above.
        self._current_context = self._ns_contexts.pop()

    def startElement(self, name, attrs):
        self._finish_pending_start_element()
        self._write('<' + name)
        for attr, value in attrs.items():
            self._write(' %s=%s' % (attr, quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElement(self, name):
        if self._pending_start_element:
            self._pending_start_element = False
            self._write('/>')
        else:
            self._write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        self._finish_pending_start_element()
        self._write('<' + self._qname(name))
        for prefix, uri in self._undeclared_ns_maps:
            decl = ' xmlns:%s="%s"' % (prefix, uri) if prefix else ' xmlns="%s"' % uri
            self._write(decl)
        self._undeclared_ns_maps = []
        for attr, value in attrs.items():
            self._write(' %s=%s' % (self._qname(attr), quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElementNS(self, name, qname):
        if self._pending_start_element:
            self._pending_start_element = False
            self._write('/>')
        else:
            self._write('</%s>' % self._qname(name))

    def characters(self, content):
        if not content:
            return
        self._finish_pending_start_element()
        if not isinstance(content, str):
            content = str(content, self._encoding)
        self._write(escape(content))

    def ignorableWhitespace(self, content):
        if not content:
            return
        self._finish_pending_start_element()
        if not isinstance(content, str):
            content = str(content, self._encoding)
        self._write(content)

    def processingInstruction(self, target, data):
        self._finish_pending_start_element()
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers.  By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""
    def __init__(self, parent = None):
        # *parent* is the XMLReader this filter delegates to; it may also
        # be attached later via setParent().
        xmlreader.XMLReader.__init__(self)
        self._parent = parent
    # ErrorHandler methods: forward parser errors to the registered handler.
    def error(self, exception):
        self._err_handler.error(exception)
    def fatalError(self, exception):
        self._err_handler.fatalError(exception)
    def warning(self, exception):
        self._err_handler.warning(exception)
    # ContentHandler methods: forward document events unmodified.
    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)
    def startDocument(self):
        self._cont_handler.startDocument()
    def endDocument(self):
        self._cont_handler.endDocument()
    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)
    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)
    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)
    def endElement(self, name):
        self._cont_handler.endElement(name)
    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)
    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)
    def characters(self, content):
        self._cont_handler.characters(content)
    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)
    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)
    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)
    # DTDHandler methods
    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)
    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
    # EntityResolver methods
    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)
    # XMLReader methods
    def parse(self, source):
        # Install this filter as every handler of the parent reader, so all
        # parent events flow through this object, then start the parse.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)
    def setLocale(self, locale):
        self._parent.setLocale(locale)
    def getFeature(self, name):
        return self._parent.getFeature(name)
    def setFeature(self, name, state):
        self._parent.setFeature(name, state)
    def getProperty(self, name):
        return self._parent.getProperty(name)
    def setProperty(self, name, value):
        self._parent.setProperty(name, value)
    # XMLFilter methods
    def getParent(self):
        return self._parent
    def setParent(self, parent):
        self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading."""
    if isinstance(source, str):
        # A system id string: wrap it in an InputSource.
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        # A raw stream: wrap it, keeping its name as the system id if any.
        stream = source
        source = xmlreader.InputSource()
        source.setByteStream(stream)
        if hasattr(stream, "name") and isinstance(stream.name, str):
            source.setSystemId(stream.name)
    if source.getByteStream() is None:
        # No stream attached yet: resolve the system id against *base*,
        # preferring a local file when one exists at the joined path.
        sysid = source.getSystemId()
        candidate = os.path.join(os.path.dirname(os.path.normpath(base)), sysid)
        if os.path.isfile(candidate):
            source.setSystemId(candidate)
            source.setByteStream(open(candidate, "rb"))
        else:
            source.setSystemId(urllib.parse.urljoin(base, sysid))
            source.setByteStream(urllib.request.urlopen(source.getSystemId()))
    return source
# NOTE(review): stray VCS merge-conflict marker (">>>>>>> b875702c9c06ab5012e52ff4337439b03918f453") neutralized.
| |
import os
import re
import unittest
from parameterized import parameterized
from conans.client import tools
from conans.client.build.msbuild import MSBuild
from conans.test.utils.mocks import MockSettings, MockConanfile
class MSBuildTest(unittest.TestCase):
    """Checks on the command line and props file produced by the MSBuild helper."""

    @staticmethod
    def _msbuild(settings):
        """Return an MSBuild instance backed by a mock conanfile holding *settings*."""
        return MSBuild(MockConanfile(MockSettings(settings)))

    def test_dont_mess_with_build_type(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64",
                                 "compiler.runtime": "MDd"})
        self.assertEqual(msbuild.build_env.flags, [])
        template = msbuild._get_props_file_contents()
        self.assertNotIn("-Ob0", template)
        self.assertNotIn("-Od", template)
        msbuild.build_env.flags = ["-Zi"]
        template = msbuild._get_props_file_contents()
        self.assertNotIn("-Ob0", template)
        self.assertNotIn("-Od", template)
        self.assertIn("-Zi", template)
        self.assertIn("<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>", template)

    def test_skip_only_none_definitions(self):
        # https://github.com/conan-io/conan/issues/6728
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64",
                                 "compiler.runtime": "MDd"})
        template = msbuild._get_props_file_contents(definitions={"foo": 0, "bar": False})
        self.assertIn("<PreprocessorDefinitions>foo=0;bar=False;%(PreprocessorDefinitions)",
                      template)

    def test_without_runtime(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64"})
        template = msbuild._get_props_file_contents()
        self.assertNotIn("<RuntimeLibrary>", template)

    def test_custom_properties(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64"})
        command = msbuild.get_command("project_file.sln",
                                      properties={"MyProp1": "MyValue1",
                                                  "MyProp2": "MyValue2"})
        self.assertIn('/p:MyProp1="MyValue1"', command)
        self.assertIn('/p:MyProp2="MyValue2"', command)

    def test_binary_logging_off_explicit(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.version": "15",
                                 "arch": "x86_64",
                                 "compiler.runtime": "MDd"})
        command = msbuild.get_command("dummy.sln", output_binary_log=False)
        self.assertNotIn("/bl", command)

    def test_binary_logging_off_implicit(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.version": "15",
                                 "arch": "x86_64",
                                 "compiler.runtime": "MDd"})
        command = msbuild.get_command("dummy.sln")
        self.assertNotIn("/bl", command)

    def test_error_targets_argument(self):
        # targets must be a list; a bare string is rejected.
        msbuild = self._msbuild({})
        with self.assertRaises(TypeError):
            msbuild.get_command("dummy.sln", targets="sometarget")

    @parameterized.expand([("17", "v143"),
                           ("16", "v142"),
                           ("15", "v141"),
                           ("14", "v140"),
                           ("12", "v120"),
                           ("11", "v110"),
                           ("10", "v100"),
                           ("9", "v90"),
                           ("8", "v80")])
    def test_default_toolset(self, compiler_version, expected_toolset):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.version": compiler_version,
                                 "arch": "x86_64"})
        command = msbuild.get_command("project_should_flags_test_file.sln")
        self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)

    @parameterized.expand([("v143",),
                           ("v142",),
                           ("v141",),
                           ("v140",),
                           ("v120",),
                           ("v110",),
                           ("v100",),
                           ("v90",),
                           ("v80",)])
    def test_explicit_toolset(self, expected_toolset):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.version": "15",
                                 "arch": "x86_64"})
        command = msbuild.get_command("project_should_flags_test_file.sln",
                                      toolset=expected_toolset)
        self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)

    @parameterized.expand([("16", "v141_xp"),
                           ("15", "v141_xp"),
                           ("14", "v140_xp"),
                           ("12", "v120_xp"),
                           ("11", "v110_xp")])
    def test_custom_toolset(self, compiler_version, expected_toolset):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.version": compiler_version,
                                 "compiler.toolset": expected_toolset,
                                 "arch": "x86_64"})
        command = msbuild.get_command("project_should_flags_test_file.sln")
        self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)

    def test_definitions(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64",
                                 "compiler.runtime": "MDd"})
        template = msbuild._get_props_file_contents(definitions={'_WIN32_WINNT': "0x0501"})
        self.assertIn("<PreprocessorDefinitions>"
                      "_WIN32_WINNT=0x0501;"
                      "%(PreprocessorDefinitions)</PreprocessorDefinitions>", template)

    def test_definitions_no_value(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64",
                                 "compiler.runtime": "MDd"})
        template = msbuild._get_props_file_contents(definitions={'_DEBUG': None})
        self.assertIn("<PreprocessorDefinitions>"
                      "_DEBUG;"
                      "%(PreprocessorDefinitions)</PreprocessorDefinitions>", template)

    def test_verbosity_default(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64"})
        command = msbuild.get_command("projecshould_flags_testt_file.sln")
        self.assertIn('/verbosity:minimal', command)

    def test_verbosity_env(self):
        with tools.environment_append({"CONAN_MSBUILD_VERBOSITY": "detailed"}):
            msbuild = self._msbuild({"build_type": "Debug",
                                     "compiler": "Visual Studio",
                                     "arch": "x86_64"})
            command = msbuild.get_command("projecshould_flags_testt_file.sln")
            self.assertIn('/verbosity:detailed', command)

    def test_verbosity_explicit(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64"})
        command = msbuild.get_command("projecshould_flags_testt_file.sln", verbosity="quiet")
        self.assertIn('/verbosity:quiet', command)

    def test_properties_injection(self):
        # https://github.com/conan-io/conan/issues/4471
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "arch": "x86_64"})
        command = msbuild.get_command("dummy.sln", props_file_path="conan_build.props")
        match = re.search('/p:ForceImportBeforeCppTargets="(.+?)"', command)
        self.assertTrue(
            match, "Haven't been able to find the ForceImportBeforeCppTargets")
        props_file_path = match.group(1)
        self.assertTrue(os.path.isabs(props_file_path))
        self.assertEqual(os.path.basename(props_file_path), "conan_build.props")

    def test_windows_ce(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.version": "9",
                                 "os": "WindowsCE",
                                 "os.platform": "YOUR PLATFORM SDK (ARMV4)",
                                 "arch": "armv4"})
        command = msbuild.get_command("test.sln")
        self.assertIn('/p:Platform="YOUR PLATFORM SDK (ARMV4)"', command)

    def test_intel(self):
        msbuild = self._msbuild({"build_type": "Debug",
                                 "compiler": "intel",
                                 "compiler.version": "19.1",
                                 "compiler.base": "Visual Studio",
                                 "compiler.base.version": "15",
                                 "arch": "x86_64"})
        expected_toolset = "Intel C++ Compiler 19.1"
        command = msbuild.get_command("project_should_flags_test_file.sln")
        self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)
| |
from __future__ import absolute_import
import sys
import os
import errno
import types
import gc
import signal
import traceback
from gevent.event import AsyncResult
from gevent.hub import get_hub, linkproxy, sleep, getcurrent
from gevent.fileobject import FileObject
from gevent.greenlet import Greenlet, joinall
# Convenience alias used throughout this module.
spawn = Greenlet.spawn
import subprocess as __subprocess__
# Standard functions and classes that this module re-implements in a gevent-aware way.
__implements__ = ['Popen',
                  'call',
                  'check_call',
                  'check_output']
# Standard functions and classes that this module re-imports.
__imports__ = ['PIPE',
               'STDOUT',
               'CalledProcessError',
               # Windows:
               'CREATE_NEW_CONSOLE',
               'CREATE_NEW_PROCESS_GROUP',
               'STD_INPUT_HANDLE',
               'STD_OUTPUT_HANDLE',
               'STD_ERROR_HANDLE',
               'SW_HIDE',
               'STARTF_USESTDHANDLES',
               'STARTF_USESHOWWINDOW']
# Names that may or may not exist depending on platform / Python version.
__extra__ = ['MAXFD',
             '_eintr_retry_call',
             'STARTUPINFO',
             'pywintypes',
             'list2cmdline',
             '_subprocess',
             # Python 2.5 does not have _subprocess, so we don't use it
             'WAIT_OBJECT_0',
             'WaitForSingleObject',
             'GetExitCodeProcess',
             'GetStdHandle',
             'CreatePipe',
             'DuplicateHandle',
             'GetCurrentProcess',
             'DUPLICATE_SAME_ACCESS',
             'GetModuleFileName',
             'GetVersion',
             'CreateProcess',
             'INFINITE',
             'TerminateProcess']
# Re-export whatever stdlib subprocess provides; names missing on this
# platform are demoted from __imports__ to the optional __extra__ list.
for name in __imports__[:]:
    try:
        value = getattr(__subprocess__, name)
        globals()[name] = value
    except AttributeError:
        __imports__.remove(name)
        __extra__.append(name)
# stdlib subprocess gained check_output in 2.7; treat it as optional before that.
if sys.version_info[:2] <= (2, 6):
    __implements__.remove('check_output')
    __extra__.append('check_output')
_subprocess = getattr(__subprocess__, '_subprocess', None)
_NONE = object()
# Resolve the optional names from subprocess or the Windows-only _subprocess
# helper module; drop the ones that exist nowhere.
for name in __extra__[:]:
    if name in globals():
        continue
    value = _NONE
    try:
        value = getattr(__subprocess__, name)
    except AttributeError:
        if _subprocess is not None:
            try:
                value = getattr(_subprocess, name)
            except AttributeError:
                pass
    if value is _NONE:
        __extra__.remove(name)
    else:
        globals()[name] = value
__all__ = __implements__ + __imports__
mswindows = sys.platform == 'win32'
if mswindows:
    import msvcrt
else:
    import fcntl
    import pickle
    from gevent import monkey
    # Must use the real os.fork, not a monkey-patched wrapper.
    fork = monkey.get_original('os', 'fork')
def call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete, then
    return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

        retcode = call(["ls", "-l"])
    """
    process = Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If the
    exit code was zero, return; otherwise raise CalledProcessError, whose
    returncode attribute holds the exit code.

    The arguments are the same as for the Popen constructor.  Example:

        check_call(["ls", "-l"])
    """
    returncode = call(*popenargs, **kwargs)
    if not returncode:
        return 0
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(returncode, cmd)
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError, whose
    returncode attribute holds the exit code and whose output attribute
    holds the captured output.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-1", "/dev/null"])
    '/dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c", "echo hello world"], stderr=STDOUT)
    'hello world\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    output = process.communicate()[0]
    returncode = process.poll()
    if returncode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(returncode, cmd, output=output)
    return output
class Popen(object):
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0, threadpool=None):
        """Create new Popen instance.

        Arguments mirror subprocess.Popen.  *threadpool* is gevent-specific:
        it is only meaningful on Windows, where blocking waits are delegated
        to it (defaults to the hub's threadpool); it must be None on POSIX.
        """
        # `long` is the Python 2 arbitrary-precision integer type.
        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")
        hub = get_hub()
        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds and (stdin is not None or stdout is not None or
                              stderr is not None):
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms if you redirect stdin/stdout/stderr")
            if threadpool is None:
                threadpool = hub.threadpool
            self.threadpool = threadpool
            # _waiting guards against spawning more than one waiter greenlet.
            self._waiting = False
        else:
            # POSIX
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")
            assert threadpool is None
            self._loop = hub.loop
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        # result is set to the returncode once the child exits; waiters
        # block on it instead of on the OS.
        self.result = AsyncResult()
        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.
        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)
        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)
        if mswindows:
            # Convert the parent-side Windows handles into CRT fds so they
            # can be wrapped in FileObjects below.
            if p2cwrite is not None:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread is not None:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread is not None:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)
        # Wrap the parent ends in cooperative (gevent) file objects.
        if p2cwrite is not None:
            self.stdin = FileObject(p2cwrite, 'wb')
        if c2pread is not None:
            if universal_newlines:
                self.stdout = FileObject(c2pread, 'rU')
            else:
                self.stdout = FileObject(c2pread, 'rb')
        if errread is not None:
            if universal_newlines:
                self.stderr = FileObject(errread, 'rU')
            else:
                self.stderr = FileObject(errread, 'rb')
def __repr__(self):
return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode)
def _on_child(self, watcher):
watcher.stop()
status = watcher.rstatus
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
self.returncode = os.WEXITSTATUS(status)
self.result.set(self.returncode)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
greenlets = []
if self.stdin:
greenlets.append(spawn(write_and_close, self.stdin, input))
if self.stdout:
stdout = spawn(self.stdout.read)
greenlets.append(stdout)
else:
stdout = None
if self.stderr:
stderr = spawn(self.stderr.read)
greenlets.append(stderr)
else:
stderr = None
joinall(greenlets)
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
self.wait()
return (None if stdout is None else stdout.value or '',
None if stderr is None else stderr.value or '')
    def poll(self):
        """Check whether the child has exited; return returncode or None."""
        return self._internal_poll()
    def rawlink(self, callback):
        # Register *callback* to be invoked (with this Popen instance) once
        # the exit status is available.  Overridden on Windows below.
        self.result.rawlink(linkproxy(callback, self))
    # XXX unlink
if mswindows:
#
# Windows methods
#
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            # Fast path: no redirection requested at all.
            if stdin is None and stdout is None and stderr is None:
                return (None, None, None, None, None, None)
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None
            if stdin is None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
                if p2cread is None:
                    # No stdin handle (e.g. GUI process): give the child a
                    # dummy pipe read end.
                    p2cread, _ = CreatePipe(None, 0)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            # The child-side handle must be inheritable.
            p2cread = self._make_inheritable(p2cread)
            if stdout is None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = CreatePipe(None, 0)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)
            if stderr is None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = CreatePipe(None, 0)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
            elif stderr == STDOUT:
                # Merge stderr into whatever stdout was wired to.
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)
            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)
        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            # DuplicateHandle with bInheritHandle=1 produces a handle the
            # child process can inherit; access rights are preserved.
            return DuplicateHandle(GetCurrentProcess(),
                                   handle, GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""
            # CreateProcess wants a single command-line string.
            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)
            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite
            if shell:
                startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
                if GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com":
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE
            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                                 # no special security
                                                 None, None,
                                                 int(not close_fds),
                                                 creationflags,
                                                 env,
                                                 cwd,
                                                 startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                if p2cread is not None:
                    p2cread.Close()
                if c2pwrite is not None:
                    c2pwrite.Close()
                if errwrite is not None:
                    errwrite.Close()
            # Retain the process handle, but close the thread handle
            self._handle = hp
            self.pid = pid
            ht.Close()
def _internal_poll(self):
"""Check if child process has terminated. Returns returncode
attribute.
"""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
self.result.set(self.returncode)
return self.returncode
def rawlink(self, callback):
if not self.result.ready() and not self._waiting:
self._waiting = True
Greenlet.spawn(self._wait)
self.result.rawlink(linkproxy(callback, self))
# XXX unlink
        def _blocking_wait(self):
            # Runs on a threadpool thread: block until the child exits,
            # then record and return its exit code.
            WaitForSingleObject(self._handle, INFINITE)
            self.returncode = GetExitCodeProcess(self._handle)
            return self.returncode
        def _wait(self):
            # Delegate the blocking OS wait to the threadpool and feed its
            # result (the returncode) into self.result.
            self.threadpool.spawn(self._blocking_wait).rawlink(self.result)
def wait(self, timeout=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
if not self._waiting:
self._waiting = True
self._wait()
return self.result.wait(timeout=timeout)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
        def terminate(self):
            """Terminates the process
            """
            # 1 becomes the exit code the terminated process reports.
            TerminateProcess(self._handle, 1)
        # On Windows, kill() is simply an alias for terminate().
        kill = terminate
else:
#
# POSIX methods
#
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            # On POSIX all six slots are plain file descriptors (or None).
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None
            if stdin is None:
                # Inherit the parent's stdin.
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = self.pipe_cloexec()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()
            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = self.pipe_cloexec()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()
            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = self.pipe_cloexec()
            elif stderr == STDOUT:
                # Merge stderr into whatever stdout was wired to.
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()
            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def _remove_nonblock_flag(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def pipe_cloexec(self):
"""Create a pipe with FDs set CLOEXEC."""
# Pipes' FDs are set CLOEXEC by default because we don't want them
# to be inherited by other subprocesses: the CLOEXEC flag is removed
# from the child's FDs by _dup2(), between fork() and exec().
# This is not atomic: we would need the pipe2() syscall for that.
r, w = os.pipe()
self._set_cloexec_flag(r)
self._set_cloexec_flag(w)
return r, w
def _close_fds(self, but):
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""
            if isinstance(args, types.StringTypes):
                args = [args]
            else:
                args = list(args)
            if shell:
                args = ["/bin/sh", "-c"] + args
                if executable:
                    args[0] = executable
            if executable is None:
                executable = args[0]
            # Tell the gevent loop to watch SIGCHLD so _on_child fires.
            self._loop.install_sigchld()
            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = self.pipe_cloexec()
            try:
                try:
                    gc_was_enabled = gc.isenabled()
                    # Disable gc to avoid bug where gc -> file_dealloc ->
                    # write to stderr -> hang. http://bugs.python.org/issue1336
                    gc.disable()
                    try:
                        self.pid = fork()
                    except:
                        if gc_was_enabled:
                            gc.enable()
                        raise
                    if self.pid == 0:
                        # Child
                        try:
                            # Close parent's pipe ends
                            if p2cwrite is not None:
                                os.close(p2cwrite)
                            if c2pread is not None:
                                os.close(c2pread)
                            if errread is not None:
                                os.close(errread)
                            os.close(errpipe_read)
                            # When duping fds, if there arises a situation
                            # where one of the fds is either 0, 1 or 2, it
                            # is possible that it is overwritten (#12607).
                            if c2pwrite == 0:
                                c2pwrite = os.dup(c2pwrite)
                            if errwrite == 0 or errwrite == 1:
                                errwrite = os.dup(errwrite)
                            # Dup fds for child
                            def _dup2(a, b):
                                # dup2() removes the CLOEXEC flag but
                                # we must do it ourselves if dup2()
                                # would be a no-op (issue #10806).
                                if a == b:
                                    self._set_cloexec_flag(a, False)
                                elif a is not None:
                                    os.dup2(a, b)
                                self._remove_nonblock_flag(b)
                            _dup2(p2cread, 0)
                            _dup2(c2pwrite, 1)
                            _dup2(errwrite, 2)
                            # Close pipe fds.  Make sure we don't close the
                            # same fd more than once, or standard fds.
                            closed = set([None])
                            for fd in [p2cread, c2pwrite, errwrite]:
                                if fd not in closed and fd > 2:
                                    os.close(fd)
                                    closed.add(fd)
                            # Close all other fds, if asked for
                            if close_fds:
                                self._close_fds(but=errpipe_write)
                            if cwd is not None:
                                os.chdir(cwd)
                            if preexec_fn:
                                preexec_fn()
                            # On success exec replaces this process; nothing
                            # below the exec runs in the child.
                            if env is None:
                                os.execvp(executable, args)
                            else:
                                os.execvpe(executable, args, env)
                        except:
                            exc_type, exc_value, tb = sys.exc_info()
                            # Save the traceback and attach it to the exception object
                            exc_lines = traceback.format_exception(exc_type,
                                                                   exc_value,
                                                                   tb)
                            exc_value.child_traceback = ''.join(exc_lines)
                            # Ship the pickled exception to the parent over
                            # the error pipe.
                            os.write(errpipe_write, pickle.dumps(exc_value))
                        finally:
                            # Make sure that the process exits no matter what.
                            # The return code does not matter much as it won't be
                            # reported to the application
                            os._exit(1)
                    # Parent
                    self._watcher = self._loop.child(self.pid)
                    self._watcher.start(self._on_child, self._watcher)
                    if gc_was_enabled:
                        gc.enable()
                finally:
                    # be sure the FD is closed no matter what
                    os.close(errpipe_write)
                if p2cread is not None and p2cwrite is not None:
                    os.close(p2cread)
                if c2pwrite is not None and c2pread is not None:
                    os.close(c2pwrite)
                if errwrite is not None and errread is not None:
                    os.close(errwrite)
                # Wait for exec to fail or succeed; possibly raising exception
                errpipe_read = FileObject(errpipe_read, 'rb')
                data = errpipe_read.read()
            finally:
                # errpipe_read may still be a raw fd if the FileObject wrap
                # above did not happen (an earlier exception).
                if hasattr(errpipe_read, 'close'):
                    errpipe_read.close()
                else:
                    os.close(errpipe_read)
            if data != "":
                # Non-empty data means the child failed before/at exec:
                # reap it, then re-raise the pickled exception here.
                self.wait()
                child_exception = pickle.loads(data)
                for fd in (p2cwrite, c2pread, errread):
                    if fd is not None:
                        os.close(fd)
                raise child_exception
def _handle_exitstatus(self, sts):
    """Decode a raw wait() status word into self.returncode.

    A signal death yields the negated signal number (matching the stdlib
    subprocess convention); a normal exit yields the exit code.
    """
    if os.WIFSIGNALED(sts):
        self.returncode = -os.WTERMSIG(sts)
        return
    if os.WIFEXITED(sts):
        self.returncode = os.WEXITSTATUS(sts)
        return
    # Should never happen
    raise RuntimeError("Unknown child exit status!")
def _internal_poll(self):
    """Check if child process has terminated. Returns returncode attribute.

    If called off the hub greenlet while the process is still running, it
    briefly yields so that a pending SIGCHLD can be processed first.
    """
    if self.returncode is None:
        if get_hub() is not getcurrent():
            # Default to True when the loop doesn't expose sig_pending.
            if getattr(self._loop, 'sig_pending', True):
                sleep(0.00001)
    return self.returncode
def wait(self, timeout=None):
    """Wait for the child process to terminate.

    Delegates to the underlying result event; returns the returncode
    attribute (or None if the timeout expires first).
    """
    exit_status = self.result.wait(timeout=timeout)
    return exit_status
def send_signal(self, sig):
    """Deliver signal `sig` to the child process via os.kill."""
    os.kill(self.pid, sig)
def terminate(self):
    """Ask the child to exit by sending SIGTERM."""
    self.send_signal(signal.SIGTERM)
def kill(self):
    """Forcibly kill the child by sending SIGKILL."""
    self.send_signal(signal.SIGKILL)
def write_and_close(fobj, data):
try:
if data:
fobj.write(data)
except (OSError, IOError), ex:
if ex.errno != errno.EPIPE and ex.errno != errno.EINVAL:
raise
finally:
try:
fobj.close()
except EnvironmentError:
pass
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_args
# Public API of this module, consumed by `from ... import *`; keep in sync
# with the loss functions defined below.
__all__ = ["absolute_difference",
           "add_loss",
           "cosine_distance",
           "compute_weighted_loss",
           "get_losses",
           "get_regularization_losses",
           "get_total_loss",
           "hinge_loss",
           "log_loss",
           "mean_pairwise_squared_error",
           "mean_squared_error",
           "sigmoid_cross_entropy",
           "softmax_cross_entropy",
           "sparse_softmax_cross_entropy"]
def _scale_losses(losses, weights):
    """Returns the sum of `losses` scaled element-wise by `weights`.

    `losses` is summed over its trailing dimensions until its rank matches
    that of `weights`; the reduced tensor is then multiplied by `weights`
    and summed down to a scalar. Conceptually this is the same as tiling
    `weights` up to the shape of `losses`, multiplying element-wise, and
    summing everything.

    Args:
      losses: A `Tensor` of size [batch_size, d1, ... dN].
      weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].

    Returns:
      A scalar tf.float32 `Tensor` with the weighted sum of `losses`.
    """
    weights_rank = max(0, weights.get_shape().ndims)
    trailing_axes = list(range(weights_rank, losses.get_shape().ndims))
    per_weight_sums = math_ops.reduce_sum(losses,
                                          reduction_indices=trailing_axes)
    weighted = math_ops.multiply(per_weight_sums, weights)
    return math_ops.reduce_sum(weighted)
def _safe_div(numerator, denominator, name="value"):
    """Element-wise division that yields 0 wherever the denominator is zero.

    The denominator is first patched to 1 at its zero entries so the div op
    never produces NaN/Inf; those positions are then masked back to 0. The
    extra patching (rather than masking alone) keeps NaNs out of the
    gradient when the loss itself is zero.

    Args:
      numerator: An arbitrary `Tensor`.
      denominator: A `Tensor` matching `numerator`, assumed non-negative.
      name: An optional name for the returned op.

    Returns:
      numerator / denominator, with 0 where the denominator is 0.
    """
    patched_denominator = array_ops.where(
        math_ops.equal(denominator, 0),
        array_ops.ones_like(denominator), denominator)
    return array_ops.where(
        math_ops.greater(denominator, 0),
        math_ops.div(numerator, patched_denominator),
        array_ops.zeros_like(numerator),
        name=name)
def _safe_mean(losses, num_present):
    """Returns the mean of `losses`, or 0 when `num_present` is zero.

    Args:
      losses: A tensor of individual loss measurements.
      num_present: The number of measurable losses in the tensor.

    Returns:
      A scalar: sum(losses) / num_present, guarded against division by zero.
    """
    return _safe_div(math_ops.reduce_sum(losses), num_present)
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
def compute_weighted_loss(losses, weights=1.0, scope=None):
    """Computes the weighted mean loss and adds it to the LOSSES collection.

    Args:
      losses: A tensor of size [batch_size, d1, ... dN].
      weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
      scope: the scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` that returns the weighted loss, cast back to the
      dtype of `losses`.

    Raises:
      ValueError: If `weights` is `None` or the shape is not compatible with
        `losses`, or if the number of dimensions (rank) of either `losses` or
        `weights` is missing.
    """
    with ops.name_scope(scope, "weighted_loss", [losses, weights]):
        losses = ops.convert_to_tensor(losses)
        # Remember the caller's dtype; all arithmetic happens in float32.
        input_dtype = losses.dtype
        losses = math_ops.to_float(losses)
        weights = math_ops.to_float(ops.convert_to_tensor(weights))
        if losses.get_shape().ndims is None:
            raise ValueError("losses.get_shape().ndims cannot be None")
        weights_shape = weights.get_shape()
        if weights_shape.ndims is None:
            raise ValueError("weights.get_shape().ndims cannot be None")
        # Drop a trailing singleton dimension so weights line up with losses.
        if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
            weights = array_ops.squeeze(weights, [-1])
        total_loss = _scale_losses(losses, weights)
        # Average over elements with non-zero weight; _safe_mean yields 0
        # when nothing is present.
        num_present = _num_present(losses, weights)
        mean_loss = _safe_mean(total_loss, num_present)
        # convert the result back to the input type
        mean_loss = math_ops.cast(mean_loss, input_dtype)
        # Side effect: register the loss in GraphKeys.LOSSES.
        add_loss(mean_loss)
        return mean_loss
def _num_present(losses, weights, per_batch=False):
    """Computes the number of elements in the loss function induced by `weights`.

    A given weights tensor induces different numbers of usable elements in the
    `losses` tensor. The `weights` tensor is broadcast across `losses` for all
    possible dimensions. For example, if `losses` is a tensor of dimension
    [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
    effect, tiled to match the size of `losses`. Following this effective tile,
    the total number of present elements is the number of non-zero weights.

    Args:
      losses: A tensor of size [batch_size, d1, ... dN].
      weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
      per_batch: Whether to return the number of elements per batch or as a sum
        total.

    Returns:
      The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is True, the value is returned as a tensor of size
      [batch_size]. Otherwise, a single scalar tensor is returned.
    """
    # If weights is a scalar, its easy to compute:
    if weights.get_shape().ndims == 0:
        # Scalar weight: every element counts (uniformly per batch sample)
        # unless the weight is exactly 0, in which case none do.
        batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                       [0], [1]), [])
        num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                     math_ops.to_float(batch_size))
        num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                        0.0, num_per_batch)
        # Tile the scalar count out to a [batch_size] vector.
        num_per_batch = math_ops.multiply(array_ops.ones(
            array_ops.reshape(batch_size, [1])), num_per_batch)
        return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
    # First, count the number of nonzero weights:
    if weights.get_shape().ndims >= 1:
        # Reduce over every weight axis except the batch axis.
        reduction_indices = list(range(1, weights.get_shape().ndims))
        num_nonzero_per_batch = math_ops.reduce_sum(
            math_ops.to_float(math_ops.not_equal(weights, 0)),
            reduction_indices=reduction_indices)
    # Next, determine the number of elements that weights would broadcast to:
    # (the trailing dims of `losses` not covered by `weights`).
    broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                     [weights.get_shape().ndims], [-1])
    num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
    num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
    """Registers an externally defined loss in the given graph collection.

    Args:
      loss: A loss `Tensor`.
      loss_collection: Optional collection to add the loss to; a falsy value
        makes this a no-op.
    """
    if not loss_collection:
        return
    ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
    """Returns the losses registered in `loss_collection`.

    Args:
      scope: an optional scope for filtering the losses to return.
      loss_collection: Optional losses collection.

    Returns:
      A list of loss tensors.
    """
    collected = ops.get_collection(loss_collection, scope)
    return collected
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
    """Returns the losses in the REGULARIZATION_LOSSES collection.

    Args:
      scope: an optional scope for filtering the losses to return.

    Returns:
      A list of regularization losses as Tensors.
    """
    return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
    """Returns a tensor whose value represents the total loss.

    Sums everything in the LOSSES collection, optionally together with the
    regularization losses.

    Args:
      add_regularization_losses: A boolean indicating whether or not to use the
        regularization losses in the sum.
      name: The name of the returned tensor.

    Returns:
      A `Tensor` whose value represents the total loss.

    Raises:
      ValueError: if `losses` is not iterable.
    """
    all_losses = get_losses()
    if add_regularization_losses:
        all_losses = all_losses + get_regularization_losses()
    return math_ops.add_n(all_losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
    """Adds an Absolute Difference (L1) loss to the training procedure.

    `weights` scales the loss: a scalar scales the whole loss, a [batch_size]
    vector rescales each sample, and a tensor matching `predictions` rescales
    each measurable element individually.

    Args:
      predictions: The predicted outputs.
      labels: The ground truth output tensor, same dimensions as 'predictions'.
      weights: Coefficients for the loss a scalar, a tensor of shape
        [batch_size] or a tensor whose shape matches `predictions`.
      scope: The scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the loss value.

    Raises:
      ValueError: If the shape of `predictions` doesn't match that of `labels`
        or if the shape of `weights` is invalid.
    """
    with ops.name_scope(scope, "absolute_difference",
                        [predictions, labels, weights]) as scope:
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        elementwise = math_ops.abs(math_ops.subtract(predictions, labels))
        return compute_weighted_loss(elementwise, weights, scope=scope)
@deprecated("2016-12-30",
            "Use tf.losses.sigmoid_cross_entropy instead. Note that the order "
            "of the predictions and labels arguments has been changed.")
def sigmoid_cross_entropy(
    logits, multi_class_labels, weights=1.0, label_smoothing=0, scope=None):
    """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

    `weights` scales the loss: a scalar scales the whole loss; a [batch_size]
    vector rescales each sample.

    With nonzero `label_smoothing`, labels are pulled towards 1/2:
        new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                                + 0.5 * label_smoothing

    Args:
      logits: [batch_size, num_classes] logits outputs of the network .
      multi_class_labels: [batch_size, num_classes] labels in (0, 1).
      weights: Coefficients for the loss. The tensor must be a scalar, a tensor
        of shape [batch_size] or shape [batch_size, num_classes].
      label_smoothing: If greater than 0 then smooth the labels.
      scope: The scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the loss value.

    Raises:
      ValueError: If the shape of `logits` doesn't match that of
        `multi_class_labels` or if the shape of `weights` is invalid, or if
        `weights` is None.
    """
    with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                        [logits, multi_class_labels, weights]) as scope:
        logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
        multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
        if label_smoothing > 0:
            multi_class_labels = (multi_class_labels * (1 - label_smoothing)
                                  + 0.5 * label_smoothing)
        per_entry = nn.sigmoid_cross_entropy_with_logits(
            labels=multi_class_labels, logits=logits, name="xentropy")
        return compute_weighted_loss(per_entry, weights, scope=scope)
@deprecated("2016-12-30",
            "Use tf.losses.softmax_cross_entropy instead. Note that the order "
            "of the logits and labels arguments has been changed.")
def softmax_cross_entropy(
    logits, onehot_labels, weights=1.0, label_smoothing=0, scope=None):
    """Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.

    `weights` scales the loss: a scalar scales the whole loss; a [batch_size]
    vector rescales each sample.

    With nonzero `label_smoothing`, labels are pulled towards 1/num_classes:
        new_onehot_labels = onehot_labels * (1 - label_smoothing)
                            + label_smoothing / num_classes

    Args:
      logits: [batch_size, num_classes] logits outputs of the network .
      onehot_labels: [batch_size, num_classes] one-hot-encoded labels.
      weights: Coefficients for the loss. The tensor must be a scalar or a
        tensor of shape [batch_size].
      label_smoothing: If greater than 0 then smooth the labels.
      scope: the scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the mean loss value.

    Raises:
      ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
        or if the shape of `weights` is invalid or if `weights` is None.
    """
    with ops.name_scope(scope, "softmax_cross_entropy_loss",
                        [logits, onehot_labels, weights]) as scope:
        logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
        onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
        if label_smoothing > 0:
            num_classes = math_ops.cast(
                array_ops.shape(onehot_labels)[1], logits.dtype)
            smooth_positives = 1.0 - label_smoothing
            smooth_negatives = label_smoothing / num_classes
            onehot_labels = onehot_labels * smooth_positives + smooth_negatives
        per_example = nn.softmax_cross_entropy_with_logits(
            labels=onehot_labels, logits=logits, name="xentropy")
        return compute_weighted_loss(per_example, weights, scope=scope)
@deprecated("2016-12-30",
            "Use tf.losses.sparse_softmax_cross_entropy instead. Note that "
            "the order of the logits and labels arguments has been changed.")
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
    """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.

    `weights` scales the loss: a scalar scales the whole loss; a [batch_size]
    vector rescales each sample.

    Args:
      logits: [batch_size, num_classes] logits outputs of the network .
      labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or
        `int64` in the range `[0, num_classes)`.
      weights: Coefficients for the loss. The tensor must be a scalar or a
        tensor of shape [batch_size] or [batch_size, 1].
      scope: the scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the mean loss value.

    Raises:
      ValueError: If the shapes of `logits`, `labels`, and `weights` are
        incompatible, or if `weights` is None.
    """
    with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                        [logits, labels, weights]) as scope:
        # Flatten [batch_size, 1] labels down to [batch_size].
        labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
        per_example = nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits, name="xentropy")
        return compute_weighted_loss(per_example, weights, scope=scope)
@deprecated("2016-12-30",
            "Use tf.losses.log_loss instead. Note that the order of the "
            "predictions and labels arguments has been changed.")
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
    """Adds a Log Loss term to the training procedure.

    `weights` scales the loss: a scalar scales the whole loss, a [batch_size]
    vector rescales each sample, and a tensor matching `predictions` rescales
    each measurable element individually.

    Args:
      predictions: The predicted outputs.
      labels: The ground truth output tensor, same dimensions as 'predictions'.
      weights: Coefficients for the loss a scalar, a tensor of shape
        [batch_size] or a tensor whose shape matches `predictions`.
      epsilon: A small increment to add to avoid taking a log of zero.
      scope: The scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the loss value.

    Raises:
      ValueError: If the shape of `predictions` doesn't match that of `labels`
        or if the shape of `weights` is invalid.
    """
    with ops.name_scope(scope, "log_loss",
                        [predictions, labels, weights]) as scope:
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        # -[y*log(p) + (1-y)*log(1-p)], with epsilon guarding log(0).
        positive_term = math_ops.multiply(
            labels, math_ops.log(predictions + epsilon))
        negative_term = math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
        losses = -positive_term - negative_term
        return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
            "Use tf.losses.hinge_loss instead. Note that the order of the "
            "logits and labels arguments has been changed, and to stay "
            "unweighted, reduction=Reduction.NONE")
def hinge_loss(logits, labels=None, scope=None):
    """Returns the element-wise hinge loss tensor max(0, 1 - y*logits).

    Args:
      logits: The logits, a float tensor.
      labels: The ground truth output tensor. Its shape should match the shape
        of logits. The values of the tensor are expected to be 0.0 or 1.0.
      scope: The scope for the operations performed in computing the loss.

    Returns:
      An unweighted `Tensor` of same shape as `logits` and `labels`
      representing the loss values across the batch.

    Raises:
      ValueError: If the shapes of `logits` and `labels` don't match.
    """
    with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
        logits.get_shape().assert_is_compatible_with(labels.get_shape())
        # Map {0, 1} labels onto {-1, +1} floats before applying the hinge.
        labels = math_ops.to_float(labels)
        ones = array_ops.ones_like(labels)
        labels = math_ops.subtract(2 * labels, ones)
        margin = math_ops.subtract(ones, math_ops.multiply(labels, logits))
        return nn_ops.relu(margin)
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
    """Adds a Sum-of-Squares loss to the training procedure.

    `weights` scales the loss: a scalar scales the whole loss, a [batch_size]
    vector rescales each sample, and a tensor matching `predictions` rescales
    each measurable element individually.

    Args:
      predictions: The predicted outputs.
      labels: The ground truth output tensor, same dimensions as 'predictions'.
      weights: Coefficients for the loss a scalar, a tensor of shape
        [batch_size] or a tensor whose shape matches `predictions`.
      scope: The scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the loss value.

    Raises:
      ValueError: If the shape of `predictions` doesn't match that of `labels`
        or if the shape of `weights` is invalid.
    """
    with ops.name_scope(scope, "mean_squared_error",
                        [predictions, labels, weights]) as scope:
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        squared_diffs = math_ops.square(math_ops.subtract(predictions, labels))
        return compute_weighted_loss(squared_diffs, weights, scope=scope)
@deprecated("2016-12-30",
            "Use tf.losses.mean_pairwise_squared_error instead. Note that the "
            "order of the predictions and labels arguments has been changed.")
def mean_pairwise_squared_error(
    predictions, labels=None, weights=1.0, scope=None):
    """Adds a pairwise-errors-squared loss to the training procedure.

    Unlike `mean_squared_error`, which is a measure of the differences between
    corresponding elements of `predictions` and `labels`,
    `mean_pairwise_squared_error` is a measure of the differences between pairs
    of corresponding elements of `predictions` and `labels`.

    For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], three
    pairs of differences are summed to compute the loss:
      loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3

    Note that since the inputs are of size [batch_size, d0, ... dN], the
    corresponding pairs are computed within each batch sample but not across
    samples within a batch. For example, if `predictions` represents a batch of
    16 grayscale images of dimension [batch_size, 100, 200], then the set of
    pairs is drawn from each image, but not across images.

    `weights` acts as a coefficient for the loss. If a scalar is provided, then
    the loss is simply scaled by the given value. If `weights` is a tensor of
    size [batch_size], then the total loss for each sample of the batch is
    rescaled by the corresponding element in the `weights` vector.

    Args:
      predictions: The predicted outputs, a tensor of size
        [batch_size, d0, .. dN] where N+1 is the total number of dimensions in
        `predictions`.
      labels: The ground truth output tensor, whose shape must match the shape
        of the `predictions` tensor.
      weights: Coefficients for the loss a scalar, a tensor of shape
        [batch_size] or a tensor whose shape matches `predictions`.
      scope: The scope for the operations performed in computing the loss.

    Returns:
      A scalar `Tensor` representing the loss value.

    Raises:
      ValueError: If the shape of `predictions` doesn't match that of `labels`
        or if the shape of `weights` is invalid.
    """
    with ops.name_scope(scope, "mean_pairwise_squared_error",
                        [predictions, labels, weights]) as scope:
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        weights = math_ops.to_float(ops.convert_to_tensor(weights))
        diffs = math_ops.subtract(predictions, labels)
        # Need to verify here since the function doesn't use compute_weighted_loss
        if diffs.get_shape().ndims is None:
            raise ValueError("diffs.get_shape().ndims cannot be None")
        if weights.get_shape().ndims is None:
            raise ValueError("weights.get_shape().ndims cannot be None")
        # All non-batch axes are reduced per sample.
        reduction_indices = list(range(1, diffs.get_shape().ndims))
        sum_squares_diff_per_batch = math_ops.reduce_sum(
            math_ops.square(diffs),
            reduction_indices=reduction_indices)
        num_present_per_batch = _num_present(diffs, weights, per_batch=True)
        # The pairwise sum over (d_i - d_j)^2 expands to
        # 2*n*sum(d^2) - 2*(sum(d))^2, hence the two normalized terms below.
        term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
                                num_present_per_batch)
        sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
        term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
                                math_ops.square(num_present_per_batch))
        loss = _scale_losses(term1 - term2, weights)
        # Guard against an all-masked batch: emit exactly zero instead of NaN.
        mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0,
                                    loss,
                                    array_ops.zeros_like(loss),
                                    name="value")
        # Side effect: register the loss in GraphKeys.LOSSES.
        add_loss(mean_loss)
        return mean_loss
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(
    predictions, labels=None, axis=None, weights=1.0, scope=None, dim=None):
    """Adds a cosine-distance loss to the training procedure.

    Both `predictions` and `labels` are assumed to be already unit-normalized,
    so the distance reduces to 1 minus their dot product along `axis`.

    Args:
      predictions: An arbitrary matrix.
      labels: A `Tensor` whose shape matches 'predictions'
      axis: The dimension along which the cosine distance is computed.
      weights: Coefficients for the loss a scalar, a tensor of shape
        [batch_size] or a tensor whose shape matches `predictions`.
      scope: The scope for the operations performed in computing the loss.
      dim: The old (deprecated) name for `axis`.

    Returns:
      A scalar `Tensor` representing the loss value.

    Raises:
      ValueError: If `predictions` shape doesn't match `labels` shape, or
        `weights` is `None`.
    """
    if dim is not None:
        if axis is not None:
            raise ValueError("Cannot specify both 'axis' and 'dim'")
        axis = dim
    if axis is None and dim is None:
        raise ValueError("You must specify 'axis'.")
    with ops.name_scope(scope, "cosine_distance_loss",
                        [predictions, labels, weights]) as scope:
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        similarity_terms = math_ops.multiply(predictions, labels)
        losses = 1 - math_ops.reduce_sum(similarity_terms,
                                         reduction_indices=[axis,])
        return compute_weighted_loss(losses, weights, scope=scope)
| |
#!/usr/bin/env python
"""Class for querying the Twitter API"""
__author__ = "Peter J Usherwood"
__python_version__ = "3.5"
import json
import os
import tweepy
import time
from datetime import datetime
import math
import io
from tweepy.streaming import StreamListener
from tweepy import Stream
import re
from requests.exceptions import Timeout, ConnectionError
import ssl
from usherwood_ds.data_imports.import_classes.twitter_classes import TwitterTextMention, TwitterUser
from usherwood_ds.data_imports.import_classes.common_classes import TextMention, User
# Load the shared API credentials once at import time from
# api_credentials.json one directory above this module.
# NOTE(review): importing this module fails if the file is missing or not
# valid JSON — confirm that hard failure is intended.
with open(os.path.join(os.path.dirname(__file__), "../api_credentials.json"),'r') as openfile:
    api_credentials = json.load(openfile)
class TwitterAPI:
def __init__(self,
             api_credentials=api_credentials,
             run_time=1200,
             save_increment=600,
             stream_save_path='raw_tweets.json',
             regex_rule='test'):
    """Store the Twitter credentials and build the REST and stream handles.

    Args:
        api_credentials: dict with a "Twitter" entry holding the four keys.
        run_time: seconds the streaming listener is allowed to run.
        save_increment: seconds between incremental stream saves.
        stream_save_path: file the raw streamed tweets are written to.
        regex_rule: filter pattern handed to the stream listener.
    """
    twitter_creds = api_credentials["Twitter"]
    self.consumer_key = twitter_creds["consumer_key"]
    self.consumer_secret = twitter_creds["consumer_secret"]
    self.access_token_key = twitter_creds["access_token_key"]
    self.access_token_secret = twitter_creds["access_token_secret"]
    self.api = None
    self.stream_api = None
    # NOTE: 'save_incrememnt' (sic) is the keyword setup_api declares.
    self.setup_api(run_time=run_time,
                   save_incrememnt=save_increment,
                   stream_save_path=stream_save_path,
                   regex_rule=regex_rule)
def setup_api(self,
              run_time,
              save_incrememnt,
              stream_save_path,
              regex_rule):
    """Build the tweepy REST and streaming clients; called from __init__.

    NOTE(review): the parameter name 'save_incrememnt' is misspelled but is
    kept because callers pass it by keyword.

    Returns:
        True once both clients are assigned to self.api / self.stream_api.
    """
    listener = StdOutListener(time_limit=run_time,
                              save_increment=save_incrememnt,
                              stream_save_path=stream_save_path,
                              regex_rule=regex_rule)
    auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
    auth.set_access_token(self.access_token_key, self.access_token_secret)
    self.api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    self.stream_api = Stream(auth, listener)
    return True
def fortify_twitter_tweet(self, tweet_id):
    """Fortifies data from a Tweet ID.

    :param tweet_id: Twitter Tweet ID
    :return: json response of the fortified Tweet, or None if the lookup
        failed (the error is printed, not raised).
    """
    payload = None
    try:
        payload = self.api.get_status(tweet_id)
        payload = payload._json
    except Exception as e:
        print(e)
    return payload
def fortify_twitter_tweets_batch(self, tweet_ids=None):
    """Fortifies data for tweets in batch (100 IDs per API call).

    Much more cost efficient than fortify_twitter_tweet; a failing chunk is
    printed and skipped rather than aborting the whole batch.

    :param tweet_ids: list of Twitter Tweet IDs
    :return: tuple (tweets, users) of json responses for the fortified
        tweets and their authors
    """
    tweets = []
    users = []
    for id_chunk in chunks(tweet_ids, 100):
        try:
            fetched = [status._json for status in self.api.statuses_lookup(id_chunk)]
            tweets += fetched
            users += [status['user'] for status in fetched]
        except Exception as e:
            print(e)
    return tweets, users
def fortify_twitter_user(self, username=None, user_id=None):
    """Fortifies data from a username or user_id.

    :param username: Twitter username/screen_name (takes precedence)
    :param user_id: Twitter user_id
    :return: json response of the fortified user, or None on failure
        (including when neither identifier is supplied)
    """
    profile = None
    try:
        if username:
            profile = self.api.get_user(screen_name=username)
        elif user_id:
            profile = self.api.get_user(user_id=user_id)
        # With neither argument this raises AttributeError on None and is
        # caught below, matching the permissive behaviour of the class.
        profile = profile._json
    except Exception as e:
        print(e)
    return profile
def fortify_twitter_users_batch(self, usernames=None, user_ids=None):
    """Fortifies data for users in batch (100 identifiers per API call).

    Much more cost efficient than fortify_twitter_user; a failing chunk is
    printed and skipped. Both identifier lists may be supplied, in which
    case the results are concatenated (usernames first).

    :param usernames: list of Twitter usernames/screen_names
    :param user_ids: list of Twitter user_ids
    :return: list of json responses of fortified users
    """
    users = []
    if usernames:
        for name_chunk in chunks(usernames, 100):
            try:
                users += [u._json for u in self.api.lookup_users(screen_names=name_chunk)]
            except Exception as e:
                print(e)
    if user_ids:
        for id_chunk in chunks(user_ids, 100):
            try:
                users += [u._json for u in self.api.lookup_users(user_ids=id_chunk)]
            except Exception as e:
                print(e)
    return users
def get_user_followers(self, username=None, user_id=None, max_number=200):
"""
Retrieves the users following the user specified by a username or user_id
:param username: Twitter username/screen_name
:param user_id: Twitter user_id
:param max_number: The maximum number of Twitter followers to retrieve, 200 can be done in one api call and
thus this makes it a good lower threshold.
:return: list of json responses of fully fortified user objects, one json response for every follower retrieved
"""
followers = []
cursor = -1
valid_run = True
while valid_run:
try:
if username:
batch_followers = self.api.followers(screen_name=username, cursor=cursor, count=200)
elif user_id:
batch_followers = self.api.followers(user_id=user_id, cursor=cursor, count=200)
cursor = batch_followers[1][1]
batch_followers = [follower._json for follower in batch_followers[0]]
followers += batch_followers
if cursor == 0:
valid_run = False
elif len(followers) + 200 > max_number:
valid_run = False
except Exception as e:
print(e)
return followers
def get_user_tweets(self, username=None, user_id=None, max_number=20):
"""
Retrieves the tweets of the user starting with the newest
:param username: Twitter username/screen_name
:param user_id: Twitter user_id
:param max_number: The maximum number of Tweets to retrieve, 20 can be done in one api call and
thus this makes it a good lower threshold.
:return: list of json responses of fully fortified Tweet objects, one json response for every Tweet retrieved
"""
tweets = []
user = []
max_page = int(max_number/20)+1
print(max_page)
try:
if username:
for ix, page in enumerate(tweepy.Cursor(self.api.user_timeline, screen_name=username).pages()):
if ix < max_page:
tweets += page
else:
tweets = [tweet._json for tweet in tweets]
user = tweets[0]['user']
return tweets, user
elif user_id:
for ix, page in enumerate(tweepy.Cursor(self.api.user_timeline, user_id=user_id).pages()):
if ix < max_page:
tweets += page
else:
tweets = [tweet._json for tweet in tweets]
user = tweets[0]['user']
return tweets, user
except Exception as e:
print(e)
tweets = [tweet._json for tweet in tweets]
if tweets:
user = tweets[0]['user']
return tweets, user
def get_user_friends_ids(self, username=None, user_id=None, max_number=5000):
#use this function if you want to limit the number of friends retrieved in one call
COUNTS_PPAGE = 5000
max_page = math.ceil(max_number/COUNTS_PPAGE)
ids = []
try:
if username:
for ix, page in enumerate(tweepy.Cursor(self.api.friends_ids, screen_name=username).pages()):
if ix < max_page:
ids += page
if len(page) != COUNTS_PPAGE:
return ids
else:
return ids
# time.sleep(60)
elif user_id:
# friends = self.api.friends_ids(user_id=user_id,cursor=cursor)
for ix, page in enumerate(tweepy.Cursor(self.api.friends_ids, user_id=user_id).pages()):
if ix < max_page:
ids += page
if len(page) != COUNTS_PPAGE:
return ids
else:
return ids
except (Timeout, ssl.SSLError, ConnectionError) as exc: # ReadTimeoutError,
print('Error, retrying in 15 minutes')
time.sleep(60*15)
self.get_user_friends_ids(self, username=username, user_id=user_id, max_number=max_number)
except tweepy.TweepError as re:
print(re)
print('Passing User')
return []
def get_tweet_replies(self,
username,
tweet_id,
max_replies=100,
max_tries=10000):
"""
Gets all replies on a given tweet
:param username: Str, username of the original tweet author
:param tweet_id: Str, the tweet id to get replies on
:param max_replies: Int, maximum number of replies to try and get
:param max_tries: Int, maximum number of tweets to look through
:return: replies_raw, a list of tweets with user field
"""
replies_raw = []
for reply in tweepy.Cursor(api.api.search, q='to:' + username,
result_type='recent',
timeout=999999).items(max_tries):
if hasattr(reply, 'in_reply_to_status_id_str'):
if (reply.in_reply_to_status_id_str == tweet_id):
replies_raw.append(reply)
if len(replies_raw) == max_replies:
break
replies_raw = [reply._json for reply in replies_raw]
return replies_raw
@staticmethod
def parse_tweet_to_common_mention(tweet):
"""
Creates a common text mention from a tweet from the Twitter API
:param tweet: the Twitter json response
:return: TextMention, used by unified_import
"""
common_mention = TextMention()
common_mention.doc_id = str(tweet['text']) + 'https://twitter.com/'+tweet['user']['screen_name']+\
'/statuses/' + str(tweet['id'])
common_mention.domain = 'twitter.com'
common_mention.source = 'TwitterAPI'
common_mention.url = 'https://twitter.com/'+tweet['user']['screen_name']+'/statuses/' + str(tweet['id'])
common_mention.author_id = 'twitter.com' + str(tweet['user']['screen_name'])
common_mention.dategmt = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S %z %Y')
common_mention.datelocal = None #TODO add
common_mention.datelocalzone = None # TODO add
common_mention.snippet = tweet['text']
common_mention.sentiment = 'Not Found because it does not exist'
common_mention.location = 'Need to Add'
if tweet['geo'] is not None:
common_mention.lat = tweet['geo']['coordinates'][1]
common_mention.long = tweet['geo']['coordinates'][0]
else:
common_mention.lat = None
common_mention.long = None
return common_mention
@staticmethod
def parse_user_to_common_user(user):
"""
Creates a common user from a user object from the Twitter API
:param user: the Twitter json response
:return: User, used by unified_import
"""
common_user = User()
common_user.author_id = 'twitter.com' + str(user['screen_name'])
common_user.domain = 'twitter.com'
common_user.source = 'TwitterAPI'
common_user.author_fullname = user['name']
common_user.author_username = user['screen_name']
common_user.bio = user['description']
common_user.profilepictureurl = user['profile_image_url_https']
return common_user
@staticmethod
def parse_user_to_twitter_user(user):
"""
Creates a Twitter user (a Twitter specific extension of the user class) from a user from the Twitter API
:param user: the Twitter json response
:return: TwitterUser
"""
twitter_user = TwitterUser()
twitter_user.twitter_author_id = str(user['id'])
twitter_user.domain = 'twitter.com'
twitter_user.source = 'TwitterAPI'
twitter_user.author_fullname = user['name']
twitter_user.author_username = user['screen_name']
twitter_user.bio = user['description']
twitter_user.profilepictureurl = user['profile_image_url_https']
twitter_user.followers_count = user['followers_count']
twitter_user.profile_image_full = user['profile_image_url_https'].replace("_normal", "")
twitter_user.verified = user['verified']
twitter_user.number_of_statuses = user['statuses_count']
twitter_user.created_at = datetime.strptime(user['created_at'], '%a %b %d %H:%M:%S %z %Y')
twitter_user.author_id = 'twitter.com' + str(user['id'])
return twitter_user
    @staticmethod
    def parse_tweet_to_twitter_mention(tweet):
        """
        Creates a Twitter text mention (a Twitter specific extension of the mention class) from a tweet from the
        Twitter API
        :param tweet: the Twitter json response
        :return: TwitterTextMention
        """
        twitter_mention = TwitterTextMention()
        twitter_mention.tweet_id = str(tweet['id'])
        twitter_mention.domain = 'twitter.com'
        twitter_mention.source = 'TwitterAPI'
        twitter_mention.url = 'https://twitter.com/statuses/' + str(tweet['id'])
        twitter_mention.author_id = 'twitter.com' + str(tweet['user']['id'])
        twitter_mention.dategmt = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S %z %Y')
        twitter_mention.datelocal = None  # TODO add
        twitter_mention.datelocalzone = None  # TODO add
        twitter_mention.snippet = tweet['text']
        twitter_mention.sentiment = 'Not Found'
        twitter_mention.location = 'Not added'
        twitter_mention.favorite_count = tweet['favorite_count']
        twitter_mention.device = tweet['source']
        twitter_mention.doc_id = str(tweet['text']) + 'https://twitter.com/statuses/' + str(tweet['id'])
        # Twitter's 'geo' coordinates arrive as [lat, lon]; index 1 -> lat,
        # index 0 -> long as stored here.
        if tweet['geo'] is not None:
            twitter_mention.lat = tweet['geo']['coordinates'][1]
            twitter_mention.long = tweet['geo']['coordinates'][0]
        else:
            twitter_mention.lat = None
            twitter_mention.long = None
        # First attached media URL, when present.
        # NOTE(review): imageURL is only assigned on the two paths below; if
        # 'entities' exists but has no usable media the attribute is left
        # untouched — confirm TwitterTextMention provides a default.
        if tweet['entities'] is not None:
            if tweet['entities'].get('media'):
                if tweet['entities']['media'] is not None:
                    if tweet['entities']['media'][0].get('media_url'):
                        twitter_mention.imageURL = tweet['entities']['media'][0]['media_url']
        else:
            twitter_mention.imageURL = None
        # adding retweet information
        retweeted_status = None
        try:
            retweeted_status = tweet['retweeted_status']
        except KeyError:
            pass
        if retweeted_status is not None:
            # is retweeted thus setting retweet count to 0, because otherwise it would use the retweet count
            # of original tweet (which we don't want)
            twitter_mention.retweet_count = 0
            twitter_mention.is_retweet = 1
            twitter_mention.id_of_reweet = tweet['retweeted_status']['id']
            twitter_mention.id_of_original_tweet_author = tweet['retweeted_status']['user']['id']
            twitter_mention.screen_name_of_original_tweet_author = tweet['retweeted_status']['user']['screen_name']
        else:
            twitter_mention.retweet_count = tweet['retweet_count']
            twitter_mention.is_retweet = 0
        # adding quoting info
        quoted_status = None
        try:
            quoted_status = tweet['quoted_status']
        except KeyError:
            pass
        if quoted_status is not None:
            twitter_mention.is_quoting = 1
            twitter_mention.id_of_quoted_tweet = tweet['quoted_status']['id']
            twitter_mention.id_of_quoted_author = tweet['quoted_status']['user']['id']
            twitter_mention.screen_name_of_quoted_author = tweet['quoted_status']['user']['screen_name']
        else:
            twitter_mention.is_quoting = 0
        # adding reply information
        twitter_mention.id_of_antecedent_tweet = tweet['in_reply_to_status_id']
        twitter_mention.screen_name_of_antecedent_author = tweet['in_reply_to_screen_name']
        twitter_mention.id_of_antecedent_author = tweet['in_reply_to_user_id']
        if tweet['in_reply_to_status_id'] is None:
            twitter_mention.is_response = 0
        else:
            twitter_mention.is_response = 1
        return twitter_mention
class StdOutListener(StreamListener):
    """Tweepy stream listener that buffers tweets matching a regex and
    periodically appends them, comma-joined, to a JSON array file."""

    def __init__(self,
                 time_limit=60,
                 save_increment=600,
                 stream_save_path='raw_tweets.json',
                 regex_rule='test'):
        self.time = time.time()              # timestamp when streaming began
        self.limit = time_limit              # total seconds to keep streaming
        self.tweet_data = []                 # buffered raw tweet payloads awaiting flush
        self.save_tick = 1                   # which save interval we are in (1-based)
        self.call_tick = 0                   # number of on_data calls since (re)start
        self.save_increment = save_increment # seconds between flushes to disk
        self.path = stream_save_path         # output JSON file path
        self.new_file = False                # True when this run created the file
        self.regex_rule = regex_rule         # filter applied to the lowercased payload

    def on_data(self, data):
        """Buffer matching payloads; flush every save_increment seconds.

        Returns True to keep the stream open, False once time_limit has
        elapsed (which also writes the closing ']').
        """
        if not os.path.exists(self.path):
            # Create the file so later runs take the append path.
            saveFile = io.open(self.path, 'w', encoding='utf-8')
            self.new_file = True
            saveFile.close()
        else:
            if self.call_tick == 0:
                # Resuming into an existing file: trim the final line
                # (presumably a closing ']') so new records can be appended.
                # NOTE(review): confirm the file always ends with '\n]'.
                file = open(self.path, "r+", encoding="utf-8")
                file.seek(0, os.SEEK_END)
                pos = file.tell() - 1
                # Scan backwards for the last newline.
                while pos > 0 and file.read(1) != "\n":
                    pos -= 1
                    file.seek(pos, os.SEEK_SET)
                if pos > 0:
                    file.seek(pos, os.SEEK_SET)
                    file.truncate()
                file.close()
        # NOTE(review): this handle is opened on every call but only closed
        # on the time-limit path below — the True-returning path leaks it.
        saveFile = io.open(self.path, 'a', encoding='utf-8')
        while (time.time() - self.time) < self.limit:
            try:
                regexp = re.compile(self.regex_rule)
                if regexp.search(data.lower()):
                    self.tweet_data.append(data)
                # Flush the buffer once per save_increment window.
                if (time.time() - self.time) >= self.save_tick * self.save_increment:
                    print(str(len(self.tweet_data)), 'new records saved')
                    if self.save_tick == 1 and self.new_file:
                        # First flush into a brand-new file opens the array.
                        saveFile.write(u'[\n')
                        saveFile.write(','.join(self.tweet_data))
                    else:
                        saveFile.write(','+','.join(self.tweet_data))
                    self.tweet_data = []
                    self.save_tick += 1
                self.call_tick += 1
                return True
            except Exception as e:
                # Back off a minute and retry the same payload.
                print('failed ondata,', str(e))
                time.sleep(60)
                pass
        # Time limit reached: reset counters and close the JSON array.
        self.save_tick = 0
        self.call_tick = 0
        saveFile.write(u'\n]')
        saveFile.close()
        return False

    def on_error(self, status):
        """Back off for a minute on a stream error, then log the status."""
        time.sleep(60)
        print(status)
def finalize_tweet_stream(file_path='raw_tweets_closed.json'):
    """Append the closing ']' to a streamed-tweets JSON file.

    :param file_path: path of the file produced by StdOutListener
    """
    # Use a context manager so the handle is closed even if the write fails.
    with io.open(file_path, 'a', encoding='utf-8') as save_file:
        save_file.write(u'\n]\n')
def chunks(l, n):
    """Yield consecutive slices of *l*, each at most *n* items long."""
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + n]
        start += n
| |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
from oslo.utils import timeutils
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.tests.unit import fake_network
from nova.tests.unit.integrated.api import client
from nova.tests.unit.integrated import integrated_helpers
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
    """Integration tests for the servers API: create, list, rename,
    rebuild, metadata, personality files and (deferred) deletion.

    Note: this file is Python 2 (xrange, iterkeys, str.encode('base64')).
    """
    # API version and request-parameter names; overridden by the v3
    # subclass at the bottom of the file.
    _api_version = 'v2'
    _force_delete_parameter = 'forceDelete'
    _image_ref_parameter = 'imageRef'
    _flavor_ref_parameter = 'flavorRef'
    _access_ipv4_parameter = 'accessIPv4'
    _access_ipv6_parameter = 'accessIPv6'
    _return_resv_id_parameter = 'return_reservation_id'
    _min_count_parameter = 'min_count'

    def setUp(self):
        super(ServersTest, self).setUp()
        self.conductor = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')

    def _wait_for_state_change(self, server, from_status):
        """Poll (up to 50 * 0.1s) until the server leaves from_status."""
        for i in xrange(0, 50):
            server = self.api.get_server(server['id'])
            if server['status'] != from_status:
                break
            time.sleep(.1)
        return server

    def _restart_compute_service(self, *args, **kwargs):
        """restart compute service. NOTE: fake driver forgets all instances."""
        self.compute.kill()
        self.compute = self.start_service('compute', *args, **kwargs)

    def test_get_servers(self):
        # Simple check that listing servers works.
        servers = self.api.get_servers()
        for server in servers:
            LOG.debug("server: %s" % server)

    def test_create_server_with_error(self):
        # Create a server which will enter error state.
        fake_network.set_stub_network_methods(self.stubs)

        def throw_error(*args, **kwargs):
            raise exception.BuildAbortException(reason='',
                                                instance_uuid='fake')

        self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual('ERROR', found_server['status'])
        self._delete_server(created_server_id)

    def test_create_and_delete_server(self):
        # Creates and deletes a server.
        fake_network.set_stub_network_methods(self.stubs)
        # Create server
        # Build the server data gradually, checking errors along the way
        server = {}
        good_server = self._build_minimal_create_server_request()
        post = {'server': server}
        # Without an imageRef, this throws 500.
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)
        # With an invalid imageRef, this throws 500.
        server[self._image_ref_parameter] = self.get_invalid_image()
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)
        # Add a valid imageRef
        server[self._image_ref_parameter] = good_server.get(
            self._image_ref_parameter)
        # Without flavorRef, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)
        server[self._flavor_ref_parameter] = good_server.get(
            self._flavor_ref_parameter)
        # Without a name, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)
        # Set a valid server name
        server['name'] = good_server['name']
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [s['id'] for s in servers]
        self.assertIn(created_server_id, server_ids)
        found_server = self._wait_for_state_change(found_server, 'BUILD')
        # It should be available...
        # TODO(justinsb): Mock doesn't yet do this...
        self.assertEqual('ACTIVE', found_server['status'])
        servers = self.api.get_servers(detail=True)
        for server in servers:
            self.assertIn("image", server)
            self.assertIn("flavor", server)
        self._delete_server(created_server_id)

    def _force_reclaim(self):
        # Make sure that compute manager thinks the instance is
        # old enough to be expired
        the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
        timeutils.set_time_override(override_time=the_past)
        ctxt = context.get_admin_context()
        self.compute._reclaim_queued_deletes(ctxt)

    def test_deferred_delete(self):
        # Creates, deletes and waits for server to be reclaimed.
        self.flags(reclaim_instance_interval=1)
        fake_network.set_stub_network_methods(self.stubs)
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])
        # Cannot restore unless instance is deleted
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server_action, created_server_id,
                          {'restore': {}})
        # Delete the server
        self.api.delete_server(created_server_id)
        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])
        self._force_reclaim()
        # Wait for real deletion
        self._wait_for_deletion(created_server_id)

    def test_deferred_delete_restore(self):
        # Creates, deletes and restores a server.
        self.flags(reclaim_instance_interval=3600)
        fake_network.set_stub_network_methods(self.stubs)
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])
        # Delete the server
        self.api.delete_server(created_server_id)
        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])
        # Restore server
        self.api.post_server_action(created_server_id, {'restore': {}})
        # Wait for server to become active again
        found_server = self._wait_for_state_change(found_server, 'DELETED')
        self.assertEqual('ACTIVE', found_server['status'])

    def test_deferred_delete_force(self):
        # Creates, deletes and force deletes a server.
        self.flags(reclaim_instance_interval=3600)
        fake_network.set_stub_network_methods(self.stubs)
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])
        # Delete the server
        self.api.delete_server(created_server_id)
        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])
        # Force delete server
        self.api.post_server_action(created_server_id,
                                    {self._force_delete_parameter: {}})
        # Wait for real deletion
        self._wait_for_deletion(created_server_id)

    def _wait_for_deletion(self, server_id):
        """Poll until GET on the server 404s; fail if it never disappears."""
        # Wait (briefly) for deletion
        for _retries in range(50):
            try:
                found_server = self.api.get_server(server_id)
            except client.OpenStackApiNotFoundException:
                found_server = None
                LOG.debug("Got 404, proceeding")
                break
            LOG.debug("Found_server=%s" % found_server)
            # TODO(justinsb): Mock doesn't yet do accurate state changes
            # if found_server['status'] != 'deleting':
            #    break
            time.sleep(.1)
        # Should be gone
        self.assertFalse(found_server)

    def _delete_server(self, server_id):
        # Delete the server
        self.api.delete_server(server_id)
        self._wait_for_deletion(server_id)

    def test_create_server_with_metadata(self):
        # Creates a server with metadata.
        fake_network.set_stub_network_methods(self.stubs)
        # Build the server data gradually, checking errors along the way
        server = self._build_minimal_create_server_request()
        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i
        server['metadata'] = metadata
        post = {'server': server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual(metadata, found_server.get('metadata'))
        # The server should also be in the all-servers details list
        servers = self.api.get_servers(detail=True)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Details do include metadata
        self.assertEqual(metadata, found_server.get('metadata'))
        # The server should also be in the all-servers summary list
        servers = self.api.get_servers(detail=False)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Summary should not include metadata
        self.assertFalse(found_server.get('metadata'))
        # Cleanup
        self._delete_server(created_server_id)

    def test_create_and_rebuild_server(self):
        # Rebuild a server with metadata.
        fake_network.set_stub_network_methods(self.stubs)
        # create a server with initially has no metadata
        server = self._build_minimal_create_server_request()
        server_post = {'server': server}
        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i
        server_post['server']['metadata'] = metadata
        created_server = self.api.post_server(server_post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        created_server = self._wait_for_state_change(created_server, 'BUILD')
        # rebuild the server with metadata and other server attributes
        post = {}
        post['rebuild'] = {
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "name": "blah",
            self._access_ipv4_parameter: "172.19.0.2",
            self._access_ipv6_parameter: "fe80::2",
            "metadata": {'some': 'thing'},
        }
        post['rebuild'].update(self._get_access_ips_params())
        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild'][self._image_ref_parameter],
                         found_server.get('image')['id'])
        self._verify_access_ips(found_server)
        # rebuild the server with empty metadata and nothing else
        post = {}
        post['rebuild'] = {
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "metadata": {},
        }
        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild'][self._image_ref_parameter],
                         found_server.get('image')['id'])
        self._verify_access_ips(found_server)
        # Cleanup
        self._delete_server(created_server_id)

    def _get_access_ips_params(self):
        """Extra create/rebuild params for access IPs (empty in v3)."""
        return {self._access_ipv4_parameter: "172.19.0.2",
                self._access_ipv6_parameter: "fe80::2"}

    def _verify_access_ips(self, server):
        """Assert the access IPs set by _get_access_ips_params round-trip."""
        self.assertEqual('172.19.0.2',
                         server[self._access_ipv4_parameter])
        self.assertEqual('fe80::2', server[self._access_ipv6_parameter])

    def test_rename_server(self):
        # Test building and renaming a server.
        fake_network.set_stub_network_methods(self.stubs)
        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        server_id = created_server['id']
        self.assertTrue(server_id)
        # Rename the server to 'new-name'
        self.api.put_server(server_id, {'server': {'name': 'new-name'}})
        # Check the name of the server
        created_server = self.api.get_server(server_id)
        self.assertEqual(created_server['name'], 'new-name')
        # Cleanup
        self._delete_server(server_id)

    def test_create_multiple_servers(self):
        # Creates multiple servers and checks for reservation_id.
        # Create 2 servers, setting 'return_reservation_id, which should
        # return a reservation_id
        server = self._build_minimal_create_server_request()
        server[self._min_count_parameter] = 2
        server[self._return_resv_id_parameter] = True
        post = {'server': server}
        response = self.api.post_server(post)
        self.assertIn('reservation_id', response)
        reservation_id = response['reservation_id']
        self.assertNotIn(reservation_id, ['', None])
        # Create 1 more server, which should not return a reservation_id
        server = self._build_minimal_create_server_request()
        post = {'server': server}
        created_server = self.api.post_server(post)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # lookup servers created by the first request.
        servers = self.api.get_servers(detail=True,
                                       search_opts={'reservation_id': reservation_id})
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        # The server from the 2nd request should not be there.
        self.assertIsNone(found_server)
        # Should have found 2 servers.
        self.assertEqual(len(server_map), 2)
        # Cleanup
        self._delete_server(created_server_id)
        for server_id in server_map.iterkeys():
            self._delete_server(server_id)

    def test_create_server_with_injected_files(self):
        # Creates a server with injected_files.
        fake_network.set_stub_network_methods(self.stubs)
        personality = []
        # Inject a text file
        data = 'Hello, World!'
        personality.append({
            'path': '/helloworld.txt',
            'contents': data.encode('base64'),
        })
        # Inject a binary file
        data = zlib.compress('Hello, World!')
        personality.append({
            'path': '/helloworld.zip',
            'contents': data.encode('base64'),
        })
        # Create server
        server = self._build_minimal_create_server_request()
        server['personality'] = personality
        post = {'server': server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])
        # Cleanup
        self._delete_server(created_server_id)
class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
    """Re-runs ServersTest against the v3 API, where access IPs were
    demoted to extensions."""
    _force_delete_parameter = 'forceDelete'
    _api_version = 'v3'
    _image_ref_parameter = 'imageRef'
    _flavor_ref_parameter = 'flavorRef'
    _access_ipv4_parameter = None
    _access_ipv6_parameter = None

    def _get_access_ips_params(self):
        # v3 create/rebuild requests carry no access-IP parameters.
        return {}

    def _verify_access_ips(self, server):
        # NOTE(alexxu): access_ips was demoted as extensions in v3 api.
        # So skips verifying access_ips
        pass
| |
# -*- coding: utf-8 -*-
import lang.mqllex as mqllex
import yacc.yacc as yacc
from lang.mqllex import tokens
# PLY operator-precedence table, lowest to highest; UMINUS is the
# fictitious token used via %prec for unary minus.
precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'MULTIPLY', 'DIVIDE'),
    ('left', 'POWER'),
    ('right', 'UMINUS')
)
# NOTE: in every p_* rule the docstring IS the PLY grammar production —
# it is read at runtime and must not be edited casually.

def p_program(p):
    """program : program command
    | command
    """
    # Accumulate commands into a flat list carried in p[0].
    if len(p) == 2:
        if not p[0]: p[0] = []
        p[0].append(p[1])
    elif len(p) == 3:
        p[0] = p[1]
        if not p[0]: p[0] = []
        if p[2]:
            p[0].append(p[2])


def p_program_function(p):
    """program : program function
    | function
    """
    # NOTE(review): the single-function case builds [['fun', <fn>]] by
    # appending into the inner list, while the accumulate case appends a
    # fresh ['fun', <fn>] — asymmetric shapes; confirm this is intended.
    if len(p) == 2 and p[1]:
        p[0] = [['fun']]
        p[0][0].append(p[1])
    elif len(p) == 3:
        p[0] = p[1]
        if not p[0]: p[0] = [['fun']]
        if p[2]:
            p[0].append(['fun', p[2]])


def p_program_error(p):
    """program : error"""
    # Report the bad token and flag the parse as failed; parse() below
    # checks mqlparser.error and returns None.
    print('Program error: %s' % p[1])
    p[0] = None
    p.parser.error = 1
# Command rules: each reduces to a ('keyword', args...) tuple that the
# interpreter dispatches on.

def p_command_import(p):
    """command : import STRING"""
    p[0] = ('import', p[2])


def p_command_outcsv(p):
    """command : outcsv ID in STRING"""
    p[0] = ('outcsv', p[2], p[4])


def p_command_save(p):
    """command : save ID in ID"""
    p[0] = ('save', p[2], p[4])


def p_command_save_list(p):
    """command : save LPAREN parlist RPAREN in ID"""
    p[0] = ('save', p[3], p[6])


def p_command_search(p):
    """command : search ID with ID as sql"""
    p[0] = ('search', p[2], p[4], p[6])


def p_command_foreach(p):
    """command : foreach ID in ID"""
    p[0] = ('foreach', p[2], p[4])


def p_command_foreach_enum(p):
    """command : foreach ID COMMA ID in ID"""
    # Enumerating form: (index var, item var, iterable).
    p[0] = ('foreach2', p[2], p[4], p[6])


def p_command_end(p):
    """command : end"""
    p[0] = ('end', None)


def p_command_return(p):
    """command : return"""
    p[0] = ('return', None)


def p_command_return_expr(p):
    """command : return expression"""
    p[0] = ('return', p[2])


def p_command_continue(p):
    """command : continue"""
    p[0] = ('continue', None)


def p_command_if(p):
    """command : if relexpression then"""
    p[0] = ('if', p[2])


def p_command_else(p):
    """command : else"""
    p[0] = ('else', None)


def p_command_def_empty(p):
    """command : def ID LPAREN RPAREN"""
    # Function definition with no parameters.
    p[0] = ('def', p[2], [])


def p_command_def(p):
    """command : def ID LPAREN parlist RPAREN"""
    p[0] = ('def', p[2], p[4])


def p_command_connect(p):
    """command : connect ID EQUALS expression"""
    p[0] = ('connect', p[2], p[4])


def p_command_let(p):
    """command : let ID EQUALS expression"""
    p[0] = ('let', p[2], p[4])


def p_command_let_command_error(p):
    """command : let error"""
    # NOTE(review): unlike other rules this stores a bare message string
    # in p[0], not a tuple — downstream code must tolerate that.
    p[0] = 'Invalid let command.'


def p_command_list(p):
    """command : list ID EQUALS LPAREN parlist RPAREN"""
    p[0] = ('list', p[2], p[5])
def p_sql(p):
    """sql : SELECT
    | INSERT
    | UPDATE"""
    # Every alternative has exactly one symbol, so len(p) is always 2;
    # the original's else branch (p[0] = None) was unreachable.
    p[0] = p[1]
##########################################
#### Arithmetic expressions
##########################################
# Expression rules: each reduces to a ('kind', payload...) tuple.

def p_expression_variable(p):
    """expression : variable"""
    p[0] = ('var', p[1])


def p_expression_dbvariable(p):
    """expression : dbvariable"""
    p[0] = ('dbvar', p[1])


def p_expression_float(p):
    """expression : FLOAT"""
    p[0] = ('float', p[1])


def p_expression_integer(p):
    """expression : INTEGER"""
    p[0] = ('int', p[1])


def p_expression_string(p):
    """expression : STRING"""
    p[0] = ('str', p[1])


def p_expression_function(p):
    """expression : function"""
    p[0] = ('fun', p[1])


def p_expression_slice(p):
    """expression : slice"""
    p[0] = ('slice', p[1])


def p_expression_unary(p):
    """expression : MINUS expression %prec UMINUS"""
    # %prec UMINUS gives unary minus the highest precedence (see table).
    p[0] = ('unary', '-', p[2])


def p_expression_group(p):
    """expression : LPAREN expression RPAREN"""
    p[0] = ('group', p[2])


def p_expression_binop(p):
    """expression : expression PLUS expression
    | expression MINUS expression
    | expression MULTIPLY expression
    | expression DIVIDE expression
    | expression POWER expression
    """
    # p[2] is the operator token's text.
    p[0] = ('binop', p[2], p[1], p[3])


def p_relexpression(p):
    """relexpression : expression LT expression
    | expression LE expression
    | expression GT expression
    | expression GE expression
    | expression EQUALS expression
    | expression NE expression
    | expression IN expression
    | expression RE expression
    """
    p[0] = ('relop', p[2], p[1], p[3])
##########################################
#### Functions
##########################################
def p_function(p):
    """function : ID LPAREN parlist RPAREN
                | ID LPAREN RPAREN"""
    # len(p) == 4 corresponds to the empty call "ID ( )"; the empty string
    # is used as the no-arguments sentinel (rather than an empty list).
    if len(p) == 4:
        p[0] = (p[1], '')
    else:
        p[0] = (p[1], p[3])
##########################################
#### Slice
##########################################
def p_slice(p):
    """slice : ID LQPAREN expression COLON expression RQPAREN
             | ID LQPAREN COLON expression RQPAREN
             | ID LQPAREN expression COLON RQPAREN
             | ID LQPAREN expression RQPAREN"""
    # len(p) == 5: single-index form "ID[expr]".
    # len(p) == 7: full slice "ID[expr:expr]".
    # NOTE(review): len(p) == 6 matches BOTH "ID[:expr]" and "ID[expr:]",
    # and the resulting tuple cannot distinguish them (p[3]/p[4] are the
    # COLON token and the expression in grammar order) — confirm intent.
    if len(p) == 5:
        p[0] = (p[1], p[3])
    elif len(p) == 6:
        p[0] = (p[1], p[3], p[4])
    elif len(p) == 7:
        p[0] = (p[1], p[3], p[4], p[5])
##########################################
#### Variables
##########################################
def p_variable(p):
    """variable : ID"""
    # The grammar has a single one-token alternative, so len(p) is always 2;
    # the len == 5 / else branches are dead code, presumably left over from
    # a richer production (e.g. indexed variables) — kept as written.
    if len(p) == 2:
        p[0] = (p[1], None, None)
    elif len(p) == 5:
        p[0] = (p[1], p[3], None)
    else:
        p[0] = (p[1], p[3], p[5])
def p_dbvariable(p):
    """dbvariable : DBID"""
    # Same structure (and same dead branches) as p_variable above.
    if len(p) == 2:
        p[0] = (p[1], None, None)
    elif len(p) == 5:
        p[0] = (p[1], p[3], None)
    else:
        p[0] = (p[1], p[3], p[5])
def p_parlist(p):
    """parlist : parlist COMMA expression
               | expression"""
    # Left-recursive comma-separated list: extend the already-built list
    # in place, or start a fresh single-element list.
    if len(p) > 2:
        p[0] = p[1]
        p[0].append(p[3])
    else:
        p[0] = [p[1]]
# Error rule for syntax errors
def p_error(p):
    """Report a syntax error and flag it on the module-level parser.

    PLY calls this with the offending token, or with ``None`` when the
    error occurs at end of input.  Previously an end-of-input error was
    silently ignored, and the parser's ``error`` attribute (reset and
    checked by parse()) was never set, so parse() could not report
    failures; both are fixed here.
    """
    if p is None:
        # No token available: the error happened at end of input.
        print('Syntax error: unexpected end of input!')
    elif hasattr(p, 'lexer'):
        # Derive the 1-based line number from the lexer position, then
        # echo the offending line.
        numline = len(p.lexer.lexdata[0:p.lexer.lexpos].split('\n'))
        print('Syntax error in input! Line : %s' % numline)
        print('>>> %s' % p.lexer.lexdata.split('\n')[numline - 1])
    # Let parse() know this run failed.
    mqlparser.error = 1
def parse(data, debug=False, tracking=False):
    """Parse `data` and return the AST, or None if an error was flagged."""
    # Reset the error flag before each run; it is checked afterwards so a
    # parse during which the error handler flagged a failure returns None.
    mqlparser.error = 0
    p = mqlparser.parse(data, debug=debug, tracking=tracking)
    if mqlparser.error: return None
    return p
# Build the LALR parser table from all of the p_* rules in this module.
mqlparser = yacc.yacc()
| |
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
from decimal import Decimal
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_log import log as logging
from oslo_concurrency import lockutils
from designate import backend
from designate import exceptions
from designate import objects
from designate import utils
from designate.central import rpcapi as central_api
from designate.mdns import rpcapi as mdns_api
from designate import service
from designate.context import DesignateContext
from designate.i18n import _LE
from designate.i18n import _LI
from designate.i18n import _LW
from designate.pool_manager import cache
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Status values reported for per-nameserver actions.
SUCCESS_STATUS = 'SUCCESS'
ERROR_STATUS = 'ERROR'
NO_DOMAIN_STATUS = 'NO_DOMAIN'
# Actions tracked in the pool manager status cache.
CREATE_ACTION = 'CREATE'
DELETE_ACTION = 'DELETE'
UPDATE_ACTION = 'UPDATE'
# Percentage threshold meaning "every target/nameserver must agree".
MAXIMUM_THRESHOLD = 100
@contextmanager
def wrap_backend_call():
    """Translate unexpected errors from backend calls into Backend exceptions.

    Designate Backend exceptions are propagated unchanged; any other
    exception is wrapped in a generic ``exceptions.Backend``.
    """
    try:
        yield
    except Exception as exc:
        if isinstance(exc, exceptions.Backend):
            # Already the right type - propagate as-is.
            raise
        raise exceptions.Backend('Unknown backend failure: %r' % exc)
class Service(service.RPCService, service.Service):
    """
    Service side of the Pool Manager RPC API.
    API version history:
    1.0 - Initial version
    """
    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, threads=None):
        super(Service, self).__init__(threads=threads)
        # Build the Pool (and related) Object from Config
        self.pool = objects.Pool.from_config(
            CONF, CONF['service:pool_manager'].pool_id)
        # Get a pool manager cache connection.
        self.cache = cache.get_pool_manager_cache(
            CONF['service:pool_manager'].cache_driver)
        # Store some settings for quick access later
        self.threshold = CONF['service:pool_manager'].threshold_percentage
        self.timeout = CONF['service:pool_manager'].poll_timeout
        self.retry_interval = CONF['service:pool_manager'].poll_retry_interval
        self.max_retries = CONF['service:pool_manager'].poll_max_retries
        self.delay = CONF['service:pool_manager'].poll_delay
        # Create the necessary Backend instances for each target
        self._setup_target_backends()
    def _setup_target_backends(self):
        # Build one backend driver instance per configured pool target,
        # keyed by target id; fail fast if the pool has no targets at all.
        self.target_backends = {}
        for target in self.pool.targets:
            # Fetch an instance of the Backend class, passing in the options
            # and masters
            self.target_backends[target.id] = backend.get_backend(
                target.type, target)
        LOG.info(_LI('%d targets setup'), len(self.pool.targets))
        if not self.target_backends:
            raise exceptions.NoPoolTargetsConfigured()
    @property
    def service_name(self):
        # Name used for config-section and topic lookups.
        return 'pool_manager'
    @property
    def _rpc_topic(self):
        # Modify the default topic so it's pool manager instance specific.
        topic = super(Service, self)._rpc_topic
        topic = '%s.%s' % (topic, CONF['service:pool_manager'].pool_id)
        LOG.info(_LI('Using topic %(topic)s for this pool manager instance.')
                 % {'topic': topic})
        return topic
    def start(self):
        """Start the per-target backends, the RPC service and any timers."""
        for target in self.pool.targets:
            self.target_backends[target.id].start()
        super(Service, self).start()
        if CONF['service:pool_manager'].enable_recovery_timer:
            LOG.info(_LI('Starting periodic recovery timer'))
            self.tg.add_timer(
                CONF['service:pool_manager'].periodic_recovery_interval,
                self.periodic_recovery,
                CONF['service:pool_manager'].periodic_recovery_interval)
        if CONF['service:pool_manager'].enable_sync_timer:
            LOG.info(_LI('Starting periodic synchronization timer'))
            self.tg.add_timer(
                CONF['service:pool_manager'].periodic_sync_interval,
                self.periodic_sync,
                CONF['service:pool_manager'].periodic_sync_interval)
    def stop(self):
        """Stop the per-target backends, then the RPC service itself."""
        for target in self.pool.targets:
            self.target_backends[target.id].stop()
        super(Service, self).stop()
    @property
    def central_api(self):
        # Lazily-obtained RPC client for the central service.
        return central_api.CentralAPI.get_instance()
    @property
    def mdns_api(self):
        # Lazily-obtained RPC client for the mdns service.
        return mdns_api.MdnsAPI.get_instance()
    # Periodic Tasks
    def periodic_recovery(self):
        """
        Retry every domain whose last CREATE/UPDATE/DELETE ended in ERROR.
        :return: None
        """
        # TODO(kiall): Replace this inter-process-lock with a distributed
        # lock, likely using the tooz library - see bug 1445127.
        with lockutils.lock('periodic_recovery', external=True, delay=30):
            context = DesignateContext.get_admin_context(all_tenants=True)
            LOG.debug("Starting Periodic Recovery")
            try:
                # Handle Deletion Failures
                domains = self._get_failed_domains(context, DELETE_ACTION)
                for domain in domains:
                    self.delete_domain(context, domain)
                # Handle Creation Failures
                domains = self._get_failed_domains(context, CREATE_ACTION)
                for domain in domains:
                    self.create_domain(context, domain)
                # Handle Update Failures
                domains = self._get_failed_domains(context, UPDATE_ACTION)
                for domain in domains:
                    self.update_domain(context, domain)
            except Exception:
                LOG.exception(_LE('An unhandled exception in periodic '
                                  'recovery occurred'))
    def periodic_sync(self):
        """
        Re-push recently changed, non-errored domains to the pool targets.
        :return: None
        """
        # TODO(kiall): Replace this inter-process-lock with a distributed
        # lock, likely using the tooz library - see bug 1445127.
        with lockutils.lock('periodic_sync', external=True, delay=30):
            context = DesignateContext.get_admin_context(all_tenants=True)
            LOG.debug("Starting Periodic Synchronization")
            criterion = {
                'pool_id': CONF['service:pool_manager'].pool_id,
                'status': '!%s' % ERROR_STATUS
            }
            periodic_sync_seconds = \
                CONF['service:pool_manager'].periodic_sync_seconds
            if periodic_sync_seconds is not None:
                # Generate the current serial, will provide a UTC Unix TS.
                current = utils.increment_serial()
                criterion['serial'] = ">%s" % (current - periodic_sync_seconds)
            domains = self.central_api.find_domains(context, criterion)
            try:
                for domain in domains:
                    # TODO(kiall): If the domain was created within the last
                    # periodic_sync_seconds, attempt to recreate
                    # to fill in targets which may have failed.
                    self.update_domain(context, domain)
            except Exception:
                LOG.exception(_LE('An unhandled exception in periodic '
                                  'synchronization occurred.'))
    # Standard Create/Update/Delete Methods
    def create_domain(self, context, domain):
        """
        Create the domain on enough targets to reach consensus, then notify
        the also-notifies and nameservers.
        :param context: Security context information.
        :param domain: Domain to be created
        :return: None
        """
        LOG.info(_LI("Creating new domain %s"), domain.name)
        results = []
        # Create the domain on each of the Pool Targets
        for target in self.pool.targets:
            results.append(
                self._create_domain_on_target(context, target, domain))
        if self._exceed_or_meet_threshold(results.count(True)):
            LOG.debug('Consensus reached for creating domain %(domain)s '
                      'on pool targets' % {'domain': domain.name})
        else:
            LOG.warn(_LW('Consensus not reached for creating domain %(domain)s'
                         ' on pool targets') % {'domain': domain.name})
            self.central_api.update_status(
                context, domain.id, ERROR_STATUS, domain.serial)
            return
        # Send a NOTIFY to each also-notifies
        for also_notify in self.pool.also_notifies:
            self._update_domain_on_also_notify(context, also_notify, domain)
        # Send a NOTIFY to each nameserver
        for nameserver in self.pool.nameservers:
            create_status = self._build_status_object(
                nameserver, domain, CREATE_ACTION)
            self.cache.store(context, create_status)
            self._update_domain_on_nameserver(context, nameserver, domain)
    def _create_domain_on_target(self, context, target, domain):
        """
        :param context: Security context information.
        :param target: Target to create Domain on
        :param domain: Domain to be created
        :return: True/False
        """
        LOG.debug("Creating domain %s on target %s", domain.name, target.id)
        backend = self.target_backends[target.id]
        try:
            backend.create_domain(context, domain)
            return True
        except Exception:
            # A failed target only counts against the consensus threshold.
            LOG.exception(_LE("Failed to create domain %(domain)s on target "
                              "%(target)s"),
                          {'domain': domain.name, 'target': target.id})
            return False
    def update_domain(self, context, domain):
        """
        Push the domain to every target; on consensus, NOTIFY the
        also-notifies and nameservers and poll for the new serial.
        :param context: Security context information.
        :param domain: Domain to be updated
        :return: None
        """
        LOG.info(_LI("Updating domain %s"), domain.name)
        results = []
        # Update the domain on each of the Pool Targets
        for target in self.pool.targets:
            results.append(
                self._update_domain_on_target(context, target, domain))
        if self._exceed_or_meet_threshold(results.count(True)):
            LOG.debug('Consensus reached for updating domain %(domain)s '
                      'on pool targets' % {'domain': domain.name})
        else:
            LOG.warn(_LW('Consensus not reached for updating domain %(domain)s'
                         ' on pool targets') % {'domain': domain.name})
            self.central_api.update_status(
                context, domain.id, ERROR_STATUS, domain.serial)
            return
        # Send a NOTIFY to each also-notifies
        for also_notify in self.pool.also_notifies:
            self._update_domain_on_also_notify(context, also_notify, domain)
        # Send a NOTIFY to each nameserver
        for nameserver in self.pool.nameservers:
            # See if there is already another update in progress
            try:
                update_status = self.cache.retrieve(
                    context, nameserver.id, domain.id, UPDATE_ACTION)
            except exceptions.PoolManagerStatusNotFound:
                update_status = self._build_status_object(
                    nameserver, domain, UPDATE_ACTION)
                self.cache.store(context, update_status)
            self._update_domain_on_nameserver(context, nameserver, domain)
    def _update_domain_on_target(self, context, target, domain):
        """
        :param context: Security context information.
        :param target: Target to update Domain on
        :param domain: Domain to be updated
        :return: True/False
        """
        LOG.debug("Updating domain %s on target %s", domain.name, target.id)
        backend = self.target_backends[target.id]
        try:
            backend.update_domain(context, domain)
            return True
        except Exception:
            LOG.exception(_LE("Failed to update domain %(domain)s on target "
                              "%(target)s"),
                          {'domain': domain.name, 'target': target.id})
            return False
    def _update_domain_on_also_notify(self, context, also_notify, domain):
        # Fire-and-forget NOTIFY; no serial polling for also-notifies.
        LOG.info(_LI('Updating domain %(domain)s on also_notify %(server)s.') %
                 {'domain': domain.name,
                  'server': self._get_destination(also_notify)})
        self.mdns_api.notify_zone_changed(
            context, domain, also_notify, self.timeout, self.retry_interval,
            self.max_retries, 0)
    def _update_domain_on_nameserver(self, context, nameserver, domain):
        # NOTIFY the nameserver, then poll it until it reports the new
        # serial (which eventually calls back into update_status()).
        LOG.info(_LI('Updating domain %(domain)s on nameserver %(server)s.') %
                 {'domain': domain.name,
                  'server': self._get_destination(nameserver)})
        self.mdns_api.notify_zone_changed(
            context, domain, nameserver, self.timeout, self.retry_interval,
            self.max_retries, 0)
        self.mdns_api.poll_for_serial_number(
            context, domain, nameserver, self.timeout, self.retry_interval,
            self.max_retries, self.delay)
    def delete_domain(self, context, domain):
        """
        Delete the domain from every target; success requires ALL targets
        (MAXIMUM_THRESHOLD) to succeed.
        :param context: Security context information.
        :param domain: Domain to be deleted
        :return: None
        """
        LOG.info(_LI("Deleting domain %s"), domain.name)
        results = []
        # Delete the domain on each of the Pool Targets
        for target in self.pool.targets:
            results.append(
                self._delete_domain_on_target(context, target, domain))
        # TODO(kiall): We should monitor that the Domain is actually deleted
        #              correctly on each of the nameservers, rather than
        #              assuming a successful delete-on-target is OK as we have
        #              in the past.
        if self._exceed_or_meet_threshold(
                results.count(True), MAXIMUM_THRESHOLD):
            LOG.debug('Consensus reached for deleting domain %(domain)s '
                      'on pool targets' % {'domain': domain.name})
            self.central_api.update_status(
                context, domain.id, SUCCESS_STATUS, domain.serial)
        else:
            LOG.warn(_LW('Consensus not reached for deleting domain %(domain)s'
                         ' on pool targets') % {'domain': domain.name})
            self.central_api.update_status(
                context, domain.id, ERROR_STATUS, domain.serial)
    def _delete_domain_on_target(self, context, target, domain):
        """
        :param context: Security context information.
        :param target: Target to delete Domain from
        :param domain: Domain to be deleted
        :return: True/False
        """
        LOG.debug("Deleting domain %s on target %s", domain.name, target.id)
        backend = self.target_backends[target.id]
        try:
            backend.delete_domain(context, domain)
            return True
        except Exception:
            LOG.exception(_LE("Failed to delete domain %(domain)s on target "
                              "%(target)s"),
                          {'domain': domain.name, 'target': target.id})
            return False
    def update_status(self, context, domain, nameserver, status,
                      actual_serial):
        """
        update_status is called by mdns for creates and updates.
        deletes are handled by the backend entirely and status is determined
        at the time of delete itself.
        :param context: Security context information.
        :param domain: The designate domain object.
        :param nameserver: The nameserver for which a status update is being
                           sent.
        :param status: The status, 'SUCCESS' or 'ERROR'.
        :param actual_serial: The actual serial number received from the name
                              server for the domain.
        :return: None
        """
        LOG.debug("Calling update_status for %s : %s : %s : %s" %
                  (domain.name, domain.action, status, actual_serial))
        action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
        # Serialize status updates per domain; the cache read-modify-write
        # below is not otherwise safe against concurrent callbacks.
        with lockutils.lock('update-status-%s' % domain.id):
            try:
                current_status = self.cache.retrieve(
                    context, nameserver.id, domain.id, action)
            except exceptions.PoolManagerStatusNotFound:
                current_status = self._build_status_object(
                    nameserver, domain, action)
                self.cache.store(context, current_status)
            cache_serial = current_status.serial_number
            LOG.debug('For domain %s : %s on nameserver %s the cache serial '
                      'is %s and the actual serial is %s.' %
                      (domain.name, action,
                       self._get_destination(nameserver),
                       cache_serial, actual_serial))
            # Only move the cached serial forward, never backwards.
            if actual_serial and cache_serial <= actual_serial:
                current_status.status = status
                current_status.serial_number = actual_serial
                self.cache.store(context, current_status)
            consensus_serial = self._get_consensus_serial(context, domain)
            # If there is a valid consensus serial we can still send a success
            # for that serial.
            # If there is a higher error serial we can also send an error for
            # the error serial.
            if consensus_serial != 0 and cache_serial <= consensus_serial \
                    and domain.status != 'ACTIVE':
                LOG.info(_LI('For domain %(domain)s '
                             'the consensus serial is %(consensus_serial)s.') %
                         {'domain': domain.name,
                          'consensus_serial': consensus_serial})
                self.central_api.update_status(
                    context, domain.id, SUCCESS_STATUS, consensus_serial)
            if status == ERROR_STATUS:
                error_serial = self._get_error_serial(
                    context, domain, consensus_serial)
                if error_serial > consensus_serial or error_serial == 0:
                    LOG.warn(_LW('For domain %(domain)s '
                                 'the error serial is %(error_serial)s.') %
                             {'domain': domain.name,
                              'error_serial': error_serial})
                    self.central_api.update_status(
                        context, domain.id, ERROR_STATUS, error_serial)
            # Once every nameserver agrees on the domain's own serial, the
            # cached statuses are no longer needed.
            if consensus_serial == domain.serial and self._is_consensus(
                    context, domain, action, SUCCESS_STATUS,
                    MAXIMUM_THRESHOLD):
                self._clear_cache(context, domain, action)
    # Utility Methods
    def _get_failed_domains(self, context, action):
        # Domains in this pool whose given action last ended in ERROR.
        criterion = {
            'pool_id': CONF['service:pool_manager'].pool_id,
            'action': action,
            'status': 'ERROR'
        }
        return self.central_api.find_domains(context, criterion)
    @staticmethod
    def _get_destination(nameserver):
        # "host:port" string for log messages.
        return '%s:%s' % (nameserver.host, nameserver.port)
    @staticmethod
    def _percentage(count, total_count):
        return (Decimal(count) / Decimal(total_count)) * Decimal(100)
    def _exceed_or_meet_threshold(self, count, threshold=None):
        # True when `count` out of all pool targets meets the (percentage)
        # threshold; defaults to the configured threshold_percentage.
        threshold = threshold or self.threshold
        return self._percentage(
            count, len(self.pool.targets)) >= Decimal(threshold)
    @staticmethod
    def _get_sorted_serials(pool_manager_statuses, descending=False):
        serials = []
        for pool_manager_status in pool_manager_statuses:
            serials.append(pool_manager_status.serial_number)
        serials.sort(reverse=descending)
        return serials
    def _get_serials_ascending(self, pool_manager_statuses):
        return self._get_sorted_serials(pool_manager_statuses)
    def _get_serials_descending(self, pool_manager_statuses):
        return self._get_sorted_serials(pool_manager_statuses, descending=True)
    def _is_consensus(self, context, domain, action, status, threshold=None):
        # True when enough nameservers report `status` for this action.
        status_count = 0
        pool_manager_statuses = self._retrieve_statuses(
            context, domain, action)
        for pool_manager_status in pool_manager_statuses:
            if pool_manager_status.status == status:
                status_count += 1
        if threshold is None:
            threshold = self.threshold
        return self._exceed_or_meet_threshold(status_count, threshold)
    def _get_consensus_serial(self, context, domain):
        # Highest serial that enough nameservers have reached (0 if none).
        consensus_serial = 0
        action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
        pm_statuses = self._retrieve_statuses(context, domain, action)
        for serial in self._get_serials_descending(pm_statuses):
            serial_count = 0
            for pm_status in pm_statuses:
                if pm_status.serial_number >= serial:
                    serial_count += 1
            if self._exceed_or_meet_threshold(serial_count, self.threshold):
                consensus_serial = serial
                break
        return consensus_serial
    def _get_error_serial(self, context, domain, consensus_serial):
        # Smallest reported serial above the consensus serial, when there
        # is consensus on ERROR (0 otherwise).
        error_serial = 0
        action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
        if self._is_consensus(context, domain, action, ERROR_STATUS):
            pm_statuses = self._retrieve_statuses(context, domain, action)
            for serial in self._get_serials_ascending(pm_statuses):
                if serial > consensus_serial:
                    error_serial = serial
                    break
        return error_serial
    # When we hear back from the nameserver, the serial_number is set to the
    # serial the nameserver reported for the domain.
    @staticmethod
    def _build_status_object(nameserver, domain, action):
        # Fresh status entry: unknown status, serial 0 (nothing confirmed).
        values = {
            'nameserver_id': nameserver.id,
            'domain_id': domain.id,
            'status': None,
            'serial_number': 0,
            'action': action
        }
        return objects.PoolManagerStatus(**values)
    # Methods for manipulating the cache.
    def _clear_cache(self, context, domain, action=None):
        """Drop cached statuses for the domain (one action, or all three)."""
        LOG.debug('Clearing cache for domain %s with action %s.' %
                  (domain.name, action))
        pool_manager_statuses = []
        if action:
            actions = [action]
        else:
            actions = [CREATE_ACTION, UPDATE_ACTION, DELETE_ACTION]
        for nameserver in self.pool.nameservers:
            for action in actions:
                pool_manager_status = self._build_status_object(
                    nameserver, domain, action)
                pool_manager_statuses.append(pool_manager_status)
        for pool_manager_status in pool_manager_statuses:
            # Ignore any not found errors while clearing the cache
            try:
                self.cache.clear(context, pool_manager_status)
            except exceptions.PoolManagerStatusNotFound:
                pass
    def _retrieve_from_mdns(self, context, nameserver, domain, action):
        """Query mdns for the nameserver's serial and cache the result.

        Returns the freshly built PoolManagerStatus, or None when mdns
        could not be reached.
        """
        try:
            (status, actual_serial, retries) = \
                self.mdns_api.get_serial_number(
                    context, domain, nameserver, self.timeout,
                    self.retry_interval, self.max_retries, self.delay)
        except messaging.MessagingException as msg_ex:
            LOG.debug('Could not retrieve status and serial for domain %s on '
                      'nameserver %s with action %s (%s: %s)' %
                      (domain.name, self._get_destination(nameserver), action,
                       type(msg_ex), str(msg_ex)))
            return None
        pool_manager_status = self._build_status_object(
            nameserver, domain, action)
        if status == NO_DOMAIN_STATUS:
            # The nameserver does not know the domain: that is success for
            # a delete, failure for a create.
            if action == CREATE_ACTION:
                pool_manager_status.status = 'ERROR'
            elif action == DELETE_ACTION:
                pool_manager_status.status = 'SUCCESS'
            # TODO(Ron): Handle this case properly.
            elif action == UPDATE_ACTION:
                pool_manager_status.status = 'ERROR'
        else:
            pool_manager_status.status = status
        pool_manager_status.serial_number = actual_serial \
            if actual_serial is not None else 0
        LOG.debug('Retrieved status %s and serial %s for domain %s '
                  'on nameserver %s with action %s from mdns.' %
                  (pool_manager_status.status,
                   pool_manager_status.serial_number,
                   domain.name, self._get_destination(nameserver), action))
        self.cache.store(context, pool_manager_status)
        return pool_manager_status
    def _retrieve_statuses(self, context, domain, action):
        """Collect per-nameserver statuses, from cache or (on miss) mdns."""
        pool_manager_statuses = []
        for nameserver in self.pool.nameservers:
            try:
                pool_manager_status = self.cache.retrieve(
                    context, nameserver.id, domain.id, action)
                LOG.debug('Cache hit! Retrieved status %s and serial %s '
                          'for domain %s on nameserver %s with action %s from '
                          'the cache.' %
                          (pool_manager_status.status,
                           pool_manager_status.serial_number,
                           domain.name,
                           self._get_destination(nameserver), action))
            except exceptions.PoolManagerStatusNotFound:
                LOG.debug('Cache miss! Did not retrieve status and serial '
                          'for domain %s on nameserver %s with action %s from '
                          'the cache. Getting it from the server.' %
                          (domain.name,
                           self._get_destination(nameserver),
                           action))
                pool_manager_status = self._retrieve_from_mdns(
                    context, nameserver, domain, action)
            # mdns may have been unreachable; skip that nameserver.
            if pool_manager_status is not None:
                pool_manager_statuses.append(pool_manager_status)
        return pool_manager_statuses
| |
import logging
import os
import warnings
from great_expectations.datasource.batch_kwargs_generator.batch_kwargs_generator import (
BatchKwargsGenerator,
)
from great_expectations.datasource.types import PathBatchKwargs
from great_expectations.exceptions import BatchKwargsError
logger = logging.getLogger(__name__)
# File extensions (including compound ones such as ".csv.gz") that are
# recognized as single-file data assets directly under base_directory.
KNOWN_EXTENSIONS = [
    ".csv",
    ".tsv",
    ".parquet",
    ".xls",
    ".xlsx",
    ".json",
    ".csv.gz",
    ".tsv.gz",
    ".feather",
    ".pkl",
]
class SubdirReaderBatchKwargsGenerator(BatchKwargsGenerator):
    """The SubdirReaderBatchKwargsGenerator inspects a filesystem and produces path-based batch_kwargs.
    SubdirReaderBatchKwargsGenerator recognizes data assets using two criteria:
    - for files directly in 'base_directory' with recognized extensions (.csv, .tsv, .parquet, .xls, .xlsx, .json
    .csv.gz, .tsv.gz, .feather, .pkl), it uses the name of the file without the extension
    - for other files or directories in 'base_directory', it uses the file or directory name
    SubdirReaderBatchKwargsGenerator sees all files inside a directory of base_directory as batches of one datasource.
    SubdirReaderBatchKwargsGenerator can also include configured reader_options which will be added to batch_kwargs generated
    by this generator.
    """
    _default_reader_options = {}
    recognized_batch_parameters = {"data_asset_name", "partition_id"}
    def __init__(
        self,
        name="default",
        datasource=None,
        base_directory="/data",
        reader_options=None,
        known_extensions=None,
        reader_method=None,
    ):
        """
        Args:
            name: Name of this generator.
            datasource: Datasource that owns this generator.
            base_directory: Root directory to inspect (absolute, or relative
                to the data context root - see the base_directory property).
            reader_options: Reader options merged into generated batch_kwargs.
            known_extensions: Extensions recognized as data files; defaults
                to KNOWN_EXTENSIONS.
            reader_method: Explicit reader method recorded in batch_kwargs.
        """
        super().__init__(name, datasource=datasource)
        if reader_options is None:
            reader_options = self._default_reader_options
        if known_extensions is None:
            known_extensions = KNOWN_EXTENSIONS
        self._known_extensions = known_extensions
        self._reader_options = reader_options
        self._reader_method = reader_method
        self._base_directory = base_directory
    @property
    def reader_options(self):
        # Options passed through to the datasource's reader.
        return self._reader_options
    @property
    def known_extensions(self):
        # Extensions treated as single-file data assets.
        return self._known_extensions
    @property
    def reader_method(self):
        # Optional explicit reader method (may be None).
        return self._reader_method
    @property
    def base_directory(self):
        # If base directory is a relative path, interpret it as relative to the data context's
        # context root directory (parent directory of great_expectation dir)
        if os.path.isabs(self._base_directory) or self._datasource.data_context is None:
            return self._base_directory
        else:
            return os.path.join(
                self._datasource.data_context.root_directory, self._base_directory
            )
    def get_available_data_asset_names(self):
        """Return the (name, type) options found under base_directory."""
        if not os.path.isdir(self.base_directory):
            return {"names": [], "is_complete_list": True}
        known_assets = self._get_valid_file_options(base_directory=self.base_directory)
        return {"names": known_assets, "is_complete_list": True}
    # TODO: deprecate generator_asset argument
    def get_available_partition_ids(self, generator_asset=None, data_asset_name=None):
        """Return the partition ids available for one data asset.

        A recognized single *file* asset has exactly one partition (itself);
        a directory asset has one partition per valid file inside it.
        """
        assert (generator_asset and not data_asset_name) or (
            not generator_asset and data_asset_name
        ), "Please provide either generator_asset or data_asset_name."
        if generator_asset:
            warnings.warn(
                "The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
                "Please update code accordingly.",
                DeprecationWarning,
            )
            data_asset_name = generator_asset
        # If the generator asset names a single known *file*, return ONLY that
        for extension in self.known_extensions:
            if os.path.isfile(
                os.path.join(self.base_directory, data_asset_name + extension)
            ):
                return [data_asset_name]
        if os.path.isfile(os.path.join(self.base_directory, data_asset_name)):
            return [data_asset_name]
        # Otherwise, subdir files are partition ids
        return [
            path
            for (path, option_type) in self._get_valid_file_options(
                base_directory=os.path.join(self.base_directory, data_asset_name)
            )
        ]
    def _build_batch_kwargs(self, batch_parameters):
        """Resolve batch_parameters to a concrete file path.

        Args:
            batch_parameters: Must contain "data_asset_name"; may contain
                "partition_id" selecting a file within a directory asset.
        Returns:
            batch_kwargs
        Raises:
            BatchKwargsError: If no name is given or no matching file exists.
        """
        try:
            data_asset_name = batch_parameters.pop("data_asset_name")
        except KeyError:
            raise BatchKwargsError(
                "Unable to build BatchKwargs: no name provided in batch_parameters.",
                batch_kwargs=batch_parameters,
            )
        if "partition_id" in batch_parameters:
            partition_id = batch_parameters.pop("partition_id")
            # Find the path
            path = None
            for extension in self.known_extensions:
                if os.path.isfile(
                    os.path.join(
                        self.base_directory, data_asset_name, partition_id + extension
                    )
                ):
                    path = os.path.join(
                        self.base_directory, data_asset_name, partition_id + extension
                    )
            if path is None:
                logger.warning(
                    "Unable to find path with the provided partition; searching for asset-name partitions."
                )
                # Fall through to this case in the event that there is not a subdir available, or if partition_id was
                # not provided
                if os.path.isfile(os.path.join(self.base_directory, data_asset_name)):
                    path = os.path.join(self.base_directory, data_asset_name)
                for extension in self.known_extensions:
                    if os.path.isfile(
                        os.path.join(self.base_directory, data_asset_name + extension)
                    ):
                        path = os.path.join(
                            self.base_directory, data_asset_name + extension
                        )
            if path is None:
                raise BatchKwargsError(
                    f"Unable to build batch kwargs from for asset '{data_asset_name}'",
                    batch_parameters,
                )
            return self._build_batch_kwargs_from_path(path, **batch_parameters)
        else:
            return self.yield_batch_kwargs(
                data_asset_name=data_asset_name, **batch_parameters
            )
    def _get_valid_file_options(self, base_directory=None):
        """List (name, "file"|"directory") options found in base_directory.

        Files are reported with their recognized extension stripped;
        directories are included only when they contain at least one valid
        file (checked recursively).
        """
        valid_options = []
        if base_directory is None:
            base_directory = self.base_directory
        file_options = os.listdir(base_directory)
        for file_option in file_options:
            for extension in self.known_extensions:
                if (
                    file_option.endswith(extension)
                    and not file_option.startswith(".")
                    and (file_option[: -len(extension)], "file") not in valid_options
                ):
                    valid_options.append((file_option[: -len(extension)], "file"))
                # BUG FIX: test against the directory currently being listed
                # (base_directory), not self.base_directory; the old check
                # broke recursion into nested subdirectories, whose entries
                # were tested against the wrong parent path.
                elif os.path.isdir(os.path.join(base_directory, file_option)):
                    # Make sure there's at least one valid file inside the subdir
                    subdir_options = self._get_valid_file_options(
                        base_directory=os.path.join(base_directory, file_option)
                    )
                    if (
                        len(subdir_options) > 0
                        and (file_option, "directory") not in valid_options
                    ):
                        valid_options.append((file_option, "directory"))
        return valid_options
    def _get_iterator(self, data_asset_name, reader_options=None, limit=None):
        """Yield batch_kwargs for every batch of the named asset.

        A directory asset yields one batch per contained data file; a file
        asset yields a single batch.
        Raises:
            BatchKwargsError: If no matching file or directory exists.
        """
        logger.debug(
            "Beginning SubdirReaderBatchKwargsGenerator _get_iterator for data_asset_name: %s"
            % data_asset_name
        )
        # If the data asset is a file, then return the path.
        # Otherwise, use files in a subdir as batches
        if os.path.isdir(os.path.join(self.base_directory, data_asset_name)):
            subdir_options = os.listdir(
                os.path.join(self.base_directory, data_asset_name)
            )
            batches = []
            for file_option in subdir_options:
                for extension in self.known_extensions:
                    if file_option.endswith(extension) and not file_option.startswith(
                        "."
                    ):
                        batches.append(
                            os.path.join(
                                self.base_directory, data_asset_name, file_option
                            )
                        )
            return self._build_batch_kwargs_path_iter(
                batches, reader_options=reader_options, limit=limit
            )
        else:
            for extension in self.known_extensions:
                path = os.path.join(self.base_directory, data_asset_name + extension)
                if os.path.isfile(path):
                    return iter(
                        [
                            self._build_batch_kwargs_from_path(
                                path, reader_options=reader_options, limit=limit
                            )
                        ]
                    )
            # If we haven't returned yet, raise
            raise BatchKwargsError(
                "No valid files found when searching {:s} using configured known_extensions: "
                "{:s} ".format(
                    os.path.join(self.base_directory, data_asset_name),
                    ", ".join(map(str, self.known_extensions)),
                ),
                batch_kwargs=PathBatchKwargs(
                    path=os.path.join(self.base_directory, data_asset_name)
                ),
            )
    def _build_batch_kwargs_path_iter(self, path_list, reader_options=None, limit=None):
        # Lazily build kwargs for each path in turn.
        for path in path_list:
            yield self._build_batch_kwargs_from_path(
                path, reader_options=reader_options, limit=limit
            )
    def _build_batch_kwargs_from_path(
        self, path, reader_method=None, reader_options=None, limit=None
    ):
        """Assemble PathBatchKwargs for one file, merging generator defaults."""
        batch_kwargs = self._datasource.process_batch_parameters(
            reader_method=reader_method or self.reader_method,
            reader_options=reader_options or self.reader_options,
            limit=limit,
        )
        batch_kwargs["path"] = path
        batch_kwargs["datasource"] = self._datasource.name
        return PathBatchKwargs(batch_kwargs)
| |
from __future__ import division, print_function
import json
import numpy as np
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import Adam
from keras.preprocessing import image
# In case we are going to use the TensorFlow backend we need to explicitly set the Theano image ordering
from keras import backend as K
# The network below is defined with channels-first (3, 224, 224) tensors,
# so force Theano ('th') ordering regardless of the active backend.
K.set_image_dim_ordering('th')
# Per-channel RGB mean of the VGG training set, shaped for channels-first
# broadcasting (channels x 1 x 1).
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3, 1, 1))
def vgg_preprocess(x):
    """Subtract the VGG training-set mean and flip channels RGB -> BGR.

    The model uses Theano ('th') channels-first ordering, so the channel
    axis is axis 1 (just after the batch axis).

    Args:
        x: Image batch laid out channels-first (batch x channels x height x width).

    Returns:
        The mean-centred batch with its channel axis reversed (BGR order).
    """
    centred = x - vgg_mean
    # Reversing axis 1 (the channel axis) converts RGB to BGR.
    return centred[:, ::-1]
class Vgg16():
"""
The VGG 16 Imagenet model
"""
    def __init__(self, file_path):
        """Build the VGG16 network, load its weights and the class labels.

        Args:
            file_path: Base URL/path prefix for the weight and class files.
        """
        self.FILE_PATH = file_path
        self.create()
        self.get_classes()
def get_classes(self):
"""
Downloads the Imagenet classes index file and loads it to self.classes.
The file is downloaded only if it not already in the cache.
"""
fname = 'imagenet_class_index.json'
fpath = get_file(fname, self.FILE_PATH + fname, cache_subdir='models')
with open(fpath) as f:
class_dict = json.load(f)
self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
def predict(self, imgs, details=False):
"""
Predict the labels of a set of images using the VGG16 model.
Args:
imgs (ndarray) : An array of N images (size: N x width x height x channels).
details : ??
Returns:
preds (np.array) : Highest confidence value of the predictions for each image.
idxs (np.ndarray): Class index of the predictions with the max confidence.
classes (list) : Class labels of the predictions with the max confidence.
"""
# predict probability of each class for each image
all_preds = self.model.predict(imgs)
# for each image get the index of the class with max probability
idxs = np.argmax(all_preds, axis=1)
# get the values of the highest probability for each image
preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]
# get the label of the class with the highest probability for each image
classes = [self.classes[idx] for idx in idxs]
return np.array(preds), idxs, classes
def ConvBlock(self, layers, filters):
"""
Adds a specified number of ZeroPadding and Covolution layers
to the model, and a MaxPooling layer at the very end.
Args:
layers (int): The number of zero padded convolution layers
to be added to the model.
filters (int): The number of convolution filters to be
created for each layer.
"""
model = self.model
for i in range(layers):
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(filters, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
def FCBlock(self):
"""
Adds a fully connected layer of 4096 neurons to the model with a
Dropout of 0.5
Args: None
Returns: None
"""
model = self.model
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
def create(self):
"""
Creates the VGG16 network achiticture and loads the pre-trained weights.
Args: None
Returns: None
"""
model = self.model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3, 224, 224), output_shape=(3, 224, 224)))
self.ConvBlock(2, 64)
self.ConvBlock(2, 128)
self.ConvBlock(3, 256)
self.ConvBlock(3, 512)
self.ConvBlock(3, 512)
model.add(Flatten())
self.FCBlock()
self.FCBlock()
model.add(Dense(1000, activation='softmax'))
fname = 'vgg16.h5'
model.load_weights(self.FILE_PATH + fname)
def get_batches(self, path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):
"""
Takes the path to a directory, and generates batches of augmented/normalized data. Yields batches indefinitely, in an infinite loop.
See Keras documentation: https://keras.io/preprocessing/image/
"""
return gen.flow_from_directory(path, target_size=(224, 224),
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
def ft(self, num):
"""
Replace the last layer of the model with a Dense (fully connected) layer of num neurons.
Will also lock the weights of all layers except the new layer so that we only learn
weights for the last layer in subsequent training.
Args:
num (int) : Number of neurons in the Dense layer
Returns:
None
"""
model = self.model
model.pop()
for layer in model.layers: layer.trainable = False
model.add(Dense(num, activation='softmax'))
self.compile()
def fine_tune(self, batches):
"""
Modifies the original VGG16 network architecture and updates self.classes for new training data.
Args:
batches : A keras.preprocessing.image.ImageDataGenerator object.
See definition for get_batches().
"""
self.ft(batches.num_class)
classes = list(iter(batches.class_indices)) # get a list of all the class labels
# batches.class_indices is a dict with the class name as key and an index as value
# eg. {'cats': 0, 'dogs': 1}
# sort the class labels by index according to batches.class_indices and update model.classes
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
self.classes = classes
def compile(self, lr=0.001):
"""
Configures the model for training.
See Keras documentation: https://keras.io/models/model/
"""
self.model.compile(optimizer=Adam(lr=lr),
loss='categorical_crossentropy', metrics=['accuracy'])
def fit_data(self, trn, labels, val, val_labels, nb_epoch=1, batch_size=64):
"""
Trains the model for a fixed number of epochs (iterations on a dataset).
See Keras documentation: https://keras.io/models/model/
"""
self.model.fit(trn, labels, nb_epoch=nb_epoch,
validation_data=(val, val_labels), batch_size=batch_size)
def fit(self, batches, val_batches, nb_epoch=1):
"""
Fits the model on data yielded batch-by-batch by a Python generator.
See Keras documentation: https://keras.io/models/model/
"""
self.model.fit_generator(batches, int(batches.samples / batches.batch_size), epochs=nb_epoch,
validation_data=val_batches,
validation_steps=int(val_batches.samples / val_batches.batch_size))
def test(self, path, batch_size=8):
"""
Predicts the classes using the trained model on data yielded batch-by-batch.
Args:
path (string): Path to the target directory. It should contain one subdirectory
per class.
batch_size (int): The number of images to be considered in each batch.
Returns:
test_batches, numpy array(s) of predictions for the test_batches.
"""
test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
return test_batches, self.model.predict_generator(test_batches, test_batches.samples)
| |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.preprocessors.rewards."""
from typing import Sequence, Text, Union
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.decorators import overrides
from dm_robotics.agentflow.preprocessors import rewards
from dm_robotics.agentflow.preprocessors import timestep_preprocessor
import numpy as np
def random_scalar_spec(name):
  """Returns a scalar Array spec whose float dtype is chosen at random."""
  chosen_dtype = np.random.choice([np.float32, np.float64])
  return specs.Array(shape=(), dtype=chosen_dtype, name=name)
def create_timestep_spec(observation_spec):
  """Wraps `observation_spec` in a TimeStepSpec with random scalar specs."""
  reward = random_scalar_spec('reward')
  discount = random_scalar_spec('discount')
  return spec_utils.TimeStepSpec(
      observation_spec, reward_spec=reward, discount_spec=discount)
def create_timestep(
    input_spec: spec_utils.TimeStepSpec, name: Text,
    value: Sequence[float]) -> timestep_preprocessor.PreprocessorTimestep:
  """Builds a random timestep whose observation `name` is set to `value`."""
  observation = testing_functions.valid_value(input_spec.observation_spec)
  observation[name] = np.asarray(
      value, dtype=input_spec.observation_spec[name].dtype)
  env_timestep = testing_functions.random_timestep(
      spec=input_spec, observation=observation)
  return timestep_preprocessor.PreprocessorTimestep.from_environment_timestep(
      env_timestep, pterm=0.0)
class RewardsTest(absltest.TestCase):
  """Unit tests for the L2-distance-based reward preprocessors."""

  def test_l2_spec_updated_properly(self):
    # L2Reward should leave discount/observation specs alone but promote
    # the reward dtype to fit both observation dtypes.
    observation_spec = {
        'obs0': testing_functions.random_array_spec(),
        'obs1': testing_functions.random_array_spec()
    }
    input_spec = create_timestep_spec(observation_spec)
    reward_preprocessor = rewards.L2Reward(
        obs0='obs0', obs1='obs1', reward_scale=1.0, reward_offset=1.0)
    output_spec = reward_preprocessor.setup_io_spec(input_spec)
    # Assert discount specs are unchanged.
    self.assertEqual(input_spec.discount_spec,
                     output_spec.discount_spec)
    # Assert observation specs are unchanged.
    self.assertEqual(input_spec.observation_spec,
                     output_spec.observation_spec)
    # Assert reward specs match observation spec dtypes.
    # NOTE(review): the Array specs themselves (not their .dtype) are
    # handed to np.promote_types — presumably they are dtype-convertible;
    # confirm against dm_env's specs.Array.
    type0 = input_spec.observation_spec['obs0']
    type1 = input_spec.observation_spec['obs1']
    targ_type = np.promote_types(type0, type1)
    self.assertEqual(output_spec.reward_spec.dtype, targ_type)

  def test_thresholded_l2_spec_unchanged(self):
    # ThresholdedL2Reward emits a fixed reward, so no spec change expected.
    observation_spec = {
        'obs0': testing_functions.random_array_spec(),
        'obs1': testing_functions.random_array_spec()
    }
    input_spec = create_timestep_spec(observation_spec)
    reward_preprocessor = rewards.ThresholdedL2Reward(
        obs0='obs0', obs1='obs1', threshold=0.5, reward=1.0)
    output_spec = reward_preprocessor.setup_io_spec(input_spec)
    self.assertEqual(input_spec, output_spec)

  def test_spec_validation_missing_observation(self):
    # Neither preprocessor's observations exist in the spec; both should
    # raise KeyError during setup.
    observation_spec = {
        'wrong_name':
            specs.Array(shape=(2,), dtype=np.int32, name='bool as two'),
    }
    input_spec = create_timestep_spec(observation_spec)
    reward_preprocessors = [
        rewards.L2Reward(
            obs0='obs0', obs1='obs1', reward_scale=1.0, reward_offset=1.0),
        rewards.ThresholdedL2Reward(
            obs0='obs0', obs1='obs1', threshold=0.5, reward=1.0)
    ]
    for rp in reward_preprocessors:
      try:
        rp.setup_io_spec(input_spec)
        self.fail('Exception expected due to missing observation')
      except KeyError:
        pass  # expected

  def test_l2_spec_numerics(self):
    # Same spec for both observations so the distance is well-defined.
    random_arr_spec = testing_functions.random_array_spec()
    observation_spec = {
        'obs0': random_arr_spec,
        'obs1': random_arr_spec
    }
    input_spec = create_timestep_spec(observation_spec)
    reward_preprocessor = rewards.L2Reward(
        obs0='obs0', obs1='obs1', reward_scale=1.5, reward_offset=2.0)
    output_spec = reward_preprocessor.setup_io_spec(input_spec)
    timestep = testing_functions.valid_value(input_spec)
    processed_timestep = reward_preprocessor.process(timestep)
    # Expected reward: -scale * ||obs0 - obs1|| + offset, in the output
    # spec's dtype.
    dist = np.linalg.norm(timestep.observation['obs0'] -
                          timestep.observation['obs1'])
    expected_reward = output_spec.reward_spec.dtype.type(-1 * dist * 1.5 + 2.0)
    self.assertEqual(expected_reward,
                     processed_timestep.reward)
    self.assertEqual(expected_reward.dtype,
                     processed_timestep.reward.dtype)

  def test_thresholded_l2_spec_numerics(self):
    random_arr_spec = testing_functions.random_array_spec()
    observation_spec = {
        'obs0': random_arr_spec,
        'obs1': random_arr_spec
    }
    input_spec = create_timestep_spec(observation_spec)
    reward_preprocessor = rewards.ThresholdedL2Reward(
        obs0='obs0', obs1='obs1', threshold=0.5, reward=1.0)
    output_spec = reward_preprocessor.setup_io_spec(input_spec)
    timestep = testing_functions.valid_value(input_spec)
    processed_timestep = reward_preprocessor.process(timestep)
    # Only the dtype is checked; the reward value depends on the random
    # observations relative to the threshold.
    self.assertEqual(output_spec.reward_spec.dtype,
                     processed_timestep.reward.dtype)
class ComputeRewardTest(absltest.TestCase):
  """Tests for rewards.ComputeReward (user-supplied reward functions)."""

  def test_scalar_reward_computed_based_on_observation(self):
    # Reward function returns a scalar derived from the observation.
    reward_fn = lambda obs: obs['obs'][0]
    observation_spec = {
        'obs': specs.Array(shape=(2,), dtype=np.float32)
    }
    input_spec = create_timestep_spec(observation_spec)
    input_timestep = create_timestep(input_spec, 'obs', [2.0, 3.0])
    reward_preprocessor = rewards.ComputeReward(reward_function=reward_fn)
    reward_preprocessor.setup_io_spec(input_spec)
    output_timestep = reward_preprocessor.process(input_timestep)
    self.assertEqual(output_timestep.reward, 2.0)

  def test_array_rewards_fail_without_correct_shape(self):
    # Returning an array while the declared output shape is scalar must
    # raise during spec setup / processing.
    reward_fn = lambda obs: obs['obs']
    observation_spec = {
        'obs': specs.Array(shape=(2,), dtype=np.float32)
    }
    input_spec = create_timestep_spec(observation_spec)
    input_timestep = create_timestep(input_spec, 'obs', [2.0, 3.0])
    reward_preprocessor = rewards.ComputeReward(reward_function=reward_fn)
    with self.assertRaises(ValueError):
      reward_preprocessor.setup_io_spec(input_spec)
      reward_preprocessor.process(input_timestep)

  def test_array_rewards_succeeds_with_correct_shape(self):
    # Same reward function as above, but the output spec shape is declared.
    reward_fn = lambda obs: obs['obs']
    observation_spec = {
        'obs': specs.Array(shape=(2,), dtype=np.float32)
    }
    input_spec = create_timestep_spec(observation_spec)
    input_timestep = create_timestep(input_spec, 'obs', [2.0, 3.0])
    reward_preprocessor = rewards.ComputeReward(
        reward_function=reward_fn, output_spec_shape=(2,))
    reward_preprocessor.setup_io_spec(input_spec)
    output_timestep = reward_preprocessor.process(input_timestep)
    np.testing.assert_allclose(output_timestep.reward, [2.0, 3.0])
class CombineRewardsTest(parameterized.TestCase):
  """Tests for rewards.CombineRewards and the staged combination strategies."""

  def setUp(self):
    super().setUp()
    # _TestReward (defined later in this module) always emits a fixed
    # reward, making the combination result predictable.
    self._reward_1 = _TestReward(reward_value=1.)
    self._reward_10 = _TestReward(reward_value=10.)
    observation_spec = {
        'unused_obs': testing_functions.random_array_spec(
            shape=(2,),
            minimum=np.asarray([0, 0]),
            maximum=np.asarray([10, 10]))
    }
    self._input_spec = create_timestep_spec(observation_spec)
    self._input_timestep = create_timestep(
        self._input_spec, 'unused_obs', [2.0, 3.0])

  def test_default_combination(self):
    # Default combination is maximum of all rewards.
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[self._reward_1, self._reward_10])
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertEqual(output_timestep.reward, 10.0)

  def test_max_combination(self):
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[self._reward_1, self._reward_10],
        combination_strategy=np.max)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertEqual(output_timestep.reward, 10.0)

  def test_min_combination(self):
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[self._reward_1, self._reward_10],
        combination_strategy=np.min)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertEqual(output_timestep.reward, 1.0)

  def test_sum_combination(self):
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[self._reward_1, self._reward_10],
        combination_strategy=np.sum)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertEqual(output_timestep.reward, 11.0)

  def test_sum_combination_with_list_input(self):
    # One scalar (1.0) plus an array of three ones, flattened and summed.
    reward_array = _TestReward(reward_value=np.ones(3))
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[self._reward_1, reward_array],
        combination_strategy=np.sum)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertEqual(output_timestep.reward, 4.0)

  def test_output_list_of_rewards_fails_without_correct_shape(self):
    # Must update the output shape when returning an array of rewards.
    with self.assertRaises(ValueError):
      combined_reward = rewards.CombineRewards(
          reward_preprocessors=[self._reward_1, self._reward_10],
          combination_strategy=np.stack)
      combined_reward.setup_io_spec(self._input_spec)
      combined_reward.process(self._input_timestep)

  def test_output_list_of_rewards_succeeds_with_correct_shape(self):
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[self._reward_1, self._reward_10],
        combination_strategy=np.stack, output_spec_shape=(2,))
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    np.testing.assert_allclose(output_timestep.reward, [1., 10.])

  def test_processing_unflattened_rewards(self):
    # With flatten_rewards=False the per-preprocessor reward arrays keep
    # their shape, so an axis-0 mean yields an elementwise mean.
    zero_rewards = _TestReward(np.zeros(3))
    one_rewards = _TestReward(np.ones(3))
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[zero_rewards, one_rewards],
        combination_strategy=lambda rewards: np.mean(rewards, axis=0),
        output_spec_shape=(3,), flatten_rewards=False)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    np.testing.assert_allclose(output_timestep.reward, [0.5, 0.5, 0.5])

    # Check to make sure the flattened version gives a different result.
    # Reset things to help set up the specs.
    input_spec = self._input_spec.replace()  # makes a copy of the spec.
    zero_rewards = _TestReward(np.zeros(3))
    one_rewards = _TestReward(np.ones(3))
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=[zero_rewards, one_rewards],
        combination_strategy=lambda rewards: np.mean(rewards, axis=0),
        output_spec_shape=(), flatten_rewards=True)
    combined_reward.setup_io_spec(input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertEqual(output_timestep.reward, 0.5)

  def test_staged_active_monotonous(self):
    """More stages above threshold mean more reward, no matter the values."""
    reward_100 = _TestReward(reward_value=100.)
    reward_0 = _TestReward(reward_value=0.)
    staged_combination = rewards.StagedWithActiveThreshold(threshold=1e-6)
    combined_reward_1_above_thresh = rewards.CombineRewards(
        reward_preprocessors=[reward_100, reward_0],
        combination_strategy=staged_combination)

    reward_100 = _TestReward(reward_value=100.)
    reward_001 = _TestReward(reward_value=0.001)
    staged_combination = rewards.StagedWithActiveThreshold(threshold=1e-6)
    combined_reward_2_above_thresh = rewards.CombineRewards(
        reward_preprocessors=[reward_100, reward_001],
        combination_strategy=staged_combination)

    combined_reward_1_above_thresh.setup_io_spec(self._input_spec)
    combined_reward_2_above_thresh.setup_io_spec(self._input_spec)
    output_timestep_1_above_thresh = combined_reward_1_above_thresh.process(
        self._input_timestep)
    output_timestep_2_above_thresh = combined_reward_2_above_thresh.process(
        self._input_timestep)
    self.assertLess(output_timestep_1_above_thresh.reward,
                    output_timestep_2_above_thresh.reward)

  @parameterized.named_parameters(
      # Count 1, value 1.
      ('_contiguous', (1., 1., 0.5, 0.8, 0.), 0.9, 0.4),
      # Count 3 despite 0.5 among them; value 0.8.
      ('_count_below_thresh', (1., 1., 0.5, 0.8, 0.), 0.7, 0.76),
      # Count 3; value 1, NOT 100.
      ('_clip_final', (1., 1., 0.5, 100, 0.), 0.7, 0.8),
      # Count 3 despite 500 among them.
      ('_clip_mid', (1., 1., 500, 0.8, 0.), 0.7, 0.76),
  )
  def test_staged_active_clipping(self, term_rewards, threshold,
                                  expected_reward):
    """Terms are clipped if too large."""
    reward_preprocessors = [_TestReward(reward_value=r) for r in term_rewards]
    staged_combination = rewards.StagedWithActiveThreshold(threshold=threshold)
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=reward_preprocessors,
        combination_strategy=staged_combination)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertAlmostEqual(expected_reward, output_timestep.reward)

  @parameterized.named_parameters(
      # Should work even on a single stage, and round up if above thresh.
      ('_singleton_above_thresh', (0.92,), 0.9, 1., True),
      # Should work even on a single stage, and give shaped val if below thresh.
      ('_singleton_below_thresh', (0.82,), 0.9, 0.82, True),
      # First two tasks are solved so we work on third. `assume` flag irrelevant
      ('_monotonic_cumululative', (0.92, 0.91, 0.1), 0.9, 0.7, True),
      # First two tasks are solved so we work on third. `assume` flag irrelevant
      ('_monotonic_not_cumululative', (0.92, 0.91, 0.1), 0.9, 0.7, False),
      # Second task is solved so we assume first is too and work on third.
      ('_not_monotonic_cumululative', (0.6, 0.91, 0.1), 0.9, 0.7, True),
      # Second task is solved but first isn't so we work on that.
      ('_not_monotonic_not_cumululative', (0.6, 0.91, 0.1), 0.9, 0.2, False),
      # Nothing is solved so we work on the first task.
      ('_none_solved', (0.6, 0.2, 0.6), 0.9, 0.2, False),
  )
  def test_staged_success(self, term_rewards, threshold, expected_reward,
                          assume_cumulative_success):
    """Terms are clipped if too large."""
    reward_preprocessors = [_TestReward(reward_value=r) for r in term_rewards]
    staged_combination = rewards.StagedWithSuccessThreshold(
        threshold=threshold,
        assume_cumulative_success=assume_cumulative_success)
    combined_reward = rewards.CombineRewards(
        reward_preprocessors=reward_preprocessors,
        combination_strategy=staged_combination)
    combined_reward.setup_io_spec(self._input_spec)
    output_timestep = combined_reward.process(self._input_timestep)
    self.assertAlmostEqual(expected_reward, output_timestep.reward)
class _TestReward(timestep_preprocessor.TimestepPreprocessor):
  """Test preprocessor that overwrites the reward with a fixed value."""

  def __init__(self, reward_value: Union[float, int, np.ndarray]):
    super().__init__()
    self._reward_value = reward_value

  @overrides(timestep_preprocessor.TimestepPreprocessor)
  def _process_impl(
      self, timestep: timestep_preprocessor.PreprocessorTimestep
  ) -> timestep_preprocessor.PreprocessorTimestep:
    # Ignore the incoming reward entirely; emit the canned value.
    return timestep._replace(reward=self._reward_value)

  @overrides(timestep_preprocessor.TimestepPreprocessor)
  def _output_spec(
      self, input_spec: spec_utils.TimeStepSpec) -> spec_utils.TimeStepSpec:
    if np.isscalar(self._reward_value):
      # Scalar rewards: coerce to the input spec's reward dtype so the
      # emitted reward validates against the unchanged spec.
      self._reward_value = input_spec.reward_spec.dtype.type(self._reward_value)
    else:
      # Array rewards: widen the reward spec to the array's shape/dtype.
      return input_spec.replace(reward_spec=specs.Array(
          self._reward_value.shape, self._reward_value.dtype))
    return input_spec
# Standard absltest entry point: runs every test in this module.
if __name__ == '__main__':
  absltest.main()
| |
"""
The setup package to install SeleniumBase dependencies and plugins.
(Uses selenium 3.x and is compatible with Python 2.7+ and Python 3.5+)
"""
from setuptools import setup, find_packages # noqa: F401
import os
import sys
# Resolve the repository root and load the long description and version.
this_dir = os.path.abspath(os.path.dirname(__file__))
long_description = None
total_description = None
try:
    with open(os.path.join(this_dir, "README.md"), "rb") as f:
        total_description = f.read().decode("utf-8")
    # Strip HTML <meta>/<link> lines, which PyPI's renderer rejects.
    long_description = "\n".join(
        line for line in total_description.split("\n")
        if not line.startswith(("<meta ", "<link "))
    )
except IOError:
    # No README available (e.g. installing from an sdist without it).
    long_description = "A complete library for building end-to-end tests."

about = {}
# Get the package version from the seleniumbase/__version__.py file
with open(os.path.join(this_dir, "seleniumbase", "__version__.py"), "rb") as f:
    exec(f.read().decode("utf-8"), about)
# Custom "publish" command: `python setup.py publish` lints the tree,
# rebuilds the distribution artifacts, uploads them to PyPI, then exits
# without ever reaching setup() below.
if sys.argv[-1] == "publish":
    reply = None
    input_method = input
    # Python 2's input() evaluates the typed text; use raw_input there.
    if not sys.version_info[0] >= 3:
        input_method = raw_input  # noqa: F821
    confirm_text = ">>> Confirm release PUBLISH to PyPI? (yes/no): "
    reply = str(input_method(confirm_text)).lower().strip()
    if reply == "yes":
        print("\n*** Checking code health with flake8:\n")
        os.system("python -m pip install 'flake8==4.0.1'")
        flake8_status = os.system("flake8 --exclude=recordings,temp")
        # Non-zero exit status means flake8 found issues; abort the release.
        if flake8_status != 0:
            print("\nWARNING! Fix flake8 issues before publishing to PyPI!\n")
            sys.exit()
        else:
            print("*** No flake8 issues detected. Continuing...")
        print("\n*** Removing existing distribution packages: ***\n")
        os.system("rm -f dist/*.egg; rm -f dist/*.tar.gz; rm -f dist/*.whl")
        os.system("rm -rf build/bdist.*; rm -rf build/lib")
        print("\n*** Installing build: *** (Required for PyPI uploads)\n")
        os.system("python -m pip install --upgrade 'build>=0.7.0'")
        print("\n*** Installing twine: *** (Required for PyPI uploads)\n")
        os.system("python -m pip install --upgrade 'twine>=3.8.0'")
        print("\n*** Installing tqdm: *** (Required for PyPI uploads)\n")
        os.system("python -m pip install --upgrade 'tqdm>=4.63.0'")
        print("\n*** Rebuilding distribution packages: ***\n")
        os.system("python -m build")  # Create new tar/wheel
        print("\n*** Publishing The Release to PyPI: ***\n")
        os.system("python -m twine upload dist/*")  # Requires ~/.pypirc Keys
        print("\n*** The Release was PUBLISHED SUCCESSFULLY to PyPI! :) ***\n")
    else:
        print("\n>>> The Release was NOT PUBLISHED to PyPI! <<<\n")
    sys.exit()
setup(
    name="seleniumbase",
    version=about["__version__"],
    description="A complete web automation framework for end-to-end testing.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/seleniumbase/SeleniumBase",
    project_urls={
        "Changelog": "https://github.com/seleniumbase/SeleniumBase/releases",
        "Download": "https://pypi.org/project/seleniumbase/#files",
        "Gitter": "https://gitter.im/seleniumbase/SeleniumBase",
        "Slack": "https://app.slack.com/client/T0ABCRTNX/C01SM888REZ",
        "Blog": "https://seleniumbase.com/",
        "PyPI": "https://pypi.org/project/seleniumbase/",
        "Source": "https://github.com/seleniumbase/SeleniumBase",
        "Documentation": "https://seleniumbase.io/",
    },
    platforms=["Windows", "Linux", "Mac OS-X"],
    author="Michael Mintz",
    author_email="mdmintz@gmail.com",
    maintainer="Michael Mintz",
    license="MIT",
    keywords="pytest automation selenium browser testing webdriver sbase",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: MacOS X",
        "Environment :: Win32 (MS Windows)",
        "Environment :: Web Environment",
        "Framework :: Pytest",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Internet",
        "Topic :: Internet :: WWW/HTTP :: Browsers",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Image Processing",
        "Topic :: Scientific/Engineering :: Visualization",
        "Topic :: Software Development",
        "Topic :: Software Development :: Quality Assurance",
        "Topic :: Software Development :: Code Generators",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Testing",
        "Topic :: Software Development :: Testing :: Acceptance",
        "Topic :: Software Development :: Testing :: Traffic Generation",
        "Topic :: Utilities",
    ],
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
    # Dependencies are pinned/ranged per interpreter version via PEP 508
    # environment markers so one release supports Python 2.7 through 3.10.
    install_requires=[
        'pip>=20.3.4;python_version<"3.6"',
        'pip>=21.3.1;python_version>="3.6" and python_version<"3.7"',
        'pip>=22.0.3;python_version>="3.7"',
        'packaging>=20.9;python_version<"3.6"',
        'packaging>=21.3;python_version>="3.6"',
        'setuptools>=44.1.1;python_version<"3.5"',
        'setuptools>=50.3.2;python_version>="3.5" and python_version<"3.6"',
        'setuptools>=59.6.0;python_version>="3.6" and python_version<"3.7"',
        'setuptools>=60.9.3;python_version>="3.7"',
        'setuptools-scm>=5.0.2;python_version<"3.6"',
        'setuptools-scm>=6.4.2;python_version>="3.6"',
        'tomli>=1.2.3;python_version>="3.6" and python_version<"3.7"',
        'tomli>=2.0.1;python_version>="3.7"',
        "wheel>=0.37.1",
        "attrs>=21.4.0",
        'PyYAML>=6.0;python_version>="3.6"',
        'traitlets>=4.3.3;python_version<"3.7"',
        'traitlets>=5.1.1;python_version>="3.7"',
        "certifi>=2021.10.8",
        'filelock>=3.2.1;python_version<"3.6"',
        'filelock>=3.4.1;python_version>="3.6" and python_version<"3.7"',
        'filelock>=3.6.0;python_version>="3.7"',
        'platformdirs>=2.0.2;python_version<"3.6"',
        'platformdirs>=2.4.0;python_version>="3.6" and python_version<"3.7"',
        'platformdirs>=2.5.1;python_version>="3.7"',
        "six==1.16.0",
        'ipdb==0.13.4;python_version<"3.5"',
        'ipdb==0.13.9;python_version>="3.5"',
        'parso==0.7.1;python_version<"3.6"',
        'parso==0.8.3;python_version>="3.6"',
        'jedi==0.17.2;python_version<"3.6"',
        'jedi==0.18.1;python_version>="3.6"',
        'idna==2.10;python_version<"3.6"',  # Must stay in sync with "requests"
        'idna==3.3;python_version>="3.6"',  # Must stay in sync with "requests"
        'chardet==3.0.4;python_version<"3.5"',  # Stay in sync with "requests"
        'chardet==4.0.0;python_version>="3.5"',  # Stay in sync with "requests"
        'charset-normalizer==2.0.12;python_version>="3.5"',  # Sync "requests"
        "urllib3==1.26.8",  # Must stay in sync with "requests"
        'requests==2.27.1;python_version<"3.5"',
        'requests==2.25.1;python_version>="3.5" and python_version<"3.6"',
        'requests==2.27.1;python_version>="3.6"',
        "nose==1.3.7",
        'sniffio==1.2.0;python_version>="3.7"',
        'h11==0.13.0;python_version>="3.7"',
        'trio==0.20.0;python_version>="3.7"',
        'trio-websocket==0.9.2;python_version>="3.7"',
        'pyopenssl==22.0.0;python_version>="3.7"',
        'wsproto==1.1.0;python_version>="3.7"',
        'selenium==3.141.0;python_version<"3.7"',
        'selenium==4.1.2;python_version>="3.7"',
        'msedge-selenium-tools==3.141.3;python_version<"3.7"',
        'more-itertools==5.0.0;python_version<"3.5"',
        'more-itertools==8.12.0;python_version>="3.5"',
        "cssselect==1.1.0",
        "sortedcontainers==2.4.0",
        'fasteners==0.16;python_version<"3.5"',
        'fasteners==0.16.3;python_version>="3.5" and python_version<"3.6"',
        'fasteners==0.17.3;python_version>="3.6"',
        "execnet==1.9.0",
        'pluggy==0.13.1;python_version<"3.6"',
        'pluggy==1.0.0;python_version>="3.6"',
        'py==1.8.1;python_version<"3.5"',
        'py==1.11.0;python_version>="3.5"',
        'pytest==4.6.11;python_version<"3.5"',
        'pytest==6.1.2;python_version>="3.5" and python_version<"3.6"',
        'pytest==7.0.1;python_version>="3.6"',
        'pytest-forked==1.3.0;python_version<"3.6"',
        'pytest-forked==1.4.0;python_version>="3.6"',
        'pytest-html==1.22.1;python_version<"3.6"',
        'pytest-html==2.0.1;python_version>="3.6"',  # Newer ones had issues
        'pytest-metadata==1.8.0;python_version<"3.6"',
        'pytest-metadata==1.11.0;python_version>="3.6"',
        "pytest-ordering==0.6",
        'pytest-rerunfailures==8.0;python_version<"3.5"',
        'pytest-rerunfailures==9.1.1;python_version>="3.5" and python_version<"3.6"',  # noqa: E501
        'pytest-rerunfailures==10.2;python_version>="3.6"',
        'pytest-xdist==1.34.0;python_version<"3.5"',
        'pytest-xdist==2.2.1;python_version>="3.5" and python_version<"3.6"',
        'pytest-xdist==2.5.0;python_version>="3.6"',
        "parameterized==0.8.1",
        "sbvirtualdisplay==1.0.0",
        'soupsieve==1.9.6;python_version<"3.5"',
        'soupsieve==2.1;python_version>="3.5" and python_version<"3.6"',
        'soupsieve==2.3.1;python_version>="3.6"',
        'beautifulsoup4==4.9.3;python_version<"3.5"',
        'beautifulsoup4==4.10.0;python_version>="3.5"',
        'cryptography==2.9.2;python_version<"3.5"',
        'cryptography==3.2.1;python_version>="3.5" and python_version<"3.6"',
        'cryptography==36.0.1;python_version>="3.6"',
        'pygments==2.5.2;python_version<"3.5"',
        'pygments==2.11.2;python_version>="3.5"',
        'prompt-toolkit==1.0.18;python_version<"3.5"',
        'prompt-toolkit==2.0.10;python_version>="3.5" and python_version<"3.6"',  # noqa: E501
        'prompt-toolkit==3.0.28;python_version>="3.6"',
        'decorator==4.4.2;python_version<"3.5"',
        'decorator==5.1.1;python_version>="3.5"',
        'ipython==5.10.0;python_version<"3.5"',
        'ipython==7.9.0;python_version>="3.5" and python_version<"3.6"',
        'ipython==7.16.1;python_version>="3.6" and python_version<"3.7"',
        'ipython==7.32.0;python_version>="3.7"',  # Requires matplotlib-inline
        'matplotlib-inline==0.1.3;python_version>="3.7"',  # ipython needs this
        "colorama==0.4.4",
        'importlib-metadata==2.1.3;python_version<"3.6"',
        'importlib-metadata==4.2.0;python_version>="3.6" and python_version<"3.8"',  # noqa: E501
        "pycparser==2.21",
        'pymysql==0.10.1;python_version<"3.6"',
        'pymysql==1.0.2;python_version>="3.6"',
        'pyotp==2.3.0;python_version<"3.5"',
        'pyotp==2.6.0;python_version>="3.5"',
        "boto==2.49.0",
        "cffi==1.15.0",
        "toml==0.10.2",
        'Pillow==6.2.2;python_version<"3.5"',
        'Pillow==7.2.0;python_version>="3.5" and python_version<"3.6"',
        'Pillow==8.4.0;python_version>="3.6" and python_version<"3.7"',
        'Pillow==9.0.1;python_version>="3.7"',
        'typing-extensions==3.10.0.2;python_version<"3.6"',  # <3.8 for "rich"
        'typing-extensions==4.0.0;python_version>="3.6" and python_version<"3.8"',  # noqa: E501
        'rich==11.2.0;python_version>="3.6" and python_version<"4.0"',
        'tornado==5.1.1;python_version<"3.5"',
        'tornado==6.1;python_version>="3.5"',
        'pdfminer.six==20191110;python_version<"3.5"',
        'pdfminer.six==20201018;python_version>="3.5" and python_version<"3.6"',  # noqa: E501
        'pdfminer.six==20211012;python_version>="3.6"',
    ],
    extras_require={
        # pip install -e .[coverage]
        "coverage": [
            'coverage==5.5;python_version<"3.6"',
            'coverage==6.2;python_version>="3.6" and python_version<"3.7"',
            'coverage==6.3.2;python_version>="3.7"',
            'pytest-cov==2.12.1;python_version<"3.6"',
            'pytest-cov==3.0.0;python_version>="3.6"',
        ],
        # pip install -e .[flake]
        "flake": [
            'flake8==3.7.9;python_version<"3.5"',
            'flake8==3.9.2;python_version>="3.5" and python_version<"3.6"',
            'flake8==4.0.1;python_version>="3.6"',
            'mccabe==0.6.1',
            'pyflakes==2.1.1;python_version<"3.5"',
            'pyflakes==2.3.1;python_version>="3.5" and python_version<"3.6"',
            'pyflakes==2.4.0;python_version>="3.6"',
            'pycodestyle==2.5.0;python_version<"3.5"',
            'pycodestyle==2.7.0;python_version>="3.5" and python_version<"3.6"',  # noqa: E501
            'pycodestyle==2.8.0;python_version>="3.6"',
        ],
    },
    packages=[
        "seleniumbase",
        "sbase",
        "seleniumbase.common",
        "seleniumbase.config",
        "seleniumbase.console_scripts",
        "seleniumbase.core",
        "seleniumbase.drivers",
        "seleniumbase.extensions",
        "seleniumbase.fixtures",
        "seleniumbase.js_code",
        "seleniumbase.masterqa",
        "seleniumbase.plugins",
        "seleniumbase.translate",
        "seleniumbase.utilities",
        "seleniumbase.utilities.selenium_grid",
        "seleniumbase.utilities.selenium_ide",
    ],
    include_package_data=True,
    # Console scripts plus nose and pytest plugin registrations.
    entry_points={
        "console_scripts": [
            "seleniumbase = seleniumbase.console_scripts.run:main",
            "sbase = seleniumbase.console_scripts.run:main",  # Simplified name
        ],
        "nose.plugins": [
            "base_plugin = seleniumbase.plugins.base_plugin:Base",
            "selenium = seleniumbase.plugins.selenium_plugin:SeleniumBrowser",
            "page_source = seleniumbase.plugins.page_source:PageSource",
            "screen_shots = seleniumbase.plugins.screen_shots:ScreenShots",
            "test_info = seleniumbase.plugins.basic_test_info:BasicTestInfo",
            (
                "db_reporting = "
                "seleniumbase.plugins.db_reporting_plugin:DBReporting"
            ),
            "s3_logging = seleniumbase.plugins.s3_logging_plugin:S3Logging",
        ],
        "pytest11": ["seleniumbase = seleniumbase.plugins.pytest_plugin"],
    },
)

# print(os.system("cat seleniumbase.egg-info/PKG-INFO"))
print("\n*** SeleniumBase Installation Complete! ***\n")
| |
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.DeferredCommand import DeferredCommand
from pyjamas.ui.Widget import Widget
from pyjamas.ui.MenuItem import MenuItem
from pyjamas.ui.MenuBarPopupPanel import MenuBarPopupPanel
class MenuBar(Widget):
    """A menu bar widget holding a row (or column) of MenuItems.

    The bar renders as a table inside a div; horizontal bars keep all items
    in one row, vertical bars use one row per item.  Submenus are shown in a
    MenuBarPopupPanel; this widget also acts as the popup's listener via
    onPopupClosed().
    """

    def __init__(self, vertical=False, **kwargs):
        """Create a menu bar.

        Args:
            vertical: lay items out top-to-bottom instead of left-to-right.
            kwargs: forwarded to Widget; StyleName defaults to "gwt-MenuBar".
        """
        # FIX: dict.has_key() is deprecated (and removed in Python 3);
        # use the `in` operator instead.
        if 'StyleName' not in kwargs:
            kwargs['StyleName'] = "gwt-MenuBar"
        self.body = None
        self.items = []
        self.parentMenu = None
        self.popup = None
        self.selectedItem = None
        self.shownChildMenu = None
        self.vertical = False
        self.autoOpen = False

        table = DOM.createTable()
        self.body = DOM.createTBody()
        DOM.appendChild(table, self.body)

        if not vertical:
            # Horizontal bars share a single row for all items.
            tr = DOM.createTR()
            DOM.appendChild(self.body, tr)

        self.vertical = vertical

        outer = DOM.createDiv()
        DOM.appendChild(outer, table)
        self.setElement(outer)

        Widget.__init__(self, **kwargs)

    # also callable as:
    #   addItem(item)
    #   addItem(text, cmd)
    #   addItem(text, popup)
    #   addItem(text, asHTML, cmd)
    def addItem(self, item, asHTML=None, popup=None):
        """Append an item to the bar; returns the MenuItem added.

        `item` may be an existing MenuItem (detected by its setSubMenu
        attribute) or text/HTML from which a MenuItem is constructed.
        """
        if not hasattr(item, "setSubMenu"):
            item = MenuItem(item, asHTML, popup)

        if self.vertical:
            # One table row per item.
            tr = DOM.createTR()
            DOM.appendChild(self.body, tr)
        else:
            # All items live in the single first row.
            tr = DOM.getChild(self.body, 0)

        DOM.appendChild(tr, item.getElement())
        item.setParentMenu(self)
        item.setSelectionStyle(False)
        self.items.append(item)
        return item

    def clearItems(self):
        """Remove all items from the bar."""
        container = self.getItemContainerElement()
        while DOM.getChildCount(container) > 0:
            DOM.removeChild(container, DOM.getChild(container, 0))
        self.items = []

    def getAutoOpen(self):
        """Return whether submenus open on hover (without a click)."""
        return self.autoOpen

    def onBrowserEvent(self, event):
        """Dispatch click/mouseover/mouseout events to the item under the
        cursor.  Returns True only when a click was handled."""
        Widget.onBrowserEvent(self, event)

        item = self.findItem(DOM.eventGetTarget(event))
        if item is None:
            return False

        # FIX: renamed local `type` -> `event_type` to avoid shadowing the
        # builtin.
        event_type = DOM.eventGetType(event)
        if event_type == "click":
            self.doItemAction(item, True)
            return True
        elif event_type == "mouseover":
            self.itemOver(item)
        elif event_type == "mouseout":
            self.itemOver(None)

        return False

    def onPopupClosed(self, sender, autoClosed):
        """Popup-listener callback: tidy up when a submenu popup closes."""
        # If the menu popup was auto-closed, close all of its parents.
        if autoClosed:
            self.closeAllParents()

        self.onHide()
        self.shownChildMenu = None
        self.popup = None

    def removeItem(self, item):
        """Remove a single item from the bar (no-op if not present)."""
        try:
            idx = self.items.index(item)
        except ValueError:
            return

        container = self.getItemContainerElement()
        DOM.removeChild(container, DOM.getChild(container, idx))
        del self.items[idx]

    def setAutoOpen(self, autoOpen):
        """Set whether submenus open on hover (without a click)."""
        self.autoOpen = autoOpen

    def closeAllParents(self):
        """Close this menu and every ancestor menu, clearing the root's
        selection."""
        curMenu = self
        while curMenu is not None:
            curMenu.close()

            # Only the root menu keeps a visible selection to clear.
            if curMenu.parentMenu is None and curMenu.selectedItem is not None:
                curMenu.selectedItem.setSelectionStyle(False)
                curMenu.selectedItem = None

            curMenu = curMenu.parentMenu

    def doItemAction(self, item, fireCommand):
        """Perform the action of `item`: fire its command or open its
        submenu.  `fireCommand` is True for real clicks, False for hover."""
        # Ignore if the item's submenu is already showing.
        if (self.shownChildMenu is not None) and (item.getSubMenu() == self.shownChildMenu):
            return

        # Hide any open submenu before acting.
        if (self.shownChildMenu is not None):
            self.shownChildMenu.onHide()
            self.popup.hide()

        if item.getSubMenu() is None:
            if fireCommand:
                # Close this menu and all of its parents, then run the
                # item's command (deferred so the UI can update first).
                self.closeAllParents()

                cmd = item.getCommand()
                if cmd is not None:
                    DeferredCommand().add(cmd)
            return

        self.selectItem(item)
        self.popup = MenuBarPopupPanel(item)
        self.popup.addPopupListener(self)

        # Position the submenu beside (vertical) or below (horizontal)
        # the triggering item.
        if self.vertical:
            self.popup.setPopupPosition(self.getAbsoluteLeft() +
                                        self.getOffsetWidth() - 1,
                                        item.getAbsoluteTop())
        else:
            self.popup.setPopupPosition(item.getAbsoluteLeft(),
                                        self.getAbsoluteTop() +
                                        self.getOffsetHeight() - 1)

        self.shownChildMenu = item.getSubMenu()
        sub_menu = item.getSubMenu()
        sub_menu.parentMenu = self

        self.popup.show()

    def onDetach(self):
        """Hide any open popup before the widget is detached."""
        if self.popup is not None:
            self.popup.hide()

        Widget.onDetach(self)

    def itemOver(self, item):
        """Handle the mouse moving over `item` (or off the bar when None)."""
        if item is None:
            # Don't clear the selection while the selected item's submenu
            # is showing.
            if (self.selectedItem is not None) and (self.shownChildMenu == self.selectedItem.getSubMenu()):
                return

        self.selectItem(item)

        # Open the item's submenu on hover when appropriate.
        if item is not None:
            if (self.shownChildMenu is not None) or (self.parentMenu is not None) or self.autoOpen:
                self.doItemAction(item, False)

    def close(self):
        """Close this menu by hiding the parent's popup that contains it."""
        if self.parentMenu is not None:
            self.parentMenu.popup.hide()

    def findItem(self, hItem):
        """Return the MenuItem whose element is (or contains) `hItem`."""
        for item in self.items:
            if DOM.isOrHasChild(item.getElement(), hItem):
                return item

        return None

    def getItemContainerElement(self):
        """Return the DOM element that directly holds the item cells."""
        if self.vertical:
            return self.body
        else:
            return DOM.getChild(self.body, 0)

    def onHide(self):
        """Recursively hide any shown child menu and its popup."""
        if self.shownChildMenu is not None:
            self.shownChildMenu.onHide()
            self.popup.hide()

    def onShow(self):
        """Select the first item when the menu is shown."""
        if len(self.items) > 0:
            self.selectItem(self.items[0])

    def selectItem(self, item):
        """Move the visual selection to `item` (possibly None)."""
        if item == self.selectedItem:
            return

        if self.selectedItem is not None:
            self.selectedItem.setSelectionStyle(False)

        if item is not None:
            item.setSelectionStyle(True)

        self.selectedItem = item
| |
"""
A Python CLI interface that utilizes the Cisco UCS SDK to:
- Connect to a UCSM domain
- View and Add VLANs
- Add a VLAN to a vNIC
Please note that there is very little error handling present so
proceed accordingly.
"""
from UcsSdk import UcsHandle
from UcsSdk import UcsUtils
from UcsSdk.MoMeta.FabricVlan import FabricVlan
from UcsSdk.MoMeta.VnicLanConnTempl import VnicLanConnTempl
from UcsSdk.MoMeta.OrgOrg import OrgOrg
import sys
import warnings
'''
Suppress the following warning message, which does not affect functionality:
/Library/Python/2.7/site-packages/UcsSdk/UcsBase.py:1064: UserWarning: [Warning]: AddManagedObject [Description]:Expected Naming Property Name for ClassId VnicLanConnTempl not found
warnings.warn(string)
'''
warnings.filterwarnings("ignore")
def ssl_workaround():
    """ Workaround for SSL certification errors that prevent proper
    UCS domain login when using Python 2.7 or higher. Credit to user
    Rahul Gupta (ragupta4) on the Cisco UCS Communities. """
    verify_certificate = False
    if sys.version_info >= (2, 6):
        from functools import partial
        import ssl

        # Force TLSv1 on every wrapped socket.
        ssl.wrap_socket = partial(
            ssl.wrap_socket, ssl_version=ssl.PROTOCOL_TLSv1)
    if sys.version_info >= (2, 7, 9) and not verify_certificate:
        # 2.7.9+ verifies HTTPS certificates by default; opt out.
        ssl._create_default_https_context = ssl._create_unverified_context
def connect():
    """ Establish a connection to the UCS Domain using the module-level
    HANDLE, IP_ADDRESS, USERNAME and PASSWORD globals. """
    HANDLE.Login(IP_ADDRESS, USERNAME, PASSWORD)
def current_vlans():
    """ Build a mapping of VLAN name -> VLAN ID for every fabricVlan object
    on the UCS domain.

    The result is published as the function attribute ``current_vlans.list``
    so the top-level script (and add_vlans) can read it after calling this.
    """
    current_vlans.list = {}
    obj = HANDLE.GetManagedObject(None, FabricVlan.ClassId())
    if obj != None:
        for mo in obj:
            # Walk the managed object's property metadata to pull out the
            # VLAN's Name and Id values.
            for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
                if str(prop) == "Name":
                    vlan_name = mo.getattr(prop)
                if str(prop) == "Id":
                    vlan_id = mo.getattr(prop)
            current_vlans.list.update({vlan_name: vlan_id})
def add_vlans():
""" Create new VLANs on UCS. """
print ''
add_vlans.confirm_new_vlan = raw_input(
'Would you like to add a new VLAN? (yes/no): ')
while add_vlans.confirm_new_vlan not in ['yes', 'y', 'no', 'n']:
print ''
print '*** Error: Please enter either "yes" or "no". ***'
print ''
add_vlans.confirm_new_vlan = raw_input(
'Would you like to add a new VLAN? (yes/no): ')
if add_vlans.confirm_new_vlan not in ['no' or 'n']:
print
add_vlans.vlan_name = raw_input('Enter the VLAN Name: ')
vlan_id = raw_input('Enter the VLAN ID: ')
obj = HANDLE.GetManagedObject(None, None, {"Dn": "fabric/lan"})
HANDLE.AddManagedObject(obj, "fabricVlan", {"DefaultNet": "no", "PubNwName": "", "Dn": "fabric/lan/net-{}".format(add_vlans.vlan_name), "PolicyOwner": "local",
"CompressionType": "included", "Name": "{}".format(add_vlans.vlan_name), "Sharing": "none", "McastPolicyName": "", "Id": "{}".format(vlan_id)})
current_vlans()
for key, value in current_vlans.list.items():
if add_vlans.vlan_name in str(key):
print ''
print 'The following VLAN has been created: '
print ''
print '- ' + key + ' (' + value + ')'
def current_vnic_templates():
    """ Collect the names of all vNIC templates (vnicLanConnTempl) on the
    UCS domain.

    The result is published as the function attribute
    ``current_vnic_templates.list`` for the top-level script to read.
    """
    current_vnic_templates.list = []
    obj = HANDLE.GetManagedObject(None, VnicLanConnTempl.ClassId())
    if obj != None:
        for mo in obj:
            # Walk the property metadata looking for the template's Name.
            for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
                if str(prop) == "Name":
                    vnic_template_name = mo.getattr(prop)
                    current_vnic_templates.list.append(vnic_template_name)
def current_orgs():
    """ Get a list of the current organizations in UCS which will be used in
    add_vlan_to_vnic.

    The organization DNs are published as the function attribute
    ``current_orgs.list``; the root organization ('org-root') is removed
    from the list since it is used as the fallback in add_vlan_to_vnic.
    """
    current_orgs.list = []
    obj = HANDLE.GetManagedObject(None, OrgOrg.ClassId())
    if obj != None:
        for mo in obj:
            # Walk the property metadata looking for the org's Dn.
            for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
                if str(prop) == "Dn":
                    org_name = mo.getattr(prop)
                    current_orgs.list.append(org_name)
    current_orgs.list.remove('org-root')
def add_vlan_to_vnic():
""" Add a VLAN to a vNIC template """
print
add_vlan_to_vnic.vnic_name = raw_input('vNIC Template Name: ')
print ''
obj = HANDLE.GetManagedObject(None, VnicLanConnTempl.ClassId(), {
VnicLanConnTempl.RN: "lan-conn-templ-{}".format(add_vlan_to_vnic.vnic_name)})
if obj != None:
for mo in obj:
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
if str(prop) == "Dn":
dn = mo.getattr(prop)
for org in current_orgs.list:
if org in mo.getattr(prop):
organization = org
else:
organization = "org-root"
if str(prop) == "IdentPoolName":
ident_pool_name = mo.getattr(prop)
if str(prop) == "QosPolicyName":
qos_policy_name = mo.getattr(prop)
if str(prop) == "Descr":
descr = mo.getattr(prop)
if str(prop) == "PolicyOwner":
policy_owner = mo.getattr(prop)
if str(prop) == "NwCtrlPolicyName":
nw_ctrl_policy_name = mo.getattr(prop)
if str(prop) == "TemplType":
templ_type = mo.getattr(prop)
if str(prop) == "StatsPolicyName":
stats_policy_name = mo.getattr(prop)
if str(prop) == "Mtu":
mtu = mo.getattr(prop)
if str(prop) == "PinToGroupName":
pin_to_group_name = mo.getattr(prop)
if str(prop) == "SwitchId":
switch_id = mo.getattr(prop)
HANDLE.StartTransaction()
vnic_obj = HANDLE.GetManagedObject(
None, None, {"Dn": "{}".format(organization)})
mo = HANDLE.AddManagedObject(vnic_obj, "vnicLanConnTempl",
{"IdentPoolName": "{}".format(ident_pool_name),
"Dn": "{}".format(dn),
"QosPolicyName": "{}".format(qos_policy_name),
"Descr": "{}".format(descr),
"PolicyOwner": "{}".format(policy_owner),
"NwCtrlPolicyName": "{}".format(nw_ctrl_policy_name),
"TemplType": "{}".format(templ_type),
"StatsPolicyName": "{}".format(stats_policy_name),
"Mtu": "{}".format(mtu),
"PinToGroupName": "{}".format(pin_to_group_name),
"SwitchId": "{}".format(switch_id)}, True)
mo_1 = HANDLE.AddManagedObject(mo, "vnicEtherIf", {
"DefaultNet": "no",
"Name": "{}".format(add_vlans.vlan_name),
"Dn": "{}/if-{}".format(dn, add_vlans.vlan_name)}, True)
HANDLE.CompleteTransaction()
ssl_workaround()
IP_ADDRESS = ""
USERNAME = ""
PASSWORD = ""
HANDLE = UcsHandle()
HANDLE = UcsHandle()
connect()
print ''
print "Cisco UCS Manager VLAN Management"
print ''
print 'Current VLANs:'
current_vlans()
print ''
for name, ID in current_vlans.list.iteritems():
print '- ' + name + ' (' + ID + ')'
add_vlans()
if add_vlans.confirm_new_vlan not in ['no' or 'n']:
print ''
print 'Current vNIC Templates: '
print ''
current_vnic_templates()
for name in current_vnic_templates.list:
print '- ' + name
print ''
confirm_add_vlan = raw_input(
"Would you like to add the " + '"' + add_vlans.vlan_name + '"' + " VLAN to a vNIC template? (yes/no): ")
while confirm_add_vlan not in ['yes', 'y', 'no', 'n']:
print ''
print '*** Error: Please enter either "yes" or "no". ***'
print ''
cconfirm_add_vlan = raw_input(
"Would you like to add the " + '"' + add_vlans.vlan_name + '"' + " VLAN to a vNIC template? (yes/no): ")
if confirm_add_vlan not in ['no' or 'n']:
current_orgs()
add_vlan_to_vnic()
print ("The " + '"' + add_vlans.vlan_name + '"' + " VLAN has been added to " '"' +
add_vlan_to_vnic.vnic_name + '"' + " vNIC template.")
print
else:
print
HANDLE.Logout()
| |
"""Base for all node resource services.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import collections
import errno
import glob
import io
import logging
import os
import socket
import struct
import tempfile
import time
import six
from treadmill import dirwatch
from treadmill import exc
from treadmill import fs
from treadmill import logcontext as lc
from treadmill import plugin_manager
from treadmill import utils
from treadmill import watchdog
from treadmill import yamlwrapper as yaml
# Module-level logger for this service framework.
_LOGGER = logging.getLogger(__name__)

#: Name of the directory holding the resources requests
RSRC_DIR = 'resources'

#: Name of request payload file
REQ_FILE = 'request.yml'

#: Name of reply payload file
REP_FILE = 'reply.yml'

#: Default Resource Service timeout, in seconds (15 minutes)
DEFAULT_TIMEOUT = 15 * 60
def wait_for_file(filename, timeout=None):
    """Wait at least ``timeout`` seconds for a file to appear or be modified.

    :param ``int`` timeout:
        Minimum amount of seconds to wait for the file.  ``None`` means the
        module default (DEFAULT_TIMEOUT); ``0`` means a plain existence
        check with no waiting.
    :returns ``bool``:
        ``True`` if there was an event, ``False`` otherwise (timeout).
    """
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    elif timeout == 0:
        # No waiting requested; just report current existence.
        return os.path.exists(filename)

    # Watch the containing directory for create/modify events.
    # TODO: Fine tune the watcher mask for efficiency.
    watcher = dirwatch.DirWatcher(os.path.dirname(filename))

    deadline = time.time() + timeout
    while not os.path.exists(filename):
        remaining = max(0, deadline - time.time())
        if watcher.wait_for_events(timeout=remaining):
            watcher.process_events()
        if time.time() > deadline:
            return False

    return True
class ResourceServiceError(exc.TreadmillError):
    """Base Resource Service error.

    Root of the resource-service exception hierarchy; all errors raised by
    this module derive from it.
    """
    __slots__ = ()
class ResourceServiceRequestError(ResourceServiceError):
    """Resource Service Request error.

    Raised when a resource request was processed but resulted in an error
    reply; carries the offending request payload.
    """
    # The request payload that triggered the error.
    __slots__ = (
        'request',
    )

    def __init__(self, message, request):
        super(ResourceServiceRequestError, self).__init__(message)
        self.request = request
class ResourceServiceTimeoutError(ResourceServiceError, socket.timeout):
    """Resource Service timeout.

    Also derives from socket.timeout so callers can catch it generically
    alongside other timeout conditions.
    """
    __slots__ = ()
class ResourceServiceClient(object):
    """Client class for all Treadmill services.

    Requests are exchanged with the service through a per-client directory
    of per-resource request directories:

    /apps/<container>/rsrc/req-<svc_name>/
        request.yml
        reply.yml
        svc_req_id
    """

    #: File storing the UUID the service assigned to this request.
    _REQ_UID_FILE = 'svc_req_id'

    __slots__ = (
        '_serviceinst',
        '_clientdir',
    )

    def __init__(self, serviceinst, clientdir):
        self._serviceinst = serviceinst
        fs.mkdir_safe(clientdir)
        self._clientdir = os.path.realpath(clientdir)

    def put(self, rsrc_id, rsrc_data):
        """Request creation/update of a resource.

        Writes the request payload, then either registers a new request
        with the service or notifies it of an update (depending on whether
        a request UUID is already recorded).

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        :param `str` rsrc_data:
            (New) Parameters for the requested resource.
        """
        req_dir = self._req_dirname(rsrc_id)
        fs.mkdir_safe(req_dir)

        with io.open(os.path.join(req_dir, REQ_FILE), 'w') as f:
            if os.name == 'posix':
                os.fchmod(f.fileno(), 0o644)
            yaml.dump(rsrc_data,
                      explicit_start=True, explicit_end=True,
                      default_flow_style=False,
                      stream=f)

        req_uuid_file = os.path.join(req_dir, self._REQ_UID_FILE)
        try:
            with io.open(req_uuid_file) as f:
                svc_req_uuid = f.read().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                # No UUID yet: this is a brand new request.
                svc_req_uuid = None
            else:
                raise

        with lc.LogContext(_LOGGER, rsrc_id):
            if svc_req_uuid is None:
                try:
                    # New request
                    svc_req_uuid = self._serviceinst.clt_new_request(rsrc_id,
                                                                     req_dir)
                    # Write down the UUID
                    with io.open(req_uuid_file, 'w') as f:
                        f.write(svc_req_uuid)
                        # FIX: guard fchmod the same way as the request
                        # file write above; it is POSIX-only.
                        if os.name == 'posix':
                            os.fchmod(f.fileno(), 0o644)
                except OSError:
                    # Error registration failed, delete the request.
                    _LOGGER.exception('Unable to submit request')
                    fs.rmtree_safe(req_dir)
            else:
                self._serviceinst.clt_update_request(svc_req_uuid)

    def delete(self, rsrc_id):
        """Delete an existing resource.

        Unregisters the request from the service and moves the request
        directory aside to a backup name.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        """
        with lc.LogContext(_LOGGER, rsrc_id,
                           adapter_cls=lc.ContainerAdapter) as log:
            req_dir = self._req_dirname(rsrc_id)
            try:
                with io.open(os.path.join(req_dir, self._REQ_UID_FILE)) as f:
                    svc_req_uuid = f.read().strip()
            except IOError as err:
                if err.errno == errno.ENOENT:
                    log.warning('Resource %r does not exist', rsrc_id)
                    return
                raise
            self._serviceinst.clt_del_request(svc_req_uuid)
            os.rename(
                req_dir,
                self._bck_dirname(svc_req_uuid)
            )

    def get(self, rsrc_id):
        """Get the result of a resource request.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        :returns:
            The reply payload, or ``None`` if no reply is available yet.
        :raises ``ResourceServiceRequestError``:
            If the request resulted in error.
        """
        try:
            res = self.wait(rsrc_id, timeout=0)
        except ResourceServiceTimeoutError:
            res = None

        return res

    def wait(self, rsrc_id, timeout=None):
        """Wait for a requested resource to be ready.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        :raises ``ResourceServiceRequestError``:
            If the request resulted in error.
        :raises ``ResourceServiceTimeoutError``:
            If the request was not available before timeout.
        """
        req_dir = self._req_dirname(rsrc_id)
        rep_file = os.path.join(req_dir, REP_FILE)

        if not wait_for_file(rep_file, timeout):
            raise ResourceServiceTimeoutError(
                'Resource %r not available in time' % rsrc_id
            )

        try:
            with io.open(rep_file) as f:
                reply = yaml.load(stream=f)
        except (IOError, OSError) as err:
            if err.errno == errno.ENOENT:
                raise ResourceServiceTimeoutError(
                    'Resource %r not available in time' % rsrc_id
                )
            # FIX: previously unexpected I/O errors fell through, leaving
            # `reply` unbound and raising NameError below.  Propagate them.
            raise

        if isinstance(reply, dict) and '_error' in reply:
            raise ResourceServiceRequestError(reply['_error']['why'],
                                              reply['_error']['input'])

        return reply

    def status(self, timeout=30):
        """Query the status of the resource service.
        """
        return self._serviceinst.status(timeout=timeout)

    def _req_dirname(self, rsrc_id):
        """Request directory name for a given resource id.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        """
        req_dir_name = 'req-{name}-{rsrc_id}'.format(
            name=self._serviceinst.name,
            rsrc_id=rsrc_id
        )
        req_dir = os.path.join(self._clientdir, req_dir_name)
        return req_dir

    def _bck_dirname(self, req_uuid):
        """Return a unique backup directory name.

        Uniqueness comes from embedding the current timestamp.
        """
        bck_dir_name = 'bck{ts}-{name}-{req_uuid}'.format(
            name=self._serviceinst.name,
            req_uuid=req_uuid,
            ts=int(time.time()),
        )
        bck_dir = os.path.join(self._clientdir, bck_dir_name)
        return bck_dir
@six.add_metaclass(abc.ABCMeta)
class ResourceService(object):
    """Server class for all Treadmill services.

    Each pending request is a symlink in the service's resources directory
    pointing back at the client's request directory:

    /service_dir/resources/<containerid>-<uid>/ ->
        /apps/<containerid>/rsrc/req-<svc_name>/

    /apps/<container>/rsrc/<svc_name>/
        request.yml
        reply.yml
        svc_req_id
    """

    __slots__ = (
        '_is_dead',
        '_dir',
        '_rsrc_dir',
        '_service_impl',
        '_service_class',
        '_service_name',
    )

    # Value written to an eventfd-style FD to signal a pending I/O event.
    _IO_EVENT_PENDING = struct.pack('@Q', 1)

    def __init__(self, service_dir, impl):
        fs.mkdir_safe(service_dir)
        self._dir = os.path.realpath(service_dir)
        self._rsrc_dir = os.path.join(self._dir, RSRC_DIR)
        fs.mkdir_safe(self._rsrc_dir)
        self._is_dead = False
        self._service_impl = impl
        self._service_class = None
        # Figure out the service's name: either the last component of the
        # plugin's dotted name, or the implementation class' name.
        if isinstance(self._service_impl, six.string_types):
            svc_name = self._service_impl.rsplit('.', 1)[-1]
        else:
            svc_name = self._service_impl.__name__
        self._service_name = svc_name

    @property
    def name(self):
        """Name of the service."""
        return self._service_name

    def make_client(self, client_dir):
        """Create a client using `client_dir` as request dir location.
        """
        return ResourceServiceClient(self, client_dir)

    @abc.abstractmethod
    def status(self, timeout=30):
        """Query the status of the resource service.

        :param ``float`` timeout:
            Wait at least timeout seconds for the service to reply.
        :raises ``ResourceServiceTimeoutError``:
            If the requested service does not come up before timeout.
        :raises ``socket.error``:
            If there is a communication error with the service.
        """
        pass

    def get(self, req_id):
        """Read the reply of a given request.

        :raises ``ResourceServiceRequestError``:
            If the recorded reply is an error reply.
        """
        rep_file = os.path.join(self._rsrc_dir, req_id, REP_FILE)
        with io.open(rep_file) as f:
            reply = yaml.load(stream=f)

        if isinstance(reply, dict) and '_error' in reply:
            raise ResourceServiceRequestError(reply['_error']['why'],
                                              reply['_error']['input'])

        return reply

    @abc.abstractmethod
    def _run(self, impl, watchdog_lease):
        """Implementation specific run.
        """

    def run(self, watchdogs_dir, *impl_args, **impl_kwargs):
        """Run the service.

        The run procedure will first initialize the service's implementation,
        then setup the service's watchdog, and start the service resource
        resynchronization procedure.

        This procedure is in 4 phases to handle both fresh starts and
        restarts.

        $ Call the implementation's :function:`initialize` function which
          allows the implementation to query and import the backend
          resource's state.
        $ Setup the service request watcher.
        $ Import all existing requests (passing them to the
          :function:`on_created` implementation's handler).
        $ Call the implementation's :function:`synchronize` function which
          expunges anything allocated against the backend resource that
          doesn't have a matching request anymore.

        The implementation is expected to implement two handlers:

        * :function:`on_created` that handles new resource requests or
          updates to existing resource requests (implementation is expected
          to be idempotent).
        * :function:`on_deleted` that handles deletion of resource requests.
          It should properly handle the case where the backend resource is
          already gone.

        :param ``str`` watchdogs_dir:
            Path to the watchdogs directory.
        :param ``tuple`` impl_args:
            Arguments passed to the implementation's constructor.
        :param ``dict`` impl_kwargs:
            Keywords arguments passed to the implementation's constructor.
        """
        # Load the implementation
        if self._service_class is None:
            self._service_class = self._load_impl()
        impl = self._service_class(*impl_args, **impl_kwargs)

        # Setup the watchdog
        watchdogs = watchdog.Watchdog(os.path.realpath(watchdogs_dir))
        watchdog_lease = watchdogs.create(
            name='svc-{svc_name}'.format(svc_name=self.name),
            timeout='{hb:d}s'.format(hb=impl.WATCHDOG_HEARTBEAT_SEC),
            content='Service %r failed' % self.name
        )

        self._run(impl, watchdog_lease)

        # FIX: typo in log message ('Shuting' -> 'Shutting').
        _LOGGER.info('Shutting down %r service', self.name)
        # Remove the service heartbeat
        watchdog_lease.remove()

    def _load_impl(self):
        """Load the implementation class of the service.
        """
        if isinstance(self._service_impl, six.string_types):
            impl_class = plugin_manager.load('treadmill.services',
                                             self._service_impl)
        else:
            impl_class = self._service_impl

        assert issubclass(impl_class, BaseResourceServiceImpl), \
            'Invalid implementation %r' % impl_class

        return impl_class

    def clt_new_request(self, req_id, req_data_dir):
        """Add a request data dir as `req_id` to the service.

        This should only be called by the client instance.
        """
        svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
        _LOGGER.info('Registering %r: %r -> %r',
                     req_id, svc_req_lnk, req_data_dir)
        # NOTE(boysson): We use a temporary file + rename behavior to
        # override any potential old symlinks.  mktemp only produces the
        # name; the symlink+rename sequence is what makes this atomic.
        tmpsymlink = tempfile.mktemp(dir=self._rsrc_dir,
                                     prefix='.tmp' + req_id)
        os.symlink(req_data_dir, tmpsymlink)
        os.rename(tmpsymlink, svc_req_lnk)
        return req_id

    def clt_del_request(self, req_id):
        """Remove an existing request.

        This should only be called by the client instance.
        """
        svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
        # FIX: typo in log message ('Unegistering' -> 'Unregistering').
        _LOGGER.info('Unregistering %r: %r', req_id, svc_req_lnk)
        fs.rm_safe(svc_req_lnk)

        return req_id

    @abc.abstractmethod
    def clt_update_request(self, req_id):
        """Update an existing request.

        This should only be called by the client instance.
        """
        pass

    def _check_requests(self):
        """Check each existing request and remove stale ones.
        """
        svcs = collections.deque()
        for svc in glob.glob(os.path.join(self._rsrc_dir, '*')):
            try:
                # stat() follows the symlink; failure means the target
                # (client request dir) is gone.
                os.stat(svc)
                svcs.append(svc)

            except OSError as err:
                if err.errno == errno.ENOENT:
                    _LOGGER.warning('Deleting stale request: %r', svc)
                    fs.rm_safe(svc)
                else:
                    raise

        return svcs

    def _on_created(self, impl, filepath):
        """Private handler for request creation events.

        Returns ``True`` when the request was actioned without error,
        ``False`` otherwise, and ``None``/no return when ignored.
        """
        # Avoid triggering on changes to the service directory itself.
        if filepath == self._rsrc_dir:
            return

        req_id = os.path.basename(filepath)

        # Avoid triggering on temporary files
        if req_id[0] == '.':
            return

        req_file = os.path.join(filepath, REQ_FILE)
        rep_file = os.path.join(filepath, REP_FILE)

        try:
            with io.open(req_file) as f:
                req_data = yaml.load(stream=f)

        except IOError as err:
            if (err.errno == errno.ENOENT or
                    err.errno == errno.ENOTDIR):
                _LOGGER.exception('Removing invalid request: %r', req_id)
                try:
                    fs.rm_safe(filepath)
                except OSError as rm_err:
                    if rm_err.errno == errno.EISDIR:
                        fs.rmtree_safe(filepath)
                    else:
                        raise
                return
            raise

        # TODO: We should also validate the req_id format
        with lc.LogContext(_LOGGER, req_id,
                           adapter_cls=lc.ContainerAdapter) as log:

            log.debug('created %r: %r', req_id, req_data)

            try:
                # TODO: We should also validate the req_id format
                utils.validate(req_data, impl.PAYLOAD_SCHEMA)
                res = impl.on_create_request(req_id, req_data)

            except exc.InvalidInputError as err:
                log.error('Invalid request data: %r: %s', req_data, err)
                res = {'_error': {'input': req_data, 'why': str(err)}}

            except Exception as err:  # pylint: disable=W0703
                log.exception('Unable to process request: %r %r:',
                              req_id, req_data)
                res = {'_error': {'input': req_data, 'why': str(err)}}

        if res is None:
            # Request was not actioned
            return False

        fs.write_safe(
            rep_file,
            lambda f: yaml.dump(
                res, explicit_start=True, explicit_end=True,
                default_flow_style=False, stream=f
            ),
            mode='w',
            permission=0o644
        )

        # Return True if there were no error
        return not bool(res.get('_error', False))

    def _on_deleted(self, impl, filepath):
        """Private handler for request deletion events.
        """
        req_id = os.path.basename(filepath)

        # Avoid triggering on temporary files
        if req_id[0] == '.':
            return

        # TODO: We should also validate the req_id format
        with lc.LogContext(_LOGGER, req_id,
                           adapter_cls=lc.ContainerAdapter) as log:
            log.debug('deleted %r', req_id)
            res = impl.on_delete_request(req_id)

        return res
@six.add_metaclass(abc.ABCMeta)
class BaseResourceServiceImpl(object):
    """Base interface of Resource Service implementations.

    Concrete services subclass this and provide the initialize/synchronize
    lifecycle hooks plus the per-request create/delete/retry handlers.
    """

    __slots__ = (
        '_service_dir',
        '_service_rsrc_dir',
    )

    # Maximum number of requests to process per event cycle.
    MAX_REQUEST_PER_CYCLE = 5

    # Schema used to validate incoming request payloads (see utils.validate).
    PAYLOAD_SCHEMA = ()

    # Interval, in seconds, used to size the service's watchdog timeout.
    WATCHDOG_HEARTBEAT_SEC = 60

    def __init__(self):
        self._service_dir = None
        self._service_rsrc_dir = None

    @abc.abstractmethod
    def initialize(self, service_dir):
        """Service initialization.

        Records the service directory and its resources subdirectory;
        subclasses extend this to import backend state.
        """
        self._service_dir = service_dir
        self._service_rsrc_dir = os.path.join(service_dir, RSRC_DIR)

    @abc.abstractmethod
    def synchronize(self):
        """Assert that the internal state of the service matches the backend
        state.
        """
        return

    @abc.abstractmethod
    def on_create_request(self, rsrc_id, rsrc_data):
        """Call back invoked when a new resource request is received.

        Args:
            rsrc_id ``str``: Unique resource identifier

            rsrc_data ``dict``: Resource request metadata

        Returns:
            ``dict``: Result communicated back to the requestor, ``None``,
            ``False`` or ``{}`` if no changes to the service were made.
        """
        pass

    @abc.abstractmethod
    def on_delete_request(self, rsrc_id):
        """Call back invoked when a resource is deleted.

        Arguments::
            rsrc_id ``str``: Unique resource identifier
        """
        pass

    @abc.abstractmethod
    def retry_request(self, rsrc_id):
        """Force re-evaluation of a request.

        Arguments::
            rsrc_id ``str``: Unique resource identifier
        """
        pass
| |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains update/release functions for google-api-nodejs-client."""
import json
import os
import re
from datetime import date
from os.path import join
from tasks import _commit_message, _common, _git
from tasks._check_output import check_output
# Local checkout directory name and GitHub "owner/repo" path.
_REPO_NAME = 'google-api-nodejs-client'
_REPO_PATH = 'google/google-api-nodejs-client'

# Matches strings like "apis/foo/v1.ts".
_SERVICE_FILENAME_RE = re.compile(r'apis/(.+)/(.+)\.ts')

# Matches strings like "v20.3.1".
_VERSION_RE = re.compile(r'^v([0-9]+)\.([0-9]+)\.([0-9]+)$')
class _Version(object):
    """Represents a version of the format "1.2.3".

    Only major and minor are tracked; rendering always resets patch to 0.
    """

    def __init__(self, latest_tag):
        """Parse a tag like "v20.3.1"; raises if it doesn't match."""
        match = _VERSION_RE.match(latest_tag)
        if match is None:
            raise Exception(
                'latest tag does not match the pattern "{}": {}'.format(
                    _VERSION_RE.pattern, latest_tag))
        major, minor = match.group(1), match.group(2)
        self.major_version = int(major)
        self.minor_version = int(minor)

    def __str__(self):
        """Render as "major.minor.0"."""
        return '.'.join(
            [str(self.major_version), str(self.minor_version), '0'])

    def bump_major(self):
        """Increment major and reset minor to zero."""
        self.major_version, self.minor_version = self.major_version + 1, 0

    def bump_minor(self):
        """Increment minor, leaving major untouched."""
        self.minor_version = self.minor_version + 1
def _build(repo):
    """Run "make build" in the repository's working directory."""
    check_output(['make', 'build'],
                 cwd=repo.filepath)
def _generate_all_clients(repo):
    """Run "make generate" and report which service files changed.

    Returns three sets of "name:version" identifiers:
    (added, deleted, updated).
    """
    check_output(['make', 'generate'],
                 cwd=repo.filepath)
    added = set()
    deleted = set()
    updated = set()
    # Route each git status code to the matching result set.
    by_status = {
        _git.Status.ADDED: added,
        _git.Status.DELETED: deleted,
        _git.Status.UPDATED: updated,
    }
    for filename, status in repo.diff_name_status(staged=False):
        match = _SERVICE_FILENAME_RE.match(filename)
        if match:
            name_version = '{}:{}'.format(match.group(1), match.group(2))
            # Unknown statuses fall into a throwaway set.
            by_status.get(status, set()).add(name_version)
    return added, deleted, updated
def _check_latest_version(latest_tag):
    """Raise unless *latest_tag* equals the googleapis version on npm."""
    published = check_output(['npm', 'view', 'googleapis', 'version']).strip()
    if latest_tag != published:
        raise Exception(
            ('latest tag does not match the latest package version on npm:'
             ' {} != {}').format(latest_tag, published))
def _install_dependencies(repo):
    """Run "npm install" in the repository's working directory."""
    check_output(['npm', 'install'], cwd=repo.filepath)
def _publish_package(repo, npm_account):
    """Publish the package to npm using npm_account's auth token.

    NOTE(review): this overwrites any existing ~/.npmrc and leaves the
    auth token on disk afterwards.
    """
    with open(os.path.expanduser('~/.npmrc'), 'w') as file_:
        file_.write('//registry.npmjs.org/:_authToken={}\n'.format(
            npm_account.auth_token))
    check_output(['npm', 'publish'], cwd=repo.filepath)
def _run_tests(repo):
    """Run "make test" in the repository's working directory."""
    check_output(['make', 'test'], cwd=repo.filepath)
def _update_changelog_md(repo, new_version, added, deleted, updated):
data = '##### {} - {}\n'.format(
new_version, date.today().strftime("%d %B %Y"))
if deleted:
data += '\n###### Breaking changes\n'
for name_version in sorted(deleted):
data += '- Deleted `{}`\n'.format(name_version)
if added or updated:
data += '\n###### Backwards compatible changes\n'
for name_version in sorted(added):
data += '- Added `{}`\n'.format(name_version)
for name_version in sorted(updated):
data += '- Updated `{}`\n'.format(name_version)
data += '\n'
filename = join(repo.filepath, 'CHANGELOG.md')
with open(filename) as file_:
data = data + file_.read()
with open(filename, 'w') as file_:
file_.write(data)
def _update_and_publish_gh_pages(repo, new_version, github_account):
    """Generate docs for *new_version* and publish them on the gh-pages
    branch.

    The freshly generated docs are copied both to "latest" and to a
    directory named after the new version, index.md's version list is
    updated, and the result is committed and pushed to gh-pages before
    switching back to master.
    """
    check_output(['make', 'docs'], cwd=repo.filepath)
    repo.checkout('gh-pages')
    # Replace the "latest" docs with the newly generated ones.
    check_output(['rm', '-rf', 'latest'], cwd=repo.filepath)
    doc_filepath = 'doc/googleapis/{}'.format(new_version)
    check_output(['cp', '-r', doc_filepath, 'latest'], cwd=repo.filepath)
    check_output(['cp', '-r', doc_filepath, new_version], cwd=repo.filepath)
    index_md_filename = join(repo.filepath, 'index.md')
    lines = []
    with open(index_md_filename) as file_:
        lines = file_.readlines()
    # index.md should be at least 5 lines long and have the first bullet
    # (latest) on line 4.
    if len(lines) < 5 or lines[3] != '\n' or not lines[4].startswith('*'):
        raise Exception('index.md has an unexpected format')
    # Demote the previous "latest" bullet and insert the new one above it.
    lines[4] = lines[4].replace(' (latest)', '', 1)
    bullet = ('* [v{nv} (latest)]'
              '(http://google.github.io/google-api-nodejs-client'
              '/{nv}/index.html)\n').format(nv=new_version)
    lines.insert(4, bullet)
    with open(index_md_filename, 'w') as file_:
        file_.write(''.join(lines))
    repo.add(['latest', new_version])
    repo.commit(new_version, github_account.name, github_account.email)
    repo.push(branch='gh-pages')
    repo.checkout('master')
def _update_package_json(repo, new_version):
filename = join(repo.filepath, 'package.json')
data = {}
with open(filename) as file_:
# Note, we use `json.loads` instead of `json.load` here, and
# `json.dumps` instead of `json.dump` below, because it's easier to
# just mock `open` than it is to mock `open` and both `json` functions.
data = json.loads(file_.read())
data['version'] = new_version
with open(filename, 'w') as file_:
file_.write(json.dumps(data, indent=2))
def update(filepath, github_account):
    """Updates the google-api-nodejs-client repository.

    Clones the repo, regenerates all API clients, and -- if anything
    changed -- builds, tests, commits and pushes the regenerated clients.

    Args:
        filepath (str): the directory to work in.
        github_account (GitHubAccount): the GitHub account to commit with.
    """
    repo = _git.clone_from_github(
        _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)
    _install_dependencies(repo)
    added, deleted, updated = _generate_all_clients(repo)
    # Nothing regenerated: no commit needed.
    if not any([added, deleted, updated]):
        return
    _build(repo)
    _run_tests(repo)
    commitmsg = _commit_message.build(added, deleted, updated)
    repo.add(['apis'])
    repo.commit(commitmsg, github_account.name, github_account.email)
    repo.push()
def release(filepath, github_account, npm_account, force=False):
    """Releases a new version in the google-api-nodejs-client repository.

    A release consists of:
    1. A Git tag of a new version.
    2. An update to "package.json" and "CHANGELOG.md".
    3. A package pushed to npm.
    4. Generated docs updated on the branch "gh-pages".

    Args:
        filepath (str): the directory to work in.
        github_account (GitHubAccount): the GitHub account to commit with.
        npm_account: the npm account used to publish the package.
        force (bool, optional): if true, the check that all authors since the
            last tag were `github_account` is ignored.
    """
    repo = _git.clone_from_github(
        _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)
    latest_tag = repo.latest_tag()
    version = _Version(latest_tag)
    if not _common.check_prerelease(repo, latest_tag, github_account, force):
        return
    _check_latest_version(latest_tag)
    # Bucket the service IDs changed since the last tag by their Git status.
    added, deleted, updated = set(), set(), set()
    buckets = {
        _git.Status.ADDED: added,
        _git.Status.DELETED: deleted,
        _git.Status.UPDATED: updated,
    }
    for changed_file, status in repo.diff_name_status(rev=latest_tag):
        match = _SERVICE_FILENAME_RE.match(changed_file)
        if match:
            buckets.get(status, set()).add(match.group(1))
    # A deleted API is a breaking change; additions/updates are minor bumps.
    if deleted:
        version.bump_major()
    else:
        version.bump_minor()
    new_version = str(version)
    _update_package_json(repo, new_version)
    _update_changelog_md(repo, new_version, added, deleted, updated)
    _install_dependencies(repo)
    _build(repo)
    _run_tests(repo)
    repo.commit(new_version, github_account.name, github_account.email)
    repo.tag(new_version)
    repo.push()
    repo.push(tags=True)
    _publish_package(repo, npm_account)
    _update_and_publish_gh_pages(repo, new_version, github_account)
| |
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A helper class for proxy objects to remote APIs.
For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
import six
from messager.common import rpc
from messager.common.rpc import common as rpc_common
from messager.common.rpc import serializer as rpc_serializer
class RpcProxy(object):
    """A helper class for rpc clients.

    This class is a wrapper around the RPC client API. It allows you to
    specify the topic and API version in a single place. This is intended to
    be used as a base class for a class that implements the client side of an
    rpc API.
    """

    # The default namespace, which can be overridden in a subclass.
    RPC_API_NAMESPACE = None

    def __init__(self, topic, default_version, version_cap=None,
                 serializer=None):
        """Initialize an RpcProxy.

        :param topic: The topic to use for all messages.
        :param default_version: The default API version to request in all
               outgoing messages. This can be overridden on a per-message
               basis.
        :param version_cap: Optionally cap the maximum version used for sent
               messages.
        :param serializer: Optionally (de-)serialize entities with a
               provided helper.
        """
        self.topic = topic
        self.default_version = default_version
        self.version_cap = version_cap
        if serializer is None:
            # Fall back to a pass-through (de-)serializer.
            serializer = rpc_serializer.NoOpSerializer()
        self.serializer = serializer
        super(RpcProxy, self).__init__()

    def _set_version(self, msg, vers):
        """Helper method to set the version in a message.

        :param msg: The message having a version added to it.
        :param vers: The version number to add to the message; falls back to
               `self.default_version` when falsy.
        :raises rpc_common.RpcVersionCapError: if the chosen version is not
                compatible with the configured version cap.
        """
        v = vers if vers else self.default_version
        if (self.version_cap and not
                rpc_common.version_is_compatible(self.version_cap, v)):
            raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
        msg['version'] = v

    def _get_topic(self, topic):
        """Return the topic to use for a message."""
        return topic if topic else self.topic

    def can_send_version(self, version):
        """Check to see if a version is compatible with the version cap."""
        return (not self.version_cap or
                rpc_common.version_is_compatible(self.version_cap, version))

    @staticmethod
    def make_namespaced_msg(method, namespace, **kwargs):
        """Build a message dict carrying the method, namespace, and args."""
        dictobj = {'method': method, 'namespace': namespace, 'args': kwargs}
        return dictobj

    def make_msg(self, method, **kwargs):
        """Build a message dict using this class's RPC_API_NAMESPACE."""
        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
                                        **kwargs)

    def _serialize_msg_args(self, context, kwargs):
        """Helper method called to serialize message arguments.

        This calls our serializer on each argument, returning a new
        set of args that have been serialized.

        :param context: The request context
        :param kwargs: The arguments to serialize
        :returns: A new set of serialized arguments
        """
        new_kwargs = dict()
        for argname, arg in six.iteritems(kwargs):
            new_kwargs[argname] = self.serializer.serialize_entity(context,
                                                                   arg)
        return new_kwargs

    def call(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.call() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :returns: The return value from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.call(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        # Use the module's `rpc_common` alias consistently (it is the same
        # module as `rpc.common`, which other methods here referenced).
        except rpc_common.Timeout as exc:
            # Re-raise with the topic and method filled in for debugging.
            raise rpc_common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def multicall(self, context, msg, topic=None, version=None, timeout=None):
        """rpc.multicall() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :param timeout: (Optional) A timeout to use when waiting for the
               response. If no timeout is specified, a default timeout will be
               used that is usually sufficient.
        :returns: An iterator that lets you process each of the returned values
                  from the remote method as they arrive.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        real_topic = self._get_topic(topic)
        try:
            result = rpc.multicall(context, real_topic, msg, timeout)
            return self.serializer.deserialize_entity(context, result)
        except rpc_common.Timeout as exc:
            # Re-raise with the topic and method filled in for debugging.
            raise rpc_common.Timeout(
                exc.info, real_topic, msg.get('method'))

    def cast(self, context, msg, topic=None, version=None):
        """rpc.cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :returns: None. rpc.cast() does not wait on any return value from the
                  remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast(context, self._get_topic(topic), msg)

    def fanout_cast(self, context, msg, topic=None, version=None):
        """rpc.fanout_cast() a remote method.

        :param context: The request context
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :returns: None. rpc.fanout_cast() does not wait on any return value
                  from the remote method.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast(context, self._get_topic(topic), msg)

    def cast_to_server(self, context, server_params, msg, topic=None,
                       version=None):
        """rpc.cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters. See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :returns: None. rpc.cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)

    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
                              version=None):
        """rpc.fanout_cast_to_server() a remote method.

        :param context: The request context
        :param server_params: Server parameters. See rpc.cast_to_server() for
               details.
        :param msg: The message to send, including the method and args.
        :param topic: Override the topic for this message.
        :param version: (Optional) Override the requested API version in this
               message.
        :returns: None. rpc.fanout_cast_to_server() does not wait on any
                  return values.
        """
        self._set_version(msg, version)
        msg['args'] = self._serialize_msg_args(context, msg['args'])
        rpc.fanout_cast_to_server(context, server_params,
                                  self._get_topic(topic), msg)
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A general training and validation pipeline.
"""
from os.path import join, dirname, exists
from shutil import rmtree
from time import time
from collections import deque
from tqdm import tqdm
from absl import app, flags
import tensorflow as tf
from third_party.xiuminglib import xiuminglib as xm
from nerfactor import datasets
from nerfactor import models
from nerfactor.util import logging as logutil, io as ioutil, \
config as configutil
# Command-line flags controlling configuration, debugging, and device choice.
flags.DEFINE_string(
    'config', 'nerf.ini', "base .ini file in config/ or a full path")
flags.DEFINE_string('config_override', '', "e.g., 'key1=value1,key2=value2'")
flags.DEFINE_boolean('debug', False, "debug mode switch")
flags.DEFINE_enum(
    'device', 'gpu', ['cpu', 'gpu'], "running on what type of device(s)")
# Parsed flag values; populated once app.run() has processed argv.
FLAGS = flags.FLAGS

# Module-level logger tagged with this pipeline's name.
logger = logutil.Logger(loggee="trainvali")
def main(_):
    """Runs the general training/validation pipeline.

    Reads the .ini configuration named by --config (optionally overridden by
    --config_override key=value pairs), builds distributed train/vali
    datasets, the model, and the optimizer, then runs the epoch loop with
    periodic checkpointing, TensorBoard summaries, and visualizations.
    """
    if FLAGS.debug:
        logger.warn("Debug mode: on")
    # In debug mode run the train step eagerly (steppable in a debugger);
    # otherwise compile it into a tf.function for speed.
    distributed_train_step_decor = distributed_train_step if FLAGS.debug \
        else tf.function(distributed_train_step)
    # Distribution strategy
    strategy = get_strategy()
    # Configurations: --config may be a full path or a name under config/
    config_ini = FLAGS.config
    if not exists(config_ini):
        config_ini = join(dirname(__file__), 'config', FLAGS.config)
    config = ioutil.read_config(config_ini)
    # Any override?
    if FLAGS.config_override != '':
        for kv in FLAGS.config_override.split(','):
            k, v = kv.split('=')
            config.set('DEFAULT', k, v)
    # Output directory: experiment name may interpolate other config values
    config_dict = configutil.config2dict(config)
    xname = config.get('DEFAULT', 'xname').format(**config_dict)
    outroot = config.get('DEFAULT', 'outroot')
    outdir = join(outroot, xname)
    overwrite = config.getboolean('DEFAULT', 'overwrite')
    ioutil.prepare_outdir(outdir, overwrite=overwrite)
    logger.info("For results, see:\n\t%s", outdir)
    # Dump actual configuration used to disk
    config_out = outdir.rstrip('/') + '.ini'
    ioutil.write_config(config, config_out)
    # Make training dataset
    dataset_name = config.get('DEFAULT', 'dataset')
    Dataset = datasets.get_dataset_class(dataset_name)
    dataset_train = Dataset(config, 'train', debug=FLAGS.debug)
    global_bs_train = dataset_train.bs
    no_batch = config.getboolean('DEFAULT', 'no_batch')
    datapipe_train = dataset_train.build_pipeline(no_batch=no_batch)
    datapipe_train = strategy.experimental_distribute_dataset(datapipe_train)
    # Make validation dataset
    dataset_vali = Dataset(config, 'vali', debug=FLAGS.debug)
    global_bs_vali = dataset_vali.bs  # maybe different from training
    try:
        datapipe_vali = dataset_vali.build_pipeline(no_batch=no_batch)
    except FileNotFoundError:
        # No validation data on disk; validation is skipped entirely below
        datapipe_vali = None
    # Sample validation batches, and just stick with them
    if datapipe_vali is None:
        vali_batches = None
    else:
        n_vali_batches = config.getint('DEFAULT', 'vali_batches')
        vali_batches = datapipe_vali.take(n_vali_batches)
        vali_batches = strategy.experimental_distribute_dataset(vali_batches)
    with strategy.scope():
        # Model
        model_name = config.get('DEFAULT', 'model')
        Model = models.get_model_class(model_name)
        model = Model(config, debug=FLAGS.debug)
        model.register_trainable()
        # Optimizer (Adam), optionally with exponential LR decay
        lr = config.getfloat('DEFAULT', 'lr')
        lr_decay_steps = config.getint('DEFAULT', 'lr_decay_steps', fallback=-1)
        if lr_decay_steps > 0:
            lr_decay_rate = config.getfloat('DEFAULT', 'lr_decay_rate')
            lr = tf.keras.optimizers.schedules.ExponentialDecay(
                lr, decay_steps=lr_decay_steps, decay_rate=lr_decay_rate)
        kwargs = {'learning_rate': lr, 'amsgrad': True}
        # Gradient clipping: at most one of clipnorm/clipvalue may be active
        # (a non-positive value means "off")
        clipnorm = config.getfloat('DEFAULT', 'clipnorm')
        clipvalue = config.getfloat('DEFAULT', 'clipvalue')
        err_msg = \
            "Both `clipnorm` and `clipvalue` are active -- turn one off"
        if clipnorm > 0:
            assert clipvalue < 0, err_msg
            kwargs['clipnorm'] = clipnorm
        if clipvalue > 0:
            assert clipnorm < 0, err_msg
            kwargs['clipvalue'] = clipvalue
        optimizer = tf.keras.optimizers.Adam(**kwargs)
        # Resume from checkpoint, if any
        ckptdir = join(outdir, 'checkpoints')
        assert model.trainable_registered, (
            "Register the trainable layers to have them tracked by the "
            "checkpoint")
        ckpt = tf.train.Checkpoint(
            step=tf.Variable(0), optimizer=optimizer, net=model)
        keep_recent_epochs = config.getint('DEFAULT', 'keep_recent_epochs')
        if keep_recent_epochs <= 0:
            keep_recent_epochs = None  # keep all epochs
        ckptmanager = tf.train.CheckpointManager(
            ckpt, ckptdir, max_to_keep=keep_recent_epochs)
        ckpt.restore(ckptmanager.latest_checkpoint)
        if ckptmanager.latest_checkpoint:
            logger.info(
                "Resumed from step:\n\t%s", ckptmanager.latest_checkpoint)
        else:
            logger.info("Started from scratch")
        # Summary directories
        writer_train = tf.summary.create_file_writer(
            join(outdir, 'summary_train'))
        writer_vali = tf.summary.create_file_writer(
            join(outdir, 'summary_vali'))
        # Visualization path templates; {e}/{b} are filled per epoch/batch
        train_vis_epoch_dir = join(outdir, 'vis_train', 'epoch{e:09d}')
        vali_vis_epoch_dir = join(outdir, 'vis_vali', 'epoch{e:09d}')
        # Bounded queues so on-disk visualizations mirror checkpoint retention
        train_vis_epoch_dir_deque = deque([], keep_recent_epochs)
        vali_vis_epoch_dir_deque = deque([], keep_recent_epochs)
        train_vis_batch_rawf = join(
            train_vis_epoch_dir, 'batch{b:09d}_raw.pickle')
        vali_vis_batch_rawf = join(
            vali_vis_epoch_dir, 'batch{b:09d}_raw.pickle')
        train_vis_batch_dir = join(train_vis_epoch_dir, 'batch{b:09d}')
        vali_vis_batch_dir = join(vali_vis_epoch_dir, 'batch{b:09d}')
        train_vis_batches_comp = join(train_vis_epoch_dir, 'all')
        vali_vis_batches_comp = join(vali_vis_epoch_dir, 'all')  # add proper
        # extension yourself in your overriding function (this makes the
        # pipeline general and not specific to any model)
        # ====== Training loop ======
        epochs = config.getint('DEFAULT', 'epochs')
        vis_train_batches = config.getint('DEFAULT', 'vis_train_batches')
        ckpt_period = config.getint('DEFAULT', 'ckpt_period')
        vali_period = config.getint('DEFAULT', 'vali_period')
        # NOTE(review): ckpt.step appears to count epochs here, despite the
        # name -- each outer iteration bumps it once
        step_restored = ckpt.step.numpy()
        for _ in tqdm(range(step_restored, epochs), desc="Training epochs"):
            # ------ Train on all batches ------
            batch_loss, batch_vis, batch_time = [], [], []
            for batch_i, batch in enumerate(datapipe_train):
                t0 = time()
                loss, to_vis = distributed_train_step_decor(
                    strategy, model, batch, optimizer, global_bs_train)
                batch_time.append(time() - t0)
                batch_loss.append(loss)
                # Only the first few batches are visualized
                if batch_i < vis_train_batches:
                    batch_vis.append(to_vis)
                if FLAGS.debug:
                    logger.warn(
                        "Debug mode: skipping the rest of this epoch")
                    break
            assert batch_time, "Dataset is empty"
            # Record step
            ckpt.step.assign_add(1)
            step = ckpt.step.numpy()
            # Checkpoint and summarize/visualize training
            if step % ckpt_period == 0:
                # Save checkpoint
                saved_path = ckptmanager.save()
                logger.info("Checkpointed step %s:\n\t%s", step, saved_path)
                # Summarize training
                with writer_train.as_default():
                    tf.summary.scalar(
                        "loss_train", tf.reduce_mean(batch_loss), step=step)
                    tf.summary.scalar(
                        "batch_time_train", tf.reduce_mean(batch_time),
                        step=step)
                    vis_dirs = []
                    for batch_i, to_vis in enumerate(batch_vis):
                        raw_f = train_vis_batch_rawf.format(e=step, b=batch_i)
                        vis_dir = train_vis_batch_dir.format(e=step, b=batch_i)
                        model.vis_batch(
                            to_vis, vis_dir, mode='train', dump_raw_to=raw_f)
                        vis_dirs.append(vis_dir)
                    # Generate a compilation (e.g., HTML) of all visualizations
                    comp_f = train_vis_batches_comp.format(e=step)
                    view_at = model.compile_batch_vis(
                        vis_dirs, comp_f, mode='train')
                    if view_at is not None:
                        tf.summary.text("vis_train", view_at, step=step)
                # Evict stale visualization directories from disk
                maintain_epoch_queue(
                    train_vis_epoch_dir_deque,
                    train_vis_epoch_dir.format(e=step))
            # ------ Validation ------
            if vali_batches is not None and vali_period > 0 \
                    and step % vali_period == 0:
                # Run validation on all validation batches
                batch_loss, batch_vis = [], []
                for batch_i, batch in enumerate(vali_batches):
                    # Validate on this validation batch
                    loss, to_vis = distributed_vali_step(
                        strategy, model, batch, global_bs_vali)
                    batch_loss.append(loss)
                    batch_vis.append(to_vis)
                # Summarize/visualize validation
                with writer_vali.as_default():
                    tf.summary.scalar(
                        "loss_vali", tf.reduce_mean(batch_loss), step=step)
                    vis_dirs = []
                    for batch_i, to_vis in enumerate(batch_vis):
                        raw_f = vali_vis_batch_rawf.format(e=step, b=batch_i)
                        vis_dir = vali_vis_batch_dir.format(e=step, b=batch_i)
                        model.vis_batch(
                            to_vis, vis_dir, mode='vali', dump_raw_to=raw_f)
                        vis_dirs.append(vis_dir)
                    # Generate a compilation (e.g., HTML) of all visualizations
                    comp_f = vali_vis_batches_comp.format(e=step)
                    view_at = model.compile_batch_vis(
                        vis_dirs, comp_f, mode='vali')
                    if view_at is not None:
                        tf.summary.text("vis_vali", view_at, step=step)
                # Evict stale visualization directories from disk
                maintain_epoch_queue(
                    vali_vis_epoch_dir_deque, vali_vis_epoch_dir.format(e=step))
def get_strategy():
    """Builds the tf.distribute strategy matching the --device flag.

    Returns:
        tf.distribute.Strategy: a OneDeviceStrategy pinned to the CPU, or a
            MirroredStrategy spanning the available GPUs.

    Raises:
        NotImplementedError: if --device is neither 'cpu' nor 'gpu'.
    """
    device = FLAGS.device
    if device == 'cpu':
        return tf.distribute.OneDeviceStrategy('/cpu:0')
    if device == 'gpu':
        return tf.distribute.MirroredStrategy()
    raise NotImplementedError(device)
# May be decorated into a tf.function, depending on whether in debug mode
def distributed_train_step(strategy, model, batch, optimizer, global_bs):
    """Runs one optimizer step on every replica and aggregates the results."""
    assert model.trainable_registered, \
        "Register the trainable layers before using `trainable_variables`"

    def _replica_step(replica_batch):
        # Forward pass recorded on the tape so gradients can be taken
        with tf.GradientTape() as tape:
            pred, gt, loss_kwargs, partial_to_vis = model(
                replica_batch, mode='train')
            loss_kwargs['keep_batch'] = True  # keep the batch dimension
            per_example_loss = model.compute_loss(pred, gt, **loss_kwargs)
            weighted_loss = tf.nn.compute_average_loss(
                per_example_loss, global_batch_size=global_bs)
        grads = tape.gradient(weighted_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return weighted_loss, partial_to_vis

    # Each GPU takes a step
    weighted_loss, partial_to_vis = strategy.run(_replica_step, args=(batch,))
    # Aggregate across GPUs
    return aggeregate_dstributed(strategy, weighted_loss, partial_to_vis)
# Not using tf.function for validation step because it can become very slow
# when there is a long loop. Given validation step is likely called relatively
# infrequently, eager should be fine
def distributed_vali_step(strategy, model, batch, global_bs):
    """Runs one forward-only validation step on every replica."""

    def _replica_step(replica_batch):
        # Forward pass only -- no gradients during validation
        pred, gt, loss_kwargs, partial_to_vis = model(
            replica_batch, mode='vali')
        loss_kwargs['keep_batch'] = True  # keep the batch dimension
        per_example_loss = model.compute_loss(pred, gt, **loss_kwargs)
        weighted_loss = tf.nn.compute_average_loss(
            per_example_loss, global_batch_size=global_bs)
        return weighted_loss, partial_to_vis

    # Each GPU takes a step
    weighted_loss, partial_to_vis = strategy.run(_replica_step, args=(batch,))
    # Aggregate across GPUs
    return aggeregate_dstributed(strategy, weighted_loss, partial_to_vis)
def aggeregate_dstributed(strategy, weighted_loss, partial_to_vis):
    """Aggregates per-replica step results into full-batch values.

    NOTE(review): the name is misspelled ("aggregate_distributed") but is
    kept as-is because callers in this module reference it by this name.
    """
    # Per-replica losses are already weighted by global batch size, so a
    # plain sum across replicas yields the global average loss
    loss = strategy.reduce(
        tf.distribute.ReduceOp.SUM, weighted_loss, axis=None)
    # Concatenate each per-replica visualization item back into a full batch
    to_vis = {
        key: tf.concat(
            tf.nest.flatten(val, expand_composites=True), axis=0)
        for key, val in partial_to_vis.items()}
    return loss, to_vis
def maintain_epoch_queue(queue, new_epoch_dir):
    """Registers a new epoch directory and deletes evicted ones from disk."""
    # A bounded deque: pushing on the left may silently drop the oldest entry
    queue.appendleft(new_epoch_dir)
    parent_dir = dirname(new_epoch_dir)
    for epoch_dir in xm.os.sortglob(parent_dir, '*'):
        if epoch_dir not in queue:  # already evicted from queue (FIFO)
            rmtree(epoch_dir)
if __name__ == '__main__':
    # absl's app.run parses command-line flags, then invokes main
    app.run(main)
| |
"""Support for displaying collected data over SNMP."""
from datetime import timedelta
import logging
import pysnmp.hlapi.asyncio as hlapi
from pysnmp.hlapi.asyncio import (
CommunityData,
ContextData,
ObjectIdentity,
ObjectType,
SnmpEngine,
UdpTransportTarget,
UsmUserData,
getCmd,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from .const import (
CONF_ACCEPT_ERRORS,
CONF_AUTH_KEY,
CONF_AUTH_PROTOCOL,
CONF_BASEOID,
CONF_COMMUNITY,
CONF_DEFAULT_VALUE,
CONF_PRIV_KEY,
CONF_PRIV_PROTOCOL,
CONF_VERSION,
DEFAULT_AUTH_PROTOCOL,
DEFAULT_COMMUNITY,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_PRIV_PROTOCOL,
DEFAULT_VERSION,
MAP_AUTH_PROTOCOLS,
MAP_PRIV_PROTOCOLS,
SNMP_VERSIONS,
)
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

# How often Home Assistant polls the sensor for a fresh SNMP reading.
SCAN_INTERVAL = timedelta(seconds=10)
# Extend the base sensor platform schema with the SNMP-specific YAML options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_BASEOID): cv.string,
        vol.Optional(CONF_ACCEPT_ERRORS, default=False): cv.boolean,
        vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
        vol.Optional(CONF_DEFAULT_VALUE): cv.string,
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In(SNMP_VERSIONS),
        # The options below are only meaningful for SNMP version 3
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_AUTH_KEY): cv.string,
        vol.Optional(CONF_AUTH_PROTOCOL, default=DEFAULT_AUTH_PROTOCOL): vol.In(
            MAP_AUTH_PROTOCOLS
        ),
        vol.Optional(CONF_PRIV_KEY): cv.string,
        vol.Optional(CONF_PRIV_PROTOCOL, default=DEFAULT_PRIV_PROTOCOL): vol.In(
            MAP_PRIV_PROTOCOLS
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the SNMP sensor."""
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    baseoid = config.get(CONF_BASEOID)
    unit = config.get(CONF_UNIT_OF_MEASUREMENT)
    version = config.get(CONF_VERSION)
    accept_errors = config.get(CONF_ACCEPT_ERRORS)
    default_value = config.get(CONF_DEFAULT_VALUE)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    if value_template is not None:
        value_template.hass = hass

    if version == "3":
        username = config.get(CONF_USERNAME)
        authkey = config.get(CONF_AUTH_KEY)
        authproto = config.get(CONF_AUTH_PROTOCOL)
        privkey = config.get(CONF_PRIV_KEY)
        privproto = config.get(CONF_PRIV_PROTOCOL)
        # Without a key, the matching protocol must be downgraded to "none"
        if not authkey:
            authproto = "none"
        if not privkey:
            privproto = "none"
        auth_data = UsmUserData(
            username,
            authKey=authkey or None,
            privKey=privkey or None,
            authProtocol=getattr(hlapi, MAP_AUTH_PROTOCOLS[authproto]),
            privProtocol=getattr(hlapi, MAP_PRIV_PROTOCOLS[privproto]),
        )
    else:
        community = config.get(CONF_COMMUNITY)
        auth_data = CommunityData(community, mpModel=SNMP_VERSIONS[version])
    request_args = [
        SnmpEngine(),
        auth_data,
        UdpTransportTarget((host, port)),
        ContextData(),
    ]
    # Probe the device once so misconfiguration is caught at setup time
    errindication, _, _, _ = await getCmd(
        *request_args, ObjectType(ObjectIdentity(baseoid))
    )
    if errindication and not accept_errors:
        _LOGGER.error("Please check the details in the configuration file")
        return
    data = SnmpData(request_args, baseoid, accept_errors, default_value)
    async_add_entities([SnmpSensor(data, name, unit, value_template)], True)
class SnmpSensor(Entity):
    """Sensor entity exposing the latest value fetched over SNMP."""

    def __init__(self, data, name, unit_of_measurement, value_template):
        """Initialize the sensor."""
        self.data = data
        self._name = name
        self._state = None
        self._unit_of_measurement = unit_of_measurement
        self._value_template = value_template

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    async def async_update(self):
        """Refresh the backing data and recompute the entity state."""
        await self.data.async_update()
        raw = self.data.value
        if raw is None:
            # No reading available (e.g., SNMP error not yet resolved)
            self._state = STATE_UNKNOWN
            return
        if self._value_template is not None:
            # Post-process the raw reading through the user-supplied template
            raw = self._value_template.async_render_with_possible_json_value(
                raw, STATE_UNKNOWN
            )
        self._state = raw
class SnmpData:
    """Fetches and caches the current value of a single SNMP OID."""

    def __init__(self, request_args, baseoid, accept_errors, default_value):
        """Initialize the data object."""
        self._request_args = request_args
        self._baseoid = baseoid
        self._accept_errors = accept_errors
        self._default_value = default_value
        # Last value read; stays None until the first successful update
        self.value = None

    async def async_update(self):
        """Get the latest data from the remote SNMP capable host."""
        errindication, errstatus, errindex, restable = await getCmd(
            *self._request_args, ObjectType(ObjectIdentity(self._baseoid))
        )
        failed = errindication or errstatus
        if failed and self._accept_errors:
            # Tolerate the failure and fall back to the configured default
            self.value = self._default_value
        elif errindication:
            _LOGGER.error("SNMP error: %s", errindication)
        elif errstatus:
            _LOGGER.error(
                "SNMP error: %s at %s",
                errstatus.prettyPrint(),
                errindex and restable[-1][int(errindex) - 1] or "?",
            )
        else:
            # Success: keep the string form of the last variable binding
            for resrow in restable:
                self.value = str(resrow[-1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.