# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Visualise bundle adjustment results.
Example to run:
python run_visualise.py --filename KV4jIAq3WJo_155_165.pkl
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle as pickle
import errno
import os
import subprocess
import sys
from absl import flags
import cv2
import matplotlib.pyplot as plt
import numpy as np
import plot_utils
import skvideo.io
from third_party.activity_net.download import download_clip
import third_party.hmr.renderer as vis_util
# Input
flags.DEFINE_string('filename', '', 'The annotation pickle file.')
flags.DEFINE_string('smpl_face_path', 'smpl_faces.npy',
'Path to smpl model face file.')
# Output
flags.DEFINE_string(
'output_dir', 'results', 'Where to write results to. '
'Directory automatically created.')
def mkdir(dirname):
"""Create directory if it does not exist."""
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def im_save_cv(image, filename):
"""Write image with OpenCV, converting from BGR to RGB format."""
cv2.imwrite(filename, image[:, :, (2, 1, 0)])
def visualize(img,
joints,
vertices,
camera,
image_name,
output_dir,
renderer=None,
color_id=0):
"""Renders the result in original image coordinate frame.
Args:
img: The image
joints: 2D keypoints, in the image coordinate frame.
vertices: Vertices of the SMPL mesh.
camera: Camera predicted.
image_name: Name of image for saving.
output_dir: Directory to save results to
renderer: Renderer object to use.
color_id: 0 is blue, and 1 is light pink. For the visualisation. The
colours are defined in the renderer.
"""
cam_for_render = camera * img.shape[0]
vert_shifted = np.copy(vertices)
# Approximate an orthographic camera:
# move points away and adjust the focal length to zoom in.
vert_shifted[:, -1] = vert_shifted[:, -1] + 100.
cam_for_render[0] *= 100.
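# (Scaling the focal term by the same factor used to push the vertices back
# keeps the projected size roughly constant while flattening perspective
# distortion, which is what makes the projection approximately orthographic.)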
rend_img_overlay = renderer(
vert_shifted,
cam=cam_for_render,
img=img,
do_alpha=True,
color_id=color_id)
rend_img = renderer(
vert_shifted,
cam=cam_for_render,
img_size=img.shape[:2],
color_id=color_id)
rend_img_vp1 = renderer.rotated(
vert_shifted,
60,
cam=cam_for_render,
img_size=img.shape[:2],
color_id=color_id)
rend_img_vp2 = renderer.rotated(
vert_shifted,
-60,
cam=cam_for_render,
img_size=img.shape[:2],
color_id=color_id)
save_name = os.path.join(output_dir, image_name + '.jpg')
fig = plot_utils.plot_summary_figure(img, joints, rend_img_overlay, rend_img,
rend_img_vp1, rend_img_vp2, save_name)
plt.close(fig)
def transform_keypoints_to_image(keypoints, img):
"""Transform keypoints from range [0, 1] to image coordinates."""
keypoints[:, :, 0] *= img.shape[0]
keypoints[:, :, 1] *= img.shape[0]  # The saved keypoints are scaled by image height.
return keypoints
def parse_filename(filename):
"""Parse filename of the pickle file."""
name = os.path.basename(filename)
name = name.replace('.pkl', '')
tokens = name.split('_')
end_time = int(tokens[-1])
start_time = int(tokens[-2])
video_id = '_'.join(tokens[0:-2])
return video_id, start_time, end_time
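# For example, the filename from the module docstring parses as:
#   parse_filename('KV4jIAq3WJo_155_165.pkl') -> ('KV4jIAq3WJo', 155, 165)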
def get_frame_rate(video_path):
"""Get frame rate of the video from its metadata."""
meta_data = skvideo.io.ffprobe(video_path)
if 'video' in meta_data.keys():
meta_data = meta_data['video']
if '@avg_frame_rate' in meta_data:
frame_rate = eval(meta_data['@avg_frame_rate'])
else:
frame_rate = None
return frame_rate
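# ffprobe typically reports '@avg_frame_rate' as a fraction string such as
# '30000/1001'; with true division in effect (see the __future__ import
# above), eval() turns that into ~29.97.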
def video_from_images(directory, save_name):
"""Create video from images saved in directory using ffmpeg."""
command = [
'ffmpeg', '-framerate', '25', '-pattern_type',
'glob -i \'{}/*.jpg\''.format(directory), '-c:v', 'libx264', '-pix_fmt',
'yuv420p', '-loglevel', 'panic', save_name
]
command = ' '.join(command)
try:
_ = subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except:
pass
def load_pickle(filename):
"""Read pickle file."""
with open(filename) as fp:
data = pickle.load(fp)
return data
def main(config):
data = load_pickle(config.filename)
video_id, start_time, end_time = parse_filename(config.filename)
video_path = '/tmp/' + video_id + '.mp4'
status, message = download_clip(video_id, video_path, start_time, end_time)
if not status:
print('Video not downloaded')
print(message)
sys.exit()
video = skvideo.io.vread(video_path)
frame_rate = get_frame_rate(video_path)
if not frame_rate:
print('Error. Could not determine frame rate of video')
sys.exit()
output_dir = os.path.join(config.output_dir, video_id)
mkdir(output_dir)
keypoints = transform_keypoints_to_image(data['2d_keypoints'],
video[0].squeeze())
renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)
for i in range(data['time'].size):
idx = int(round(data['time'][i] * frame_rate))
if idx >= video.shape[0]:
break
img = video[idx].squeeze()
image_name = '{:>04}'.format(i)
visualize(
img,
joints=keypoints[i].squeeze(),
vertices=data['vertices'][i].squeeze(),
camera=data['camera'][i].squeeze(),
image_name=image_name,
output_dir=output_dir,
renderer=renderer)
if i % 20 == 0:
print('Processed {:3d} / {:3d}'.format(i + 1, data['time'].size))
video_from_images(output_dir, os.path.join(output_dir, video_id + '.mp4'))
if __name__ == '__main__':
config_ = flags.FLAGS
config_(sys.argv)
main(config_)
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
"""Element Software volume clone"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_volume_clone
short_description: NetApp Element Software Create Volume Clone
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Create volume clones on Element OS
options:
name:
description:
- The name of the clone.
required: true
src_volume_id:
description:
- The id of the src volume to clone. id may be a numeric identifier or a volume name.
required: true
src_snapshot_id:
description:
- The id of the snapshot to clone. id may be a numeric identifier or a snapshot name.
account_id:
description:
- Account ID for the owner of this cloned volume. id may be a numeric identifier or an account name.
required: true
attributes:
description: A YAML dictionary of attributes that you would like to apply on this cloned volume.
size:
description:
- The size of the cloned volume in (size_unit).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
access:
choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
description:
- Access allowed for the volume.
- If unspecified, the access settings of the clone will be the same as the source.
- readOnly - Only read operations are allowed.
- readWrite - Reads and writes are allowed.
- locked - No reads or writes are allowed.
- replicationTarget - Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked.
'''
EXAMPLES = """
- name: Clone Volume
na_elementsw_volume_clone:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
name: CloneAnsibleVol
src_volume_id: 123
src_snapshot_id: 41
account_id: 3
size: 1
size_unit: gb
access: readWrite
attributes: {"virtual_network_id": 12345}
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_elementsw_module import NaElementSWModule
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class ElementOSVolumeClone(object):
"""
Contains methods to parse arguments,
derive details of Element Software objects
and send requests to Element OS via
the Solidfire SDK
"""
def __init__(self):
"""
Parse arguments, setup state variables,
check parameters and ensure the SDK is installed
"""
self._size_unit_map = netapp_utils.SF_BYTE_MAP
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=True),
src_volume_id=dict(required=True),
src_snapshot_id=dict(),
account_id=dict(required=True),
attributes=dict(type='dict', default=None),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
access=dict(type='str',
default=None, choices=['readOnly', 'readWrite',
'locked', 'replicationTarget']),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
# set up state variables
self.name = parameters['name']
self.src_volume_id = parameters['src_volume_id']
self.src_snapshot_id = parameters['src_snapshot_id']
self.account_id = parameters['account_id']
self.attributes = parameters['attributes']
self.size_unit = parameters['size_unit']
if parameters['size'] is not None:
self.size = parameters['size'] * \
self._size_unit_map[self.size_unit]
else:
self.size = None
self.access = parameters['access']
if HAS_SF_SDK is False:
self.module.fail_json(
msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
self.elementsw_helper = NaElementSWModule(self.sfe)
# add telemetry attributes
if self.attributes is not None:
self.attributes.update(self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone'))
else:
self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_volume_clone')
def get_account_id(self):
"""
Return account id if found
"""
try:
# Update and return self.account_id
self.account_id = self.elementsw_helper.account_exists(self.account_id)
return self.account_id
except Exception as err:
self.module.fail_json(msg="Error: account_id %s does not exist" % self.account_id, exception=to_native(err))
def get_snapshot_id(self):
"""
Return snapshot details if found
"""
src_snapshot = self.elementsw_helper.get_snapshot(self.src_snapshot_id, self.src_volume_id)
# Update and return self.src_snapshot_id
if src_snapshot is not None:
self.src_snapshot_id = src_snapshot.snapshot_id
# Return src_snapshot
return self.src_snapshot_id
return None
def get_src_volume_id(self):
"""
Return volume id if found
"""
src_vol_id = self.elementsw_helper.volume_exists(self.src_volume_id, self.account_id)
if src_vol_id is not None:
# Update and return self.volume_id
self.src_volume_id = src_vol_id
# Return src_volume_id
return self.src_volume_id
return None
def clone_volume(self):
"""Clone Volume from source"""
try:
self.sfe.clone_volume(volume_id=self.src_volume_id,
name=self.name,
new_account_id=self.account_id,
new_size=self.size,
access=self.access,
snapshot_id=self.src_snapshot_id,
attributes=self.attributes)
except Exception as err:
self.module.fail_json(msg="Error creating clone %s of size %s" % (self.name, self.size), exception=to_native(err))
def apply(self):
"""Perform pre-checks, call functions and exit"""
changed = False
result_message = ""
if self.get_account_id() is None:
self.module.fail_json(msg="Account id not found: %s" % (self.account_id))
# there is only one state. other operations
# are part of the volume module
# ensure that a volume with the clone name
# isn't already present
if self.elementsw_helper.volume_exists(self.name, self.account_id) is None:
# check for the source volume
if self.get_src_volume_id() is not None:
# check for a valid snapshot
if self.src_snapshot_id and not self.get_snapshot_id():
self.module.fail_json(msg="Snapshot id not found: %s" % (self.src_snapshot_id))
# change required
changed = True
else:
self.module.fail_json(msg="Volume id not found %s" % (self.src_volume_id))
if changed:
if self.module.check_mode:
result_message = "Check mode, skipping changes"
else:
self.clone_volume()
result_message = "Volume cloned"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""Create object and call apply"""
volume_clone = ElementOSVolumeClone()
volume_clone.apply()
if __name__ == '__main__':
main()
import os
from netlib.http.http1 import read_response, read_request
from netlib import tcp, websockets, http, tutils
from netlib.http import status_codes
from netlib.tutils import treq
from netlib.exceptions import *
from .. import tservers
class WebSocketsEchoHandler(tcp.BaseHandler):
def __init__(self, connection, address, server):
super(WebSocketsEchoHandler, self).__init__(
connection, address, server
)
self.protocol = websockets.WebsocketsProtocol()
self.handshake_done = False
def handle(self):
while True:
if not self.handshake_done:
self.handshake()
else:
self.read_next_message()
def read_next_message(self):
frame = websockets.Frame.from_file(self.rfile)
self.on_message(frame.payload)
def send_message(self, message):
frame = websockets.Frame.default(message, from_client=False)
frame.to_file(self.wfile)
def handshake(self):
req = read_request(self.rfile)
key = self.protocol.check_client_handshake(req.headers)
preamble = 'HTTP/1.1 101 %s' % status_codes.RESPONSES.get(101)
self.wfile.write(preamble.encode() + b"\r\n")
headers = self.protocol.server_handshake_headers(key)
self.wfile.write(bytes(headers) + b"\r\n")
self.wfile.flush()
self.handshake_done = True
def on_message(self, message):
if message is not None:
self.send_message(message)
class WebSocketsClient(tcp.TCPClient):
def __init__(self, address, source_address=None):
super(WebSocketsClient, self).__init__(address, source_address)
self.protocol = websockets.WebsocketsProtocol()
self.client_nonce = None
def connect(self):
super(WebSocketsClient, self).connect()
preamble = b'GET / HTTP/1.1'
self.wfile.write(preamble + b"\r\n")
headers = self.protocol.client_handshake_headers()
self.client_nonce = headers["sec-websocket-key"].encode("ascii")
self.wfile.write(bytes(headers) + b"\r\n")
self.wfile.flush()
resp = read_response(self.rfile, treq(method=b"GET"))
server_nonce = self.protocol.check_server_handshake(resp.headers)
if not server_nonce == self.protocol.create_server_nonce(self.client_nonce):
self.close()
def read_next_message(self):
return websockets.Frame.from_file(self.rfile).payload
def send_message(self, message):
frame = websockets.Frame.default(message, from_client=True)
frame.to_file(self.wfile)
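# For reference only: RFC 6455 defines the server nonce checked in
# WebSocketsClient.connect() above as base64(SHA-1(client key + magic GUID)).
# The sketch below illustrates that rule; it is not netlib's
# create_server_nonce implementation.
import base64
import hashlib

_WEBSOCKETS_MAGIC = b"258EAFA5-E914-47DA-95CA-C5AB0E6D4E11"


def _expected_server_nonce(client_nonce):
    return base64.b64encode(
        hashlib.sha1(client_nonce + _WEBSOCKETS_MAGIC).digest())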
class TestWebSockets(tservers.ServerTestBase):
handler = WebSocketsEchoHandler
def __init__(self):
self.protocol = websockets.WebsocketsProtocol()
def random_bytes(self, n=100):
return os.urandom(n)
def echo(self, msg):
client = WebSocketsClient(("127.0.0.1", self.port))
client.connect()
client.send_message(msg)
response = client.read_next_message()
assert response == msg
def test_simple_echo(self):
self.echo(b"hello I'm the client")
def test_frame_sizes(self):
# length can fit in the 7 bit payload length
small_msg = self.random_bytes(100)
# 50kb, slightly larger than can fit in a 7 bit int
medium_msg = self.random_bytes(50000)
# 150kb, slightly larger than can fit in a 16 bit int
large_msg = self.random_bytes(150000)
self.echo(small_msg)
self.echo(medium_msg)
self.echo(large_msg)
def test_default_builder(self):
"""
default builder should always generate valid frames
"""
msg = self.random_bytes()
client_frame = websockets.Frame.default(msg, from_client=True)
server_frame = websockets.Frame.default(msg, from_client=False)
def test_serialization_bijection(self):
"""
Ensure that various frame types can be serialized/deserialized back
and forth between to_bytes() and from_bytes()
"""
for is_client in [True, False]:
for num_bytes in [100, 50000, 150000]:
frame = websockets.Frame.default(
self.random_bytes(num_bytes), is_client
)
frame2 = websockets.Frame.from_bytes(
frame.to_bytes()
)
assert frame == frame2
bytes = b'\x81\x03cba'
assert websockets.Frame.from_bytes(bytes).to_bytes() == bytes
def test_check_server_handshake(self):
headers = self.protocol.server_handshake_headers("key")
assert self.protocol.check_server_handshake(headers)
headers["Upgrade"] = "not_websocket"
assert not self.protocol.check_server_handshake(headers)
def test_check_client_handshake(self):
headers = self.protocol.client_handshake_headers("key")
assert self.protocol.check_client_handshake(headers) == "key"
headers["Upgrade"] = "not_websocket"
assert not self.protocol.check_client_handshake(headers)
class BadHandshakeHandler(WebSocketsEchoHandler):
def handshake(self):
client_hs = read_request(self.rfile)
self.protocol.check_client_handshake(client_hs.headers)
preamble = 'HTTP/1.1 101 %s\r\n' % status_codes.RESPONSES.get(101)
self.wfile.write(preamble.encode())
headers = self.protocol.server_handshake_headers(b"malformed key")
self.wfile.write(bytes(headers) + b"\r\n")
self.wfile.flush()
self.handshake_done = True
class TestBadHandshake(tservers.ServerTestBase):
"""
Ensure that the client disconnects if the server handshake is malformed
"""
handler = BadHandshakeHandler
def test(self):
with tutils.raises(TcpDisconnect):
client = WebSocketsClient(("127.0.0.1", self.port))
client.connect()
client.send_message(b"hello")
class TestFrameHeader:
def test_roundtrip(self):
def round(*args, **kwargs):
f = websockets.FrameHeader(*args, **kwargs)
f2 = websockets.FrameHeader.from_file(tutils.treader(bytes(f)))
assert f == f2
round()
round(fin=1)
round(rsv1=1)
round(rsv2=1)
round(rsv3=1)
round(payload_length=1)
round(payload_length=100)
round(payload_length=1000)
round(payload_length=10000)
round(opcode=websockets.OPCODE.PING)
round(masking_key=b"test")
def test_human_readable(self):
f = websockets.FrameHeader(
masking_key=b"test",
fin=True,
payload_length=10
)
assert repr(f)
f = websockets.FrameHeader()
assert repr(f)
def test_funky(self):
f = websockets.FrameHeader(masking_key=b"test", mask=False)
raw = bytes(f)
f2 = websockets.FrameHeader.from_file(tutils.treader(raw))
assert not f2.mask
def test_violations(self):
tutils.raises("opcode", websockets.FrameHeader, opcode=17)
tutils.raises("masking key", websockets.FrameHeader, masking_key=b"x")
def test_automask(self):
f = websockets.FrameHeader(mask=True)
assert f.masking_key
f = websockets.FrameHeader(masking_key=b"foob")
assert f.mask
f = websockets.FrameHeader(masking_key=b"foob", mask=0)
assert not f.mask
assert f.masking_key
class TestFrame:
def test_roundtrip(self):
def round(*args, **kwargs):
f = websockets.Frame(*args, **kwargs)
raw = bytes(f)
f2 = websockets.Frame.from_file(tutils.treader(raw))
assert f == f2
round(b"test")
round(b"test", fin=1)
round(b"test", rsv1=1)
round(b"test", opcode=websockets.OPCODE.PING)
round(b"test", masking_key=b"test")
def test_human_readable(self):
f = websockets.Frame()
assert repr(f)
def test_masker():
tests = [
[b"a"],
[b"four"],
[b"fourf"],
[b"fourfive"],
[b"a", b"aasdfasdfa", b"asdf"],
[b"a" * 50, b"aasdfasdfa", b"asdf"],
]
for i in tests:
m = websockets.Masker(b"abcd")
data = b"".join([m(t) for t in i])
data2 = websockets.Masker(b"abcd")(data)
assert data2 == b"".join(i)
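# A minimal sketch of the masking rule test_masker exercises (RFC 6455:
# payload octet i is XORed with key[i % 4]). Illustrative only -- not
# netlib's Masker, which also carries its offset across calls so data can
# be fed to it in chunks, as the test above does.
def _xor_mask(key, data):
    return bytes(b ^ key[i % 4] for i, b in enumerate(data))


# Masking is an involution: applying it twice restores the payload, which is
# what the final `data2 == b"".join(i)` assertion relies on.
assert _xor_mask(b"abcd", _xor_mask(b"abcd", b"payload")) == b"payload"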
"""Module used to handle a API Server."""
import logging
import os
import sys
import warnings
from urllib.error import URLError
from urllib.request import urlopen
from flask import Flask, request, send_from_directory
from flask_socketio import SocketIO, join_room, leave_room
class APIServer:
"""Api server used to provide Kytos Controller routes."""
#: tuple: Default Flask HTTP methods.
DEFAULT_METHODS = ('GET',)
_NAPP_PREFIX = "/api/{napp.username}/{napp.name}/"
_CORE_PREFIX = "/api/kytos/core/"
def __init__(self, app_name, listen='0.0.0.0', port=8181):
"""Start a Flask+SocketIO server.
Args:
app_name(string): String representing an App Name
listen (string): host name used by api server instance
port (int): Port number used by api server instance
"""
dirname = os.path.dirname(os.path.abspath(__file__))
self.flask_dir = os.path.join(dirname, '../web-ui')
self.log = logging.getLogger('api_server')
self.listen = listen
self.port = port
self.app = Flask(app_name, root_path=self.flask_dir)
self.server = SocketIO(self.app, async_mode='threading')
self._enable_websocket_rooms()
# Disable strict trailing-slash matching
self.app.url_map.strict_slashes = False
def _enable_websocket_rooms(self):
socket = self.server
socket.on_event('join', join_room)
socket.on_event('leave', leave_room)
def run(self):
"""Run the Flask API Server."""
try:
self.server.run(self.app, self.listen, self.port)
except OSError as exception:
msg = "Couldn't start API Server: {}".format(exception)
self.log.critical(msg)
sys.exit(msg)
def register_rest_endpoint(self, url, function, methods):
"""Deprecate in favor of @rest decorator."""
warnings.warn("From now on, use @rest decorator.", DeprecationWarning,
stacklevel=2)
if url.startswith('/'):
url = url[1:]
self._start_endpoint(f'/kytos/{url}', function, methods=methods)
def start_api(self):
"""Start this APIServer instance API.
Start /api/kytos/core/shutdown/ and status/ endpoints, web UI.
"""
self.register_core_endpoint('shutdown/', self.shutdown_api)
self.register_core_endpoint('status/', self.status_api)
self._register_web_ui()
def register_core_endpoint(self, rule, function):
"""Register an endpoint with the URL /api/kytos/core/<rule>.
Used by the controller, not by NApps.
"""
self._start_endpoint(self._CORE_PREFIX + rule, function)
def _register_web_ui(self):
"""Register routes to the admin-ui homepage."""
self.app.add_url_rule('/', self.web_ui.__name__, self.web_ui)
self.app.add_url_rule('/index.html', self.web_ui.__name__, self.web_ui)
@staticmethod
def status_api():
"""Display kytos status using the route ``/kytos/status/``."""
return '{"response": "running"}', 201
def stop_api_server(self):
"""Send a shutdown request to stop Api Server."""
try:
url = f'http://127.0.0.1:{self.port}/api/kytos/core/shutdown'
urlopen(url)
except URLError:
pass
def shutdown_api(self):
"""Handle shutdown requests received by Api Server.
This method must be called by kytos using the method
stop_api_server, otherwise this request will be ignored.
"""
allowed_host = ['127.0.0.1:'+str(self.port),
'localhost:'+str(self.port)]
if request.host not in allowed_host:
return "", 403
self.server.stop()
return 'Server shutting down...', 200
def web_ui(self):
"""Serve the index.html page for the admin-ui."""
return send_from_directory(self.flask_dir, 'index.html')
# BEGIN decorator methods
@staticmethod
def decorate_as_endpoint(rule, **options):
"""Decorate methods as REST endpoints.
Example for URL ``/api/myusername/mynapp/sayhello/World``:
.. code-block:: python3
from flask.json import jsonify
from kytos.core.napps import rest
@rest('sayhello/<string:name>')
def say_hello(name):
return jsonify({"data": f"Hello, {name}!"})
``@rest`` parameters are the same as Flask's ``@app.route``. You can
also add ``methods=['POST']``, for example.
As we don't have the NApp instance now, we store the parameters in a
method attribute in order to add the route later, after we have both
APIServer and NApp instances.
"""
def store_route_params(function):
"""Store ``Flask`` ``@route`` parameters in a method attribute.
There can be many @route decorators in a single function.
"""
# To support any order: @classmethod, @rest or @rest, @classmethod
# class and static decorators return a descriptor with the function
# in __func__.
if isinstance(function, (classmethod, staticmethod)):
inner = function.__func__
else:
inner = function
# Add route parameters
if not hasattr(inner, 'route_params'):
inner.route_params = []
inner.route_params.append((rule, options))
# Return the same function, now with "route_params" attribute
return function
return store_route_params
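# Illustrative only: stacking the decorator accumulates entries, e.g.
#
#     @rest('v2/sayhello/<string:name>')
#     @rest('sayhello/<string:name>')
#     def say_hello(self, name):
#         ...
#
# leaves say_hello.route_params == [('sayhello/<string:name>', {}),
#                                   ('v2/sayhello/<string:name>', {})],
# which register_napp_endpoints later turns into two Flask URL rules.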
def register_napp_endpoints(self, napp):
"""Add all NApp REST endpoints with @rest decorator.
URLs will be prefixed with ``/api/{username}/{napp_name}/``.
Args:
napp (Napp): Napp instance to register new endpoints.
"""
for function in self._get_decorated_functions(napp):
for rule, options in function.route_params:
absolute_rule = self.get_absolute_rule(rule, napp)
self._start_endpoint(absolute_rule, function, **options)
@staticmethod
def _get_decorated_functions(napp):
"""Return ``napp``'s methods having the @rest decorator."""
for name in dir(napp):
if not name.startswith('_'): # discarding private names
pub_attr = getattr(napp, name)
if callable(pub_attr) and hasattr(pub_attr, 'route_params'):
yield pub_attr
@classmethod
def get_absolute_rule(cls, rule, napp):
"""Prefix the rule, e.g. "flow" to "/api/user/napp/flow".
This code is used by kytos-utils when generating an OpenAPI skel.
"""
# The NApp prefix already ends with a slash, so strip a leading slash
# from the rule to avoid producing a double slash in the final URL.
relative_rule = rule[1:] if rule.startswith('/') else rule
return cls._NAPP_PREFIX.format(napp=napp) + relative_rule
# END decorator methods
def _start_endpoint(self, rule, function, **options):
"""Start ``function``'s endpoint.
Forward parameters to ``Flask.add_url_rule`` mimicking Flask
``@route`` decorator.
"""
endpoint = options.pop('endpoint', None)
self.app.add_url_rule(rule, endpoint, function, **options)
self.log.info('Started %s - %s', rule,
', '.join(options.get('methods', self.DEFAULT_METHODS)))
def remove_napp_endpoints(self, napp):
"""Remove all decorated endpoints.
Args:
napp (Napp): Napp instance to look for rest-decorated methods.
"""
prefix = self._NAPP_PREFIX.format(napp=napp)
indexes = []
for index, rule in enumerate(self.app.url_map.iter_rules()):
if rule.rule.startswith(prefix):
self.app.view_functions.pop(rule.endpoint)
indexes.append(index)
self.log.info('Stopped %s - %s', rule, ','.join(rule.methods))
for index in reversed(indexes):
# pylint: disable=protected-access
self.app.url_map._rules.pop(index)
# pylint: enable=protected-access
self.log.info('The REST endpoints from %s were disabled.', prefix)
"""
Import map data.
"""
from eve_db.models import map as map_models
from importer_classes import SQLImporter, parse_int_bool, parse_char_notnull
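# Each importer below maps one EVE SDE table onto a Django model:
#   DEPENDENCIES -- tables that must be imported before this one,
#   pks          -- (model_field, sde_column) pairs identifying a row,
#   field_map    -- (model_field, sde_column[, parser]) tuples, where the
#                   optional parser (e.g. parse_int_bool) converts the raw
#                   SDE value before it is stored.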
class Importer_mapUniverse(SQLImporter):
model = map_models.MapUniverse
pks = (('id', 'universeID'),)
field_map = (('name', 'universeName'),
('x', 'x'),
('x_min', 'xMin'),
('x_max', 'xMax'),
('y', 'y'),
('y_min', 'yMin'),
('y_max', 'yMax'),
('z', 'z'),
('z_min', 'zMin'),
('z_max', 'zMax'),
('radius', 'radius'))
class Importer_mapRegions(SQLImporter):
DEPENDENCIES = ['chrFactions']
model = map_models.MapRegion
pks = (('id', 'regionID'),)
field_map = (('name', 'regionName'),
('x', 'x'),
('x_min', 'xMin'),
('x_max', 'xMax'),
('y', 'y'),
('y_min', 'yMin'),
('y_max', 'yMax'),
('z', 'z'),
('z_min', 'zMin'),
('z_max', 'zMax'),
('faction_id', 'factionID'),
('radius', 'radius'))
class Importer_mapRegionJumps(SQLImporter):
DEPENDENCIES = ['mapRegions']
model = map_models.MapRegionJump
pks = (('from_region', 'fromRegionID'), ('to_region', 'toRegionID'))
class Importer_mapConstellations(SQLImporter):
DEPENDENCIES = ['chrFactions', 'mapRegions']
model = map_models.MapConstellation
pks = (('id', 'constellationID'),)
field_map = (('name', 'constellationName'),
('x', 'x'),
('x_min', 'xMin'),
('x_max', 'xMax'),
('y', 'y'),
('y_min', 'yMin'),
('y_max', 'yMax'),
('z', 'z'),
('z_min', 'zMin'),
('z_max', 'zMax'),
('region_id', 'regionID'),
('faction_id', 'factionID'),
('radius', 'radius'))
class Importer_mapConstellationJumps(SQLImporter):
DEPENDENCIES = ['mapRegions', 'mapConstellations']
model = map_models.MapConstellationJump
pks = (('from_constellation', 'fromConstellationID'),
('to_constellation', 'toConstellationID'))
field_map = (('from_region_id', 'fromRegionID'),
('to_region_id', 'toRegionID'))
class Importer_mapSolarSystems(SQLImporter):
DEPENDENCIES = ['chrFactions', 'mapRegions', 'mapConstellations',
'invTypes']
model = map_models.MapSolarSystem
pks = (('id', 'solarSystemID'),)
field_map = (('name', 'solarSystemName'),
('x', 'x'),
('x_min', 'xMin'),
('x_max', 'xMax'),
('y', 'y'),
('y_min', 'yMin'),
('y_max', 'yMax'),
('z', 'z'),
('z_min', 'zMin'),
('z_max', 'zMax'),
('radius', 'radius'),
('luminosity', 'luminosity'),
('security_level', 'security'),
('security_class', 'securityClass', parse_char_notnull),
('is_border_system', 'border', parse_int_bool),
('is_fringe_system', 'fringe', parse_int_bool),
('is_corridor_system', 'corridor', parse_int_bool),
('is_hub_system', 'hub', parse_int_bool),
('is_international', 'international', parse_int_bool),
('has_interregional_link', 'regional', parse_int_bool),
('has_interconstellational_link', 'constellation', parse_int_bool),
('region_id', 'regionID'),
('faction_id', 'factionID'),
('constellation_id', 'constellationID'),
('sun_type_id', 'sunTypeID'))
class Importer_mapSolarSystemJumps(SQLImporter):
DEPENDENCIES = ['mapRegions', 'mapConstellations', 'mapSolarSystems']
model = map_models.MapSolarSystemJump
pks = (('from_solar_system', 'fromSolarSystemID'),
('to_solar_system', 'toSolarSystemID'))
field_map = (('from_region_id', 'fromRegionID'),
('to_region_id', 'toRegionID'),
('from_constellation_id', 'fromConstellationID'),
('to_constellation_id', 'toConstellationID'))
class Importer_mapJumps(SQLImporter):
DEPENDENCIES = ['mapDenormalize']
model = map_models.MapJump
pks = (('origin_gate', 'stargateID'),)
field_map = (('destination_gate_id', 'destinationID'),)
class Importer_mapDenormalize(SQLImporter):
DEPENDENCIES = ['invTypes', 'invGroups', 'mapSolarSystems',
'mapConstellations', 'mapRegions']
model = map_models.MapDenormalize
pks = (('id', 'itemID'),)
field_map = (('orbit_id', 'orbitID'),
('x', 'x'),
('y', 'y'),
('z', 'z'),
('radius', 'radius'),
('name', 'itemName'),
('security', 'security'),
('celestial_index', 'celestialIndex'),
('orbit_index', 'orbitIndex'),
('type_id', 'typeID'),
('group_id', 'groupID'),
('solar_system_id', 'solarSystemID'),
('constellation_id', 'constellationID'),
('region_id', 'regionID'))
class Importer_mapLandmarks(SQLImporter):
DEPENDENCIES = ['mapSolarSystems']
model = map_models.MapLandmark
pks = (('id', 'landmarkID'),)
field_map = (('name', 'landmarkName'),
('x', 'x'),
('y', 'y'),
('z', 'z'),
('solar_system_id', 'locationID'),
('icon_id', 'iconID'))
class Importer_mapCelestialStatistics(SQLImporter):
DEPENDENCIES = ['mapDenormalize']
model = map_models.MapCelestialStatistic
pks = (('celestial', 'celestialID'),)
field_map = (('temperature', 'temperature'),
('spectral_class', 'spectralClass'),
('luminosity', 'luminosity'),
('age', 'age'),
('life', 'life'),
('orbit_radius', 'orbitRadius'),
('eccentricity', 'eccentricity'),
('mass_dust', 'massDust'),
('mass_gas', 'massGas'),
('density', 'density'),
('surface_gravity', 'surfaceGravity'),
('escape_velocity', 'escapeVelocity'),
('orbit_period', 'orbitPeriod'),
('rotation_rate', 'rotationRate'),
('pressure', 'pressure'),
('radius', 'radius'),
('mass', 'mass'),
('is_locked', 'locked', parse_int_bool),
('is_fragmented', 'fragmented', parse_int_bool))
class Importer_mapLocationScenes(SQLImporter):
model = map_models.MapLocationScene
pks = (('id', 'locationID'),)
field_map = (('graphic', 'graphicID'),)
class Importer_mapLocationWormholeClasses(SQLImporter):
DEPENDENCIES = ['mapDenormalize']
model = map_models.MapLocationWormholeClass
pks = (('location_id', 'locationID'),)
field_map = (('wormhole_class', 'wormholeClassID'),)
class Importer_warCombatZones(SQLImporter):
DEPENDENCIES = ['chrFactions', 'mapSolarSystems']
model = map_models.WarCombatZone
pks = (('id', 'combatZoneID'),)
field_map = (('name', 'combatZoneName'),
('faction_id', 'factionID'),
('center_system_id', 'centerSystemID'),
('description', 'description'))
class Importer_warCombatZoneSystems(SQLImporter):
DEPENDENCIES = ['warCombatZones', 'mapSolarSystems']
model = map_models.WarCombatZoneSystem
pks = (('solar_system', 'solarSystemID'),)
field_map = (('combat_zone_id', 'combatZoneID'),)
from crowdcomputer import settings
from django.contrib.auth.models import User, Group
from django.db.models import Q
from django.http.response import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from general.models import Process, Application, Reward, Task, TaskActiviti, \
TaskInstance
from general.tasks import sendEmail
from general.utils import getResults, startTask, splitData, mergeData, \
splitObjects, joinObjects, filterData
from rest_framework import generics, status
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view
from rest_framework.generics import CreateAPIView
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from restapi.serializers import ProcessSerializer, HumanTaskSerializer, \
RewardSerializer
from restapi.utils import getorcreatedUserByEmail, notifyUser
from uuid import uuid4
import json
import logging
# from general.utils import startProcess, sendEmail
log = logging.getLogger(__name__)
@api_view(['GET'])
def api_root(request, format=None):
"""
The entry endpoint of our API.
"""
return Response({
# 'users': reverse('user-list', request=request),
# 'groups': reverse('group-list', request=request),
# 'process': reverse('process-list', request=request, format=format)
})
class ProcessCreate(CreateAPIView):
# model = Process
serializer_class = ProcessSerializer
def pre_save(self, obj):
user = self.request.user
if settings.DEBUG and user.is_anonymous():
user = User.objects.get(id=1)
obj.owner = user
token = self.request.META.get('HTTP_APP_ID')
if token:
obj.application = Application.objects.get(token=token)
else:
obj.application = Application.objects.get(name='crowdcomputer')
class HumanTaskCreate(CreateAPIView):
serializer_class = HumanTaskSerializer
def pre_save(self, obj):
# rew = Reward(quantity=self.request.DATA['reward_quantity'],type=self.request.DATA['reward_type'])
# rew.save()
# obj.reward=rew
obj.owner = self.request.user
obj.process = get_object_or_404(Process, pk=self.kwargs['pk'])
obj.uuid = uuid4()
# class TurkTaskCreate(CreateAPIView):
# serializer_class = TurkTaskSerializer
#
# def pre_save(self, obj):
# obj.user=self.request.user
# obj.process=get_object_or_404(Process,pk=self.kwargs['pk'])
# obj.uuid = uuid4()
# # reward has to be made in dollars.
class SplitTask(APIView):
def post(self, request, format=None):
data = eval(request.DATA.get('data', '[]'))
operation = request.DATA.get('operation', 'splitN')
n = eval(request.DATA.get('n', '1'))
m = eval(request.DATA.get('m', '0'))
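# Note: the payloads above arrive as strings and are parsed with eval();
# for plain list/number literals, ast.literal_eval is a safer equivalent.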
# log.debug("pars: %s %s %s %s" % (n, m, operation, data))
log.debug("data %s",data)
log.debug("pars %s %s %s " %(operation,n,m))
res = splitData(data=data, operation=operation, n=n, m=m)
log.debug("result %s",res)
ret = {}
ret['result'] = res
return Response(ret, status.HTTP_200_OK)
class MergeTask(APIView):
def post(self, request, format=None):
data = eval(request.DATA.get('data', '[]'))
res = mergeData(data=data)
ret = {'result': res}
return Response(ret, status.HTTP_200_OK)
class SplitObjectTask(APIView):
def post(self, request, format=None):
data = eval(request.DATA.get('data', '[]'))
shared = request.DATA.get('shared', '[]')
fields = request.DATA.get('fields', '[]')
res = splitObjects(data, shared, fields)
ret = {'result': res}
return Response(ret, status.HTTP_200_OK)
class JoinObjectTask(APIView):
def post(self, request, format=None):
data = eval(request.DATA.get('data', '[]'))
field = request.DATA.get('field', '')
res = joinObjects(data, field)
ret = {}
ret['result'] = res
return Response(ret, status.HTTP_200_OK)
class FilterTask(APIView):
def post(self, request, format=None):
data = eval(request.DATA.get('data', '[]'))
conditions = request.DATA.get('conditions', '[]')
condition_operator = request.DATA.get('condition_operator', 'and')
res = filterData(data,conditions,condition_operator)
ret = {}
ret['result'] = res
return Response(ret, status.HTTP_200_OK)
class RewardCreate(CreateAPIView):
model = Reward
serializer_class = RewardSerializer
class Validate(APIView):
def get_object(self, pk,user):
try:
t = TaskInstance.objects.get(pk=pk)
if t.task.owner == user:
return t
else:
result={}
result['result']="task is not yours"
return Response(result, status.HTTP_401_UNAUTHORIZED)
except TaskInstance.DoesNotExist:
result={}
result['result']="Task instance does not exist"
return Response(result, status.HTTP_400_BAD_REQUEST)
def put(self,request,pk):
ti = self.get_object(pk,request.user)
if type(ti) is Response:
return ti
validation = request.DATA.get('validation',"")
log.debug("validation %s",validation )
ti.parameters['validation']=validation
ti.save()
result={}
result['result']='ok'
ti.finish()
return Response(result, status.HTTP_200_OK)
class StartTask(APIView):
def get_object(self, pk,user):
try:
t = Task.objects.get(pk=pk)
if t.owner == user:
return t
else:
raise Http404
except Task.DoesNotExist:
log.debug("task does not exists")
raise Http404
# here we use PUT because it seems a better design. POST would work as well.
def put(self, request, pk, format=None):
"""
:param request:
:param pk:
:param format:
:return: :raise:
"""
task = self.get_object(pk,request.user)
data = request.DATA.get('data', None)
name_receive = request.DATA.get('name', None)
task_type = task.parameters['type']
log.debug("type %s" % task_type)
# for marketplace this is fine
if task_type.lower() == "marketplace":
log.debug('marketplace')
ret = startTask(task, data)
elif task_type.lower() == "newsletter":
log.debug('newsletter')
newsletter = task.parameters['emails']
# if there is no data, create an instance per user
if data==None or len(data)==0:
log.debug("no data")
task.humantask.number_of_instances=len(newsletter)
log.debug("number of instances %", task.humantask.number_of_instances)
ret = startTask(task, data)
taskinstances = task.taskinstance_set.all()
i=0
# send email to each user
for email in newsletter:
executor = getorcreatedUserByEmail(email)
ti = taskinstances[i]
ti.executor=executor
ti.save()
notifyUser(request.user,executor,ti.uuid)
i=i+1
# if there is data, then create the tasks and send each instance to a user
else:
log.debug('data are here')
task.humantask.number_of_instances=1
ret = startTask(task, data)
taskinstances = task.taskinstance_set.all()
i=0
for email in newsletter:
executor = getorcreatedUserByEmail(email)
# if there are fewer instances than users, some users will not get anything
if len(taskinstances)>i:
ti = taskinstances[i]
ti.executor=executor
ti.save()
notifyUser(request.user,executor,ti.uuid)
i=i+1
task.save()
elif task_type.lower() == "contest":
log.debug("Contest, %s",data)
task.parameters['data']=data
log.debug("task parameters %s",task.parameters["data"])
task.save()
log.debug('contest')
ret = startTask(task, data)
elif task_type.lower() == "bid":
log.debug('bid')
ret = startTask(task, data)
else:
raise "no type specified"
activitiTask,created = TaskActiviti.objects.get_or_create(task=task,receive=name_receive)
result={}
task.process.status='PR'
task.process.save()
log.debug("process is %s",task.process.status)
if ret:
result['result']='ok'
return Response(result, status.HTTP_200_OK)
else:
result['result']='error'
return Response(result, status.HTTP_500_INTERNAL_SERVER_ERROR)
class TaskStatus(APIView):
def get_object(self, pk,user):
try:
t = Task.objects.get(pk=pk)
if t.owner == user:
return t
else:
raise Http404
except Task.DoesNotExist:
log.debug("task does not exists")
raise Http404
def get(self, request, pk, format=None):
task = self.get_object(pk,request.user)
result = {}
result['status']=task.status
return Response(result, status.HTTP_200_OK)
class TaskResults(APIView):
def get_object(self, pk):
try:
return Task.objects.get(pk=pk)
except Task.DoesNotExist:
raise Http404
# a plain GET: this endpoint only reads the task results.
def get(self, request, pk, format=None):
task = self.get_object(pk)
result={}
result['results']=getResults(task)
return Response(result, status.HTTP_200_OK)
# def post(self, request, pk, format=None):
# pass
# class JSONResponse(HttpResponse):
# """
# An HttpResponse that renders it's content into JSON.
# """
# def __init__(self, data, **kwargs):
# content = JSONRenderer().render(data)
# kwargs['content_type'] = 'application/json'
# super(JSONResponse, self).__init__(content, **kwargs)
#
# @api_view(['POST'])
# def StartTask(request, id):
#
# return JSONResponse(serializer.data, status=201)
# class UserList(generics.ListAPIView):
# """
# API endpoint that represents a list of users.
# """
# model = User
# serializer_class = UserSerializer
#
# class UserCreate(generics.CreateAPIView):
#
# model = User
# serializer_class = UserSerializer
#
# def post_save(self, obj, created=False):
# log.debug("post save")
# api_g, creted = Group.objects.get_or_create(name='api')
# api_g.user_set.add(obj)
# api_g.save()
# # obj.save()
#
# # this check if user exists, if so gives back that user, otherwhise a new one.
# #TODO: allow only the creat
#
# # def pre_save(self, obj):
# # log.debug('pre save')
# # api_g, creted = Group.objects.get_or_create(name='api')
# # api_g.user_set.add(obj)
# # api_g.save()
# # obj.save()
#
# # generics.CreateAPIView.pre_save(self, obj)
#
# # this is needed for checking the user exists or not.
# # adding the group is done in a bad way but it's the only method working.
# def create(self, request, *args, **kwargs):
# log.debug("create")
# serializer = self.get_serializer(data=request.DATA, files=request.FILES)
# try:
# if 'username' in request.DATA:
# log.debug("username %s" %request.DATA['username'])
# if ('email' in request.DATA) and (request.DATA['email']):
# # retrive users via email.
# users = User.objects.all().filter(email=request.DATA['email'])
# # take the first
# user=users[0]
# # in case there are many user with the same email (can it be?)
# if len(users)>1:
# # check if there's one not creted via api, so a user that is registered
# users.filter(~Q(groups__name='api'))
# # if it exsist then ok. the else case is not needed since user is alredy assigned before.
# if len(users)>=1:
# user=users[0]
# else:
# user = User.objects.get(username=request.DATA['username'])
# serializer = UserSerializer(user)
# headers = self.get_success_headers(serializer.data)
# in_api = user.groups.filter(name='api').exists()
# #
# # for g in user.groups.all():
# # log.debug("name %s",g.name)
# log.debug("in api %s ", in_api)
# return Response(serializer.data, status=status.HTTP_201_CREATED,
# headers=headers)
# except Exception, e:
# log.debug('exception %s',e)
# return generics.CreateAPIView.create(self, request, *args, **kwargs)
# # if serializer.is_valid():
# # self.pre_save(serializer.object)
# # self.object = serializer.save()
# # api_g= Group.objects.get(name='api')
# # api_g.user_set.add(self.object)
# # api_g.save()
# #
# # headers = self.get_success_headers(serializer.data)
# # return Response(serializer.data, status=status.HTTP_201_CREATED,
# # headers=headers)
# #
# # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# ## return generics.CreateAPIView.create(self, request, *args, **kwargs)
# # self.object.save()
#
# # self.object.group=api_g
# # self.group.save()
# # return createAPI
#
#
# class MyUserDetail(APIView):
# def get(self, request, format=None):
# user = User.objects.get(pk=request.user.pk)
# serializer = UserSerializer(user)
# return JSONResponse(serializer.data)
#
# class SendEmail(APIView):
# def get_user(self, pk):
# return get_object_or_404(User,pk=pk)
#
# def post(self, request, pk, format=None):
# email = self.get_user(pk).email;
# subject = request.POST['subject']
# text=request.POST['text']
# sender=request.POST['sender']
# app=get_object_or_404(general.models.Application,token=request.META.get('HTTP_APP_ID'))
# #sender="["+app.name+"] "+sender
# sendEmail(sender,subject,text,email)
# return HttpResponse(status=status.HTTP_200_OK)
#
#
# class UserDetail(generics.RetrieveAPIView):
# """
# API endpoint that represents a single user.
# """
# model = User
# serializer_class = UserSerializer
#
# #class GroupList(generics.ListCreateAPIView):
# # """
# # API endpoint that represents a list of groups.
# # """
# # model = Group
# # serializer_class = GroupSerializer
# #
# class GroupDetail(generics.RetrieveUpdateDestroyAPIView):
# """
# API endpoint that represents a single group.
# """
# model = Group
# serializer_class = GroupSerializer
#
# #
# # ----------- Other tutorial
# #
# #from django.views.decorators.csrf import csrf_exempt
# #from rest_framework.renderers import JSONRenderer
# #from rest_framework.parsers import JSONParser
# #
# class JSONResponse(HttpResponse):
# """
# An HttpResponse that renders it's content into JSON.
# """
# def __init__(self, data, **kwargs):
# content = JSONRenderer().render(data)
# kwargs['content_type'] = 'application/json'
# super(JSONResponse, self).__init__(content, **kwargs)
#
# #@csrf_exempt
# #@api_view(['GET', 'POST'])
# #def process_list(request):
# # """
# # List all code snippets, or create a new snippet.
# # """
# # if request.method == 'GET':
# # log.warning("no access control")
# # processes = Process.objects.all()
# # serializer = ProcessSerializer(processes)
# # return JSONResponse(serializer.data)
# #
# # elif request.method == 'POST':
# # log.warning("no access control")
# # data = JSONParser().parse(request)
# # serializer = ProcessSerializer(data=data)
# # if serializer.is_valid():
# # serializer.save()
# # return JSONResponse(serializer.data, status=201)
# # else:
# # return JSONResponse(serializer.errors, status=400)
# #
# #
# #@csrf_exempt
# #@api_view(['GET', 'PUT', 'DELETE'])
# #def process_detail(request, pk):
# # """
# # Retrieve, update or delete a code snippet.
# # """
# # try:
# # process = Process.objects.get(pk=pk)
# # except Process.DoesNotExist:
# # return HttpResponse(status=404)
# #
# # if request.method == 'GET':
# # serializer = ProcessSerializer(process)
# # return JSONResponse(serializer.data)
# #
# # elif request.method == 'PUT':
# # data = JSONParser().parse(request)
# # serializer = ProcessSerializer(process, data=data)
# # if serializer.is_valid():
# # serializer.save()
# # return JSONResponse(serializer.data)
# # else:
# # return JSONResponse(serializer.errors, status=400)
# #
# # elif request.method == 'DELETE':
# # process.delete()
# # return HttpResponse(status=204)
# #--- class based
#
#
# #this is to show the token to user
class TokenList(APIView):
def get(self, request, format=None):
token, created = Token.objects.get_or_create(user=self.request.user)
log.debug("token %s %s" %(created,token))
tt={}
tt['token']=token.key
return HttpResponse(json.dumps(tt), mimetype="application/json")
class TestView(APIView):
def get(self, request, format=None):
auth = request.META.get('HTTP_AUTHORIZATION')
app = request.META.get('HTTP_APP_ID')
# token, created = Token.objects.get_or_create(user=self.request.user)
log.debug("header %s %s"%(auth,app))
tt={}
tt['auth']=auth
tt['app']=app
return HttpResponse(json.dumps(tt), mimetype="application/json")
#
# #class ProcessList(APIView):
# #
# # def get(self, request, format=None):
# # log.warning("no access control")
# # processes = Process.objects.all()
# # serializer = ProcessSerializer(processes)
# # return JSONResponse(serializer.data)
# #
# # def post(self, request, format=None):
# # log.warning("no access control")
# # data = request.DATA
# # serializer = ProcessSerializer(data=data)
# # if serializer.is_valid():
# # serializer.save()
# # return JSONResponse(serializer.data, status=201)
# # else:
# # return JSONResponse(serializer.errors, status=400)
# #
# #
# class ProcessStartStop(APIView):
#
# def get_object(self, pk,user):
# try:
# process = Process.objects.get(pk=pk,user=user)
# return process
# except Process.DoesNotExist:
# return HttpResponse(status=404)
#
# def post(self, request, pk, format=None):
# process = self.get_object(pk,request.user)
# startProcess(process)
# return HttpResponse(status=200)
#
# # def put(self, request, pk, format=None):
# # process = self.get_object(pk)
# # data = request.DATA
# # serializer = ProcessSerializer(process, data=data)
# # if serializer.is_valid():
# # serializer.save()
# # return JSONResponse(serializer.data)
# # else:
# # return JSONResponse(serializer.errors, status=400)
#
# # def delete(self, request, pk, format=None):
# # process = self.get_object(pk)
# # process.delete()
# # return HttpResponse(status=204)
#
# #-- generics
#
# class TaskInstancesStatuses(APIView):
#
# def get(self, request, pk, format = None):
# task = get_object_or_404(Task,pk=pk)
# res ={}
# res['Total']=task.taskinstance_set.count()
# for status, description in Task.STATUS_CHOISE:
# log.debug("status %s, description %s" % (status,description))
# res[description]=task.taskinstance_set.filter(status=status).count()
# log.debug("res %s", res)
# return HttpResponse(json.dumps(res), mimetype="application/json")
#
#
# #print the list of all the processes of a user. allow only get
# class ProcessList(generics.ListCreateAPIView):
# model = Process
# serializer_class = ProcessSerializer
#
# def pre_save(self, obj):
# obj.user = self.request.user
# token = self.request.META.get('HTTP_APP_ID')
# log.debug('token for the app is %s',token)
# if token:
# obj.application=general.models.Application.objects.get(token=token)
#
# def get_queryset(self):
# qs=generics.ListCreateAPIView.get_queryset(self)
# user = self.request.user
# token = self.request.META.get('HTTP_APP_ID')
# log.debug("token %s" % token)
# if token is not None:
# application=general.models.Application.objects.get(token=token)
# return qs.filter(user=user).filter(application=application)
# else:
# return qs.filter(user=user)
#
#
#
#
# # print the process detail, so all the parameters. allow all the methods
# class ProcessDetail(generics.RetrieveUpdateDestroyAPIView):
# model = Process
# serializer_class = ProcessDetailSerializer
#
# def pre_save(self, obj):
# obj.user = self.request.user
#
#
#
# # all the detail of a task, allow all the methods
# class TaskDetail(generics.RetrieveUpdateDestroyAPIView):
# model = Task
# serializer_class = TaskDetailSerializer
#
# def pre_save(self, obj):
# obj.user = self.request.user
#
# # all the task of a process. allow only get
# class ProcessTaskList(generics.ListCreateAPIView):
# model = Task
# serializer_class = ProcessTaskSerializer
#
# def pre_save(self, obj):
# obj.user = self.request.user
#
# def get_queryset(self):
# pk=self.kwargs['pk']
# qs = Task.objects.all().filter(process=pk)
# user = self.request.user
# return qs.filter(user=user)
#
# class RewardCreate(generics.CreateAPIView):
# model = Reward
# serializer_class = RewardSerializer
#
# class RewardReadUpdate(generics.RetrieveUpdateAPIView):
# # permission are checked already by the permission class.
# model = Reward
# serializer_class = RewardSerializer
#
#
# # all the instances of a task. allow only get
# class TaskInstanceList(generics.ListAPIView):
# # here control on ownership is tricky. so don't implemented it yet
# model = TaskInstance
# serializer_class = TaskInstanceListSerializer
#
# def pre_save(self, obj):
# obj.user = self.request.user
#
# def get_queryset(self):
# qs=generics.ListAPIView.get_queryset(self)
# qs_filter = qs.filter(task = self.kwargs['pk']).filter(task__user=self.request.user)
# log.debug("qs_filter len %s",qs_filter.count())
# return qs_filter
#
# # show all the detail of an instance. allow all the methods.
# class TaskInstanceDetail(generics.RetrieveUpdateDestroyAPIView):
# model = TaskInstance
# serializer_class = TaskInstanceDetailSerializer
#
# # create Task
# class ProcessTaskCreate(generics.CreateAPIView):
# model=Task
# serializer_class = TaskDetailSerializer
#
# #
# #
# # def initialize_request(self, request, *args, **kargs):
# # init= generics.CreateAPIView.initialize_request(self, request, *args, **kargs)
# # init['date_deadline']=lambda: (date.today() + timedelta(days=7))
# # return init
#
# def pre_save(self, obj):
# obj.user=self.request.user
# obj.process=get_object_or_404(Process,pk=self.kwargs['pk'])
# obj.uuid = uuid4()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class FirewallRulesOperations(object):
"""FirewallRulesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-11-01"
self.config = config
def list_by_account(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the Data Lake Analytics firewall rules within the specified Data
Lake Analytics account.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of FirewallRule
:rtype:
~azure.mgmt.datalake.analytics.account.models.FirewallRulePaged[~azure.mgmt.datalake.analytics.account.models.FirewallRule]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.FirewallRulePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.FirewallRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
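# Illustrative usage sketch (assumes an authenticated management client
# whose `firewall_rules` attribute is this operations class; the resource
# group and account names are placeholders):
#
#     for rule in client.firewall_rules.list_by_account(
#             'my-resource-group', 'myadlaaccount'):
#         print(rule.name, rule.start_ip_address, rule.end_ip_address)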
def create_or_update(
self, resource_group_name, account_name, firewall_rule_name, start_ip_address, end_ip_address, custom_headers=None, raw=False, **operation_config):
"""Creates or updates the specified firewall rule. During update, the
firewall rule with the specified name will be replaced with this new
firewall rule.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param firewall_rule_name: The name of the firewall rule to create or
update.
:type firewall_rule_name: str
:param start_ip_address: The start IP address for the firewall rule.
This can be either ipv4 or ipv6. Start and End should be in the same
protocol.
:type start_ip_address: str
:param end_ip_address: The end IP address for the firewall rule. This
can be either ipv4 or ipv6. Start and End should be in the same
protocol.
:type end_ip_address: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: FirewallRule or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datalake.analytics.account.models.FirewallRule or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.CreateOrUpdateFirewallRuleParameters(start_ip_address=start_ip_address, end_ip_address=end_ip_address)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'CreateOrUpdateFirewallRuleParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FirewallRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, account_name, firewall_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Data Lake Analytics firewall rule.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param firewall_rule_name: The name of the firewall rule to retrieve.
:type firewall_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: FirewallRule or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datalake.analytics.account.models.FirewallRule or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FirewallRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, firewall_rule_name, start_ip_address=None, end_ip_address=None, custom_headers=None, raw=False, **operation_config):
"""Updates the specified firewall rule.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param firewall_rule_name: The name of the firewall rule to update.
:type firewall_rule_name: str
:param start_ip_address: The start IP address for the firewall rule.
This can be either ipv4 or ipv6. Start and End should be in the same
protocol.
:type start_ip_address: str
:param end_ip_address: The end IP address for the firewall rule. This
can be either ipv4 or ipv6. Start and End should be in the same
protocol.
:type end_ip_address: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: FirewallRule or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.datalake.analytics.account.models.FirewallRule or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = None
if start_ip_address is not None or end_ip_address is not None:
parameters = models.UpdateFirewallRuleParameters(start_ip_address=start_ip_address, end_ip_address=end_ip_address)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, 'UpdateFirewallRuleParameters')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FirewallRule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, account_name, firewall_rule_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified firewall rule from the specified Data Lake
Analytics account.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param firewall_rule_name: The name of the firewall rule to delete.
:type firewall_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/firewallRules/{firewallRuleName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
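# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK code above): it shows how
# the FirewallRulesOperations class is usually reached through the service
# client. The client class, credential type and all resource names below are
# assumptions based on the typical azure-mgmt-datalake-analytics layout.
#
# from azure.common.credentials import ServicePrincipalCredentials
# from azure.mgmt.datalake.analytics.account import DataLakeAnalyticsAccountManagementClient
#
# credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
# client = DataLakeAnalyticsAccountManagementClient(credentials, '<subscription-id>')
#
# # Create (or replace) a rule, then enumerate all rules for the account.
# client.firewall_rules.create_or_update(
#     'my-resource-group', 'my-adla-account', 'allow-office',
#     start_ip_address='10.0.0.1', end_ip_address='10.0.0.255')
# for rule in client.firewall_rules.list_by_account('my-resource-group', 'my-adla-account'):
#     print(rule.name)
# --------------------------------------------------------------------------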
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
import numpy as np
import pandas as pd
from tensorflow.python.platform import googletest
class GenericFeatureStatisticsGeneratorTest(googletest.TestCase):
def setUp(self):
self.gfsg = GenericFeatureStatisticsGenerator()
def testProtoFromDataFrames(self):
data = [[1, 'hi'], [2, 'hello'], [3, 'hi']]
df = pd.DataFrame(data, columns=['testFeatureInt', 'testFeatureString'])
dataframes = [{'table': df, 'name': 'testDataset'}]
p = self.gfsg.ProtoFromDataFrames(dataframes)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(2, len(test_data.features))
if test_data.features[0].name == 'testFeatureInt':
numfeat = test_data.features[0]
stringfeat = test_data.features[1]
else:
numfeat = test_data.features[1]
stringfeat = test_data.features[0]
self.assertEqual('testFeatureInt', numfeat.name)
self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
self.assertEqual('testFeatureString', stringfeat.name)
self.assertEqual(self.gfsg.fs_proto.STRING, stringfeat.type)
self.assertEqual(2, stringfeat.string_stats.unique)
def testNdarrayToEntry(self):
arr = np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual(2, entry['missing'])
arr = np.array(['a', 'b', float('nan'), 'c'], dtype=str)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual(1, entry['missing'])
def testNdarrayToEntryTimeTypes(self):
arr = np.array(
[np.datetime64('2005-02-25'),
np.datetime64('2006-02-25')],
dtype=np.datetime64)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual([1109289600000000000, 1140825600000000000], entry['vals'])
arr = np.array(
[np.datetime64('2009-01-01') - np.datetime64('2008-01-01')],
dtype=np.timedelta64)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual([31622400000000000], entry['vals'])
def testDTypeToType(self):
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.int32)))
# Boolean and time types treated as int
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.bool)))
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.datetime64)))
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.timedelta64)))
self.assertEqual(self.gfsg.fs_proto.FLOAT,
self.gfsg.DtypeToType(np.dtype(np.float32)))
self.assertEqual(self.gfsg.fs_proto.STRING,
self.gfsg.DtypeToType(np.dtype(np.str)))
# Unsupported types treated as string for now
self.assertEqual(self.gfsg.fs_proto.STRING,
self.gfsg.DtypeToType(np.dtype(np.void)))
def testGetDatasetsProtoFromEntriesLists(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
hist = numfeat.num_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.3, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(.3, buckets[9].sample_count)
def testGetDatasetsProtoSequenceExampleHistogram(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 2, 3],
'counts': [1, 2, 1],
'feat_lens': [1, 2, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets)
hist = p.datasets[0].features[
0].num_stats.common_stats.feature_list_length_histogram
buckets = hist.buckets
self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.3, buckets[0].sample_count)
self.assertEqual(1.8, buckets[9].low_value)
self.assertEqual(2, buckets[9].high_value)
self.assertEqual(.3, buckets[9].sample_count)
def testGetDatasetsProtoWithWhitelist(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
entries['ignoreFeature'] = {
'vals': [5, 6],
'counts': [1, 1],
'missing': 1,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets, features=['testFeature'])
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(1, numfeat.num_stats.min)
def testGetDatasetsProtoWithMaxHistogramLevelsCount(self):
# Selected entries' lengths make it easy to compute average length
data = [['hi'], ['good'], ['hi'], ['hi'], ['a'], ['a']]
df = pd.DataFrame(data, columns=['testFeatureString'])
dataframes = [{'table': df, 'name': 'testDataset'}]
# Getting proto from ProtoFromDataFrames instead of GetDatasetsProto
# directly to avoid any hand-written values, e.g. the size of the dataset.
p = self.gfsg.ProtoFromDataFrames(dataframes,
histogram_categorical_levels_count=2)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(6, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeatureString', numfeat.name)
top_values = numfeat.string_stats.top_values
self.assertEqual(3, top_values[0].frequency)
self.assertEqual('hi', top_values[0].value)
self.assertEqual(3, numfeat.string_stats.unique)
self.assertEqual(2, numfeat.string_stats.avg_length)
rank_hist = numfeat.string_stats.rank_histogram
buckets = rank_hist.buckets
self.assertEqual(2, len(buckets))
self.assertEqual('hi', buckets[0].label)
self.assertEqual(3, buckets[0].sample_count)
self.assertEqual('a', buckets[1].label)
self.assertEqual(2, buckets[1].sample_count)
if __name__ == '__main__':
googletest.main()
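# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite above): the same
# ProtoFromDataFrames call exercised by these tests can feed the Facets
# Overview visualisation directly. The base64 embedding step is an assumption
# based on the documented Facets workflow.
#
# import base64
# import pandas as pd
# from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
#
# df = pd.DataFrame({'age': [23, 31, 47], 'city': ['kyoto', 'oslo', 'kyoto']})
# proto = GenericFeatureStatisticsGenerator().ProtoFromDataFrames(
#     [{'name': 'train', 'table': df}])
# protostr = base64.b64encode(proto.SerializeToString()).decode('utf-8')
# # protostr can then be injected into the facets-overview HTML template.
# --------------------------------------------------------------------------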
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os.path import join, abspath, dirname, exists
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from shutil import move
from workspace_tools.paths import *
from workspace_tools.utils import mkdir, cmd
from workspace_tools.export import export, setup_user_prj
USR_PRJ_NAME = "usr_prj"
USER_PRJ = join(EXPORT_WORKSPACE, USR_PRJ_NAME)
USER_SRC = join(USER_PRJ, "src")
def setup_test_user_prj():
if exists(USER_PRJ):
print 'Test user project already generated...'
return
setup_user_prj(USER_PRJ, join(TEST_DIR, "rtos", "mbed", "basic"), [join(LIB_DIR, "rtos")])
# FAKE BUILD URL
open(join(USER_SRC, "mbed.bld"), 'w').write("http://mbed.org/users/mbed_official/code/mbed/builds/976df7c37ad5\n")
def fake_build_url_resolver(url):
# FAKE BUILD URL: Ignore the URL, always return the path to the mbed library
return {'path':MBED_LIBRARIES, 'name':'mbed'}
def test_export(toolchain, target, expected_error=None):
if toolchain is None and target is None:
base_dir = join(EXPORT_TMP, "zip")
else:
base_dir = join(EXPORT_TMP, toolchain, target)
temp_dir = join(base_dir, "temp")
mkdir(temp_dir)
zip_path, report = export(USER_PRJ, USR_PRJ_NAME, toolchain, target, base_dir, temp_dir, False, None, fake_build_url_resolver)
if report['success']:
move(zip_path, join(EXPORT_DIR, "export_%s_%s.zip" % (toolchain, target)))
print "[OK]"
else:
if expected_error is None:
print '[ERROR] %s' % report['errormsg']
else:
if (zip_path is None) and (expected_error in report['errormsg']):
print '[OK]'
else:
print '[ERROR]'
print ' zip:', zip_path
print ' msg:', report['errormsg']
if __name__ == '__main__':
setup_test_user_prj()
for toolchain, target in [
('zip', 'LPC1768'),
('emblocks', 'LPC1768'),
('emblocks', 'LPC1549'),
('emblocks', 'LPC1114'),
('emblocks', 'LPC11U35_401'),
('emblocks', 'LPC11U35_501'),
('emblocks', 'LPCCAPPUCCINO'),
('emblocks', 'LPC2368'),
('emblocks', 'STM32F407'),
('emblocks', 'DISCO_F100RB'),
('emblocks', 'DISCO_F051R8'),
('emblocks', 'DISCO_F407VG'),
('emblocks', 'DISCO_F303VC'),
('emblocks', 'NRF51822'),
('emblocks', 'NUCLEO_F401RE'),
('emblocks', 'NUCLEO_F411RE'),
('emblocks', 'MTS_MDOT_F405RG'),
('emblocks', 'MTS_MDOT_F411RE'),
('coide', 'KL05Z'),
('coide', 'KL25Z'),
('coide', 'LPC1768'),
('coide', 'ARCH_PRO'),
('coide', 'DISCO_F407VG'),
('coide', 'NUCLEO_F401RE'),
('coide', 'NUCLEO_F411RE'),
('coide', 'DISCO_F429ZI'),
('coide', 'NUCLEO_F334R8'),
('coide', 'MTS_MDOT_F405RG'),
('coide', 'MTS_MDOT_F411RE'),
('uvision', 'LPC1768'),
('uvision', 'LPC11U24'),
('uvision', 'KL25Z'),
('uvision', 'LPC1347'),
('uvision', 'LPC1114'),
('uvision', 'LPC4088'),
('uvision', 'LPC4088_DM'),
('uvision', 'LPC4337'),
('uvision', 'HRM1017'),
('uvision', 'NUCLEO_F030R8'),
('uvision', 'NUCLEO_F070RB'),
('uvision', 'NUCLEO_F072RB'),
('uvision', 'NUCLEO_F091RC'),
('uvision', 'NUCLEO_F103RB'),
('uvision', 'NUCLEO_F302R8'),
('uvision', 'NUCLEO_F303RE'),
('uvision', 'NUCLEO_F334R8'),
('uvision', 'NUCLEO_F401RE'),
('uvision', 'NUCLEO_F411RE'),
('uvision', 'NUCLEO_L053R8'),
('uvision', 'NUCLEO_L152RE'),
('uvision', 'MTS_MDOT_F405RG'),
('lpcxpresso', 'LPC1768'),
('lpcxpresso', 'LPC4088'),
('lpcxpresso', 'LPC4088_DM'),
('lpcxpresso', 'LPC1114'),
('lpcxpresso', 'LPC11U35_401'),
('lpcxpresso', 'LPC11U35_501'),
('lpcxpresso', 'LPCCAPPUCCINO'),
('lpcxpresso', 'LPC1549'),
('lpcxpresso', 'LPC11U68'),
# Linux path: /home/emimon01/bin/gcc-cs/bin/
# Windows path: "C:/Program Files (x86)/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin/"
('codesourcery', 'LPC1768'),
# Linux path: /home/emimon01/bin/gcc-arm/bin/
# Windows path: C:/arm-none-eabi-gcc-4_7/bin/
('gcc_arm', 'LPC1768'),
('gcc_arm', 'LPC4088_DM'),
('gcc_arm', 'LPC1549'),
('gcc_arm', 'LPC1114'),
('gcc_arm', 'LPC11U35_401'),
('gcc_arm', 'LPC11U35_501'),
('gcc_arm', 'LPCCAPPUCCINO'),
('gcc_arm', 'LPC2368'),
('gcc_arm', 'STM32F407'),
('gcc_arm', 'DISCO_F100RB'),
('gcc_arm', 'DISCO_F051R8'),
('gcc_arm', 'DISCO_F407VG'),
('gcc_arm', 'DISCO_F303VC'),
('gcc_arm', 'NRF51822'),
('gcc_arm', 'HRM1017'),
('gcc_arm', 'NUCLEO_F401RE'),
('gcc_arm', 'NUCLEO_F411RE'),
('gcc_arm', 'DISCO_F429ZI'),
('gcc_arm', 'NUCLEO_F334R8'),
('gcc_arm', 'MTS_MDOT_F405RG'),
('gcc_arm', 'MTS_MDOT_F411RE'),
('ds5_5', 'LPC1768'), ('ds5_5', 'LPC11U24'),
('iar', 'LPC1768'),
('iar', 'LPC4088_DM'),
('iar', 'LPC1347'),
('iar', 'NUCLEO_F030R8'),
('iar', 'NUCLEO_F070RB'),
('iar', 'NUCLEO_F072RB'),
('iar', 'NUCLEO_F091RC'),
('iar', 'NUCLEO_F302R8'),
('iar', 'NUCLEO_F303RE'),
('iar', 'NUCLEO_F334R8'),
('iar', 'NUCLEO_F401RE'),
('iar', 'NUCLEO_F411RE'),
('iar', 'NUCLEO_L053R8'),
('iar', 'NUCLEO_L152RE'),
('iar', 'STM32F407'),
('iar', 'MTS_MDOT_F405RG'),
('iar', 'MTS_MDOT_F411RE'),
(None, None),
]:
print '\n=== Exporting to "%s::%s" ===' % (toolchain, target)
test_export(toolchain, target)
print "\n=== Test error messages ==="
test_export('lpcxpresso', 'LPC11U24', expected_error='lpcxpresso')
|
|
from __future__ import absolute_import
import itertools
import time
from sentry.similarity.backends.abstract import AbstractIndexBackend
from sentry.utils.iterators import chunked
from sentry.utils.redis import load_script
index = load_script('similarity/index.lua')
def band(n, value):
assert len(value) % n == 0
return list(chunked(value, len(value) / n))
def flatten(value):
return list(itertools.chain.from_iterable(value))
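# Hedged illustration (not part of the original module): band() splits a
# MinHash signature into equally sized bands, which become the comma-separated
# bucket keys built in _build_signature_arguments below; flatten() undoes the
# nesting. For example, with an 8-element signature and 4 bands:
#
#   band(4, [1, 2, 3, 4, 5, 6, 7, 8]) == [[1, 2], [3, 4], [5, 6], [7, 8]]
#   flatten([[1, 2], [3, 4]]) == [1, 2, 3, 4]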
class RedisScriptMinHashIndexBackend(AbstractIndexBackend):
def __init__(self, cluster, namespace, signature_builder,
bands, interval, retention, candidate_set_limit):
self.cluster = cluster
self.namespace = namespace
self.signature_builder = signature_builder
self.bands = bands
self.interval = interval
self.retention = retention
self.candidate_set_limit = candidate_set_limit
def _build_signature_arguments(self, features):
if not features:
return [0] * self.bands
arguments = []
for bucket in band(self.bands, self.signature_builder(features)):
arguments.extend([1, ','.join(map('{}'.format, bucket)), 1])
return arguments
def __index(self, scope, args):
# scope must be passed into the script call as a key to allow the
# cluster client to determine what cluster the script should be
# executed on. The script itself will use the scope as the hashtag for
# all redis operations.
return index(self.cluster, [scope], args)
def _as_search_result(self, results):
score_replacements = {
-1.0: None, # both items don't have the feature (no comparison)
-2.0: 0, # one item doesn't have the feature (totally dissimilar)
}
def decode_search_result(result):
key, scores = result
return (
key,
map(
lambda score: score_replacements.get(score, score),
map(float, scores),
)
)
def get_comparison_key(result):
key, scores = result
scores = filter(
lambda score: score is not None,
scores,
)
return (
sum(scores) / len(scores) * -1, # average score, descending
len(scores) * -1, # number of indexes with scores, descending
key, # lexicographical sort on key, ascending
)
return sorted(
map(decode_search_result, results),
key=get_comparison_key,
)
def classify(self, scope, items, limit=None, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'CLASSIFY',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
limit if limit is not None else -1,
]
for idx, threshold, features in items:
arguments.extend([idx, threshold])
arguments.extend(self._build_signature_arguments(features))
return self._as_search_result(self.__index(scope, arguments))
def compare(self, scope, key, items, limit=None, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'COMPARE',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
limit if limit is not None else -1,
key,
]
for idx, threshold in items:
arguments.extend([idx, threshold])
return self._as_search_result(self.__index(scope, arguments))
def record(self, scope, key, items, timestamp=None):
if not items:
return # nothing to do
if timestamp is None:
timestamp = int(time.time())
arguments = [
'RECORD',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
key,
]
for idx, features in items:
arguments.append(idx)
arguments.extend(self._build_signature_arguments(features))
return self.__index(scope, arguments)
def merge(self, scope, destination, items, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'MERGE',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
destination,
]
for idx, source in items:
arguments.extend([idx, source])
return self.__index(scope, arguments)
def delete(self, scope, items, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'DELETE',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
]
for idx, key in items:
arguments.extend([idx, key])
return self.__index(scope, arguments)
def scan(self, scope, indices, batch=1000, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'SCAN',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
]
cursors = {idx: 0 for idx in indices}
while cursors:
requests = []
for idx, cursor in cursors.items():
requests.append([idx, cursor, batch])
responses = self.__index(scope, arguments + flatten(requests))
for (idx, _, _), (cursor, chunk) in zip(requests, responses):
cursor = int(cursor)
if cursor == 0:
del cursors[idx]
else:
cursors[idx] = cursor
yield idx, chunk
def flush(self, scope, indices, batch=1000, timestamp=None):
for idx, chunk in self.scan(scope, indices, batch, timestamp):
if chunk:
self.cluster.delete(*chunk)
def export(self, scope, items, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'EXPORT',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
]
for idx, key in items:
arguments.extend([idx, key])
return self.__index(scope, arguments)
def import_(self, scope, items, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
arguments = [
'IMPORT',
timestamp,
self.namespace,
self.bands,
self.interval,
self.retention,
self.candidate_set_limit,
scope,
]
for idx, key, data in items:
arguments.extend([idx, key, data])
return self.__index(scope, arguments)
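# --------------------------------------------------------------------------
# Hedged construction sketch (not part of the original module): in Sentry the
# backend is wired up from settings, so the cluster lookup, signature builder
# and tuning values below are placeholder assumptions rather than real
# defaults.
#
# from sentry.utils import redis as redis_utils
# from sentry.similarity import MinHashSignatureBuilder
#
# backend = RedisScriptMinHashIndexBackend(
#     cluster=redis_utils.redis_clusters.get('default'),
#     namespace='sim:1',
#     signature_builder=MinHashSignatureBuilder(32, 0xFFFF),
#     bands=16,                    # must divide the signature length (see band())
#     interval=60 * 60 * 24,       # bucket width in seconds
#     retention=12,                # number of intervals to keep
#     candidate_set_limit=1000,
# )
# backend.record('project:1', 'group:42', [('frames', ['a', 'b', 'c'])])
# --------------------------------------------------------------------------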
|
|
from sympy.core.add import Add
from sympy.core.numbers import Rational, Float
from sympy.core.basic import C, sympify, cacheit
from sympy.core.singleton import S
from sympy.core.function import Function, ArgumentIndexError
from miscellaneous import sqrt
###############################################################################
########################## TRIGONOMETRIC FUNCTIONS ############################
###############################################################################
class TrigonometricFunction(Function):
"""Base class for trigonometric functions. """
def _peeloff_pi(arg):
"""
Split ARG into two parts, a "rest" and a multiple of pi/2.
This assumes ARG to be an Add.
The multiple of pi returned in the second position is always a Rational.
Examples:
>>> from sympy.functions.elementary.trigonometric import _peeloff_pi as peel
>>> from sympy import pi
>>> from sympy.abc import x, y
>>> peel(x + pi/2)
(x, pi/2)
>>> peel(x + 2*pi/3 + pi*y)
(x + pi*y + pi/6, pi/2)
"""
for a in Add.make_args(arg):
if a is S.Pi:
K = S.One
break
elif a.is_Mul:
K, p = a.as_two_terms()
if p is S.Pi and K.is_Rational:
break
else:
return arg, S.Zero
m1 = (K % S.Half) * S.Pi
m2 = K*S.Pi - m1
return arg - m2, m2
def _pi_coeff(arg, cycles=1):
"""
When arg is a Number times pi (e.g. 3*pi/2) then return the Number
normalized to be in the range [0, 2], else None.
When an even multiple of pi is encountered, if it is multiplying
something with known parity, then the multiple is returned as 0;
otherwise it is returned as 2.
Examples:
>>> from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
>>> from sympy import pi
>>> from sympy.abc import x, y
>>> coeff(3*x*pi)
3*x
>>> coeff(11*pi/7)
11/7
>>> coeff(-11*pi/7)
3/7
>>> coeff(4*pi)
0
>>> coeff(5*pi)
1
>>> coeff(5.0*pi)
1
>>> coeff(5.5*pi)
3/2
>>> coeff(2 + pi)
"""
arg = sympify(arg)
if arg is S.Pi:
return S.One
elif not arg:
return S.Zero
elif arg.is_Mul:
cx = arg.coeff(S.Pi)
if cx:
c, x = cx.as_coeff_Mul() # pi is not included as coeff
if c.is_Float:
# recast exact binary fractions to Rationals
m = int(c*2)
if Float(float(m)/2) == c:
c = Rational(m, 2)
if x is not S.One or not (c.is_Rational and c.q != 1):
if x.is_integer:
c2 = c % 2
if c2 == 1:
return x
elif not c2:
if x.is_even is not None: # known parity
return S.Zero
return 2*x
else:
return c2*x
return cx
else:
return Rational(c.p % (2*c.q), c.q)
class sin(TrigonometricFunction):
"""
Usage
=====
sin(x) -> Returns the sine of x (measured in radians)
Notes
=====
sin(x) will evaluate automatically in the case x
is a multiple of pi, pi/2, pi/3, pi/4 and pi/6.
Examples
========
>>> from sympy import sin, pi
>>> from sympy.abc import x
>>> sin(x**2).diff(x)
2*x*cos(x**2)
>>> sin(1).diff(x)
0
>>> sin(pi)
0
>>> sin(pi/2)
1
>>> sin(pi/6)
1/2
See also
========
L{cos}, L{tan}
External links
--------------
U{Definitions in trigonometry<http://planetmath.org/encyclopedia/DefinitionsInTrigonometry.html>}
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return cos(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
return asin
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Zero:
return S.Zero
elif arg is S.Infinity:
return
if arg.could_extract_minus_sign():
return -cls(-arg)
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * C.sinh(i_coeff)
pi_coeff = _pi_coeff(arg)
if pi_coeff is not None:
if pi_coeff.is_integer:
return S.Zero
if not pi_coeff.is_Rational:
narg = pi_coeff*S.Pi
if narg != arg:
return cls(narg)
return None
cst_table_some = {
2 : S.One,
3 : S.Half*sqrt(3),
4 : S.Half*sqrt(2),
6 : S.Half,
}
cst_table_more = {
(1, 5) : sqrt((5 - sqrt(5)) / 8),
(2, 5) : sqrt((5 + sqrt(5)) / 8)
}
p = pi_coeff.p
q = pi_coeff.q
Q, P = p // q, p % q
try:
result = cst_table_some[q]
except KeyError:
if abs(P) > q // 2:
P = q - P
try:
result = cst_table_more[(P, q)]
except KeyError:
if P != p:
result = cls(C.Rational(P, q)*S.Pi)
else:
return None
if Q % 2 == 1:
return -result
else:
return result
if arg.is_Add:
x, m = _peeloff_pi(arg)
if m:
return sin(m)*cos(x)+cos(m)*sin(x)
if arg.func is asin:
return arg.args[0]
if arg.func is atan:
x = arg.args[0]
return x / sqrt(1 + x**2)
if arg.func is acos:
x = arg.args[0]
return sqrt(1 - x**2)
if arg.func is acot:
x = arg.args[0]
return 1 / (sqrt(1 + 1 / x**2) * x)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return -p * x**2 / (n*(n-1))
else:
return (-1)**(n//2) * x**(n)/C.factorial(n)
def _eval_rewrite_as_exp(self, arg):
exp, I = C.exp, S.ImaginaryUnit
return (exp(arg*I) - exp(-arg*I)) / (2*I)
def _eval_rewrite_as_cos(self, arg):
return -cos(arg + S.Pi/2)
def _eval_rewrite_as_tan(self, arg):
tan_half = tan(S.Half*arg)
return 2*tan_half/(1 + tan_half**2)
def _eval_rewrite_as_cot(self, arg):
cot_half = cot(S.Half*arg)
return 2*cot_half/(1 + cot_half**2)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (sin(re)*C.cosh(im), cos(re)*C.sinh(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_mul()
if not (coeff is S.One) and coeff.is_Integer and terms:
x = arg._new_rawargs(*terms)
y = (coeff-1)*x
if x is not None:
return (sin(x)*cos(y) + sin(y)*cos(x)).expand(trig=True)
return sin(arg)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_bounded(self):
arg = self.args[0]
if arg.is_real:
return True
def _sage_(self):
import sage.all as sage
return sage.sin(self.args[0]._sage_())
class cos(TrigonometricFunction):
"""
Usage
=====
cos(x) -> Returns the cosine of x (measured in radians)
Notes
=====
cos(x) will evaluate automatically in the case x
is a multiple of pi, pi/2, pi/3, pi/4 and pi/6.
Examples
========
>>> from sympy import cos, pi
>>> from sympy.abc import x
>>> cos(x**2).diff(x)
-2*x*sin(x**2)
>>> cos(1).diff(x)
0
>>> cos(pi)
-1
>>> cos(pi/2)
0
>>> cos(2*pi/3)
-1/2
See also
========
L{sin}, L{tan}
External links
--------------
U{Definitions in trigonometry<http://planetmath.org/encyclopedia/DefinitionsInTrigonometry.html>}
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return -sin(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
return acos
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Zero:
return S.One
elif arg is S.Infinity:
return
if arg.could_extract_minus_sign():
return cls(-arg)
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return C.cosh(i_coeff)
pi_coeff = _pi_coeff(arg)
if pi_coeff is not None:
if not pi_coeff.is_Rational:
if pi_coeff.is_integer:
return (S.NegativeOne)**pi_coeff
narg = pi_coeff*S.Pi
if narg != arg:
return cls(narg)
return None
cst_table_some = {
1 : S.One,
2 : S.Zero,
3 : S.Half,
4 : S.Half*sqrt(2),
6 : S.Half*sqrt(3),
}
cst_table_more = {
(1, 5) : (sqrt(5) + 1)/4,
(2, 5) : (sqrt(5) - 1)/4
}
p = pi_coeff.p
q = pi_coeff.q
Q, P = 2*p // q, p % q
try:
result = cst_table_some[q]
except KeyError:
if abs(P) > q // 2:
P = q - P
try:
result = cst_table_more[(P, q)]
except KeyError:
if P != p:
result = cls(C.Rational(P, q)*S.Pi)
else:
return None
if Q % 4 in (1, 2):
return -result
else:
return result
if arg.is_Add:
x, m = _peeloff_pi(arg)
if m:
return cos(m)*cos(x)-sin(m)*sin(x)
if arg.func is acos:
return arg.args[0]
if arg.func is atan:
x = arg.args[0]
return 1 / sqrt(1 + x**2)
if arg.func is asin:
x = arg.args[0]
return sqrt(1 - x ** 2)
if arg.func is acot:
x = arg.args[0]
return 1 / sqrt(1 + 1 / x**2)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return -p * x**2 / (n*(n-1))
else:
return (-1)**(n//2)*x**(n)/C.factorial(n)
def _eval_rewrite_as_exp(self, arg):
exp, I = C.exp, S.ImaginaryUnit
return (exp(arg*I) + exp(-arg*I)) / 2
def _eval_rewrite_as_sin(self, arg):
return sin(arg + S.Pi/2)
def _eval_rewrite_as_tan(self, arg):
tan_half = tan(S.Half*arg)**2
return (1-tan_half)/(1+tan_half)
def _eval_rewrite_as_cot(self, arg):
cot_half = cot(S.Half*arg)**2
return (cot_half-1)/(cot_half+1)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (cos(re)*C.cosh(im), -sin(re)*C.sinh(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand()
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
return (cos(x)*cos(y) - sin(y)*sin(x)).expand(trig=True)
else:
coeff, terms = arg.as_coeff_mul()
if not (coeff is S.One) and coeff.is_Integer and terms:
x = arg._new_rawargs(*terms)
return C.chebyshevt(coeff, cos(x))
return cos(arg)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return S.One
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_bounded(self):
arg = self.args[0]
if arg.is_real:
return True
def _sage_(self):
import sage.all as sage
return sage.cos(self.args[0]._sage_())
class tan(TrigonometricFunction):
"""
Usage
=====
tan(x) -> Returns the tangent of x (measured in radians)
Notes
=====
tan(x) will evaluate automatically in the case x is a
multiple of pi.
Examples
========
>>> from sympy import tan
>>> from sympy.abc import x
>>> tan(x**2).diff(x)
2*x*(tan(x**2)**2 + 1)
>>> tan(1).diff(x)
0
See also
========
L{sin}, L{cos}
External links
--------------
U{Definitions in trigonometry<http://planetmath.org/encyclopedia/DefinitionsInTrigonometry.html>}
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex==1:
return S.One + self**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
return atan
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Zero:
return S.Zero
if arg.could_extract_minus_sign():
return -cls(-arg)
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * C.tanh(i_coeff)
pi_coeff = _pi_coeff(arg, 2)
if pi_coeff is not None:
if pi_coeff.is_integer:
return S.Zero
if not pi_coeff.is_Rational:
narg = pi_coeff*S.Pi
if narg != arg:
return cls(narg)
return None
cst_table = {
2 : S.ComplexInfinity,
3 : sqrt(3),
4 : S.One,
6 : 1 / sqrt(3),
}
try:
result = cst_table[pi_coeff.q]
if (2*pi_coeff.p // pi_coeff.q) % 4 in (1, 3):
return -result
else:
return result
except KeyError:
if pi_coeff.p > pi_coeff.q:
p, q = pi_coeff.p % pi_coeff.q, pi_coeff.q
if 2 * p > q:
return -cls(Rational(q - p, q)*S.Pi)
return cls(Rational(p, q)*S.Pi)
if arg.is_Add:
x, m = _peeloff_pi(arg)
if m:
if (m*2/S.Pi) % 2 == 0:
return tan(x)
else:
return -cot(x)
if arg.func is atan:
return arg.args[0]
if arg.func is asin:
x = arg.args[0]
return x / sqrt(1 - x**2)
if arg.func is acos:
x = arg.args[0]
return sqrt(1 - x**2) / x
if arg.func is acot:
x = arg.args[0]
return 1 / x
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
a, b = ((n-1)//2), 2**(n+1)
B = C.bernoulli(n+1)
F = C.factorial(n+1)
return (-1)**a * b*(b-1) * B/F * x**n
def _eval_nseries(self, x, n, logx):
i = self.args[0].limit(x, 0)*2/S.Pi
if i and i.is_Integer:
return self.rewrite(cos)._eval_nseries(x, n=n, logx=logx)
return Function._eval_nseries(self, x, n=n, logx=logx)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = cos(re)**2 + C.sinh(im)**2
return (sin(re)*cos(re)/denom, C.sinh(im)*C.cosh(im)/denom)
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
return self
def _eval_rewrite_as_exp(self, arg):
exp, I = C.exp, S.ImaginaryUnit
neg_exp, pos_exp = exp(-arg*I), exp(arg*I)
return I*(neg_exp-pos_exp)/(neg_exp+pos_exp)
def _eval_rewrite_as_sin(self, x):
return 2*sin(x)**2/sin(2*x)
def _eval_rewrite_as_cos(self, x):
return -cos(x + S.Pi/2)/cos(x)
def _eval_rewrite_as_cot(self, arg):
return 1/cot(arg)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_bounded(self):
arg = self.args[0]
if arg.is_imaginary:
return True
def _eval_subs(self, old, new):
if self == old:
return new
arg = self.args[0]
argnew = arg.subs(old, new)
if arg != argnew and (argnew/(S.Pi/2)).is_odd:
return S.NaN
return tan(argnew)
def _sage_(self):
import sage.all as sage
return sage.tan(self.args[0]._sage_())
class cot(TrigonometricFunction):
"""
Usage
=====
cot(x) -> Returns the cotangent of x (measured in radians)
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return S.NegativeOne - self**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
return acot
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
if arg is S.Zero:
return S.ComplexInfinity
if arg.could_extract_minus_sign():
return -cls(-arg)
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return -S.ImaginaryUnit * C.coth(i_coeff)
pi_coeff = _pi_coeff(arg, 2)
if pi_coeff is not None:
if pi_coeff.is_integer:
return S.ComplexInfinity
if not pi_coeff.is_Rational:
narg = pi_coeff*S.Pi
if narg != arg:
return cls(narg)
return None
cst_table = {
2 : S.Zero,
3 : 1 / sqrt(3),
4 : S.One,
6 : sqrt(3)
}
try:
result = cst_table[pi_coeff.q]
if (2*pi_coeff.p // pi_coeff.q) % 4 in (1, 3):
return -result
else:
return result
except KeyError:
if pi_coeff.p > pi_coeff.q:
p, q = pi_coeff.p % pi_coeff.q, pi_coeff.q
if 2 * p > q:
return -cls(Rational(q - p, q)*S.Pi)
return cls(Rational(p, q)*S.Pi)
if arg.is_Add:
x, m = _peeloff_pi(arg)
if m:
if (m*2/S.Pi) % 2 == 0:
return cot(x)
else:
return -tan(x)
if arg.func is acot:
return arg.args[0]
if arg.func is atan:
x = arg.args[0]
return 1 / x
if arg.func is asin:
x = arg.args[0]
return sqrt(1 - x**2) / x
if arg.func is acos:
x = arg.args[0]
return x / sqrt(1 - x**2)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return 1 / sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = C.bernoulli(n+1)
F = C.factorial(n+1)
return (-1)**((n+1)//2) * 2**(n+1) * B/F * x**n
def _eval_nseries(self, x, n, logx):
i = self.args[0].limit(x, 0)/S.Pi
if i and i.is_Integer:
return self.rewrite(cos)._eval_nseries(x, n=n, logx=logx)
return Function._eval_nseries(self, x, n=n, logx=logx)
def _eval_conjugate(self):
assert len(self.args) == 1
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sin(re)**2 + C.sinh(im)**2
return (sin(re)*cos(re)/denom, -C.sinh(im)*C.cosh(im)/denom)
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_rewrite_as_exp(self, arg):
exp, I = C.exp, S.ImaginaryUnit
neg_exp, pos_exp = exp(-arg*I), exp(arg*I)
return I*(pos_exp+neg_exp)/(pos_exp-neg_exp)
def _eval_rewrite_as_sin(self, x):
return 2*sin(2*x)/sin(x)**2
def _eval_rewrite_as_cos(self, x):
return -cos(x)/cos(x + S.Pi/2)
def _eval_rewrite_as_tan(self, arg):
return 1/tan(arg)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return 1/arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_subs(self, old, new):
if self == old:
return new
arg = self.args[0]
argnew = arg.subs(old, new)
if arg != argnew and (argnew/S.Pi).is_integer:
return S.NaN
return cot(argnew)
def _sage_(self):
import sage.all as sage
return sage.cot(self.args[0]._sage_())
###############################################################################
########################### TRIGONOMETRIC INVERSES ############################
###############################################################################
class asin(Function):
"""
Usage
=====
asin(x) -> Returns the arc sine of x (measured in radians)
Notes
====
asin(x) will evaluate automatically in the cases
oo, -oo, 0, 1, -1
Examples
========
>>> from sympy import asin, oo, pi
>>> asin(1)
pi/2
>>> asin(-1)
-pi/2
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.NegativeInfinity * S.ImaginaryUnit
elif arg is S.NegativeInfinity:
return S.Infinity * S.ImaginaryUnit
elif arg is S.Zero:
return S.Zero
elif arg is S.One:
return S.Pi / 2
elif arg is S.NegativeOne:
return -S.Pi / 2
if arg.could_extract_minus_sign():
return -cls(-arg)
if arg.is_number:
cst_table = {
sqrt(3)/2 : 3,
-sqrt(3)/2 : -3,
sqrt(2)/2 : 4,
-sqrt(2)/2 : -4,
1/sqrt(2) : 4,
-1/sqrt(2) : -4,
sqrt((5-sqrt(5))/8) : 5,
-sqrt((5-sqrt(5))/8) : -5,
S.Half : 6,
-S.Half : -6,
sqrt(2-sqrt(2))/2 : 8,
-sqrt(2-sqrt(2))/2 : -8,
(sqrt(5)-1)/4 : 10,
(1-sqrt(5))/4 : -10,
(sqrt(3)-1)/sqrt(2**3) : 12,
(1-sqrt(3))/sqrt(2**3) : -12,
(sqrt(5)+1)/4 : S(10)/3,
-(sqrt(5)+1)/4 : -S(10)/3
}
if arg in cst_table:
return S.Pi / cst_table[arg]
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * C.asinh(i_coeff)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return p * (n-2)**2/(n*(n-1)) * x**2
else:
k = (n - 1) // 2
R = C.RisingFactorial(S.Half, k)
F = C.factorial(k)
return R / F * x**n / n
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_acos(self, x):
return S.Pi/2 - acos(x)
def _eval_rewrite_as_atan(self, x):
return 2*atan(x/(1 + sqrt(1 - x**2)))
def _eval_rewrite_as_log(self, x):
return -S.ImaginaryUnit*C.log(S.ImaginaryUnit*x + sqrt(1-x**2))
def _eval_is_real(self):
return self.args[0].is_real and (self.args[0]>=-1 and self.args[0]<=1)
def _sage_(self):
import sage.all as sage
return sage.asin(self.args[0]._sage_())
class acos(Function):
"""
Usage
=====
acos(x) -> Returns the arc cosine of x (measured in radians)
Notes
=====
acos(x) will evaluate automatically in the cases
oo, -oo, 0, 1, -1
Examples
========
>>> from sympy import acos, oo, pi
>>> acos(1)
0
>>> acos(0)
pi/2
>>> acos(oo)
oo*I
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return -1/sqrt(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity * S.ImaginaryUnit
elif arg is S.NegativeInfinity:
return S.NegativeInfinity * S.ImaginaryUnit
elif arg is S.Zero:
return S.Pi / 2
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi
if arg.is_number:
cst_table = {
S.Half : S.Pi/3,
-S.Half : 2*S.Pi/3,
sqrt(2)/2 : S.Pi/4,
-sqrt(2)/2 : 3*S.Pi/4,
1/sqrt(2) : S.Pi/4,
-1/sqrt(2) : 3*S.Pi/4,
sqrt(3)/2 : S.Pi/6,
-sqrt(3)/2 : 5*S.Pi/6,
}
if arg in cst_table:
return cst_table[arg]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return p * (n-2)**2/(n*(n-1)) * x**2
else:
k = (n - 1) // 2
R = C.RisingFactorial(S.Half, k)
F = C.factorial(k)
return -R / F * x**n / n
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real and (self.args[0]>=-1 and self.args[0]<=1)
def _eval_rewrite_as_log(self, x):
return S.Pi/2 + S.ImaginaryUnit * C.log(S.ImaginaryUnit * x + sqrt(1 - x**2))
def _eval_rewrite_as_asin(self, x):
return S.Pi/2 - asin(x)
def _eval_rewrite_as_atan(self, x):
if x > -1 and x <= 1:
return 2 * atan(sqrt(1 - x**2)/(1 + x))
else:
raise ValueError("The argument must be bounded in the interval (-1,1]")
def _sage_(self):
import sage.all as sage
return sage.acos(self.args[0]._sage_())
class atan(Function):
"""
Usage
=====
atan(x) -> Returns the arc tangent of x (measured in radians)
Notes
=====
atan(x) will evaluate automatically in the cases
oo, -oo, 0, 1, -1
Examples
========
>>> from sympy import atan, oo, pi
>>> atan(0)
0
>>> atan(1)
pi/4
>>> atan(oo)
pi/2
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1+self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Pi / 2
elif arg is S.NegativeInfinity:
return -S.Pi / 2
elif arg is S.Zero:
return S.Zero
elif arg is S.One:
return S.Pi / 4
elif arg is S.NegativeOne:
return -S.Pi / 4
if arg.could_extract_minus_sign():
return -cls(-arg)
if arg.is_number:
cst_table = {
sqrt(3)/3 : 6,
-sqrt(3)/3 : -6,
1/sqrt(3) : 6,
-1/sqrt(3) : -6,
sqrt(3) : 3,
-sqrt(3) : -3,
(1+sqrt(2)) : S(8)/3,
-(1+sqrt(2)) : -S(8)/3,
(sqrt(2)-1) : 8,
(1-sqrt(2)) : -8,
sqrt((5+2*sqrt(5))) : S(5)/2,
-sqrt((5+2*sqrt(5))) : -S(5)/2,
(2-sqrt(3)) : 12,
-(2-sqrt(3)) : -12
}
if arg in cst_table:
return S.Pi / cst_table[arg]
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * C.atanh(i_coeff)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return (-1)**((n-1)//2) * x**n / n
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_rewrite_as_log(self, x):
return S.ImaginaryUnit/2 * \
(C.log((S(1) - S.ImaginaryUnit * x)/(S(1) + S.ImaginaryUnit * x)))
def _eval_aseries(self, n, args0, x, logx):
if args0[0] == S.Infinity:
return S.Pi/2 - atan(1/self.args[0])
elif args0[0] == S.NegativeInfinity:
return -S.Pi/2 - atan(1/self.args[0])
else:
return super(atan, self)._eval_aseries(n, args0, x, logx)
def _sage_(self):
import sage.all as sage
return sage.atan(self.args[0]._sage_())
class acot(Function):
"""
Usage
=====
acot(x) -> Returns the arc cotangent of x (measured in radians)
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return -1 / (1+self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg is S.Zero:
return S.Pi / 2
elif arg is S.One:
return S.Pi / 4
elif arg is S.NegativeOne:
return -S.Pi / 4
if arg.could_extract_minus_sign():
return -cls(-arg)
if arg.is_number:
cst_table = {
sqrt(3)/3 : 3,
-sqrt(3)/3 : -3,
1/sqrt(3) : 3,
-1/sqrt(3) : -3,
sqrt(3) : 6,
-sqrt(3) : -6,
(1+sqrt(2)) : 8,
-(1+sqrt(2)) : -8,
(1-sqrt(2)) : -S(8)/3,
(sqrt(2)-1) : S(8)/3,
sqrt(5+2*sqrt(5)) : 10,
-sqrt(5+2*sqrt(5)) : -10,
(2+sqrt(3)) : 12,
-(2+sqrt(3)) : -12,
(2-sqrt(3)) : S(12)/5,
-(2-sqrt(3)) : -S(12)/5,
}
if arg in cst_table:
return S.Pi / cst_table[arg]
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return -S.ImaginaryUnit * C.acoth(i_coeff)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi / 2 # FIX THIS
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return (-1)**((n+1)//2) * x**n / n
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_aseries(self, n, args0, x, logx):
if args0[0] == S.Infinity:
return S.Pi/2 - acot(1/self.args[0])
elif args0[0] == S.NegativeInfinity:
return 3*S.Pi/2 - acot(1/self.args[0])
else:
return super(acot, self)._eval_aseries(n, args0, x, logx)
def _sage_(self):
import sage.all as sage
return sage.acot(self.args[0]._sage_())
def _eval_rewrite_as_log(self, x):
return S.ImaginaryUnit/2 * \
(C.log((x - S.ImaginaryUnit)/(x + S.ImaginaryUnit)))
class atan2(Function):
"""
atan2(y, x) -> Returns atan(y/x), taking the two arguments y and x.
Signs of both y and x are considered to determine the appropriate
quadrant of atan(y/x). The range is (-pi, pi].
"""
nargs = 2
@classmethod
def eval(cls, y, x):
sign_y = C.sign(y)
if y.is_zero:
if x.is_positive:
return S.Zero
elif x.is_zero:
return S.NaN
elif x.is_negative:
return S.Pi
elif x.is_zero:
if sign_y.is_Number:
return sign_y * S.Pi/2
else:
abs_yx = C.Abs(y/x)
if sign_y.is_Number and abs_yx.is_number:
phi = C.atan(abs_yx)
if x.is_positive:
return sign_y * phi
else:
return sign_y * (S.Pi - phi)
def _eval_is_real(self):
return self.args[0].is_real and self.args[1].is_real
def fdiff(self, argindex):
x, y = self.args
if argindex == 1:
return y/(x**2 + y**2)
elif argindex == 2:
return -x/(x**2 + y**2)
else:
raise ArgumentIndexError(self, argindex)
def _sage_(self):
import sage.all as sage
return sage.atan2(self.args[0]._sage_(), self.args[1]._sage_())
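# Illustrative quadrant checks (a minimal sketch, not part of the original module):
# the expected values below follow directly from the branches of atan2.eval above and
# assume the standard SymPy singletons (S.One, S.Zero, S.NegativeOne) used elsewhere
# in this file. The helper is never called; it only documents the intended behaviour.
def _atan2_quadrant_examples():
    assert atan2(S.One, S.One) == S.Pi/4             # first quadrant
    assert atan2(S.One, S.NegativeOne) == 3*S.Pi/4   # second quadrant
    assert atan2(S.Zero, S.NegativeOne) == S.Pi      # negative real axis maps to pi
    assert atan2(S.NegativeOne, S.One) == -S.Pi/4    # fourth quadrant, range is (-pi, pi]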
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
=========================
Test for qplotutils.bench
=========================
Autogenerated package stub.
"""
import unittest
import logging
import sys
import os
import numpy as np
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtOpenGL import *
from qtpy.QtWidgets import *
from qplotutils.bench import *
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "philipp.baust@gmail.com"
__status__ = "Development"
_log = logging.getLogger(__name__)
class AbstractContainerTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
# def test_instantiate(self):
# """ Autogenerated. """
# obj = AbstractContainer() # TODO: may fail!
#
class BenchTests(unittest.TestCase):
app = None
@classmethod
def setUpClass(cls):
BenchTests.app = QApplication([])
def setUp(self):
config = CONFIG
config.debug_layout = True
self.bench = Bench()
self.bench.setWindowTitle("Bench Example 01")
self.bench.resize(300, 400)
self.bench.show()
def test_add_docks(self):
        self.assertEqual(len(self.bench.docks), 0)
# First dock
dock_01 = Dock()
self.bench.addDock(dock_01)
# Second dock
dock_02 = Dock(title="Dock 2")
self.bench.addDock(dock_02)
self.bench.repaint() # try to cover paintEvent
self.bench.update()
        self.assertEqual(len(self.bench.docks), 2)
def test_close_dock(self):
        self.assertEqual(len(self.bench.docks), 0)
# First dock
dock_01 = Dock()
self.bench.addDock(dock_01)
# Second dock
dock_02 = Dock(title="Dock 2")
self.bench.addDock(dock_02)
        self.assertEqual(len(self.bench.docks), 2)
dock_02.close()
        self.assertEqual(len(self.bench.docks), 1)
def test_move_dock(self):
# First dock
dock_01 = Dock()
self.bench.addDock(dock_01)
# Second dock
dock_02 = Dock(title="Dock 2")
self.bench.addDock(dock_02, placement=Placement.BOTTOM, ref=dock_01)
self.bench.dockMove(dock_01.uid, placement=Placement.TAB, ref_uid=dock_02.uid)
d = dock_02.parentContainer.docks
self.assertEqual(len(d), 2)
    def test_close_dock_with_placement(self):
# First dock
dock_01 = Dock()
self.bench.addDock(dock_01)
# Second dock
dock_02 = Dock(title="Dock 2")
self.bench.addDock(dock_02, placement=Placement.BOTTOM, ref=dock_01)
dock_01.close()
self.assertEqual(len(self.bench.docks), 1)
class BenchExceptionTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = BenchException() # TODO: may fail!
class BenchItemTests(unittest.TestCase):
app = None
@classmethod
def setUpClass(cls):
cls.app = QApplication([])
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = BenchItem() # TODO: may fail!
class DockTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = Dock() # TODO: may fail!
class DropOverlayTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
# def test_instantiate(self):
# """ Autogenerated. """
# obj = DropOverlay() # TODO: may fail!
class PlacementTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = Placement() # TODO: may fail!
class SplitterContainerTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
#
# def test_instantiate(self):
# """ Autogenerated. """
# obj = SplitterContainer() # TODO: may fail!
class TabTests(unittest.TestCase):
app = None
@classmethod
def setUpClass(cls):
cls.app = QApplication([])
def setUp(self):
""" Autogenerated. """
pass
# def test_instantiate(self):
# """ Autogenerated. """
# obj = Tab() # TODO: may fail!
class TabContainerTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
# def test_instantiate(self):
# """ Autogenerated. """
# obj = TabContainer() # TODO: may fail!
class TabHeaderTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = TabHeader() # TODO: may fail!
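# Minimal entry point (an addition, not autogenerated) so the stub suite can be run
# directly as a script; DEBUG logging to stderr is a convenience assumption.
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    unittest.main()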
|
|
# -*- coding: utf-8 -*-
'''
A module to manage software on Windows
:depends: - win32com
- win32con
- win32api
- pywintypes
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import locale
import logging
import time
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
# Import third party libs
import salt.ext.six as six
# pylint: disable=import-error
try:
import win32api
import win32con
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
try:
import msgpack
except ImportError:
import msgpack_pure as msgpack
# pylint: enable=import-error
import shlex
# Import salt libs
from salt.exceptions import CommandExecutionError, SaltRenderError
import salt.utils
import salt.syspaths
from salt.exceptions import MinionError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
'''
Set the virtual pkg module if the os is Windows
'''
if salt.utils.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return False
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
if len(names) == 0:
return ''
# Initialize the return dict with empty strings
ret = {}
for name in names:
ret[name] = ''
# Refresh before looking for the latest version available
if salt.utils.is_true(kwargs.get('refresh', True)):
refresh_db()
installed_pkgs = list_pkgs(versions_as_list=True)
log.trace('List of installed packages: {0}'.format(installed_pkgs))
# iterate over all requested package names
for name in names:
latest_installed = '0'
latest_available = '0'
# get latest installed version of package
if name in installed_pkgs:
log.trace('Sorting out the latest available version of {0}'.format(name))
latest_installed = sorted(installed_pkgs[name], cmp=_reverse_cmp_pkg_versions).pop()
log.debug('Latest installed version of package {0} is {1}'.format(name, latest_installed))
# get latest available (from winrepo_dir) version of package
pkg_info = _get_package_info(name)
log.trace('Raw winrepo pkg_info for {0} is {1}'.format(name, pkg_info))
latest_available = _get_latest_pkg_version(pkg_info)
if latest_available:
log.debug('Latest available version of package {0} is {1}'.format(name, latest_available))
# check, whether latest available version is newer than latest installed version
if salt.utils.compare_versions(ver1=str(latest_available),
oper='>',
ver2=str(latest_installed)):
log.debug('Upgrade of {0} from {1} to {2} is available'.format(name, latest_installed, latest_available))
ret[name] = latest_available
else:
log.debug('No newer version than {0} of {1} is available'.format(latest_installed, name))
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.alias_function(latest_version, 'available_version')
def upgrade_available(name):
'''
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
return latest_version(name) != ''
def list_upgrades(refresh=True):
'''
List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
if salt.utils.is_true(refresh):
refresh_db()
ret = {}
for name, data in six.iteritems(get_repo_data().get('repo', {})):
if version(name):
latest = latest_version(name)
if latest:
ret[name] = latest
return ret
def list_available(*names):
'''
Return a list of available versions of the specified package.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_available <package name>
salt '*' pkg.list_available <package name01> <package name02>
'''
if not names:
return ''
if len(names) == 1:
pkginfo = _get_package_info(names[0])
if not pkginfo:
return ''
versions = list(pkginfo.keys())
else:
versions = {}
for name in names:
pkginfo = _get_package_info(name)
if not pkginfo:
continue
versions[name] = list(pkginfo.keys()) if pkginfo else []
versions = sorted(versions, cmp=_reverse_cmp_pkg_versions)
return versions
def version(*names, **kwargs):
'''
Returns a version if the package is installed, else returns an empty string
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
'''
ret = {}
if len(names) == 1:
val = __salt__['pkg_resource.version'](*names, **kwargs)
if len(val):
return val
return ''
if len(names) > 1:
reverse_dict = {}
nums = __salt__['pkg_resource.version'](*names, **kwargs)
if len(nums):
for num, val in six.iteritems(nums):
if len(val) > 0:
try:
ret[reverse_dict[num]] = val
except KeyError:
ret[num] = val
return ret
return dict([(x, '') for x in names])
return ret
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
'''
versions_as_list = salt.utils.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
ret = {}
name_map = _get_name_map()
for pkg_name, val in six.iteritems(_get_reg_software()):
if pkg_name in name_map:
key = name_map[pkg_name]
if val in ['Not Found', None, False]:
# Look up version from winrepo
pkg_info = _get_package_info(key)
if not pkg_info:
continue
for pkg_ver in pkg_info.keys():
if pkg_info[pkg_ver]['full_name'] == pkg_name:
val = pkg_ver
else:
key = pkg_name
__salt__['pkg_resource.add_pkg'](ret, key, val)
__salt__['pkg_resource.sort_pkglist'](ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
def _search_software(target):
'''
This searches the msi product databases for name matches
of the list of target products, it will return a dict with
values added to the list passed in
'''
search_results = {}
software = dict(_get_reg_software().items())
for key, value in six.iteritems(software):
if key is not None:
if target.lower() in key.lower():
search_results[key] = value
return search_results
def _get_reg_software():
'''
This searches the uninstall keys in the registry to find
a match in the sub keys, it will return a dict with the
display name as the key and the version as the value
'''
reg_software = {}
# This is a list of default OS reg entries that don't seem to be installed
# software and no version information exists on any of these items
ignore_list = ['AddressBook',
'Connection Manager',
'DirectDrawEx',
'Fontcore',
'IE40',
'IE4Data',
'IE5BAKEX',
'IEData',
'MobileOptionPack',
'SchedulingAgent',
'WIC'
]
encoding = locale.getpreferredencoding()
#attempt to corral the wild west of the multiple ways to install
#software in windows
reg_entries = dict(_get_machine_keys().items())
for reg_hive, reg_keys in six.iteritems(reg_entries):
for reg_key in reg_keys:
try:
reg_handle = win32api.RegOpenKeyEx(
reg_hive,
reg_key,
0,
win32con.KEY_READ)
            except Exception:
                # Uninstall key may not exist for all users; skip it rather than
                # reusing a stale or undefined handle below.
                continue
for name, num, blank, time in win32api.RegEnumKeyEx(reg_handle):
prd_uninst_key = "\\".join([reg_key, name])
# These reg values aren't guaranteed to exist
windows_installer = _get_reg_value(
reg_hive,
prd_uninst_key,
'WindowsInstaller')
prd_name = _get_reg_value(
reg_hive,
prd_uninst_key,
"DisplayName")
try:
prd_name = prd_name.decode(encoding)
except Exception:
pass
prd_ver = _get_reg_value(
reg_hive,
prd_uninst_key,
"DisplayVersion")
if name not in ignore_list:
if prd_name != 'Not Found':
# some MS Office updates don't register a product name which means
# their information is useless
if prd_name != '':
reg_software[prd_name] = prd_ver
return reg_software
def _get_machine_keys():
'''
This will return the hive 'const' value and some registry keys where
installed software information has been known to exist for the
HKEY_LOCAL_MACHINE hive
'''
machine_hive_and_keys = {}
machine_keys = [
"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall",
"Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall"
]
machine_hive = win32con.HKEY_LOCAL_MACHINE
machine_hive_and_keys[machine_hive] = machine_keys
return machine_hive_and_keys
def _get_reg_value(reg_hive, reg_key, value_name=''):
'''
Read one value from Windows registry.
If 'name' is empty map, reads default value.
'''
try:
key_handle = win32api.RegOpenKeyEx(
reg_hive, reg_key, 0, win32con.KEY_ALL_ACCESS)
value_data, value_type = win32api.RegQueryValueEx(key_handle,
value_name)
win32api.RegCloseKey(key_handle)
except Exception:
value_data = 'Not Found'
return value_data
def refresh_db(saltenv='base'):
'''
Just recheck the repository and return a dict::
{'<database name>': Bool}
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
__context__.pop('winrepo.data', None)
if 'win_repo_source_dir' in __opts__:
salt.utils.warn_until(
'Nitrogen',
'The \'win_repo_source_dir\' config option is deprecated, please '
'use \'winrepo_source_dir\' instead.'
)
winrepo_source_dir = __opts__['win_repo_source_dir']
else:
winrepo_source_dir = __opts__['winrepo_source_dir']
# Clear minion repo-ng cache
repo_path = '{0}\\files\\{1}\\win\\repo-ng\\salt-winrepo-ng'\
.format(__opts__['cachedir'], saltenv)
if not __salt__['file.remove'](repo_path):
log.error('pkg.refresh_db: failed to clear existing cache')
# Cache repo-ng locally
cached_files = __salt__['cp.cache_dir'](
winrepo_source_dir,
saltenv,
include_pat='*.sls'
)
genrepo(saltenv=saltenv)
return cached_files
def _get_local_repo_dir(saltenv='base'):
if 'win_repo_source_dir' in __opts__:
salt.utils.warn_until(
'Nitrogen',
'The \'win_repo_source_dir\' config option is deprecated, please '
'use \'winrepo_source_dir\' instead.'
)
winrepo_source_dir = __opts__['win_repo_source_dir']
else:
winrepo_source_dir = __opts__['winrepo_source_dir']
dirs = []
dirs.append(salt.syspaths.CACHE_DIR)
dirs.extend(['minion', 'files'])
dirs.append(saltenv)
dirs.extend(winrepo_source_dir[7:].strip('/').split('/'))
return os.sep.join(dirs)
def genrepo(saltenv='base'):
'''
Generate winrepo_cachefile based on sls files in the winrepo
CLI Example:
.. code-block:: bash
salt-run winrepo.genrepo
'''
ret = {}
repo = _get_local_repo_dir(saltenv)
if not os.path.exists(repo):
os.makedirs(repo)
winrepo = 'winrepo.p'
renderers = salt.loader.render(__opts__, __salt__)
for root, _, files in os.walk(repo):
for name in files:
if name.endswith('.sls'):
try:
config = salt.template.compile_template(
os.path.join(root, name),
renderers,
__opts__['renderer'])
except SaltRenderError as exc:
log.debug('Failed to compile {0}.'.format(os.path.join(root, name)))
log.debug('Error: {0}.'.format(exc))
continue
if config:
revmap = {}
for pkgname, versions in six.iteritems(config):
for version, repodata in six.iteritems(versions):
if not isinstance(version, six.string_types):
config[pkgname][str(version)] = \
config[pkgname].pop(version)
if not isinstance(repodata, dict):
log.debug('Failed to compile'
'{0}.'.format(os.path.join(root, name)))
continue
revmap[repodata['full_name']] = pkgname
ret.setdefault('repo', {}).update(config)
ret.setdefault('name_map', {}).update(revmap)
with salt.utils.fopen(os.path.join(repo, winrepo), 'w+b') as repo_cache:
repo_cache.write(msgpack.dumps(ret))
return ret
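# For orientation, a hypothetical (illustrative only) shape of the data written to
# winrepo.p by genrepo() above; the keys mirror those consumed by install()/remove():
#
#     {'repo': {'7zip': {'9.20.00.0': {'full_name': '7-Zip 9.20 (x64 edition)',
#                                      'installer': 'salt://win/repo/7zip/7z920-x64.msi',
#                                      'install_flags': '/qn /norestart',
#                                      'msiexec': True}}},
#      'name_map': {'7-Zip 9.20 (x64 edition)': '7zip'}}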
def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
'''
Install the passed package(s) on the system using winrepo
:param name:
The name of a single package, or a comma-separated list of packages to
install. (no spaces after the commas)
:type name: str, list, or None
:param str version:
The specific version to install. If omitted, the latest version will be
installed. If passed with multiple install, the version will apply to
all packages. Recommended for single installation only.
:param bool refresh: Boolean value representing whether or not to refresh
the winrepo db
:param pkgs: A list of packages to install from a software repository.
All packages listed under ``pkgs`` will be installed via a single
command.
:type pkgs: list or None
:param str saltenv: The salt environment to use. Default is ``base``.
:param dict kwargs: Any additional argument that may be passed from the
state module. If they don't apply, they are ignored.
:return: Return a dict containing the new package names and versions::
:rtype: dict
If the package is installed by ``pkg.install``:
.. code-block:: cfg
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If the package is already installed:
.. code-block:: cfg
{'<package>': {'current': '<current-version>'}}
The following example will refresh the winrepo and install a single package,
7zip.
CLI Example:
.. code-block:: bash
salt '*' pkg.install 7zip refresh=True
CLI Example:
.. code-block:: bash
salt '*' pkg.install 7zip
salt '*' pkg.install 7zip,filezilla
salt '*' pkg.install pkgs='["7zip","filezilla"]'
'''
ret = {}
if refresh:
refresh_db()
# Make sure name or pkgs is passed
if not name and not pkgs:
return 'Must pass a single package or a list of packages'
# Ignore pkg_type from parse_targets, Windows does not support the
# "sources" argument
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
if pkg_params is None or len(pkg_params) == 0:
log.error('No package definition found')
return {}
if not pkgs and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a
# comma-separated list
pkg_params = {name: {'version': kwargs.get('version'),
'extra_install_flags': kwargs.get('extra_install_flags')}}
# Get a list of currently installed software for comparison at the end
old = list_pkgs()
# Loop through each package
changed = []
latest = []
for pkg_name, options in six.iteritems(pkg_params):
# Load package information for the package
pkginfo = _get_package_info(pkg_name)
# Make sure pkginfo was found
if not pkginfo:
log.error('Unable to locate package {0}'.format(pkg_name))
ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name)
continue
# Get the version number passed or the latest available
version_num = ''
if options:
version_num = options.get('version', False)
if not version_num:
version_num = _get_latest_pkg_version(pkginfo)
# Check if the version is already installed
if version_num == old.get(pkg_name) \
or (pkg_name in old and old[pkg_name] == 'Not Found'):
# Desired version number already installed
ret[pkg_name] = {'current': version_num}
continue
# If version number not installed, is the version available?
elif version_num not in pkginfo:
log.error('Version {0} not found for package '
'{1}'.format(version_num, pkg_name))
ret[pkg_name] = {'not found': version_num}
continue
if 'latest' in pkginfo:
latest.append(pkg_name)
# Get the installer
installer = pkginfo[version_num].get('installer')
# Is there an installer configured?
if not installer:
log.error('No installer configured for version {0} of package '
'{1}'.format(version_num, pkg_name))
ret[pkg_name] = {'no installer': version_num}
continue
# Is the installer in a location that requires caching
if installer.startswith(('salt:', 'http:', 'https:', 'ftp:')):
# Check for the 'cache_dir' parameter in the .sls file
# If true, the entire directory will be cached instead of the
# individual file. This is useful for installations that are not
# single files
cache_dir = pkginfo[version_num].get('cache_dir')
if cache_dir and installer.startswith('salt:'):
path, _ = os.path.split(installer)
__salt__['cp.cache_dir'](path, saltenv, False, None, 'E@init.sls$')
# Check to see if the installer is cached
cached_pkg = __salt__['cp.is_cached'](installer, saltenv)
if not cached_pkg:
# It's not cached. Cache it, mate.
cached_pkg = __salt__['cp.cache_file'](installer, saltenv)
# Check if the installer was cached successfully
if not cached_pkg:
log.error('Unable to cache file {0} from saltenv: {1}'.format(installer, saltenv))
ret[pkg_name] = {'unable to cache': installer}
continue
# Compare the hash of the cached installer to the source only if the
# file is hosted on salt:
if installer.startswith('salt:'):
if __salt__['cp.hash_file'](installer, saltenv) != \
__salt__['cp.hash_file'](cached_pkg):
try:
cached_pkg = __salt__['cp.cache_file'](installer, saltenv)
except MinionError as exc:
return '{0}: {1}'.format(exc, installer)
# Check if the installer was cached successfully
if not cached_pkg:
log.error('Unable to cache {0}'.format(installer))
ret[pkg_name] = {'unable to cache': installer}
continue
else:
# Run the installer directly (not hosted on salt:, https:, etc.)
cached_pkg = installer
# Fix non-windows slashes
cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg)
# Get install flags
install_flags = '{0}'.format(pkginfo[version_num].get('install_flags'))
if options and options.get('extra_install_flags'):
install_flags = '{0} {1}'.format(install_flags,
options.get('extra_install_flags', ''))
# Install the software
# Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False):
# Build Scheduled Task Parameters
if pkginfo[version_num].get('msiexec'):
cmd = 'msiexec.exe'
arguments = ['/i', cached_pkg]
                if pkginfo[version_num].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(shlex.split(install_flags))
else:
cmd = cached_pkg
arguments = shlex.split(install_flags)
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=' '.join(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00')
# Run Scheduled Task
__salt__['task.run_wait'](name='update-salt-software')
else:
# Build the install command
cmd = []
if pkginfo[version_num].get('msiexec'):
cmd.extend(['msiexec', '/i', cached_pkg])
if pkginfo[version_num].get('allusers', True):
cmd.append('ALLUSERS="1"')
else:
cmd.append(cached_pkg)
cmd.extend(shlex.split(install_flags))
# Launch the command
result = __salt__['cmd.run_stdout'](cmd,
cache_path,
output_loglevel='trace',
python_shell=False)
if result:
log.error('Failed to install {0}'.format(pkg_name))
log.error('error message: {0}'.format(result))
ret[pkg_name] = {'failed': result}
else:
changed.append(pkg_name)
# Get a new list of installed software
new = list_pkgs()
# For installers that have no specific version (ie: chrome)
# The software definition file will have a version of 'latest'
# In that case there's no way to know which version has been installed
# Just return the current installed version
# This has to be done before the loop below, otherwise the installation
# will not be detected
if latest:
for pkg_name in latest:
if old.get(pkg_name, 'old') == new.get(pkg_name, 'new'):
ret[pkg_name] = {'current': new[pkg_name]}
# Sometimes the installer takes awhile to update the registry
# This checks 10 times, 3 seconds between each for a registry change
tries = 0
difference = salt.utils.compare_dicts(old, new)
while not all(name in difference for name in changed) and tries < 10:
__salt__['reg.broadcast_change']()
time.sleep(3)
new = list_pkgs()
difference = salt.utils.compare_dicts(old, new)
tries += 1
log.debug("Try {0}".format(tries))
if tries == 10:
if not latest:
ret['_comment'] = 'Software not found in the registry.\n' \
'Could be a problem with the Software\n' \
'definition file. Verify the full_name\n' \
'and the version match the registry ' \
'exactly.\n' \
'Failed after {0} tries.'.format(tries)
# Compare the software list before and after
# Add the difference to ret
ret.update(difference)
return ret
def upgrade(refresh=True):
'''
Run a full system upgrade
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
log.warning('pkg.upgrade not implemented on Windows yet')
# Uncomment the below once pkg.upgrade has been implemented
# if salt.utils.is_true(refresh):
# refresh_db()
return {}
def remove(name=None, pkgs=None, version=None, **kwargs):
'''
Remove the passed package(s) from the system using winrepo
:param name:
The name of the package to be uninstalled.
:type name: str, list, or None
:param str version:
The version of the package to be uninstalled. If this option is used to
to uninstall multiple packages, then this version will be applied to all
targeted packages. Recommended using only when uninstalling a single
package. If this parameter is omitted, the latest version will be
uninstalled.
Multiple Package Options:
:param pkgs:
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
:type pkgs: list or None
.. versionadded:: 0.16.0
:return: Returns a dict containing the changes.
:rtype: dict
If the package is removed by ``pkg.remove``:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If the package is already uninstalled:
{'<package>': {'current': 'not installed'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
ret = {}
# Make sure name or pkgs is passed
if not name and not pkgs:
return 'Must pass a single package or a list of packages'
# Get package parameters
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
# Get a list of currently installed software for comparison at the end
old = list_pkgs()
# Loop through each package
changed = []
for target in pkg_params:
# Load package information for the package
pkginfo = _get_package_info(target)
# Make sure pkginfo was found
if not pkginfo:
log.error('Unable to locate package {0}'.format(name))
ret[target] = 'Unable to locate package {0}'.format(target)
continue
# Get latest version if no version passed, else use passed version
if not version:
version_num = _get_latest_pkg_version(pkginfo)
else:
version_num = version
if 'latest' in pkginfo and version_num not in pkginfo:
version_num = 'latest'
# Check to see if package is installed on the system
if target not in old:
log.error('{0} {1} not installed'.format(target, version))
ret[target] = {'current': 'not installed'}
continue
else:
if not version_num == old.get(target) \
and not old.get(target) == "Not Found" \
and version_num != 'latest':
log.error('{0} {1} not installed'.format(target, version))
ret[target] = {'current': '{0} not installed'.format(version_num)}
continue
# Get the uninstaller
uninstaller = pkginfo[version_num].get('uninstaller')
# If no uninstaller found, use the installer
if not uninstaller:
uninstaller = pkginfo[version_num].get('installer')
# If still no uninstaller found, fail
if not uninstaller:
log.error('Error: No installer or uninstaller configured for package {0}'.format(name))
ret[target] = {'no uninstaller': version_num}
continue
# Where is the uninstaller
if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):
# Check to see if the uninstaller is cached
cached_pkg = __salt__['cp.is_cached'](uninstaller)
if not cached_pkg:
# It's not cached. Cache it, mate.
cached_pkg = __salt__['cp.cache_file'](uninstaller)
# Check if the uninstaller was cached successfully
if not cached_pkg:
log.error('Unable to cache {0}'.format(uninstaller))
ret[target] = {'unable to cache': uninstaller}
continue
else:
# Run the uninstaller directly (not hosted on salt:, https:, etc.)
cached_pkg = uninstaller
# Fix non-windows slashes
cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg)
# Get parameters for cmd
expanded_cached_pkg = str(os.path.expandvars(cached_pkg))
# Get uninstall flags
uninstall_flags = '{0}'.format(pkginfo[version_num].get('uninstall_flags', ''))
if kwargs.get('extra_uninstall_flags'):
uninstall_flags = '{0} {1}'.format(uninstall_flags,
kwargs.get('extra_uninstall_flags', ""))
# Uninstall the software
# Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False):
# Build Scheduled Task Parameters
if pkginfo[version_num].get('msiexec'):
cmd = 'msiexec.exe'
arguments = ['/x']
arguments.extend(shlex.split(uninstall_flags))
else:
cmd = expanded_cached_pkg
arguments = shlex.split(uninstall_flags)
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=' '.join(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00')
# Run Scheduled Task
__salt__['task.run_wait'](name='update-salt-software')
else:
# Build the install command
cmd = []
if pkginfo[version_num].get('msiexec'):
cmd.extend(['msiexec', '/x', expanded_cached_pkg])
else:
cmd.append(expanded_cached_pkg)
cmd.extend(shlex.split(uninstall_flags))
# Launch the command
result = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
if result:
log.error('Failed to install {0}'.format(target))
log.error('error message: {0}'.format(result))
ret[target] = {'failed': result}
else:
changed.append(target)
# Get a new list of installed software
new = list_pkgs()
tries = 0
difference = salt.utils.compare_dicts(old, new)
while not all(name in difference for name in changed) and tries <= 1000:
new = list_pkgs()
difference = salt.utils.compare_dicts(old, new)
tries += 1
if tries == 1000:
ret['_comment'] = 'Registry not updated.'
# Compare the software list before and after
# Add the difference to ret
ret.update(difference)
return ret
def purge(name=None, pkgs=None, version=None, **kwargs):
'''
Package purges are not supported, this function is identical to
``remove()``.
name
The name of the package to be deleted.
version
The version of the package to be deleted. If this option is used in
combination with the ``pkgs`` option below, then this version will be
applied to all targeted packages.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return remove(name=name, pkgs=pkgs, version=version, **kwargs)
def get_repo_data(saltenv='base'):
'''
Returns the cached winrepo data
CLI Example:
.. code-block:: bash
salt '*' pkg.get_repo_data
'''
#if 'winrepo.data' in __context__:
# return __context__['winrepo.data']
repocache_dir = _get_local_repo_dir(saltenv=saltenv)
winrepo = 'winrepo.p'
try:
with salt.utils.fopen(
os.path.join(repocache_dir, winrepo), 'rb') as repofile:
try:
repodata = msgpack.loads(repofile.read()) or {}
return repodata
except Exception as exc:
log.exception(exc)
return {}
except IOError as exc:
if exc.errno == errno.ENOENT:
# File doesn't exist
raise CommandExecutionError(
'Windows repo cache doesn\'t exist, pkg.refresh_db likely '
'needed'
)
log.error('Not able to read repo file')
log.exception(exc)
return {}
def get_name_map():
return _get_name_map()
def _get_name_map():
'''
Return a reverse map of full pkg names to the names recognized by winrepo.
'''
u_name_map = {}
name_map = get_repo_data().get('name_map', {})
for k in name_map.keys():
u_name_map[k.decode('utf-8')] = name_map[k]
return u_name_map
def _get_package_info(name):
'''
Return package info.
Returns empty map if package not available
TODO: Add option for version
'''
return get_repo_data().get('repo', {}).get(name, {})
def _reverse_cmp_pkg_versions(pkg1, pkg2):
'''
Compare software package versions
'''
if LooseVersion(pkg1) > LooseVersion(pkg2):
return 1
else:
return -1
def _get_latest_pkg_version(pkginfo):
if len(pkginfo) == 1:
return next(six.iterkeys(pkginfo))
return sorted(pkginfo, cmp=_reverse_cmp_pkg_versions).pop()
def compare_versions(ver1='', oper='==', ver2=''):
'''
Compare software package versions
'''
return salt.utils.compare_versions(ver1, oper, ver2)
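# A small, hypothetical sketch (never called) of how the helpers above order version
# strings: the LooseVersion-based comparator sorts '1.10.0' after '1.9.1', unlike a
# plain lexicographic sort, which is what _get_latest_pkg_version relies on.
def _version_sort_example():
    versions = ['1.9.1', '1.10.0', '1.2.3']
    # sorted() is ascending, so the newest version ends up last (Python 2 cmp= keyword).
    return sorted(versions, cmp=_reverse_cmp_pkg_versions)[-1]  # '1.10.0'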
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Images are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('image_size', 299,
"""Provide square images of this size.""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
"""Number of preprocessing threads per tower. """
"""Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 4,
"""Number of parallel readers during train.""")
# Images are preprocessed asynchronously using multiple threads specified by
# --num_preprocess_threads and the resulting processed images are stored in a
# random shuffling queue. The shuffling queue dequeues --batch_size images
# for processing on a given Inception tower. A larger shuffling queue guarantees
# better mixing across examples within a batch and results in slightly higher
# predictive performance in a trained model. Empirically,
# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
# 17.6GB. If the machine is memory limited, then decrease this factor to
# decrease the CPU memory footprint, accordingly.
tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
"""Size of the queue of preprocessed images. """
"""Default is ideal but try smaller values, e.g. """
"""4, 2 or 1, if host memory is constrained. See """
"""comments in code for more details.""")
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of ImageNet images for evaluation.
Use this function as the inputs for evaluating a network.
Note that some (minimal) image preprocessing occurs during evaluation
including central cropping and resizing of the image to fit the network.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
image_size, 3].
labels: 1-D integer Tensor of [FLAGS.batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=False,
num_preprocess_threads=num_preprocess_threads,
num_readers=1)
return images, labels
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of distorted versions of ImageNet images.
Use this function as the inputs for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=True,
num_preprocess_threads=num_preprocess_threads,
num_readers=FLAGS.num_readers)
return images, labels
def decode_jpeg(image_buffer, scope=None):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def distort_color(image, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.op_scope([image], scope, 'distort_color'):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
image: 3-D float Tensor of image
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# Display the bounding box in the first thread only.
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.image_summary('image_with_bounding_boxes', image_with_box)
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an allowed
# range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
tf.image_summary('images_with_distorted_bounding_box',
image_with_distorted_box)
# Crop the image to the specified bounding box.
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
resize_method = thread_id % 4
distorted_image = tf.image.resize_images(distorted_image, height, width,
resize_method)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.image_summary('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, thread_id)
if not thread_id:
tf.image_summary('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def eval_image(image, height, width, scope=None):
"""Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.op_scope([image, height, width], scope, 'eval_image'):
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fraction=0.875)
# Resize the image to the original height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
return image
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
"""Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if bbox is None:
raise ValueError('Please supply a bounding box.')
image = decode_jpeg(image_buffer)
height = FLAGS.image_size
width = FLAGS.image_size
if train:
image = distort_image(image, height, width, bbox, thread_id)
else:
image = eval_image(image, height, width)
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text']
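# A minimal, hypothetical sketch (not used by the input pipeline) of an Example proto
# that parse_example_proto above would accept; the field names mirror feature_map,
# while real protos are produced by the build_image_data.py preprocessing script.
def _make_example_sketch(jpeg_bytes, label, text, xmin, ymin, xmax, ymax):
  def _bytes(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
  def _int64(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
  def _floats(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
  return tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': _bytes(jpeg_bytes),
      'image/class/label': _int64(label),
      'image/class/text': _bytes(text),
      'image/object/bbox/xmin': _floats([xmin]),
      'image/object/bbox/ymin': _floats([ymin]),
      'image/object/bbox/xmax': _floats([xmax]),
      'image/object/bbox/ymax': _floats([ymax]),
  }))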
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
num_readers=1):
"""Contruct batches of training or evaluation examples from the image dataset.
Args:
dataset: instance of Dataset class specifying the dataset.
See dataset.py for details.
batch_size: integer
train: boolean
num_preprocess_threads: integer, total number of preprocessing threads
num_readers: integer, number of parallel readers
Returns:
images: 4-D float Tensor of a batch of images
labels: 1-D integer Tensor of [batch_size].
Raises:
ValueError: if data is not found
"""
with tf.name_scope('batch_processing'):
data_files = dataset.data_files()
if data_files is None:
raise ValueError('No data files found for this dataset')
# Create filename_queue
if train:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=True,
capacity=16)
else:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=False,
capacity=1)
if num_preprocess_threads is None:
num_preprocess_threads = FLAGS.num_preprocess_threads
if num_preprocess_threads % 4:
      raise ValueError('Please make num_preprocess_threads a multiple '
                       'of 4 (%d %% 4 != 0).' % num_preprocess_threads)
if num_readers is None:
num_readers = FLAGS.num_readers
if num_readers < 1:
raise ValueError('Please make num_readers at least 1')
# Approximate number of examples per shard.
examples_per_shard = 1024
# Size the random shuffle queue to balance between good global
# mixing (more examples) and memory use (fewer examples).
# 1 image uses 299*299*3*4 bytes = 1MB
# The default input_queue_memory_factor is 16 implying a shuffling queue
# size: examples_per_shard * 16 * 1MB = 17.6GB
min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
if train:
examples_queue = tf.RandomShuffleQueue(
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string])
else:
examples_queue = tf.FIFOQueue(
capacity=examples_per_shard + 3 * batch_size,
dtypes=[tf.string])
# Create multiple readers to populate the queue of examples.
if num_readers > 1:
enqueue_ops = []
for _ in range(num_readers):
reader = dataset.reader()
_, value = reader.read(filename_queue)
enqueue_ops.append(examples_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(
tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
example_serialized = examples_queue.dequeue()
else:
reader = dataset.reader()
_, example_serialized = reader.read(filename_queue)
images_and_labels = []
for thread_id in range(num_preprocess_threads):
# Parse a serialized Example proto to extract the image and metadata.
image_buffer, label_index, bbox, _ = parse_example_proto(
example_serialized)
image = image_preprocessing(image_buffer, bbox, train, thread_id)
images_and_labels.append([image, label_index])
images, label_index_batch = tf.train.batch_join(
images_and_labels,
batch_size=batch_size,
capacity=2 * num_preprocess_threads * batch_size)
# Reshape images into these desired dimensions.
height = FLAGS.image_size
width = FLAGS.image_size
depth = 3
images = tf.cast(images, tf.float32)
images = tf.reshape(images, shape=[batch_size, height, width, depth])
# Display the training images in the visualizer.
tf.image_summary('images', images)
return images, tf.reshape(label_index_batch, [batch_size])
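# Hypothetical usage sketch (not part of the original file): running one training batch
# through the queue-based pipeline; `dataset` is assumed to expose data_files() and
# reader() as described in the docstrings above.
def _input_pipeline_sketch(dataset):
  images, labels = distorted_inputs(dataset, batch_size=FLAGS.batch_size)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      image_batch, label_batch = sess.run([images, labels])
    finally:
      coord.request_stop()
      coord.join(threads)
  return image_batch.shape, label_batch.shape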
|
|
import math
import interpolators
import scipy.interpolate
import numpy as np
import scipy as sp
from scipy import stats
import sys
import pandas as pd
class PowerCurve:
def __init__(self, powerCurveLevels, referenceDensity, rotorGeometry, powerCol, turbCol, wsCol = None,
countCol = None, fixedTurbulence = None, ratedPower = None,turbulenceRenormalisation=True,
name = 'Undefined', interpolationMode = 'Cubic', required = False):
self.actualPower = powerCol #strings defining column names
self.inputHubWindSpeed = wsCol
self.hubTurbulence = turbCol
self.dataCount = countCol
self.name = name
self.interpolationMode = interpolationMode
self.required = required
        if (self.hubTurbulence is not None) and fixedTurbulence is not None:
raise Exception("Cannot specify both turbulence levels and fixed turbulence")
self.availablePower = AvailablePower(rotorGeometry.area, referenceDensity)
self.powerCurveLevels = powerCurveLevels
self.referenceDensity = referenceDensity
self.rotorGeometry = rotorGeometry
has_pc = len(self.powerCurveLevels.index) != 0
self.firstWindSpeed = min(self.powerCurveLevels.index) if has_pc else None
self.cutInWindSpeed = self.calculateCutInWindSpeed(powerCurveLevels) if has_pc else None
self.cutOutWindSpeed = self.calculateCutOutWindSpeed(powerCurveLevels) if has_pc else None
if self.inputHubWindSpeed is None:
ws_data = None
else:
ws_data = powerCurveLevels[self.inputHubWindSpeed]
print "calculating power function"
self.powerFunction = self.createPowerFunction(powerCurveLevels[self.actualPower], ws_data) if has_pc else None
print "power function calculated"
self.ratedPower = self.getRatedPower(ratedPower, powerCurveLevels[self.actualPower]) if has_pc else None
if 'Data Count' in self.powerCurveLevels.columns:
self.hours = self.powerCurveLevels['Data Count'].sum()*1.0/6.0
else:
self.hours = 0.0
self.turbulenceFunction = self.createTurbulenceFunction(powerCurveLevels[self.hubTurbulence], ws_data) if has_pc else None
if (turbulenceRenormalisation and has_pc):
print "Calculating zero turbulence curve for {0} Power Curve".format(self.name)
try:
self.calcZeroTurbulencePowerCurve()
print "Calculation of zero turbulence curve for {0} Power Curve successful".format(self.name)
except Exception as error:
err_msg ="Calculation of zero turbulence curve for {0} Power Curve unsuccessful: {1}".format(self.name, error)
print self.required
if not self.required:
print err_msg
else:
raise Exception(err_msg)
def calcZeroTurbulencePowerCurve(self):
keys = sorted(self.powerCurveLevels[self.actualPower].keys())
integrationRange = IntegrationRange(0.0, 100.0, 0.1)
self.zeroTurbulencePowerCurve = ZeroTurbulencePowerCurve(keys, self.getArray(self.powerCurveLevels[self.actualPower], keys), self.getArray(self.powerCurveLevels[self.hubTurbulence], keys), integrationRange, self.availablePower)
self.simulatedPower = SimulatedPower(self.zeroTurbulencePowerCurve, integrationRange)
def getRatedPower(self, ratedPower, powerCurveLevels):
        if ratedPower is None:
return powerCurveLevels.max()
else:
return ratedPower
def getThresholdWindSpeed(self):
return float(interpolators.LinearPowerCurveInterpolator(self.powerCurveLevels[self.actualPower].as_matrix(), list(self.powerCurveLevels[self.actualPower].index), self.ratedPower)(0.85*self.ratedPower) * 1.5)
def getTurbulenceLevels(self, powerCurveLevels, turbulenceLevels, fixedTurbulence):
        if fixedTurbulence is not None:
turbulenceLevels = pd.Series(index = powerCurveLevels.index)
for level in powerCurveLevels.index:
turbulenceLevels[level] = fixedTurbulence
else:
turbulenceLevels = turbulenceLevels
return turbulenceLevels
def getArray(self, dictionary, keys):
array = []
for key in keys:
array.append(dictionary[key])
return array
def createTurbulenceFunction(self, y_data, x_data):
if x_data is None:
x_data = pd.Series(y_data.index, index = y_data.index)
x, y = [], []
for i in y_data.index:
if i in x_data.index:
x.append(x_data[i])
else:
x.append(i)
y.append(y_data[i])
return interpolators.LinearTurbulenceInterpolator(x, y)
def createPowerFunction(self, y_data, x_data):
if x_data is None:
x_data = pd.Series(y_data.index, index = y_data.index)
x, y = [], []
for i in y_data.index:
if i in x_data.index and not np.isnan(x_data[i]):
x.append(x_data[i])
else:
x.append(i)
y.append(y_data[i])
print i, x[-1], y[-1]
if self.interpolationMode == 'Linear':
return interpolators.LinearPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
elif self.interpolationMode == 'Cubic':
return interpolators.CubicPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
elif self.interpolationMode == 'Marmander':
return interpolators.MarmanderPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
else:
raise Exception('Unknown interpolation mode: %s' % self.interpolationMode)
def power(self, windSpeed, turbulence = None, extraTurbCorrection = False):
referencePower = self.powerFunction(windSpeed)
        if turbulence is None:
power = referencePower
else:
referenceTurbulence = self.referenceTurbulence(windSpeed)
power = referencePower + self.simulatedPower.power(windSpeed, turbulence) - self.simulatedPower.power(windSpeed, referenceTurbulence)
if extraTurbCorrection: power *= self.calculateExtraTurbulenceCorrection(windSpeed, turbulence, referenceTurbulence)
power = max([0.0, power])
power = min([self.ratedPower, power])
return power
def calculateExtraTurbulenceCorrection(self, windSpeed, turbulence, referenceTurbulence):
saddle = 9.0
xprime = saddle - windSpeed
tprime = (referenceTurbulence - turbulence) / referenceTurbulence
if xprime < 0.0 or tprime < 0.0: return 1.0
a = -0.02 * math.tanh(2.0 * tprime)
b = -0.03 * (math.exp(1.5 * tprime) - 1.0)
loss = a * xprime + b
return 1 + loss
def referenceTurbulence(self, windSpeed):
if windSpeed < self.firstWindSpeed:
return self.turbulenceFunction(self.firstWindSpeed)
elif windSpeed > self.cutOutWindSpeed:
return self.turbulenceFunction(self.cutOutWindSpeed)
else:
return self.turbulenceFunction(windSpeed)
def calculateCutInWindSpeed(self, powerCurveLevels):
return min(self.nonZeroLevels(powerCurveLevels))
def calculateCutOutWindSpeed(self, powerCurveLevels):
return max(self.nonZeroLevels(powerCurveLevels))
def nonZeroLevels(self, powerCurveLevels):
levels = []
for windSpeed in self.powerCurveLevels.index:
if self.powerCurveLevels[self.actualPower][windSpeed] > 0.0:
levels.append(windSpeed)
return levels
def __str__(self):
value = "Wind Speed\tPower\n"
        for windSpeed in self.powerCurveLevels.index:
value += "%0.2f\t%0.2f\n" % (windSpeed, self.power(windSpeed))
return value
class RotorGeometry:
def __init__(self, diameter, hubHeight):
self.diameter = diameter
self.radius = diameter / 2
self.area = math.pi * self.radius ** 2
self.hubHeight = hubHeight
self.lowerTip = self.hubHeight - self.radius
self.upperTip = self.hubHeight + self.radius
def withinRotor(self, height):
return height > self.lowerTip and height < self.upperTip
class InterpolatedNormDist:
def __init__(self):
#speed optimisation
self.xstep = 0.05
self.xend = 5.0
self.xstart = -self.xend
self.steps = int((self.xend - self.xstart) / self.xstep) + 1
x = np.linspace(self.xstart, self.xend, self.steps)
y = []
normDist = NormDist()
for i in range(len(x)):
y.append(normDist.probability(x[i], 0.0, 1.0))
self.f = scipy.interpolate.interp1d(x, y, bounds_error = False, fill_value = 0.0)
def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
oneOverStandardDeviation = 1.0 / windSpeedStandardDeviation
standardDeviationsFromMean = oneOverStandardDeviation * (windSpeed - windSpeedMean)
return self.f(standardDeviationsFromMean) * oneOverStandardDeviation
class DictionaryNormDist:
def __init__(self):
#speed optimisation
self.decimalPlaces = 2
self.xstep = 0.1 ** self.decimalPlaces
self.xend = 5.0
self.xstart = -self.xend
x = np.arange(self.xstart, self.xend + self.xstep, self.xstep)
self.dictionary = {}
normDist = NormDist()
for i in range(len(x)):
self.dictionary[self.key(x[i])] = normDist.probability(x[i], 0.0, 1.0)
def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
oneOverStandardDeviation = self.oneOver(windSpeedStandardDeviation)
standardDeviationsFromMean = self.standardDeviationsFromMean(windSpeed, windSpeedMean, oneOverStandardDeviation)
if self.inDictionary(standardDeviationsFromMean):
return self.lookUpDictionary(standardDeviationsFromMean) * oneOverStandardDeviation
else:
return 0.0
def oneOver(self, value):
return 1.0 / value
def standardDeviationsFromMean(self, value, mean, oneOverStandardDeviation):
return oneOverStandardDeviation * (value - mean)
def inDictionary(self, value):
if value < self.xstart: return False
if value > self.xend: return False
return True
def lookUpDictionary(self, value):
return self.dictionary[self.key(value)]
def key(self, value):
return round(value, self.decimalPlaces)
class IntegrationProbabilities:
def __init__(self, windSpeeds, windSpeedStep):
        #speed optimised normal distribution
self.windSpeeds = windSpeeds
self.a = windSpeedStep / math.sqrt(2.0 * math.pi)
def probabilities(self, windSpeedMean, windSpeedStdDev):
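        # Vectorised normal pdf at every integration wind speed, scaled by the
        # integration step: b * exp(d) is equivalent to
        # step / (sigma * sqrt(2 * pi)) * exp(-(u - mean)^2 / (2 * sigma^2)).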
if windSpeedStdDev == 0:
return np.nan
oneOverStandardDeviation = 1.0 / windSpeedStdDev
oneOverStandardDeviationSq = oneOverStandardDeviation * oneOverStandardDeviation
b = self.a * oneOverStandardDeviation
c = -0.5 * oneOverStandardDeviationSq
windSpeedMinusMeans = (self.windSpeeds - windSpeedMean)
windSpeedMinusMeanSq = windSpeedMinusMeans * windSpeedMinusMeans
d = c * windSpeedMinusMeanSq
return b * np.exp(d)
class IntegrationRange:
def __init__(self, minimumWindSpeed, maximumWindSpeed, windSpeedStep):
self.minimumWindSpeed = minimumWindSpeed
self.maximumWindSpeed = maximumWindSpeed
self.windSpeedStep = windSpeedStep
self.windSpeeds = np.arange(minimumWindSpeed, maximumWindSpeed, windSpeedStep)
self.integrationProbabilities = IntegrationProbabilities(self.windSpeeds, self.windSpeedStep)
def probabilities(self, windSpeedMean, windSpeedStdDev):
return self.integrationProbabilities.probabilities(windSpeedMean, windSpeedStdDev)
class AvailablePower:
def __init__(self, area, density):
self.area = area
self.density = density
def power(self, windSpeed):
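        # Kinetic power flux through the rotor area, 0.5 * rho * A * U^3,
        # converted from W to kW.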
return 0.5 * self.density * self.area * windSpeed * windSpeed * windSpeed / 1000.0
def powerCoefficient(self, windSpeed, actualPower):
return actualPower / self.power(windSpeed)
class ZeroTurbulencePowerCurve:
def __init__(self, referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower):
self.integrationRange = integrationRange
self.initialZeroTurbulencePowerCurve = InitialZeroTurbulencePowerCurve(referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower)
simulatedReferencePowerCurve = SimulatedPowerCurve(referenceWindSpeeds, self.initialZeroTurbulencePowerCurve, referenceTurbulences, integrationRange)
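        # Correct the initial zero-turbulence estimate by the residual between
        # the measured reference powers and the powers obtained by re-simulating
        # the initial curve at the reference turbulence levels.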
self.windSpeeds = referenceWindSpeeds
self.powers = []
for i in range(len(self.windSpeeds)):
power = referencePowers[i] - simulatedReferencePowerCurve.powers[i] + self.initialZeroTurbulencePowerCurve.powers[i]
self.powers.append(power)
#print "%f %f" % (self.windSpeeds[i], self.powers[i])
self.powerFunction = scipy.interpolate.interp1d(self.windSpeeds, self.powers)
self.minWindSpeed = min(self.windSpeeds)
self.maxWindSpeed = max(self.windSpeeds)
self.maxPower = max(self.powers)
self.dfPowerLevels = pd.DataFrame(self.powers, index = self.windSpeeds, columns = ['Power'])
def power(self, windSpeed):
if windSpeed <= self.minWindSpeed:
return 0.0
elif windSpeed >= self.maxWindSpeed:
return self.maxPower
else:
return self.powerFunction(windSpeed)
class InitialZeroTurbulencePowerCurve:
def __init__(self, referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower):
self.maxIterations = 5
self.integrationRange = integrationRange
self.availablePower = availablePower
self.referenceWindSpeeds = referenceWindSpeeds
self.referencePowers = referencePowers
self.referenceTurbulences = referenceTurbulences
self.referencePowerCurveStats = IterationPowerCurveStats(referenceWindSpeeds, referencePowers, availablePower)
#print "%f %f %f" % (self.referencePowerCurveStats.ratedPower, self.referencePowerCurveStats.cutInWindSpeed, self.referencePowerCurveStats.cpMax)
self.selectedStats = self.solve(self.referencePowerCurveStats)
selectedIteration = InitialZeroTurbulencePowerCurveIteration(referenceWindSpeeds,
self.availablePower,
self.selectedStats.ratedPower,
self.selectedStats.cutInWindSpeed,
self.selectedStats.cpMax)
self.ratedWindSpeed = selectedIteration.ratedWindSpeed
self.windSpeeds = selectedIteration.windSpeeds
self.powers = selectedIteration.powers
self.power = selectedIteration.power
def solve(self, previousIterationStats, iterationCount = 1):
if iterationCount > self.maxIterations: raise Exception("Failed to solve initial zero turbulence curve in permitted number of iterations")
iterationZeroTurbCurve = InitialZeroTurbulencePowerCurveIteration(self.integrationRange.windSpeeds,
self.availablePower,
previousIterationStats.ratedPower,
previousIterationStats.cutInWindSpeed,
previousIterationStats.cpMax)
iterationSimulatedCurve = SimulatedPowerCurve(self.referenceWindSpeeds, iterationZeroTurbCurve, self.referenceTurbulences, self.integrationRange)
iterationSimulatedCurveStats = IterationPowerCurveStats(iterationSimulatedCurve.windSpeeds, iterationSimulatedCurve.powers, self.availablePower)
convergenceCheck = IterationPowerCurveConvergenceCheck(self.referencePowerCurveStats, iterationSimulatedCurveStats)
#print "%f %f %f" % (iterationSimulatedCurveStats.ratedPower, iterationSimulatedCurveStats.cutInWindSpeed, iterationSimulatedCurveStats.cpMax)
#print "%s %s %s" % (convergenceCheck.ratedPowerConverged, convergenceCheck.cutInConverged, convergenceCheck.cpMaxConverged)
if convergenceCheck.isConverged:
return previousIterationStats
else:
return self.solve(IncrementedPowerCurveStats(previousIterationStats, convergenceCheck), iterationCount + 1)
class IterationPowerCurveConvergenceCheck:
def __init__(self, referenceStats, iterationStats):
        self.thresholdPowerDiff = referenceStats.ratedPower * 0.001
        self.thresholdCutInWindSpeedDiff = 0.5
        self.thresholdCpMaxDiff = 0.01
        self.ratedPowerDiff = iterationStats.ratedPower - referenceStats.ratedPower
        self.cutInDiff = iterationStats.cutInWindSpeed - referenceStats.cutInWindSpeed
        self.cpMaxDiff = iterationStats.cpMax - referenceStats.cpMax
        self.ratedPowerConverged = abs(self.ratedPowerDiff) < self.thresholdPowerDiff
        self.cutInConverged = abs(self.cutInDiff) <= self.thresholdCutInWindSpeedDiff
        self.cpMaxConverged = abs(self.cpMaxDiff) <= self.thresholdCpMaxDiff
self.isConverged = self.ratedPowerConverged and self.cutInConverged and self.cpMaxConverged
class IncrementedPowerCurveStats:
def __init__(self, previousIterationStats, convergenceCheck):
if convergenceCheck.ratedPowerConverged:
self.ratedPower = previousIterationStats.ratedPower
else:
self.ratedPower = previousIterationStats.ratedPower - convergenceCheck.ratedPowerDiff
if convergenceCheck.cutInConverged:
self.cutInWindSpeed = previousIterationStats.cutInWindSpeed
else:
self.cutInWindSpeed = previousIterationStats.cutInWindSpeed - convergenceCheck.cutInDiff
if convergenceCheck.cpMaxConverged:
self.cpMax = previousIterationStats.cpMax
else:
self.cpMax = previousIterationStats.cpMax - convergenceCheck.cpMaxDiff
class InitialZeroTurbulencePowerCurveIteration:
def __init__(self, windSpeeds, availablePower, ratedPower, cutInWindSpeed, cpMax):
self.windSpeeds = windSpeeds
self.powers = []
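        # Rated wind speed obtained by inverting ratedPower (in kW, hence the
        # factor of 1000) = 0.5 * density * area * cpMax * U^3.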
self.ratedWindSpeed = ((2.0 * ratedPower * 1000.0)/(availablePower.density * cpMax * availablePower.area)) ** (1.0 / 3.0)
self.ratedPower = ratedPower
self.cutInWindSpeed = cutInWindSpeed
self.cpMax = cpMax
self.availablePower = availablePower
for windSpeed in self.windSpeeds:
self.powers.append(self.power(windSpeed))
def power(self, windSpeed):
if windSpeed > self.cutInWindSpeed:
if windSpeed < self.ratedWindSpeed:
return self.availablePower.power(windSpeed) * self.cpMax
else:
return self.ratedPower
else:
return 0.0
class IterationPowerCurveStats:
def __init__(self, windSpeeds, powers, availablePower):
self.ratedPower = max(powers)
thresholdPower = self.ratedPower * 0.001
operatingWindSpeeds = []
cps = []
for i in range(len(windSpeeds)):
windSpeed = windSpeeds[i]
power = powers[i]
            cp = availablePower.powerCoefficient(windSpeed, power)
            cps.append(cp)
if power >= thresholdPower: operatingWindSpeeds.append(windSpeed)
self.cpMax = max(cps)
if len(operatingWindSpeeds) > 0:
self.cutInWindSpeed = min(operatingWindSpeeds)
else:
self.cutInWindSpeed = 0.0
class SimulatedPower:
def __init__(self, zeroTurbulencePowerCurve, integrationRange):
self.zeroTurbulencePowerCurve = zeroTurbulencePowerCurve
self.integrationRange = integrationRange
integrationPowers = []
for windSpeed in np.nditer(self.integrationRange.windSpeeds):
integrationPowers.append(self.zeroTurbulencePowerCurve.power(windSpeed))
self.integrationPowers = np.array(integrationPowers)
def power(self, windSpeed, turbulence):
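        # Expected power: probability-weighted average of the zero-turbulence
        # power curve over a normal wind speed distribution whose standard
        # deviation is windSpeed * turbulence intensity.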
standardDeviation = windSpeed * turbulence
integrationProbabilities = self.integrationRange.probabilities(windSpeed, standardDeviation)
return np.sum(integrationProbabilities * self.integrationPowers) / np.sum(integrationProbabilities)
class SimulatedPowerCurve:
def __init__(self, windSpeeds, zeroTurbulencePowerCurve, turbulences, integrationRange):
simulatedPower = SimulatedPower(zeroTurbulencePowerCurve, integrationRange)
self.windSpeeds = windSpeeds
self.turbulences = turbulences
self.powers = []
for i in range(len(windSpeeds)):
windSpeed = windSpeeds[i]
turbulence = turbulences[i]
power = simulatedPower.power(windSpeed, turbulence)
self.powers.append(power)
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
import itertools
import operator
import random
import re
import zlib
import six
from botocore.exceptions import ClientError
from dateutil.parser import parse
from concurrent.futures import as_completed
import jmespath
from c7n.actions import (
ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction
)
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
FilterRegistry, AgeFilter, ValueFilter, Filter, DefaultVpcBase
)
from c7n.filters.offhours import OffHour, OnHour
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n import query, utils
from c7n.utils import type_schema, filter_empty
from c7n.resources.iam import CheckPermissions
from c7n.resources.securityhub import PostFinding
RE_ERROR_INSTANCE_ID = re.compile("'(?P<instance_id>i-.*?)'")
filters = FilterRegistry('ec2.filters')
actions = ActionRegistry('ec2.actions')
@resources.register('ec2')
class EC2(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'instance'
enum_spec = ('describe_instances', 'Reservations[].Instances[]', None)
id = 'InstanceId'
filter_name = 'InstanceIds'
filter_type = 'list'
name = 'PublicDnsName'
date = 'LaunchTime'
dimension = 'InstanceId'
config_type = "AWS::EC2::Instance"
default_report_fields = (
'CustodianDate',
'InstanceId',
'tag:Name',
'InstanceType',
'LaunchTime',
'VpcId',
'PrivateIpAddress',
)
filter_registry = filters
action_registry = actions
# if we have to do a fallback scenario where tags don't come in describe
permissions = ('ec2:DescribeTags',)
def __init__(self, ctx, data):
super(EC2, self).__init__(ctx, data)
self.queries = QueryFilter.parse(self.data.get('query', []))
def resources(self, query=None):
q = self.resource_query()
if q is not None:
query = query or {}
query['Filters'] = q
return super(EC2, self).resources(query=query)
def resource_query(self):
qf = []
qf_names = set()
# allow same name to be specified multiple times and append the queries
# under the same name
for q in self.queries:
qd = q.query()
if qd['Name'] in qf_names:
                for existing in qf:
                    if qd['Name'] == existing['Name']:
                        existing['Values'].extend(qd['Values'])
else:
qf_names.add(qd['Name'])
qf.append(qd)
return qf
def get_source(self, source_type):
if source_type == 'describe':
return DescribeEC2(self)
elif source_type == 'config':
return query.ConfigSource(self)
raise ValueError('invalid source %s' % source_type)
class DescribeEC2(query.DescribeSource):
def augment(self, resources):
"""EC2 API and AWOL Tags
        While the ec2 api generally returns tags when doing describe_x on
        various resources, it may also silently fail to do so unless a tag
        is used as a filter.
        See the footnote in the official documentation:
        https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#Using_Tags_CLI
        A priori we may be using custodian to ensure tags (including
name), so there isn't a good default to ensure that we will
always get tags from describe_x calls.
"""
# First if we're in event based lambda go ahead and skip this,
# tags can't be trusted in ec2 instances immediately post creation.
if not resources or self.manager.data.get(
'mode', {}).get('type', '') in (
'cloudtrail', 'ec2-instance-state'):
return resources
# AWOL detector, so we don't make extraneous api calls.
resource_count = len(resources)
        search_count = min(int(resource_count * 0.05) + 1, 5)
if search_count > resource_count:
search_count = resource_count
found = False
for r in random.sample(resources, search_count):
if 'Tags' in r:
found = True
break
if found:
return resources
# Okay go and do the tag lookup
client = utils.local_session(self.manager.session_factory).client('ec2')
tag_set = self.manager.retry(
client.describe_tags,
Filters=[{'Name': 'resource-type',
'Values': ['instance']}])['Tags']
resource_tags = {}
for t in tag_set:
t.pop('ResourceType')
rid = t.pop('ResourceId')
resource_tags.setdefault(rid, []).append(t)
m = self.manager.get_model()
for r in resources:
r['Tags'] = resource_tags.get(r[m.id], ())
return resources
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "SecurityGroups[].GroupId"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
RelatedIdsExpression = "SubnetId"
@filters.register('vpc')
class VpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@filters.register('check-permissions')
class ComputePermissions(CheckPermissions):
def get_iam_arns(self, resources):
profile_arn_map = {
r['IamInstanceProfile']['Arn']: r['IamInstanceProfile']['Id']
for r in resources if 'IamInstanceProfile' in r}
# py2 compat on dict ordering
profile_arns = list(profile_arn_map.items())
profile_role_map = {
arn: profile['Roles'][0]['Arn']
for arn, profile in zip(
[p[0] for p in profile_arns],
self.manager.get_resource_manager(
'iam-profile').get_resources(
[p[1] for p in profile_arns]))}
return [
profile_role_map.get(r.get('IamInstanceProfile', {}).get('Arn'))
for r in resources]
@filters.register('state-age')
class StateTransitionAge(AgeFilter):
"""Age an instance has been in the given state.
.. code-block:: yaml
policies:
- name: ec2-state-running-7-days
resource: ec2
filters:
- type: state-age
op: ge
days: 7
"""
RE_PARSE_AGE = re.compile(r"\(.*?\)")
# this filter doesn't use date_attribute, but needs to define it
# to pass AgeFilter's validate method
date_attribute = "dummy"
schema = type_schema(
'state-age',
op={'$ref': '#/definitions/filters_common/comparison_operators'},
days={'type': 'number'})
def get_resource_date(self, i):
v = i.get('StateTransitionReason')
if not v:
return None
dates = self.RE_PARSE_AGE.findall(v)
if dates:
return parse(dates[0][1:-1])
return None
class StateTransitionFilter(object):
"""Filter instances by state.
Try to simplify construction for policy authors by automatically
    filtering elements (filters or actions) to the instance states
they are valid for.
For more details see
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html
"""
valid_origin_states = ()
def filter_instance_state(self, instances, states=None):
states = states or self.valid_origin_states
orig_length = len(instances)
results = [i for i in instances
if i['State']['Name'] in states]
self.log.info("%s %d of %d instances" % (
self.__class__.__name__, len(results), orig_length))
return results
@filters.register('ebs')
class AttachedVolume(ValueFilter):
"""EC2 instances with EBS backed volume
Filters EC2 instances with EBS backed storage devices (non ephemeral)
:Example:
.. code-block:: yaml
policies:
- name: ec2-encrypted-ebs-volumes
resource: ec2
filters:
- type: ebs
key: Encrypted
value: true
"""
schema = type_schema(
'ebs', rinherit=ValueFilter.schema,
**{'operator': {'enum': ['and', 'or']},
'skip-devices': {'type': 'array', 'items': {'type': 'string'}}})
schema_alias = False
def get_permissions(self):
return self.manager.get_resource_manager('ebs').get_permissions()
def process(self, resources, event=None):
self.volume_map = self.get_volume_mapping(resources)
self.skip = self.data.get('skip-devices', [])
self.operator = self.data.get(
'operator', 'or') == 'or' and any or all
return list(filter(self, resources))
def get_volume_mapping(self, resources):
volume_map = {}
manager = self.manager.get_resource_manager('ebs')
for instance_set in utils.chunks(resources, 200):
volume_ids = []
for i in instance_set:
for bd in i.get('BlockDeviceMappings', ()):
if 'Ebs' not in bd:
continue
volume_ids.append(bd['Ebs']['VolumeId'])
for v in manager.get_resources(volume_ids):
if not v['Attachments']:
continue
volume_map.setdefault(
v['Attachments'][0]['InstanceId'], []).append(v)
return volume_map
def __call__(self, i):
volumes = self.volume_map.get(i['InstanceId'])
if not volumes:
return False
if self.skip:
for v in list(volumes):
for a in v.get('Attachments', []):
if a['Device'] in self.skip:
volumes.remove(v)
return self.operator(map(self.match, volumes))
@filters.register('termination-protected')
class DisableApiTermination(Filter):
"""EC2 instances with ``disableApiTermination`` attribute set
Filters EC2 instances with ``disableApiTermination`` attribute set to true.
:Example:
.. code-block:: yaml
policies:
- name: termination-protection-enabled
resource: ec2
filters:
- type: termination-protected
:Example:
.. code-block:: yaml
policies:
- name: termination-protection-NOT-enabled
resource: ec2
filters:
- not:
- type: termination-protected
"""
schema = type_schema('termination-protected')
permissions = ('ec2:DescribeInstanceAttribute',)
def get_permissions(self):
perms = list(self.permissions)
perms.extend(self.manager.get_permissions())
return perms
def process(self, resources, event=None):
client = utils.local_session(
self.manager.session_factory).client('ec2')
return [r for r in resources
if self.is_termination_protection_enabled(client, r)]
def is_termination_protection_enabled(self, client, inst):
attr_val = self.manager.retry(
client.describe_instance_attribute,
Attribute='disableApiTermination',
InstanceId=inst['InstanceId']
)
return attr_val['DisableApiTermination']['Value']
class InstanceImageBase(object):
def prefetch_instance_images(self, instances):
image_ids = [i['ImageId'] for i in instances if 'c7n:instance-image' not in i]
self.image_map = self.get_local_image_mapping(image_ids)
def get_base_image_mapping(self):
return {i['ImageId']: i for i in
self.manager.get_resource_manager('ami').resources()}
def get_instance_image(self, instance):
image = instance.get('c7n:instance-image', None)
if not image:
image = instance['c7n:instance-image'] = self.image_map.get(instance['ImageId'], None)
return image
def get_local_image_mapping(self, image_ids):
base_image_map = self.get_base_image_mapping()
resources = {i: base_image_map[i] for i in image_ids if i in base_image_map}
missing = list(set(image_ids) - set(resources.keys()))
if missing:
loaded = self.manager.get_resource_manager('ami').get_resources(missing, False)
resources.update({image['ImageId']: image for image in loaded})
return resources
@filters.register('image-age')
class ImageAge(AgeFilter, InstanceImageBase):
"""EC2 AMI age filter
Filters EC2 instances based on the age of their AMI image (in days)
:Example:
.. code-block:: yaml
policies:
- name: ec2-ancient-ami
resource: ec2
filters:
- type: image-age
op: ge
days: 90
"""
date_attribute = "CreationDate"
schema = type_schema(
'image-age',
op={'$ref': '#/definitions/filters_common/comparison_operators'},
days={'type': 'number'})
def get_permissions(self):
return self.manager.get_resource_manager('ami').get_permissions()
def process(self, resources, event=None):
self.prefetch_instance_images(resources)
return super(ImageAge, self).process(resources, event)
def get_resource_date(self, i):
image = self.get_instance_image(i)
if image:
return parse(image['CreationDate'])
else:
return parse("2000-01-01T01:01:01.000Z")
@filters.register('image')
class InstanceImage(ValueFilter, InstanceImageBase):
schema = type_schema('image', rinherit=ValueFilter.schema)
schema_alias = False
def get_permissions(self):
return self.manager.get_resource_manager('ami').get_permissions()
def process(self, resources, event=None):
self.prefetch_instance_images(resources)
return super(InstanceImage, self).process(resources, event)
def __call__(self, i):
image = self.get_instance_image(i)
# Finally, if we have no image...
if not image:
self.log.warning(
"Could not locate image for instance:%s ami:%s" % (
i['InstanceId'], i["ImageId"]))
# Match instead on empty skeleton?
return False
return self.match(image)
@filters.register('offhour')
class InstanceOffHour(OffHour, StateTransitionFilter):
"""Custodian OffHour filter
Filters running EC2 instances with the intent to stop at a given hour of
    the day. A list of days to exclude can be included as a list of strings
with the format YYYY-MM-DD. Alternatively, the list (using the same syntax)
can be taken from a specified url.
:Example:
.. code-block:: yaml
policies:
- name: offhour-evening-stop
resource: ec2
filters:
- type: offhour
tag: custodian_downtime
default_tz: et
offhour: 20
actions:
- stop
- name: offhour-evening-stop-skip-holidays
resource: ec2
filters:
- type: offhour
tag: custodian_downtime
default_tz: et
offhour: 20
skip-days: ['2017-12-25']
actions:
- stop
- name: offhour-evening-stop-skip-holidays-from
resource: ec2
filters:
- type: offhour
tag: custodian_downtime
default_tz: et
offhour: 20
skip-days-from:
expr: 0
format: csv
url: 's3://location/holidays.csv'
actions:
- stop
"""
valid_origin_states = ('running',)
def process(self, resources, event=None):
return super(InstanceOffHour, self).process(
self.filter_instance_state(resources))
@filters.register('network-location')
class EC2NetworkLocation(net_filters.NetworkLocation, StateTransitionFilter):
valid_origin_states = ('pending', 'running', 'shutting-down', 'stopping',
'stopped')
def process(self, resources, event=None):
resources = self.filter_instance_state(resources)
if not resources:
return []
return super(EC2NetworkLocation, self).process(resources)
@filters.register('onhour')
class InstanceOnHour(OnHour, StateTransitionFilter):
"""Custodian OnHour filter
Filters stopped EC2 instances with the intent to start at a given hour of
    the day. A list of days to exclude can be included as a list of strings
with the format YYYY-MM-DD. Alternatively, the list (using the same syntax)
can be taken from a specified url.
:Example:
.. code-block:: yaml
policies:
- name: onhour-morning-start
resource: ec2
filters:
- type: onhour
tag: custodian_downtime
default_tz: et
onhour: 6
actions:
- start
- name: onhour-morning-start-skip-holidays
resource: ec2
filters:
- type: onhour
tag: custodian_downtime
default_tz: et
onhour: 6
skip-days: ['2017-12-25']
actions:
- start
- name: onhour-morning-start-skip-holidays-from
resource: ec2
filters:
- type: onhour
tag: custodian_downtime
default_tz: et
onhour: 6
skip-days-from:
expr: 0
format: csv
url: 's3://location/holidays.csv'
actions:
- start
"""
valid_origin_states = ('stopped',)
def process(self, resources, event=None):
return super(InstanceOnHour, self).process(
self.filter_instance_state(resources))
@filters.register('ephemeral')
class EphemeralInstanceFilter(Filter):
"""EC2 instances with ephemeral storage
Filters EC2 instances that have ephemeral storage (an instance-store backed
root device)
:Example:
.. code-block:: yaml
policies:
- name: ec2-ephemeral-instances
resource: ec2
filters:
- type: ephemeral
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
"""
schema = type_schema('ephemeral')
def __call__(self, i):
return self.is_ephemeral(i)
@staticmethod
def is_ephemeral(i):
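        # An instance is treated as ephemeral (instance-store backed) when its
        # root device (/dev/sda1, /dev/xvda or xvda) has no EBS mapping, or when
        # no block device mappings are reported at all.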
for bd in i.get('BlockDeviceMappings', []):
if bd['DeviceName'] in ('/dev/sda1', '/dev/xvda', 'xvda'):
if 'Ebs' in bd:
return False
return True
return True
@filters.register('instance-uptime')
class UpTimeFilter(AgeFilter):
date_attribute = "LaunchTime"
schema = type_schema(
'instance-uptime',
op={'$ref': '#/definitions/filters_common/comparison_operators'},
days={'type': 'number'})
@filters.register('instance-age')
class InstanceAgeFilter(AgeFilter):
"""Filters instances based on their age (in days)
:Example:
.. code-block:: yaml
policies:
- name: ec2-30-days-plus
resource: ec2
filters:
- type: instance-age
op: ge
days: 30
"""
date_attribute = "LaunchTime"
ebs_key_func = operator.itemgetter('AttachTime')
schema = type_schema(
'instance-age',
op={'$ref': '#/definitions/filters_common/comparison_operators'},
days={'type': 'number'},
hours={'type': 'number'},
minutes={'type': 'number'})
def get_resource_date(self, i):
        # LaunchTime only reflects the most recent start of the instance,
        # so use the oldest ebs volume attach time as a better proxy for age
ebs_vols = [
block['Ebs'] for block in i['BlockDeviceMappings']
if 'Ebs' in block]
if not ebs_vols:
# Fall back to using age attribute (ephemeral instances)
return super(InstanceAgeFilter, self).get_resource_date(i)
        # Lexicographical sort on date
ebs_vols = sorted(ebs_vols, key=self.ebs_key_func)
return ebs_vols[0]['AttachTime']
@filters.register('default-vpc')
class DefaultVpc(DefaultVpcBase):
""" Matches if an ec2 database is in the default vpc
"""
schema = type_schema('default-vpc')
def __call__(self, ec2):
return ec2.get('VpcId') and self.match(ec2.get('VpcId')) or False
def deserialize_user_data(user_data):
data = base64.b64decode(user_data)
# try raw and compressed
try:
return data.decode('utf8')
except UnicodeDecodeError:
return zlib.decompress(data, 16).decode('utf8')
@filters.register('user-data')
class UserData(ValueFilter):
"""Filter on EC2 instances which have matching userdata.
Note: It is highly recommended to use regexes with the ?sm flags, since Custodian
uses re.match() and userdata spans multiple lines.
:example:
.. code-block:: yaml
policies:
- name: ec2_userdata_stop
resource: ec2
filters:
- type: user-data
op: regex
value: (?smi).*password=
actions:
- stop
"""
schema = type_schema('user-data', rinherit=ValueFilter.schema)
schema_alias = False
batch_size = 50
annotation = 'c7n:user-data'
permissions = ('ec2:DescribeInstanceAttribute',)
def __init__(self, data, manager):
super(UserData, self).__init__(data, manager)
self.data['key'] = '"c7n:user-data"'
def process(self, resources, event=None):
client = utils.local_session(self.manager.session_factory).client('ec2')
results = []
with self.executor_factory(max_workers=3) as w:
futures = {}
for instance_set in utils.chunks(resources, self.batch_size):
futures[w.submit(
self.process_instance_set,
client, instance_set)] = instance_set
for f in as_completed(futures):
if f.exception():
self.log.error(
"Error processing userdata on instance set %s", f.exception())
results.extend(f.result())
return results
def process_instance_set(self, client, resources):
results = []
for r in resources:
if self.annotation not in r:
try:
result = client.describe_instance_attribute(
Attribute='userData',
InstanceId=r['InstanceId'])
                except ClientError as e:
                    if e.response['Error']['Code'] == 'InvalidInstanceId.NotFound':
                        continue
                    raise
if 'Value' not in result['UserData']:
r[self.annotation] = None
else:
r[self.annotation] = deserialize_user_data(
result['UserData']['Value'])
if self.match(r):
results.append(r)
return results
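# Illustrative sketch (not part of the module above): why the user-data filter
# docstring recommends the (?sm) regex flags. As noted there, the value filter's
# regex op uses re.match, which only anchors at the start of the string, and
# decoded user data usually spans multiple lines, so '.' must be allowed to
# cross newlines. The sample data below is hypothetical.
def _user_data_regex_example():
    sample = "#!/bin/bash\nexport DB_PASSWORD=hunter2\n"
    # Without DOTALL the pattern cannot reach past the first line, so no match.
    assert re.match(r'.*password=', sample, re.IGNORECASE) is None
    # With (?smi) the same intent matches across lines, case-insensitively.
    assert re.match(r'(?smi).*password=', sample) is not None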
@filters.register('singleton')
class SingletonFilter(Filter, StateTransitionFilter):
"""EC2 instances without autoscaling or a recover alarm
Filters EC2 instances that are not members of an autoscaling group
and do not have Cloudwatch recover alarms.
:Example:
.. code-block:: yaml
policies:
- name: ec2-recover-instances
resource: ec2
filters:
- singleton
actions:
- type: tag
key: problem
value: instance is not resilient
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-recover.html
"""
schema = type_schema('singleton')
permissions = ('cloudwatch:DescribeAlarmsForMetric',)
valid_origin_states = ('running', 'stopped', 'pending', 'stopping')
in_asg = ValueFilter({
'key': 'tag:aws:autoscaling:groupName',
'value': 'not-null'}).validate()
def process(self, instances, event=None):
return super(SingletonFilter, self).process(
self.filter_instance_state(instances))
def __call__(self, i):
if self.in_asg(i):
return False
else:
return not self.has_recover_alarm(i)
def has_recover_alarm(self, i):
client = utils.local_session(self.manager.session_factory).client('cloudwatch')
alarms = client.describe_alarms_for_metric(
MetricName='StatusCheckFailed_System',
Namespace='AWS/EC2',
Dimensions=[
{
'Name': 'InstanceId',
'Value': i['InstanceId']
}
]
)
        for alarm in alarms['MetricAlarms']:
            for a in alarm['AlarmActions']:
if (
a.startswith('arn:aws:automate:') and
a.endswith(':ec2:recover')
):
return True
return False
@EC2.filter_registry.register('ssm')
class SsmStatus(ValueFilter):
"""Filter ec2 instances by their ssm status information.
:Example:
    Find ubuntu 18.04 instances that are active with ssm.
.. code-block:: yaml
policies:
- name: ec2-ssm-check
resource: ec2
filters:
- type: ssm
key: PingStatus
value: Online
- type: ssm
key: PlatformName
value: Ubuntu
- type: ssm
key: PlatformVersion
value: 18.04
"""
schema = type_schema('ssm', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('ssm:DescribeInstanceInformation',)
annotation = 'c7n:SsmState'
def process(self, resources, event=None):
client = utils.local_session(self.manager.session_factory).client('ssm')
results = []
for resource_set in utils.chunks(
[r for r in resources if self.annotation not in r], 50):
self.process_resource_set(client, resource_set)
for r in resources:
if self.match(r[self.annotation]):
results.append(r)
return results
def process_resource_set(self, client, resources):
instance_ids = [i['InstanceId'] for i in resources]
info_map = {
info['InstanceId']: info for info in
client.describe_instance_information(
Filters=[{'Key': 'InstanceIds', 'Values': instance_ids}]).get(
'InstanceInformationList', [])}
for r in resources:
r[self.annotation] = info_map.get(r['InstanceId'], {})
@EC2.action_registry.register("post-finding")
class InstanceFinding(PostFinding):
def format_resource(self, r):
ip_addresses = jmespath.search(
"NetworkInterfaces[].PrivateIpAddresses[].PrivateIpAddress", r)
# limit to max 10 ip addresses, per security hub service limits
ip_addresses = ip_addresses and ip_addresses[:10] or ip_addresses
details = {
"Type": r["InstanceType"],
"ImageId": r["ImageId"],
"IpV4Addresses": ip_addresses,
"KeyName": r.get("KeyName"),
"LaunchedAt": r["LaunchTime"].isoformat()
}
if "VpcId" in r:
details["VpcId"] = r["VpcId"]
if "SubnetId" in r:
details["SubnetId"] = r["SubnetId"]
if "IamInstanceProfile" in r:
details["IamInstanceProfileArn"] = r["IamInstanceProfile"]["Arn"]
instance = {
"Type": "AwsEc2Instance",
"Id": "arn:{}:ec2:{}:{}:instance/{}".format(
utils.REGION_PARTITION_MAP.get(self.manager.config.region, 'aws'),
self.manager.config.region,
self.manager.config.account_id,
r["InstanceId"]),
"Region": self.manager.config.region,
"Tags": {t["Key"]: t["Value"] for t in r.get("Tags", [])},
"Details": {"AwsEc2Instance": filter_empty(details)},
}
instance = filter_empty(instance)
return instance
@actions.register('start')
class Start(BaseAction, StateTransitionFilter):
"""Starts a previously stopped EC2 instance.
:Example:
.. code-block:: yaml
policies:
- name: ec2-start-stopped-instances
resource: ec2
query:
- instance-state-name: stopped
actions:
- start
http://docs.aws.amazon.com/cli/latest/reference/ec2/start-instances.html
"""
valid_origin_states = ('stopped',)
schema = type_schema('start')
permissions = ('ec2:StartInstances',)
batch_size = 10
exception = None
def _filter_ec2_with_volumes(self, instances):
return [i for i in instances if len(i['BlockDeviceMappings']) > 0]
def process(self, instances):
instances = self._filter_ec2_with_volumes(
self.filter_instance_state(instances))
if not len(instances):
return
client = utils.local_session(self.manager.session_factory).client('ec2')
failures = {}
# Play nice around aws having insufficient capacity...
for itype, t_instances in utils.group_by(
instances, 'InstanceType').items():
for izone, z_instances in utils.group_by(
t_instances, 'Placement.AvailabilityZone').items():
for batch in utils.chunks(z_instances, self.batch_size):
fails = self.process_instance_set(client, batch, itype, izone)
if fails:
failures["%s %s" % (itype, izone)] = [i['InstanceId'] for i in batch]
if failures:
fail_count = sum(map(len, failures.values()))
msg = "Could not start %d of %d instances %s" % (
fail_count, len(instances), utils.dumps(failures))
self.log.warning(msg)
raise RuntimeError(msg)
def process_instance_set(self, client, instances, itype, izone):
# Setup retry with insufficient capacity as well
        retryable = ('InsufficientInstanceCapacity', 'RequestLimitExceeded',
                     'Client.RequestLimitExceeded')
retry = utils.get_retry(retryable, max_attempts=5)
instance_ids = [i['InstanceId'] for i in instances]
while instance_ids:
try:
retry(client.start_instances, InstanceIds=instance_ids)
break
except ClientError as e:
if e.response['Error']['Code'] in retryable:
# we maxed out on our retries
return True
elif e.response['Error']['Code'] == 'IncorrectInstanceState':
instance_ids.remove(extract_instance_id(e))
else:
raise
def extract_instance_id(state_error):
"Extract an instance id from an error"
instance_id = None
match = RE_ERROR_INSTANCE_ID.search(str(state_error))
if match:
instance_id = match.groupdict().get('instance_id')
if match is None or instance_id is None:
raise ValueError("Could not extract instance id from error: %s" % state_error)
return instance_id
@actions.register('resize')
class Resize(BaseAction, StateTransitionFilter):
"""Change an instance's size.
    An instance can only be resized when it is stopped; this action
can optionally restart an instance if needed to effect the instance
type change. Instances are always left in the run state they were
found in.
    There are a few caveats to be aware of: instance resizing
needs to maintain compatibility for architecture, virtualization type
hvm/pv, and ebs optimization at minimum.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-resize.html
"""
schema = type_schema(
'resize',
**{'restart': {'type': 'boolean'},
'type-map': {'type': 'object'},
'default': {'type': 'string'}})
valid_origin_states = ('running', 'stopped')
def get_permissions(self):
perms = ('ec2:DescribeInstances', 'ec2:ModifyInstanceAttribute')
if self.data.get('restart', False):
perms += ('ec2:StopInstances', 'ec2:StartInstances')
return perms
def process(self, resources):
stopped_instances = self.filter_instance_state(
resources, ('stopped',))
running_instances = self.filter_instance_state(
resources, ('running',))
if self.data.get('restart') and running_instances:
Stop({'terminate-ephemeral': False},
self.manager).process(running_instances)
client = utils.local_session(
self.manager.session_factory).client('ec2')
waiter = client.get_waiter('instance_stopped')
try:
waiter.wait(
InstanceIds=[r['InstanceId'] for r in running_instances])
except ClientError as e:
self.log.exception(
"Exception stopping instances for resize:\n %s" % e)
for instance_set in utils.chunks(itertools.chain(
stopped_instances, running_instances), 20):
self.process_resource_set(instance_set)
if self.data.get('restart') and running_instances:
client.start_instances(
InstanceIds=[i['InstanceId'] for i in running_instances])
return list(itertools.chain(stopped_instances, running_instances))
def process_resource_set(self, instance_set):
type_map = self.data.get('type-map')
default_type = self.data.get('default')
client = utils.local_session(
self.manager.session_factory).client('ec2')
for i in instance_set:
self.log.debug(
"resizing %s %s" % (i['InstanceId'], i['InstanceType']))
new_type = type_map.get(i['InstanceType'], default_type)
if new_type == i['InstanceType']:
continue
try:
client.modify_instance_attribute(
InstanceId=i['InstanceId'],
InstanceType={'Value': new_type})
except ClientError as e:
self.log.exception(
"Exception resizing instance:%s new:%s old:%s \n %s" % (
i['InstanceId'], new_type, i['InstanceType'], e))
@actions.register('stop')
class Stop(BaseAction, StateTransitionFilter):
"""Stops a running EC2 instances
:Example:
.. code-block:: yaml
policies:
- name: ec2-stop-running-instances
resource: ec2
query:
- instance-state-name: running
actions:
- stop
"""
valid_origin_states = ('running',)
schema = type_schema('stop', **{'terminate-ephemeral': {'type': 'boolean'}})
def get_permissions(self):
perms = ('ec2:StopInstances',)
if self.data.get('terminate-ephemeral', False):
perms += ('ec2:TerminateInstances',)
return perms
def split_on_storage(self, instances):
ephemeral = []
persistent = []
for i in instances:
if EphemeralInstanceFilter.is_ephemeral(i):
ephemeral.append(i)
else:
persistent.append(i)
return ephemeral, persistent
def process(self, instances):
instances = self.filter_instance_state(instances)
if not len(instances):
return
client = utils.local_session(
self.manager.session_factory).client('ec2')
# Ephemeral instance can't be stopped.
ephemeral, persistent = self.split_on_storage(instances)
if self.data.get('terminate-ephemeral', False) and ephemeral:
self._run_instances_op(
client.terminate_instances,
[i['InstanceId'] for i in ephemeral])
if persistent:
self._run_instances_op(
client.stop_instances,
[i['InstanceId'] for i in persistent])
return instances
def _run_instances_op(self, op, instance_ids):
while instance_ids:
try:
return self.manager.retry(op, InstanceIds=instance_ids)
except ClientError as e:
if e.response['Error']['Code'] == 'IncorrectInstanceState':
instance_ids.remove(extract_instance_id(e))
raise
@actions.register('reboot')
class Reboot(BaseAction, StateTransitionFilter):
"""reboots a previously running EC2 instance.
:Example:
.. code-block:: yaml
policies:
- name: ec2-reboot-instances
resource: ec2
query:
- instance-state-name: running
actions:
- reboot
http://docs.aws.amazon.com/cli/latest/reference/ec2/reboot-instances.html
"""
valid_origin_states = ('running',)
schema = type_schema('reboot')
permissions = ('ec2:RebootInstances',)
batch_size = 10
exception = None
def _filter_ec2_with_volumes(self, instances):
return [i for i in instances if len(i['BlockDeviceMappings']) > 0]
def process(self, instances):
instances = self._filter_ec2_with_volumes(
self.filter_instance_state(instances))
if not len(instances):
return
client = utils.local_session(self.manager.session_factory).client('ec2')
failures = {}
for batch in utils.chunks(instances, self.batch_size):
fails = self.process_instance_set(client, batch)
if fails:
                failures.setdefault("reboot", []).extend(
                    [i['InstanceId'] for i in batch])
if failures:
fail_count = sum(map(len, failures.values()))
msg = "Could not reboot %d of %d instances %s" % (
fail_count, len(instances),
utils.dumps(failures))
self.log.warning(msg)
raise RuntimeError(msg)
def process_instance_set(self, client, instances):
# Setup retry with insufficient capacity as well
        retryable = ('InsufficientInstanceCapacity', 'RequestLimitExceeded',
                     'Client.RequestLimitExceeded')
retry = utils.get_retry(retryable, max_attempts=5)
instance_ids = [i['InstanceId'] for i in instances]
try:
retry(client.reboot_instances, InstanceIds=instance_ids)
except ClientError as e:
if e.response['Error']['Code'] in retryable:
return True
raise
@actions.register('terminate')
class Terminate(BaseAction, StateTransitionFilter):
""" Terminate a set of instances.
While ec2 offers a bulk delete api, any given instance can be configured
with api deletion termination protection, so we can't use the bulk call
    reliably, we need to process the instances individually. Additionally,
    if we're configured with 'force' then we'll turn off instance termination
protection.
:Example:
.. code-block:: yaml
policies:
- name: ec2-process-termination
resource: ec2
filters:
- type: marked-for-op
op: terminate
actions:
- terminate
"""
valid_origin_states = ('running', 'stopped', 'pending', 'stopping')
schema = type_schema('terminate', force={'type': 'boolean'})
def get_permissions(self):
permissions = ("ec2:TerminateInstances",)
if self.data.get('force'):
permissions += ('ec2:ModifyInstanceAttribute',)
return permissions
def process(self, instances):
instances = self.filter_instance_state(instances)
if not len(instances):
return
client = utils.local_session(
self.manager.session_factory).client('ec2')
if self.data.get('force'):
self.log.info("Disabling termination protection on instances")
self.disable_deletion_protection(
client,
[i for i in instances if i.get('InstanceLifecycle') != 'spot'])
# limit batch sizes to avoid api limits
for batch in utils.chunks(instances, 100):
self.manager.retry(
client.terminate_instances,
                InstanceIds=[i['InstanceId'] for i in batch])
def disable_deletion_protection(self, client, instances):
def process_instance(i):
try:
self.manager.retry(
client.modify_instance_attribute,
InstanceId=i['InstanceId'],
Attribute='disableApiTermination',
Value='false')
except ClientError as e:
if e.response['Error']['Code'] == 'IncorrectInstanceState':
return
raise
with self.executor_factory(max_workers=2) as w:
list(w.map(process_instance, instances))
@actions.register('snapshot')
class Snapshot(BaseAction):
"""Snapshots volumes attached to an EC2 instance
:Example:
.. code-block:: yaml
policies:
- name: ec2-snapshots
resource: ec2
actions:
- type: snapshot
copy-tags:
- Name
"""
schema = type_schema(
'snapshot',
**{'copy-tags': {'type': 'array', 'items': {'type': 'string'}},
'copy-volume-tags': {'type': 'boolean'},
'exclude-boot': {'type': 'boolean', 'default': False}})
permissions = ('ec2:CreateSnapshot', 'ec2:CreateTags',)
def validate(self):
if self.data.get('copy-tags') and 'copy-volume-tags' in self.data:
raise PolicyValidationError(
"Can specify copy-tags or copy-volume-tags, not both")
def process(self, resources):
client = utils.local_session(self.manager.session_factory).client('ec2')
err = None
with self.executor_factory(max_workers=2) as w:
futures = {}
for resource in resources:
futures[w.submit(
self.process_volume_set, client, resource)] = resource
for f in as_completed(futures):
if f.exception():
err = f.exception()
resource = futures[f]
self.log.error(
"Exception creating snapshot set instance:%s \n %s" % (
resource['InstanceId'], err))
if err:
raise err
def process_volume_set(self, client, resource):
params = dict(
InstanceSpecification={
'ExcludeBootVolume': self.data.get('exclude-boot', False),
'InstanceId': resource['InstanceId']})
if 'copy-tags' in self.data:
params['TagSpecifications'] = [{
'ResourceType': 'snapshot',
'Tags': self.get_snapshot_tags(resource)}]
elif self.data.get('copy-volume-tags', True):
params['CopyTagsFromSource'] = 'volume'
try:
result = self.manager.retry(client.create_snapshots, **params)
resource['c7n:snapshots'] = [
s['SnapshotId'] for s in result['Snapshots']]
except ClientError as e:
err_code = e.response['Error']['Code']
if err_code not in (
'InvalidInstanceId.NotFound',
'ConcurrentSnapshotLimitExceeded',
'IncorrectState'):
raise
self.log.warning(
"action:snapshot instance:%s error:%s",
resource['InstanceId'], err_code)
def get_snapshot_tags(self, resource):
tags = [
{'Key': 'custodian_snapshot', 'Value': ''}]
copy_keys = self.data.get('copy-tags', [])
copy_tags = []
if copy_keys:
for t in resource.get('Tags', []):
if t['Key'] in copy_keys:
copy_tags.append(t)
tags.extend(copy_tags)
return tags
@actions.register('modify-security-groups')
class EC2ModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
"""Modify security groups on an instance."""
permissions = ("ec2:ModifyNetworkInterfaceAttribute",)
sg_expr = jmespath.compile("Groups[].GroupId")
def process(self, instances):
if not len(instances):
return
client = utils.local_session(
self.manager.session_factory).client('ec2')
# handle multiple ENIs
interfaces = []
for i in instances:
for eni in i['NetworkInterfaces']:
if i.get('c7n:matched-security-groups'):
eni['c7n:matched-security-groups'] = i[
'c7n:matched-security-groups']
if i.get('c7n:NetworkLocation'):
eni['c7n:NetworkLocation'] = i[
'c7n:NetworkLocation']
interfaces.append(eni)
groups = super(EC2ModifyVpcSecurityGroups, self).get_groups(interfaces)
for idx, i in enumerate(interfaces):
client.modify_network_interface_attribute(
NetworkInterfaceId=i['NetworkInterfaceId'],
Groups=groups[idx])
@actions.register('autorecover-alarm')
class AutorecoverAlarm(BaseAction, StateTransitionFilter):
"""Adds a cloudwatch metric alarm to recover an EC2 instance.
This action takes effect on instances that are NOT part
of an ASG.
:Example:
.. code-block:: yaml
policies:
- name: ec2-autorecover-alarm
resource: ec2
filters:
- singleton
actions:
- autorecover-alarm
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-recover.html
"""
schema = type_schema('autorecover-alarm')
permissions = ('ec2:DescribeInstanceStatus',
'ec2:RecoverInstances',
'ec2:DescribeInstanceRecoveryAttribute')
valid_origin_states = ('running', 'stopped', 'pending', 'stopping')
filter_asg_membership = ValueFilter({
'key': 'tag:aws:autoscaling:groupName',
'value': 'empty'}).validate()
def process(self, instances):
instances = self.filter_asg_membership.process(
self.filter_instance_state(instances))
if not len(instances):
return
client = utils.local_session(
self.manager.session_factory).client('cloudwatch')
for i in instances:
client.put_metric_alarm(
AlarmName='recover-{}'.format(i['InstanceId']),
AlarmDescription='Auto Recover {}'.format(i['InstanceId']),
ActionsEnabled=True,
AlarmActions=[
'arn:{}:automate:{}:ec2:recover'.format(
utils.REGION_PARTITION_MAP.get(
self.manager.config.region, 'aws'),
i['Placement']['AvailabilityZone'][:-1])
],
MetricName='StatusCheckFailed_System',
Namespace='AWS/EC2',
Statistic='Minimum',
Dimensions=[
{
'Name': 'InstanceId',
'Value': i['InstanceId']
}
],
Period=60,
EvaluationPeriods=2,
Threshold=0,
ComparisonOperator='GreaterThanThreshold'
)
@actions.register('set-instance-profile')
class SetInstanceProfile(BaseAction, StateTransitionFilter):
"""Sets (add, modify, remove) the instance profile for a running EC2 instance.
:Example:
.. code-block:: yaml
policies:
- name: set-default-instance-profile
resource: ec2
filters:
- IamInstanceProfile: absent
actions:
- type: set-instance-profile
name: default
https://docs.aws.amazon.com/cli/latest/reference/ec2/associate-iam-instance-profile.html
https://docs.aws.amazon.com/cli/latest/reference/ec2/disassociate-iam-instance-profile.html
"""
schema = type_schema(
'set-instance-profile',
**{'name': {'type': 'string'}})
permissions = (
'ec2:AssociateIamInstanceProfile',
'ec2:DisassociateIamInstanceProfile',
'iam:PassRole')
valid_origin_states = ('running', 'pending', 'stopped', 'stopping')
def process(self, instances):
instances = self.filter_instance_state(instances)
if not len(instances):
return
client = utils.local_session(self.manager.session_factory).client('ec2')
profile_name = self.data.get('name')
profile_instances = [i for i in instances if i.get('IamInstanceProfile')]
if profile_instances:
associations = {
a['InstanceId']: (a['AssociationId'], a['IamInstanceProfile']['Arn'])
for a in client.describe_iam_instance_profile_associations(
Filters=[
{'Name': 'instance-id',
'Values': [i['InstanceId'] for i in profile_instances]},
{'Name': 'state', 'Values': ['associating', 'associated']}]
).get('IamInstanceProfileAssociations', ())}
else:
associations = {}
for i in instances:
if profile_name and i['InstanceId'] not in associations:
client.associate_iam_instance_profile(
IamInstanceProfile={'Name': profile_name},
InstanceId=i['InstanceId'])
continue
# Removing profile and no profile on instance.
elif profile_name is None and i['InstanceId'] not in associations:
continue
p_assoc_id, p_arn = associations[i['InstanceId']]
# Already associated to target profile, skip
if profile_name and p_arn.endswith('/%s' % profile_name):
continue
if profile_name is None:
client.disassociate_iam_instance_profile(
AssociationId=p_assoc_id)
else:
client.replace_iam_instance_profile_association(
IamInstanceProfile={'Name': profile_name},
AssociationId=p_assoc_id)
return instances
@actions.register('propagate-spot-tags')
class PropagateSpotTags(BaseAction):
"""Propagate Tags that are set at Spot Request level to EC2 instances.
:Example:
.. code-block:: yaml
policies:
- name: ec2-spot-instances
resource: ec2
filters:
- State.Name: pending
- instanceLifecycle: spot
actions:
- type: propagate-spot-tags
only_tags:
- Name
- BillingTag
"""
schema = type_schema(
'propagate-spot-tags',
**{'only_tags': {'type': 'array', 'items': {'type': 'string'}}})
permissions = (
'ec2:DescribeInstances',
'ec2:DescribeSpotInstanceRequests',
'ec2:DescribeTags',
'ec2:CreateTags')
MAX_TAG_COUNT = 50
def process(self, instances):
instances = [
i for i in instances if i['InstanceLifecycle'] == 'spot']
if not len(instances):
self.log.warning(
"action:%s no spot instances found, implicit filter by action" % (
self.__class__.__name__.lower()))
return
client = utils.local_session(
self.manager.session_factory).client('ec2')
request_instance_map = {}
for i in instances:
request_instance_map.setdefault(
i['SpotInstanceRequestId'], []).append(i)
# ... and describe the corresponding spot requests ...
requests = client.describe_spot_instance_requests(
Filters=[{
'Name': 'spot-instance-request-id',
'Values': list(request_instance_map.keys())}]).get(
'SpotInstanceRequests', [])
updated = []
for r in requests:
if not r.get('Tags'):
continue
updated.extend(
self.process_request_instances(
client, r, request_instance_map[r['SpotInstanceRequestId']]))
return updated
def process_request_instances(self, client, request, instances):
        # Find the tags we can copy: either all of them, or only those
        # indicated with the 'only_tags' parameter.
        copy_keys = self.data.get('only_tags', [])
        request_tags = {t['Key']: t['Value'] for t in request['Tags']
                        if not t['Key'].startswith('aws:')}
        if copy_keys:
            for k in set(request_tags).difference(copy_keys):
                del request_tags[k]
update_instances = []
for i in instances:
instance_tags = {t['Key']: t['Value'] for t in i.get('Tags', [])}
# We may overwrite tags, but if the operation changes no tag,
# we will not proceed.
for k, v in request_tags.items():
if k not in instance_tags or instance_tags[k] != v:
update_instances.append(i['InstanceId'])
if len(set(instance_tags) | set(request_tags)) > self.MAX_TAG_COUNT:
self.log.warning(
"action:%s instance:%s too many tags to copy (> 50)" % (
self.__class__.__name__.lower(),
i['InstanceId']))
continue
for iset in utils.chunks(update_instances, 20):
client.create_tags(
DryRun=self.manager.config.dryrun,
Resources=iset,
Tags=[{'Key': k, 'Value': v} for k, v in request_tags.items()])
self.log.debug(
"action:%s tags updated on instances:%r" % (
self.__class__.__name__.lower(),
update_instances))
return update_instances
# Valid EC2 Query Filters
# http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeInstances.html
EC2_VALID_FILTERS = {
'architecture': ('i386', 'x86_64'),
'availability-zone': str,
'iam-instance-profile.arn': str,
'image-id': str,
'instance-id': str,
'instance-lifecycle': ('spot',),
'instance-state-name': (
'pending',
'terminated',
'running',
'shutting-down',
'stopping',
'stopped'),
'instance.group-id': str,
'instance.group-name': str,
'tag-key': str,
'tag-value': str,
'tag:': str,
'tenancy': ('dedicated', 'default', 'host'),
'vpc-id': str}
class QueryFilter(object):
@classmethod
def parse(cls, data):
results = []
for d in data:
if not isinstance(d, dict):
raise ValueError(
"EC2 Query Filter Invalid structure %s" % d)
results.append(cls(d).validate())
return results
def __init__(self, data):
self.data = data
self.key = None
self.value = None
def validate(self):
if not len(list(self.data.keys())) == 1:
raise PolicyValidationError(
"EC2 Query Filter Invalid %s" % self.data)
self.key = list(self.data.keys())[0]
self.value = list(self.data.values())[0]
if self.key not in EC2_VALID_FILTERS and not self.key.startswith(
'tag:'):
raise PolicyValidationError(
"EC2 Query Filter invalid filter name %s" % (self.data))
if self.value is None:
raise PolicyValidationError(
"EC2 Query Filters must have a value, use tag-key"
" w/ tag name as value for tag present checks"
" %s" % self.data)
return self
def query(self):
value = self.value
if isinstance(self.value, six.string_types):
value = [self.value]
return {'Name': self.key, 'Values': value}
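# Illustrative sketch (not part of the module above): how the `query` block of
# an ec2 policy is normalised into DescribeInstances filters by QueryFilter.
# The filter keys and values below are examples only.
def _query_filter_example():
    parsed = QueryFilter.parse([
        {'instance-state-name': 'running'},
        {'tag:Environment': 'production'}])
    # Each entry becomes {'Name': <filter name>, 'Values': [<value>]}
    return [q.query() for q in parsed]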
@filters.register('instance-attribute')
class InstanceAttribute(ValueFilter):
"""EC2 Instance Value FIlter on a given instance attribute.
Filters EC2 Instances with the given instance attribute
:Example:
.. code-block:: yaml
policies:
- name: ec2-unoptimized-ebs
resource: ec2
filters:
- type: instance-attribute
attribute: ebsOptimized
key: "Value"
value: false
"""
valid_attrs = (
'instanceType',
'kernel',
'ramdisk',
'userData',
'disableApiTermination',
'instanceInitiatedShutdownBehavior',
'rootDeviceName',
'blockDeviceMapping',
'productCodes',
'sourceDestCheck',
'groupSet',
'ebsOptimized',
'sriovNetSupport',
'enaSupport')
schema = type_schema(
'instance-attribute',
rinherit=ValueFilter.schema,
attribute={'enum': valid_attrs},
required=('attribute',))
schema_alias = False
def get_permissions(self):
return ('ec2:DescribeInstanceAttribute',)
def process(self, resources, event=None):
attribute = self.data['attribute']
self.get_instance_attribute(resources, attribute)
return [resource for resource in resources
if self.match(resource['c7n:attribute-%s' % attribute])]
def get_instance_attribute(self, resources, attribute):
client = utils.local_session(
self.manager.session_factory).client('ec2')
for resource in resources:
instance_id = resource['InstanceId']
fetched_attribute = self.manager.retry(
client.describe_instance_attribute,
Attribute=attribute,
InstanceId=instance_id)
keys = list(fetched_attribute.keys())
keys.remove('ResponseMetadata')
keys.remove('InstanceId')
resource['c7n:attribute-%s' % attribute] = fetched_attribute[
keys[0]]
@resources.register('launch-template-version')
class LaunchTemplate(query.QueryResourceManager):
class resource_type(query.TypeInfo):
id = 'LaunchTemplateId'
name = 'LaunchTemplateName'
service = 'ec2'
date = 'CreateTime'
enum_spec = (
'describe_launch_templates', 'LaunchTemplates', None)
filter_name = 'LaunchTemplateIds'
filter_type = 'list'
arn_type = "launch-template"
def augment(self, resources):
client = utils.local_session(
self.session_factory).client('ec2')
template_versions = []
for r in resources:
template_versions.extend(
client.describe_launch_template_versions(
LaunchTemplateId=r['LaunchTemplateId']).get(
'LaunchTemplateVersions', ()))
return template_versions
def get_resources(self, rids, cache=True):
# Launch template versions have a compound primary key
#
# Support one of four forms of resource ids:
#
# - array of launch template ids
# - array of tuples (launch template id, version id)
# - array of dicts (with LaunchTemplateId and VersionNumber)
# - array of dicts (with LaunchTemplateId and LatestVersionNumber)
#
        # If an alias version ($Latest, $Default) is given, the alias will be
        # preserved as an annotation on the returned object 'c7n:VersionAlias'
        # (see the illustrative sketch after this class).
if not rids:
return []
t_versions = {}
if isinstance(rids[0], tuple):
for tid, tversion in rids:
t_versions.setdefault(tid, []).append(tversion)
elif isinstance(rids[0], dict):
for tinfo in rids:
t_versions.setdefault(
tinfo['LaunchTemplateId'], []).append(
tinfo.get('VersionNumber', tinfo.get('LatestVersionNumber')))
elif isinstance(rids[0], six.string_types):
for tid in rids:
t_versions[tid] = []
client = utils.local_session(self.session_factory).client('ec2')
results = []
# We may end up fetching duplicates on $Latest and $Version
for tid, tversions in t_versions.items():
try:
ltv = client.describe_launch_template_versions(
LaunchTemplateId=tid, Versions=tversions).get(
'LaunchTemplateVersions')
except ClientError as e:
if e.response['Error']['Code'] == "InvalidLaunchTemplateId.NotFound":
continue
if e.response['Error']['Code'] == "InvalidLaunchTemplateId.VersionNotFound":
continue
raise
if not tversions:
tversions = [str(t['VersionNumber']) for t in ltv]
for tversion, t in zip(tversions, ltv):
if not tversion.isdigit():
t['c7n:VersionAlias'] = tversion
results.append(t)
return results
def get_asg_templates(self, asgs):
templates = {}
for a in asgs:
t = None
if 'LaunchTemplate' in a:
t = a['LaunchTemplate']
elif 'MixedInstancesPolicy' in a:
t = a['MixedInstancesPolicy'][
'LaunchTemplate']['LaunchTemplateSpecification']
if t is None:
continue
templates.setdefault(
(t['LaunchTemplateId'],
t['Version']), []).append(a['AutoScalingGroupName'])
return templates
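# Illustrative sketch, not part of the original module: the four resource id
# shapes accepted by LaunchTemplate.get_resources above. The template ids and
# version numbers are hypothetical.
def _example_launch_template_rids():
    as_ids = ['lt-0123456789abcdef0']
    as_tuples = [('lt-0123456789abcdef0', '$Latest')]
    as_version_dicts = [{'LaunchTemplateId': 'lt-0123456789abcdef0',
                         'VersionNumber': 3}]
    as_latest_dicts = [{'LaunchTemplateId': 'lt-0123456789abcdef0',
                        'LatestVersionNumber': 4}]
    return as_ids, as_tuples, as_version_dicts, as_latest_dicts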
@resources.register('ec2-reserved')
class ReservedInstance(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
name = id = 'ReservedInstancesId'
date = 'Start'
enum_spec = (
'describe_reserved_instances', 'ReservedInstances', None)
filter_name = 'ReservedInstancesIds'
filter_type = 'list'
arn_type = "reserved-instances"
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import math
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
# If it's available, load the specialized feature generator. If this doesn't
# work, try building with bazel instead of running the Python script directly.
try:
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op # pylint:disable=g-import-not-at-top
except ImportError:
frontend_op = None
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
SILENCE_LABEL = '_silence_'
SILENCE_INDEX = 0
UNKNOWN_WORD_LABEL = '_unknown_'
UNKNOWN_WORD_INDEX = 1
BACKGROUND_NOISE_DIR_NAME = '_background_noise_'
RANDOM_SEED = 59185
def prepare_words_list(wanted_words):
"""Prepends common tokens to the custom word list.
Args:
wanted_words: List of strings containing the custom words.
Returns:
List with the standard silence and unknown tokens added.
"""
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
def which_set(filename, validation_percentage, testing_percentage):
"""Determines which data partition the file should belong to.
We want to keep files in the same training, validation, or testing sets even
if new ones are added over time. This makes it less likely that testing
samples will accidentally be reused in training when long runs are restarted
for example. To keep this stability, a hash of the filename is taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
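# Illustrative sketch, not part of the original module: because the assignment
# hashes the filename with any '_nohash_' suffix removed, recordings from the
# same source always land in the same partition. The paths are hypothetical.
def _example_which_set():
  first = which_set('data/yes/bobby_nohash_0.wav', 10, 10)
  second = which_set('data/yes/bobby_nohash_1.wav', 10, 10)
  assert first == second  # Both hash to 'bobby', so they share a partition.
  return first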
def load_wav_file(filename):
"""Loads an audio file and returns a float PCM-encoded array of samples.
Args:
filename: Path to the .wav file to load.
Returns:
Numpy array holding the sample data as floats between -1.0 and 1.0.
"""
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)
return sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: filename}).audio.flatten()
def save_wav_file(filename, wav_data, sample_rate):
"""Saves audio sample data to a .wav audio file.
Args:
filename: Path to save the file to.
wav_data: 2D array of float PCM-encoded audio data.
sample_rate: Samples per second to encode in the file.
"""
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
sample_rate_placeholder = tf.placeholder(tf.int32, [])
wav_data_placeholder = tf.placeholder(tf.float32, [None, 1])
wav_encoder = contrib_audio.encode_wav(wav_data_placeholder,
sample_rate_placeholder)
wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)
sess.run(
wav_saver,
feed_dict={
wav_filename_placeholder: filename,
sample_rate_placeholder: sample_rate,
wav_data_placeholder: np.reshape(wav_data, (-1, 1))
})
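# Round-trip sketch, not part of the original module: synthesize a short tone,
# write it with save_wav_file, and read it back with load_wav_file. The path
# and tone parameters are hypothetical.
def _example_wav_round_trip(tmp_path='/tmp/example_tone.wav'):
  sample_rate = 16000
  samples = 0.1 * np.sin(
      2.0 * np.pi * 440.0 * np.arange(sample_rate) / sample_rate)
  save_wav_file(tmp_path, samples.astype(np.float32).reshape(-1, 1),
                sample_rate)
  return load_wav_file(tmp_path)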
def get_features_range(model_settings):
"""Returns the expected min/max for generated features.
Args:
model_settings: Information about the current model being trained.
Returns:
Min/max float pair holding the range of features.
Raises:
Exception: If preprocessing mode isn't recognized.
"""
# TODO(petewarden): These values have been derived from the observed ranges
# of spectrogram and MFCC inputs. If the preprocessing pipeline changes,
# they may need to be updated.
if model_settings['preprocess'] == 'average':
features_min = 0.0
features_max = 127.5
elif model_settings['preprocess'] == 'mfcc':
features_min = -247.0
features_max = 30.0
elif model_settings['preprocess'] == 'micro':
features_min = 0.0
features_max = 26.0
else:
raise Exception('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (model_settings['preprocess']))
return features_min, features_max
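# Minimal sketch, not part of the original module: one plausible use of these
# bounds is to linearly rescale generated features into an unsigned 8-bit
# range for quantized inference. The single-key settings dict is a
# hypothetical stand-in for a full model_settings dictionary.
def _example_quantize_features(features):
  features_min, features_max = get_features_range({'preprocess': 'micro'})
  scaled = (features - features_min) / (features_max - features_min)
  return np.clip(scaled * 255.0, 0.0, 255.0).astype(np.uint8)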
class AudioProcessor(object):
"""Handles loading, partitioning, and preparing audio training data."""
def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
wanted_words, validation_percentage, testing_percentage,
model_settings, summaries_dir):
if data_dir:
self.data_dir = data_dir
self.maybe_download_and_extract_dataset(data_url, data_dir)
self.prepare_data_index(silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage)
self.prepare_background_data()
self.prepare_processing_graph(model_settings, summaries_dir)
def maybe_download_and_extract_dataset(self, data_url, dest_directory):
"""Download and extract data set tar file.
If the data set we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a
directory.
    If the data_url is None, don't download anything and expect the data
directory to contain the correct files already.
Args:
data_url: Web location of the tar file containing the data set.
dest_directory: File path to extract data to.
"""
if not data_url:
return
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
try:
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
except:
tf.logging.error('Failed to download URL: %s to folder: %s', data_url,
filepath)
tf.logging.error('Please make sure you have enough free space and'
' an internet connection')
raise
print()
statinfo = os.stat(filepath)
tf.logging.info('Successfully downloaded %s (%d bytes)', filename,
statinfo.st_size)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def prepare_data_index(self, silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage):
"""Prepares a list of the samples organized by set and label.
The training loop needs a list of all the available data, organized by
which partition it should belong to, and with ground truth labels attached.
    This function analyzes the folders below the `data_dir`, figures out the
    right labels for each file based on the name of the subdirectory it
    belongs to, and uses a stable hash to assign it to a data set partition.
Args:
silence_percentage: How much of the resulting data should be background.
unknown_percentage: How much should be audio outside the wanted classes.
wanted_words: Labels of the classes we want to be able to recognize.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
Dictionary containing a list of file information for each set partition,
and a lookup map for each class to determine its numeric index.
Raises:
Exception: If expected files are not found.
"""
# Make sure the shuffling and picking of unknowns is deterministic.
random.seed(RANDOM_SEED)
wanted_words_index = {}
for index, wanted_word in enumerate(wanted_words):
wanted_words_index[wanted_word] = index + 2
self.data_index = {'validation': [], 'testing': [], 'training': []}
unknown_index = {'validation': [], 'testing': [], 'training': []}
all_words = {}
# Look through all the subfolders to find audio samples
search_path = os.path.join(self.data_dir, '*', '*.wav')
for wav_path in gfile.Glob(search_path):
_, word = os.path.split(os.path.dirname(wav_path))
word = word.lower()
# Treat the '_background_noise_' folder as a special case, since we expect
# it to contain long audio samples we mix in to improve training.
if word == BACKGROUND_NOISE_DIR_NAME:
continue
all_words[word] = True
set_index = which_set(wav_path, validation_percentage, testing_percentage)
# If it's a known class, store its detail, otherwise add it to the list
# we'll use to train the unknown label.
if word in wanted_words_index:
self.data_index[set_index].append({'label': word, 'file': wav_path})
else:
unknown_index[set_index].append({'label': word, 'file': wav_path})
if not all_words:
raise Exception('No .wavs found at ' + search_path)
for index, wanted_word in enumerate(wanted_words):
if wanted_word not in all_words:
raise Exception('Expected to find ' + wanted_word +
' in labels but only found ' +
', '.join(all_words.keys()))
# We need an arbitrary file to load as the input for the silence samples.
# It's multiplied by zero later, so the content doesn't matter.
silence_wav_path = self.data_index['training'][0]['file']
for set_index in ['validation', 'testing', 'training']:
set_size = len(self.data_index[set_index])
silence_size = int(math.ceil(set_size * silence_percentage / 100))
for _ in range(silence_size):
self.data_index[set_index].append({
'label': SILENCE_LABEL,
'file': silence_wav_path
})
# Pick some unknowns to add to each partition of the data set.
random.shuffle(unknown_index[set_index])
unknown_size = int(math.ceil(set_size * unknown_percentage / 100))
self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])
# Make sure the ordering is random.
for set_index in ['validation', 'testing', 'training']:
random.shuffle(self.data_index[set_index])
# Prepare the rest of the result data structure.
self.words_list = prepare_words_list(wanted_words)
self.word_to_index = {}
for word in all_words:
if word in wanted_words_index:
self.word_to_index[word] = wanted_words_index[word]
else:
self.word_to_index[word] = UNKNOWN_WORD_INDEX
self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
def prepare_background_data(self):
"""Searches a folder for background noise audio, and loads it into memory.
It's expected that the background audio samples will be in a subdirectory
named '_background_noise_' inside the 'data_dir' folder, as .wavs that match
the sample rate of the training data, but can be much longer in duration.
If the '_background_noise_' folder doesn't exist at all, this isn't an
error, it's just taken to mean that no background noise augmentation should
be used. If the folder does exist, but it's empty, that's treated as an
error.
Returns:
List of raw PCM-encoded audio samples of background noise.
Raises:
Exception: If files aren't found in the folder.
"""
self.background_data = []
background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
if not os.path.exists(background_dir):
return self.background_data
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)
search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
'*.wav')
for wav_path in gfile.Glob(search_path):
wav_data = sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
self.background_data.append(wav_data)
if not self.background_data:
raise Exception('No background wav files were found in ' + search_path)
def prepare_processing_graph(self, model_settings, summaries_dir):
"""Builds a TensorFlow graph to apply the input distortions.
Creates a graph that loads a WAVE file, decodes it, scales the volume,
shifts it in time, adds in background noise, calculates a spectrogram, and
then builds an MFCC fingerprint from that.
    This must be called with an active TensorFlow session running, and it
    creates multiple placeholder inputs and one output:
- wav_filename_placeholder_: Filename of the WAV to load.
- foreground_volume_placeholder_: How loud the main clip should be.
- time_shift_padding_placeholder_: Where to pad the clip.
- time_shift_offset_placeholder_: How much to move the clip in time.
- background_data_placeholder_: PCM sample data for background noise.
- background_volume_placeholder_: Loudness of mixed-in background.
- output_: Output 2D fingerprint of processed audio.
Args:
model_settings: Information about the current model being trained.
summaries_dir: Path to save training summary information to.
Raises:
ValueError: If the preprocessing mode isn't recognized.
Exception: If the preprocessor wasn't compiled in.
"""
with tf.get_default_graph().name_scope('data'):
desired_samples = model_settings['desired_samples']
self.wav_filename_placeholder_ = tf.placeholder(
tf.string, [], name='wav_filename')
wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
wav_decoder = contrib_audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
# Allow the audio sample's volume to be adjusted.
self.foreground_volume_placeholder_ = tf.placeholder(
tf.float32, [], name='foreground_volume')
scaled_foreground = tf.multiply(wav_decoder.audio,
self.foreground_volume_placeholder_)
# Shift the sample's start position, and pad any gaps with zeros.
self.time_shift_padding_placeholder_ = tf.placeholder(
tf.int32, [2, 2], name='time_shift_padding')
self.time_shift_offset_placeholder_ = tf.placeholder(
tf.int32, [2], name='time_shift_offset')
padded_foreground = tf.pad(
scaled_foreground,
self.time_shift_padding_placeholder_,
mode='CONSTANT')
sliced_foreground = tf.slice(padded_foreground,
self.time_shift_offset_placeholder_,
[desired_samples, -1])
# Mix in background noise.
self.background_data_placeholder_ = tf.placeholder(
tf.float32, [desired_samples, 1], name='background_data')
self.background_volume_placeholder_ = tf.placeholder(
tf.float32, [], name='background_volume')
background_mul = tf.multiply(self.background_data_placeholder_,
self.background_volume_placeholder_)
background_add = tf.add(background_mul, sliced_foreground)
background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
# Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
spectrogram = contrib_audio.audio_spectrogram(
background_clamp,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
tf.summary.image(
'spectrogram', tf.expand_dims(spectrogram, -1), max_outputs=1)
# The number of buckets in each FFT row in the spectrogram will depend on
# how many input samples there are in each window. This can be quite
# large, with a 160 sample window producing 127 buckets for example. We
# don't need this level of detail for classification, so we often want to
# shrink them down to produce a smaller result. That's what this section
# implements. One method is to use average pooling to merge adjacent
# buckets, but a more sophisticated approach is to apply the MFCC
# algorithm to shrink the representation.
if model_settings['preprocess'] == 'average':
self.output_ = tf.nn.pool(
tf.expand_dims(spectrogram, -1),
window_shape=[1, model_settings['average_window_width']],
strides=[1, model_settings['average_window_width']],
pooling_type='AVG',
padding='SAME')
tf.summary.image('shrunk_spectrogram', self.output_, max_outputs=1)
elif model_settings['preprocess'] == 'mfcc':
self.output_ = contrib_audio.mfcc(
spectrogram,
wav_decoder.sample_rate,
dct_coefficient_count=model_settings['fingerprint_width'])
tf.summary.image(
'mfcc', tf.expand_dims(self.output_, -1), max_outputs=1)
elif model_settings['preprocess'] == 'micro':
if not frontend_op:
raise Exception(
'Micro frontend op is currently not available when running'
' TensorFlow directly from Python, you need to build and run'
' through Bazel'
)
sample_rate = model_settings['sample_rate']
window_size_ms = (model_settings['window_size_samples'] *
1000) / sample_rate
window_step_ms = (model_settings['window_stride_samples'] *
1000) / sample_rate
int16_input = tf.cast(tf.multiply(background_clamp, 32768), tf.int16)
micro_frontend = frontend_op.audio_microfrontend(
int16_input,
sample_rate=sample_rate,
window_size=window_size_ms,
window_step=window_step_ms,
num_channels=model_settings['fingerprint_width'],
out_scale=1,
out_type=tf.float32)
self.output_ = tf.multiply(micro_frontend, (10.0 / 256.0))
tf.summary.image(
'micro',
tf.expand_dims(tf.expand_dims(self.output_, -1), 0),
max_outputs=1)
else:
raise ValueError(
            'Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (model_settings['preprocess']))
# Merge all the summaries and write them out to /tmp/retrain_logs (by
# default)
self.merged_summaries_ = tf.summary.merge_all(scope='data')
if summaries_dir:
self.summary_writer_ = tf.summary.FileWriter(summaries_dir + '/data',
tf.get_default_graph())
def set_size(self, mode):
"""Calculates the number of samples in the dataset partition.
Args:
mode: Which partition, must be 'training', 'validation', or 'testing'.
Returns:
Number of samples in the partition.
"""
return len(self.data_index[mode])
def get_data(self, how_many, offset, model_settings, background_frequency,
background_volume_range, time_shift, mode, sess):
"""Gather samples from the data set, applying transformations as needed.
When the mode is 'training', a random selection of samples will be returned,
otherwise the first N clips in the partition will be used. This ensures that
validation always uses the same samples, reducing noise in the metrics.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
offset: Where to start when fetching deterministically.
model_settings: Information about the current model being trained.
background_frequency: How many clips will have background noise, 0.0 to
1.0.
background_volume_range: How loud the background noise will be.
time_shift: How much to randomly shift the clips by in time.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
sess: TensorFlow session that was active when processor was created.
Returns:
List of sample data for the transformed samples, and list of label indexes
Raises:
ValueError: If background samples are too short.
"""
# Pick one of the partitions to choose samples from.
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = max(0, min(how_many, len(candidates) - offset))
# Data and labels will be populated and returned.
data = np.zeros((sample_count, model_settings['fingerprint_size']))
labels = np.zeros(sample_count)
desired_samples = model_settings['desired_samples']
use_background = self.background_data and (mode == 'training')
pick_deterministically = (mode != 'training')
    # Use the processing graph we created earlier to repeatedly generate the
# final output sample data we'll use in training.
for i in xrange(offset, offset + sample_count):
# Pick which audio sample to use.
if how_many == -1 or pick_deterministically:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
# If we're time shifting, set up the offset for this sample.
if time_shift > 0:
time_shift_amount = np.random.randint(-time_shift, time_shift)
else:
time_shift_amount = 0
if time_shift_amount > 0:
time_shift_padding = [[time_shift_amount, 0], [0, 0]]
time_shift_offset = [0, 0]
else:
time_shift_padding = [[0, -time_shift_amount], [0, 0]]
time_shift_offset = [-time_shift_amount, 0]
input_dict = {
self.wav_filename_placeholder_: sample['file'],
self.time_shift_padding_placeholder_: time_shift_padding,
self.time_shift_offset_placeholder_: time_shift_offset,
}
# Choose a section of background noise to mix in.
if use_background or sample['label'] == SILENCE_LABEL:
background_index = np.random.randint(len(self.background_data))
background_samples = self.background_data[background_index]
if len(background_samples) <= model_settings['desired_samples']:
raise ValueError(
'Background sample is too short! Need more than %d'
' samples but only %d were found' %
(model_settings['desired_samples'], len(background_samples)))
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_clipped = background_samples[background_offset:(
background_offset + desired_samples)]
background_reshaped = background_clipped.reshape([desired_samples, 1])
if sample['label'] == SILENCE_LABEL:
background_volume = np.random.uniform(0, 1)
elif np.random.uniform(0, 1) < background_frequency:
background_volume = np.random.uniform(0, background_volume_range)
else:
background_volume = 0
else:
background_reshaped = np.zeros([desired_samples, 1])
background_volume = 0
input_dict[self.background_data_placeholder_] = background_reshaped
input_dict[self.background_volume_placeholder_] = background_volume
# If we want silence, mute out the main sample but leave the background.
if sample['label'] == SILENCE_LABEL:
input_dict[self.foreground_volume_placeholder_] = 0
else:
input_dict[self.foreground_volume_placeholder_] = 1
# Run the graph to produce the output audio.
summary, data_tensor = sess.run(
[self.merged_summaries_, self.output_], feed_dict=input_dict)
self.summary_writer_.add_summary(summary)
data[i - offset, :] = data_tensor.flatten()
label_index = self.word_to_index[sample['label']]
labels[i - offset] = label_index
return data, labels
def get_features_for_wav(self, wav_filename, model_settings, sess):
"""Applies the feature transformation process to the input_wav.
Runs the feature generation process (generally producing a spectrogram from
the input samples) on the WAV file. This can be useful for testing and
verifying implementations being run on other platforms.
Args:
wav_filename: The path to the input audio file.
model_settings: Information about the current model being trained.
sess: TensorFlow session that was active when processor was created.
Returns:
Numpy data array containing the generated features.
"""
desired_samples = model_settings['desired_samples']
input_dict = {
self.wav_filename_placeholder_: wav_filename,
self.time_shift_padding_placeholder_: [[0, 0], [0, 0]],
self.time_shift_offset_placeholder_: [0, 0],
self.background_data_placeholder_: np.zeros([desired_samples, 1]),
self.background_volume_placeholder_: 0,
self.foreground_volume_placeholder_: 1,
}
# Run the graph to produce the output audio.
data_tensor = sess.run([self.output_], feed_dict=input_dict)
return data_tensor
def get_unprocessed_data(self, how_many, model_settings, mode):
"""Retrieve sample data for the given partition, with no transformations.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
model_settings: Information about the current model being trained.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
Returns:
List of sample data for the samples, and list of labels in one-hot form.
"""
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = how_many
desired_samples = model_settings['desired_samples']
words_list = self.words_list
data = np.zeros((sample_count, desired_samples))
labels = []
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = contrib_audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
foreground_volume_placeholder = tf.placeholder(tf.float32, [])
scaled_foreground = tf.multiply(wav_decoder.audio,
foreground_volume_placeholder)
for i in range(sample_count):
if how_many == -1:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
input_dict = {wav_filename_placeholder: sample['file']}
if sample['label'] == SILENCE_LABEL:
input_dict[foreground_volume_placeholder] = 0
else:
input_dict[foreground_volume_placeholder] = 1
data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()
label_index = self.word_to_index[sample['label']]
labels.append(words_list[label_index])
return data, labels
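# Minimal usage sketch, not part of the original module: pull one training
# batch through an AudioProcessor. The settings dict, dataset path, and
# summary directory are hypothetical placeholders; model_settings must carry
# the keys prepare_processing_graph and get_data expect (desired_samples,
# window_size_samples, window_stride_samples, fingerprint_size, preprocess,
# and so on), and data_dir is assumed to follow the Speech Commands layout,
# including its '_background_noise_' folder.
def _example_training_batch(model_settings, data_dir='/tmp/speech_dataset',
                            summaries_dir='/tmp/retrain_logs'):
  audio_processor = AudioProcessor(
      data_url=None, data_dir=data_dir, silence_percentage=10,
      unknown_percentage=10, wanted_words=['yes', 'no'],
      validation_percentage=10, testing_percentage=10,
      model_settings=model_settings, summaries_dir=summaries_dir)
  with tf.Session() as sess:
    # 100 samples with up to 100 samples of random time shift; 80% of clips
    # get background noise mixed in at up to 10% volume.
    return audio_processor.get_data(
        100, 0, model_settings, background_frequency=0.8,
        background_volume_range=0.1, time_shift=100, mode='training',
        sess=sess)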
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
import webob.exc
from oslo.config import cfg
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.notifiers import nova
from neutron.openstack.common import log as logging
from neutron.openstack.common.notifier import api as notifier_api
from neutron import policy
from neutron import quota
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
def __init__(self, plugin, collection, resource, attr_info,
allow_bulk=False, member_actions=None, parent=None,
allow_pagination=False, allow_sorting=False):
if member_actions is None:
member_actions = []
self._plugin = plugin
self._collection = collection.replace('-', '_')
self._resource = resource.replace('-', '_')
self._attr_info = attr_info
self._allow_bulk = allow_bulk
self._allow_pagination = allow_pagination
self._allow_sorting = allow_sorting
self._native_bulk = self._is_native_bulk_supported()
self._native_pagination = self._is_native_pagination_supported()
self._native_sorting = self._is_native_sorting_supported()
self._policy_attrs = [name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')]
self._publisher_id = notifier_api.publisher_id('network')
# use plugin's dhcp notifier, if this is already instantiated
agent_notifiers = getattr(plugin, 'agent_notifiers', {})
self._dhcp_agent_notifier = (
agent_notifiers.get(const.AGENT_TYPE_DHCP) or
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self._nova_notifier = nova.Notifier()
self._member_actions = member_actions
self._primary_key = self._get_primary_key()
if self._allow_pagination and self._native_pagination:
            # Native pagination needs native sorting support
if not self._native_sorting:
raise exceptions.Invalid(
_("Native pagination depend on native sorting")
)
if not self._allow_sorting:
LOG.info(_("Allow sorting is enabled because native "
"pagination requires native sorting"))
self._allow_sorting = True
if parent:
self._parent_id_name = '%s_id' % parent['member_name']
parent_part = '_%s' % parent['member_name']
else:
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.iteritems():
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
native_pagination_attr_name = ("_%s__native_pagination_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_pagination_attr_name, False)
def _is_native_sorting_supported(self):
native_sorting_attr_name = ("_%s__native_sorting_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_sorting_attr_name, False)
def _exclude_attributes_by_policy(self, context, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
data,
might_not_exist=True):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(context, data, fields_to_strip)
def _filter_attributes(self, context, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in data.iteritems()
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Ensure policy engine is initialized
policy.init()
# Fetch the resource and verify if the user can access it
try:
resource = self._item(request, id, True)
except exceptions.PolicyNotAuthorized:
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
body = kwargs.pop('body', None)
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.enforce(request.context, name, resource)
return getattr(self._plugin, name)(*arg_list, **kwargs)
return _handle_action
else:
raise AttributeError
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
def _items(self, request, do_authz=False, parent_id=None):
"""Retrieves and formats a list of elements of the requested entity."""
# NOTE(salvatore-orlando): The following ensures that fields which
# are needed for authZ policy validation are not stripped away by the
# plugin before returning.
original_fields, fields_to_add = self._do_field_list(
api_common.list_args(request, 'fields'))
filters = api_common.get_filters(request, self._attr_info,
['fields', 'sort_key', 'sort_dir',
'limit', 'marker', 'page_reverse'])
kwargs = {'filters': filters,
'fields': original_fields}
sorting_helper = self._get_sorting_helper(request)
pagination_helper = self._get_pagination_helper(request)
sorting_helper.update_args(kwargs)
sorting_helper.update_fields(original_fields, fields_to_add)
pagination_helper.update_args(kwargs)
pagination_helper.update_fields(original_fields, fields_to_add)
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
obj_list = obj_getter(request.context, **kwargs)
obj_list = sorting_helper.sort(obj_list)
obj_list = pagination_helper.paginate(obj_list)
# Check authz
if do_authz:
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
# Omit items from list that should not be visible
obj_list = [obj for obj in obj_list
if policy.check(request.context,
self._plugin_handlers[self.SHOW],
obj,
plugin=self._plugin)]
# Use the first element in the list for discriminating which attributes
# should be filtered out because of authZ policies
# fields_to_add contains a list of attributes added for request policy
# checks but that were not required by the user. They should be
# therefore stripped
fields_to_strip = fields_to_add or []
if obj_list:
fields_to_strip += self._exclude_attributes_by_policy(
request.context, obj_list[0])
collection = {self._collection:
[self._filter_attributes(
request.context, obj,
fields_to_strip=fields_to_strip)
for obj in obj_list]}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
collection[self._collection + "_links"] = pagination_links
return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context, action, obj)
return obj
def _send_dhcp_notification(self, context, data, methodname):
if cfg.CONF.dhcp_agent_notification:
if self._collection in data:
for body in data[self._collection]:
item = {self._resource: body}
self._dhcp_agent_notifier.notify(context, item, methodname)
else:
self._dhcp_agent_notifier.notify(context, data, methodname)
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return self._items(request, True, parent_id)
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
fields_to_strip = self._exclude_attributes_by_policy(
request.context, item)
objs.append(self._filter_attributes(
request.context,
obj_creator(request.context, **kwargs),
fields_to_strip=fields_to_strip))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception as ex:
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id} if parent_id
else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the exception
LOG.exception(_("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might have been created or not in the db.
            # We need a way of ensuring that, if it has been created,
            # it is then deleted.
raise ex
def create(self, request, body=None, **kwargs):
"""Creates a new instance of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
notifier_api.notify(request.context,
self._publisher_id,
self._resource + '.create.start',
notifier_api.CONF.default_notification_level,
body)
body = Controller.prepare_request_body(request.context, body, True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]
# Check authz
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
deltas = {}
bulk = True
else:
items = [body]
bulk = False
# Ensure policy engine is initialized
policy.init()
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
policy.enforce(request.context,
action,
item[self._resource])
try:
tenant_id = item[self._resource]['tenant_id']
count = quota.QUOTAS.count(request.context, self._resource,
self._plugin, self._collection,
tenant_id)
if bulk:
delta = deltas.get(tenant_id, 0) + 1
deltas[tenant_id] = delta
else:
delta = 1
kwargs = {self._resource: count + delta}
except exceptions.QuotaResourceUnknown as e:
# We don't want to quota this resource
LOG.debug(e)
else:
quota.QUOTAS.limit_check(request.context,
item[self._resource]['tenant_id'],
**kwargs)
def notify(create_result):
notifier_method = self._resource + '.create.end'
notifier_api.notify(request.context,
self._publisher_id,
notifier_method,
notifier_api.CONF.default_notification_level,
create_result)
self._send_dhcp_notification(request.context,
create_result,
notifier_method)
return create_result
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
if self._collection in body and self._native_bulk:
# plugin does atomic bulk create operations
obj_creator = getattr(self._plugin, "%s_bulk" % action)
objs = obj_creator(request.context, body, **kwargs)
# Use first element of list to discriminate attributes which
# should be removed because of authZ policies
fields_to_strip = self._exclude_attributes_by_policy(
request.context, objs[0])
return notify({self._collection: [self._filter_attributes(
request.context, obj, fields_to_strip=fields_to_strip)
for obj in objs]})
else:
obj_creator = getattr(self._plugin, action)
if self._collection in body:
# Emulate atomic bulk behavior
objs = self._emulate_bulk_create(obj_creator, request,
body, parent_id)
return notify({self._collection: objs})
else:
kwargs.update({self._resource: body})
obj = obj_creator(request.context, **kwargs)
self._nova_notifier.send_network_change(
action, {}, {self._resource: obj})
return notify({self._resource: self._view(
request.context, obj)})
def delete(self, request, id, **kwargs):
"""Deletes the specified entity."""
notifier_api.notify(request.context,
self._publisher_id,
self._resource + '.delete.start',
notifier_api.CONF.default_notification_level,
{self._resource + '_id': id})
action = self._plugin_handlers[self.DELETE]
# Check authz
policy.init()
parent_id = kwargs.get(self._parent_id_name)
obj = self._item(request, id, parent_id=parent_id)
try:
policy.enforce(request.context,
action,
obj)
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_deleter = getattr(self._plugin, action)
obj_deleter(request.context, id, **kwargs)
notifier_method = self._resource + '.delete.end'
notifier_api.notify(request.context,
self._publisher_id,
notifier_method,
notifier_api.CONF.default_notification_level,
{self._resource + '_id': id})
result = {self._resource: self._view(request.context, obj)}
self._nova_notifier.send_network_change(action, {}, result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
def update(self, request, id, body=None, **kwargs):
"""Updates the specified entity's attributes."""
parent_id = kwargs.get(self._parent_id_name)
try:
payload = body.copy()
except AttributeError:
msg = _("Invalid format: %s") % request.body
raise exceptions.BadRequest(resource='body', msg=msg)
payload['id'] = id
notifier_api.notify(request.context,
self._publisher_id,
self._resource + '.update.start',
notifier_api.CONF.default_notification_level,
payload)
body = Controller.prepare_request_body(request.context, body, False,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.UPDATE]
# Load object to check authz
# but pass only attributes in the original body and required
# by the policy engine to the policy 'brain'
field_list = [name for (name, value) in self._attr_info.iteritems()
if (value.get('required_by_policy') or
value.get('primary_key') or
'default' not in value)]
# Ensure policy engine is initialized
policy.init()
orig_obj = self._item(request, id, field_list=field_list,
parent_id=parent_id)
orig_object_copy = copy.copy(orig_obj)
orig_obj.update(body[self._resource])
# Make a list of attributes to be updated to inform the policy engine
# which attributes are set explicitly so that it can distinguish them
# from the ones that are set to their default values.
orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
try:
policy.enforce(request.context,
action,
orig_obj)
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_updater = getattr(self._plugin, action)
kwargs = {self._resource: body}
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj = obj_updater(request.context, id, **kwargs)
result = {self._resource: self._view(request.context, obj)}
notifier_method = self._resource + '.update.end'
notifier_api.notify(request.context,
self._publisher_id,
notifier_method,
notifier_api.CONF.default_notification_level,
result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
self._nova_notifier.send_network_change(
action, orig_object_copy, result)
return result
@staticmethod
def _populate_tenant_id(context, res_dict, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
else:
msg = _("Running without keystone AuthN requires "
" that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
allow_bulk=False):
"""Verifies required attributes are in request body.
        Also checks that an attribute is only specified if it is allowed
        for the given operation (create/update).
        Attributes with default values are considered optional.
        The body argument must be the deserialized body.
"""
collection = resource + "s"
if not body:
raise webob.exc.HTTPBadRequest(_("Resource body required"))
LOG.debug(_("Request body: %(body)s"), {'body': body})
prep_req_body = lambda x: Controller.prepare_request_body(
context,
x if resource in x else {resource: x},
is_create,
resource,
attr_info,
allow_bulk)
if collection in body:
if not allow_bulk:
raise webob.exc.HTTPBadRequest(_("Bulk operation "
"not supported"))
bulk_body = [prep_req_body(item) for item in body[collection]]
if not bulk_body:
raise webob.exc.HTTPBadRequest(_("Resources required"))
return {collection: bulk_body}
res_dict = body.get(resource)
if res_dict is None:
msg = _("Unable to find '%s' in request body") % resource
raise webob.exc.HTTPBadRequest(msg)
Controller._populate_tenant_id(context, res_dict, is_create)
Controller._verify_attributes(res_dict, attr_info)
if is_create: # POST
for attr, attr_vals in attr_info.iteritems():
if attr_vals['allow_post']:
if ('default' not in attr_vals and
attr not in res_dict):
msg = _("Failed to parse request. Required "
"attribute '%s' not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr,
attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
else: # PUT
for attr, attr_vals in attr_info.iteritems():
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in attr_info.iteritems():
if (attr not in res_dict or
res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
res = attributes.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = _("Invalid input for %(attr)s. "
"Reason: %(reason)s.") % msg_dict
raise webob.exc.HTTPBadRequest(msg)
return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if (request.context.is_admin or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
msg = _("Tenant %(tenant_id)s not allowed to "
"create %(resource)s on this network")
raise webob.exc.HTTPForbidden(msg % {
"tenant_id": resource_item['tenant_id'],
"resource": self._resource,
})
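# Illustrative sketch, not part of the original module: normalizing a create
# body with prepare_request_body against a toy attribute map. The resource
# name, attribute map, and _FakeContext below are hypothetical stand-ins.
class _FakeContext(object):
    tenant_id = 'tenant-1'
    is_admin = False
def _example_prepare_widget_body():
    attr_info = {
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': ''},
        'color': {'allow_post': True, 'allow_put': True,
                  'is_visible': True, 'default': 'blue'},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': True, 'required_by_policy': True},
    }
    body = {'widget': {'name': 'w1'}}
    # Missing optional attributes get their defaults and tenant_id is filled
    # in from the context, so the returned body is fully populated.
    return Controller.prepare_request_body(
        _FakeContext(), body, True, 'widget', attr_info)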
def create_resource(collection, resource, plugin, params, allow_bulk=False,
member_actions=None, parent=None, allow_pagination=False,
allow_sorting=False):
controller = Controller(plugin, collection, resource, params, allow_bulk,
member_actions=member_actions, parent=parent,
allow_pagination=allow_pagination,
allow_sorting=allow_sorting)
return wsgi_resource.Resource(controller, FAULT_MAP)
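# Minimal wiring sketch, not part of the original module: exposing a plugin
# collection through create_resource. The collection name, attribute map, and
# plugin are hypothetical; real attribute maps usually come from an extension
# or from attributes.RESOURCE_ATTRIBUTE_MAP.
def _example_widget_resource(plugin):
    widget_attrs = {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True, 'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': True, 'required_by_policy': True},
    }
    return create_resource('widgets', 'widget', plugin, widget_attrs,
                           allow_bulk=True, allow_pagination=True,
                           allow_sorting=True)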
|
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import proc
from collections import OrderedDict
import json
import logging
import os
import subprocess
from . import sst_bf
from . import sst_cp
ENV_LSCPU_SYSFS = "CMK_DEV_LSCPU_SYSFS"
# Returns a dictionary of socket_id (int) to intel.topology.Socket.
def discover(sst_bf_discovery):
isol = isolcpus()
if isol:
logging.info("Isolated logical cores: {}".format(
",".join([str(c) for c in isol])))
sst_bf_cpus = []
if sst_bf_discovery:
sst_bf_cpus = sst_bf.cpus()
if sst_bf_cpus:
logging.info("High priority SST-BF cores: {}".format(
",".join([str(c) for c in sst_bf_cpus])))
return parse(lscpu(), isol, sst_bf_cpus)
class Platform:
def __init__(self, sockets):
self.sockets = sockets
def has_isolated_cores(self):
for socket in self.sockets.values():
if socket.has_isolated_cores():
return True
return False
def has_sst_bf_cores(self):
for socket in self.sockets.values():
if socket.has_sst_bf_cores():
return True
return False
def has_isolated_sst_bf_cores(self):
for socket in self.sockets.values():
if socket.has_isolated_sst_bf_cores():
return True
return False
def get_socket(self, id):
if id not in self.sockets:
return None
return self.sockets[id]
def get_cores(self, mode="packed"):
return self.get_cores_general(mode, False)
def get_isolated_cores(self, mode="packed"):
return self.get_cores_general(mode, True)
def get_isolated_sst_bf_cores(self, mode="packed"):
return self.get_cores_general(mode, True, True)
def get_cores_general(self, mode, isolated=False, sst_bf=False):
if mode not in ["spread", "packed"]:
logging.warning("Wrong mode has been selected."
"Fallback to vertical")
mode = "packed"
if mode == "packed":
return self.allocate_packed(isolated, sst_bf)
if mode == "spread":
return self.allocate_spread(isolated, sst_bf)
def allocate_packed(self, isolated_cores=False, sst_bf_cores=False):
cores = []
for socket in self.sockets.values():
if isolated_cores and sst_bf_cores:
cores += socket.get_isolated_sst_bf_cores()
elif isolated_cores and not sst_bf_cores:
cores += socket.get_isolated_cores()
elif not isolated_cores and sst_bf_cores:
cores += socket.get_sst_bf_cores()
else:
cores += socket.get_cores()
return cores
def allocate_spread(self, isolated_cores=False, sst_bf_cores=False):
output_cores = []
socket_cores = {}
for socket in self.sockets:
if isolated_cores and sst_bf_cores:
socket_cores[socket] = \
self.sockets[socket].get_isolated_sst_bf_cores()
elif isolated_cores and not sst_bf_cores:
socket_cores[socket] = \
self.sockets[socket].get_isolated_cores()
elif not isolated_cores and sst_bf_cores:
socket_cores[socket] = self.sockets[socket].get_sst_bf_cores()
else:
socket_cores[socket] = self.sockets[socket].get_cores()
while len(socket_cores) > 0:
sockets = [socket for socket in socket_cores]
for socket in sockets:
if len(socket_cores[socket]) == 0:
del(socket_cores[socket])
continue
output_cores.append(socket_cores[socket][0])
del(socket_cores[socket][0])
return output_cores
def get_shared_cores(self):
cores = []
for socket in self.sockets.values():
cores += socket.get_shared_cores()
return cores
def get_cores_from_pool(self, pool):
cores = []
for socket in self.sockets.values():
cores += socket.get_cores_from_pool(pool)
return cores
def get_epp_cores(self, epp_value, num_required,
unavailable_cores=[]):
return sst_cp.get_epp_cores(self, epp_value, num_required,
unavailable_cores)
def get_epp_cores_no_limit(self, epp_value):
return sst_cp.get_epp_cores_no_limit(self, epp_value)
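# Illustrative sketch, not part of the original module: "packed" keeps each
# socket's cores together while "spread" round-robins across sockets. The
# two-socket lscpu snippet below is hypothetical.
def _example_allocation_modes():
    fake_lscpu = "\n".join([
        "# CPU,Core,Socket,Node,,L1d,L1i,L2,L3",
        "0,0,0,0,,0,0,0,0",
        "1,1,0,0,,1,1,1,0",
        "2,2,1,1,,2,2,2,1",
        "3,3,1,1,,3,3,3,1",
    ])
    platform = parse(fake_lscpu)
    packed = [c.core_id for c in platform.get_cores(mode="packed")]
    spread = [c.core_id for c in platform.get_cores(mode="spread")]
    return packed, spread  # expected: [0, 1, 2, 3] and [0, 2, 1, 3]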
class Socket:
def __init__(self, socket_id, cores=None):
if not cores:
cores = {}
self.socket_id = socket_id
self.cores = OrderedDict(
sorted(cores.items(), key=lambda pair: pair[1].core_id))
def has_isolated_cores(self):
for core in self.cores.values():
if core.is_isolated():
return True
return False
def has_sst_bf_cores(self):
for core in self.cores.values():
if core.is_sst_bf():
return True
return False
def has_isolated_sst_bf_cores(self):
for core in self.cores.values():
if core.is_sst_bf() and core.is_isolated():
return True
return False
def get_cores(self):
return [core for core in self.cores.values()]
def get_isolated_cores(self):
return [core for core in self.cores.values() if core.is_isolated()]
def get_sst_bf_cores(self):
return [core for core in self.cores.values() if core.is_sst_bf()]
def get_isolated_sst_bf_cores(self):
return [core for core in self.cores.values()
if core.is_sst_bf() and core.is_isolated()]
def get_shared_cores(self):
return [core for core in self.cores.values() if not core.is_isolated()]
def get_cores_from_pool(self, pool):
return [core for core in self.cores.values() if core.pool == pool]
def as_dict(self, include_pool=True):
return {
"id": self.socket_id,
"cores": [c.as_dict(include_pool) for c in self.cores.values()]
}
def json(self):
return json.dumps(self.as_dict(), indent=2, sort_keys=True)
class Core:
def __init__(self, core_id, cpus=None):
if not cpus:
cpus = {}
self.core_id = core_id
self.pool = None
self.cpus = OrderedDict(
sorted(cpus.items(), key=lambda pair: pair[1].cpu_id))
def cpu_ids(self):
return list(self.cpus.keys())
def is_isolated(self):
if len(self.cpus) == 0:
return False
for cpu_id in self.cpus:
if not self.cpus[cpu_id].isolated:
return False
return True
def is_sst_bf(self):
if len(self.cpus) == 0:
return False
for cpu_id in self.cpus:
if not self.cpus[cpu_id].sst_bf:
return False
return True
def as_dict(self, include_pool=True):
result = {
"id": self.core_id,
"cpus": [c.as_dict() for c in self.cpus.values()]
}
if include_pool:
result["pool"] = self.pool
return result
class CPU:
def __init__(self, cpu_id):
self.cpu_id = cpu_id
self.isolated = False
self.sst_bf = False
def as_dict(self):
if self.sst_bf:
return {
"id": self.cpu_id,
"isolated": self.isolated,
"sst_bf": self.sst_bf,
}
else:
return {
"id": self.cpu_id,
"isolated": self.isolated,
}
# Returns a Platform built from a map of socket id (integer) to Socket objects.
# lscpu has the following format:
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # CPU,Core,Socket,Node,,L1d,L1i,L2,L3
# 0,0,0,0,,0,0,0,0
# 1,1,0,0,,1,1,1,0
def parse(lscpu_output, isolated_cpus=None, sst_bf_cpus=None):
if not isolated_cpus:
isolated_cpus = []
if not sst_bf_cpus:
sst_bf_cpus = []
sockets = {}
for line in lscpu_output.split("\n"):
if line and not line.startswith("#"):
cpuinfo = line.split(",")
socket_id = int(cpuinfo[2])
core_id = int(cpuinfo[1])
cpu_id = int(cpuinfo[0])
if socket_id not in sockets:
sockets[socket_id] = Socket(socket_id)
socket = sockets[socket_id]
if core_id not in socket.cores:
socket.cores[core_id] = Core(core_id)
core = socket.cores[core_id]
cpu = CPU(cpu_id)
if cpu.cpu_id in isolated_cpus:
cpu.isolated = True
if cpu.cpu_id in sst_bf_cpus:
cpu.sst_bf = True
core.cpus[cpu_id] = cpu
return Platform(sockets)
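# Illustrative note (not part of the original module): feeding the two sample
# data lines from the comment above through parse() yields a Platform with a
# single socket 0 holding cores 0 and 1, each carrying one CPU (0 and 1).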
def lscpu():
sys_fs_path = os.getenv(ENV_LSCPU_SYSFS)
if sys_fs_path is None:
cmd_out = subprocess.check_output("lscpu -p", shell=True)
else:
        cmd_out = subprocess.check_output(
            "lscpu -p -s %s" % sys_fs_path, shell=True)
return cmd_out.decode("UTF-8")
def isolcpus():
with open(os.path.join(proc.procfs(), "cmdline")) as f:
return parse_isolcpus(f.read())
# Returns list of isolated cpu ids from /proc/cmdline content.
def parse_isolcpus(cmdline):
cpus = []
# Ensure that newlines are removed.
cmdline_stripped = cmdline.rstrip()
cmdline_fields = cmdline_stripped.split()
for cmdline_field in cmdline_fields:
pair = cmdline_field.split("=")
if len(pair) != 2:
continue
key = pair[0]
value = pair[1]
if key == "isolcpus":
cpus_str = value.split(",")
cpus += parse_cpus_str(cpus_str)
# Get unique cpu_ids from list
cpus = list(set(cpus))
return cpus
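# Illustrative example (not part of the original module):
#   parse_isolcpus("BOOT_IMAGE=/vmlinuz ro isolcpus=2,4-6") -> [2, 4, 5, 6]
# (ordering is not guaranteed because duplicates are removed via set()).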
def parse_cpus_str(cpus_str):
cpus = []
for cpu_id in cpus_str:
if "-" not in cpu_id:
cpus.append(int(cpu_id))
continue
cpu_range = cpu_id.split("-")
cpu_range = ' '.join(cpu_range).split()
if len(cpu_range) != 2:
continue
cpus += range(int(cpu_range[0]), int(cpu_range[1])+1)
return cpus
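# Illustrative usage sketch (not part of the original module). It shows how
# parse() and Platform.get_cores() fit together for a fabricated two-socket
# lscpu layout; the names _demo_allocation and demo_lscpu are hypothetical.
def _demo_allocation():
    demo_lscpu = "\n".join([
        "# CPU,Core,Socket,Node,,L1d,L1i,L2,L3",
        "0,0,0,0,,0,0,0,0",
        "1,1,0,0,,1,1,1,0",
        "2,2,1,1,,2,2,2,1",
        "3,3,1,1,,3,3,3,1",
    ])
    platform = parse(demo_lscpu, isolated_cpus=[1, 3])
    # "packed" walks socket by socket, "spread" round-robins across sockets.
    packed = [c.core_id for c in platform.get_cores(mode="packed")]
    spread = [c.core_id for c in platform.get_cores(mode="spread")]
    isolated = [c.core_id for c in platform.get_isolated_cores()]
    # Expected (socket iteration order permitting): packed == [0, 1, 2, 3],
    # spread == [0, 2, 1, 3], isolated == [1, 3].
    return packed, spread, isolated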
|
|
# -*- coding: utf-8 -*-
"""
Vertical electrical sounding (VES) manager class.
"""
import numpy as np
import pygimli as pg
# from pygimli.frameworks import Modelling, Block1DModelling
from pygimli.frameworks import Block1DModelling, MethodManager1d
from pygimli.frameworks.modelling import DEFAULT_STYLES
class VESModelling(Block1DModelling):
"""Vertical Electrical Sounding (VES) forward operator.
Attributes
----------
am :
Part of data basis. Distances between A and M electrodes.
A is first power, M is first potential electrode.
bm :
Part of data basis. Distances between B and M electrodes.
B is second power, M is first potential electrode.
an :
Part of data basis. Distances between A and N electrodes.
A is first power, N is second potential electrode.
bn :
Part of data basis. Distances between B and N electrodes.
B is second power, N is second potential electrode.
ab2 :
Half distance between A and B.
mn2 :
        Half distance between M and N.
Only used for input (feeding am etc.).
"""
def __init__(self, ab2=None, mn2=None, **kwargs):
r"""Constructor
"""
self.am = None
self.bm = None
self.an = None
self.bn = None
self.ab2 = None
self.mn2 = None
super(VESModelling, self).__init__(**kwargs)
if 'dataContainerERT' in kwargs:
data = kwargs['dataContainerERT']
if isinstance(data, pg.DataContainerERT):
kwargs['am'] = [data.sensorPosition(data('a')[i]).distance(
data('m')[i]) for i in range(data.size())]
kwargs['an'] = [data.sensorPosition(data('a')[i]).distance(
data('n')[i]) for i in range(data.size())]
kwargs['bm'] = [data.sensorPosition(data('b')[i]).distance(
data('m')[i]) for i in range(data.size())]
kwargs['bn'] = [data.sensorPosition(data('b')[i]).distance(
data('n')[i]) for i in range(data.size())]
self.setDataSpace(ab2=ab2, mn2=mn2, **kwargs)
def createStartModel(self, rhoa):
r"""
"""
if self.nLayers == 0:
pg.critical("Model space is not been initialized.")
startThicks = np.logspace(np.log10(min(self.mn2)/2),
np.log10(max(self.ab2)/5),
self.nLayers - 1)
startThicks = pg.utils.diff(pg.cat([0.0], startThicks))
# layer thickness properties
self.setRegionProperties(0, startModel=startThicks, trans='log')
# resistivity properties
self.setRegionProperties(1, startModel=np.median(rhoa), trans='log')
return super(VESModelling, self).createStartModel()
def setDataSpace(self, ab2=None, mn2=None,
am=None, bm=None, an=None, bn=None,
**kwargs):
"""Set data basis, i.e., arrays for all am, an, bm, bn distances.
Parameters
----------
"""
# Sometimes you don't have AB2/MN2 but provide am etc.
self.am = am
self.an = an
self.bm = bm
self.bn = bn
if ab2 is not None and mn2 is not None: # overrides am etc.
if isinstance(mn2, float):
mn2 = np.ones(len(ab2))*mn2
if len(ab2) != len(mn2):
print("ab2", ab2)
print("mn2", mn2)
raise Exception("length of ab2 is unequal length of mn2")
self.am = ab2 - mn2
self.an = ab2 + mn2
self.bm = ab2 + mn2
self.bn = ab2 - mn2
elif (am is not None and bm is not None and an is not None and
bn is not None):
self.am = am
self.bm = bm
self.an = an
self.bn = bn
if self.am is not None and self.bm is not None:
self.ab2 = (self.am + self.bm) / 2
self.mn2 = abs(self.am - self.an) / 2
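            # Standard four-electrode geometric factor:
            # k = 2*pi / (1/am - 1/an - 1/bm + 1/bn)
            # (relates a measured resistance U/I to an apparent resistivity).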
self.k = (2.0 * np.pi) / (1.0 / self.am - 1.0 / self.an -
1.0 / self.bm + 1.0 / self.bn)
def response(self, par):
return self.response_mt(par, 0)
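    # For the real-valued case the parameter vector is
    # [thk_1, ..., thk_{nLay-1}, res_1, ..., res_nLay],
    # hence nLayers = (len(par) + 1) // 2 below.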
def response_mt(self, par, i=0):
if self.am is not None and self.bm is not None:
nLayers = (len(par)+1) // 2
fop = pg.core.DC1dModelling(nLayers,
self.am, self.bm, self.an, self.bn)
else:
pg.critical("No data space defined don't know what to calculate.")
return fop.response(par)
def drawModel(self, ax, model, **kwargs):
pg.viewer.mpl.drawModel1D(ax=ax,
model=model,
plot=kwargs.pop('plot', 'loglog'),
xlabel=r'Resistivity ($\Omega$m)', **kwargs)
        ax.set_ylabel('Depth (m)')
def drawData(self, ax, data, error=None, label=None, **kwargs):
r"""Draw modeled apparent resistivity data.
Parameters
----------
ax: axes
Matplotlib axes object to draw into.
data: iterable
Apparent resistivity values to draw.
error: iterable [None]
Adds an error bar if you have error values.
label: str ['$\varrho_a$']
Set legend label for the amplitude.
Other parameters
----------------
ab2: iterable
Override ab2 that fits data size.
mn2: iterable
Override mn2 that fits data size.
plot: function name
Matplotlib plot function, e.g., plot, loglog, semilogx or semilogy
"""
ab2 = kwargs.pop('ab2', self.ab2)
# mn2 = kwargs.pop('mn2', self.mn2)
plot = kwargs.pop('plot', 'loglog')
ra = data
raE = error
style = dict(pg.frameworks.modelling.DEFAULT_STYLES.get(label,
pg.frameworks.modelling.DEFAULT_STYLES['Default']))
style.update(kwargs)
a1 = ax
plot = getattr(a1, plot)
if label is None:
label = r'$\varrho_a$'
plot(ra, ab2, label=label, **style)
if raE is not None:
raErr = np.array(ra * raE)
if pg.isArray(raErr, len(ra)):
a1.errorbar(ra, ab2,
xerr=raErr, barsabove=True,
**DEFAULT_STYLES.get('Error',
DEFAULT_STYLES['Default']),
label='_nolegend_')
a1.set_ylim(max(ab2), min(ab2))
a1.set_xlabel(r'Apparent resistivity ($\Omega$m)')
a1.set_ylabel(r'AB/2 (m)')
a1.grid(True)
a1.legend()
class VESCModelling(VESModelling):
"""Vertical Electrical Sounding (VES) forward operator. (complex)
Vertical Electrical Sounding (VES) forward operator for complex
resistivity values. see: :py:mod:`pygimli.physics.ert.VESModelling`
"""
def __init__(self, **kwargs):
super(VESCModelling, self).__init__(nPara=2, **kwargs)
self.phiAxe = None
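    # Model vector layout implied by resModel()/phaseModel() below, for nLay
    # layers: [thk_1..thk_{nLay-1}, res_1..res_nLay, phi_1..phi_nLay], with
    # phases stored in rad and reported/drawn in mrad.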
def phaseModel(self, model):
"""Return the current phase model values."""
nLay = (len(model) + 1) // 3
return pg.cat(model[0:nLay-1], 1000. * model[nLay*2-1::])
def resModel(self, model):
"""Return the resistivity model values."""
nLay = (len(model) + 1) // 3
return model[0:nLay*2-1]
def createStartModel(self, rhoa):
startThicks = np.logspace(np.log10(min(self.mn2)/2),
np.log10(max(self.ab2)/5),
self._nLayers-1)
startThicks = pg.utils.diff(pg.cat([0.0], startThicks))
# layer thickness properties
self.setRegionProperties(0, startModel=startThicks,
trans='log')
# resistivity properties
self.setRegionProperties(1, startModel=np.median(rhoa),
trans='log')
self.setRegionProperties(2, startModel=np.median(rhoa[len(rhoa)//2::]),
trans='log')
sm = self.regionManager().createStartModel()
return sm
def response_mt(self, par, i=0):
""" Multithread response for parametrization.
Returns [|rhoa|, +phi(rad)] for [thicks, res, phi(rad)]
"""
if self.am is not None and self.bm is not None:
nLayers = (len(par) + 1) // 3
fop = pg.core.DC1dModellingC(nLayers,
self.am, self.bm, self.an, self.bn)
else:
pg.critical("No data basis known.")
return fop.response(par)
def drawModel(self, ax, model, **kwargs):
"""Draw 1D VESC Modell."""
a1 = ax
a2 = pg.viewer.mpl.createTwinY(ax)
super(VESCModelling, self).drawModel(a1,
model=self.resModel(model),
**kwargs)
plot = kwargs.pop('plot', 'semilogy')
if plot == 'loglog':
plot = 'semilogy'
elif plot == 'semilogx':
plot = 'plot'
pg.viewer.mpl.drawModel1D(ax=a2,
model=self.phaseModel(model),
plot=plot,
color='C2',
xlabel='Phase (mrad)',
**kwargs)
a2.set_xlabel('neg. phase (mRad)', color='C2')
def drawData(self, ax, data, error=None, labels=None, ab2=None, mn2=None,
**kwargs):
r"""Draw modeled apparent resistivity and apparent phase data.
Parameters
----------
ax: axes
Matplotlib axes object to draw into.
data: iterable
Apparent resistivity values to draw. [rhoa phia].
error: iterable [None]
            Rhoa in Ohm m and phia in radians.
            Adds an error bar if you have error values. [err_rhoa err_phia]
            The amplitude errors are assumed to be relative and the phase
            errors are assumed to be absolute in mrad.
labels: str [r'$\varrho_a$', r'$\varphi_a$']
Set legend labels for amplitude and phase.
Other parameters:
-----------------
ab2: iterable
Override ab2 that fits data size.
mn2: iterable
Override mn2 that fits data size.
plot: function name
Matplotlib plot function, e.g., plot, loglog, semilogx or semilogy
"""
a1 = None
a2 = None
if hasattr(ax, '__iter__'):
if len(ax) == 2:
a1 = ax[0]
a2 = ax[1]
else:
a1 = ax
a2 = pg.viewer.mpl.createTwinY(ax)
if ab2 is not None and mn2 is not None:
self.setDataSpace(ab2=ab2, mn2=mn2)
ra = data[0:len(data)//2]
phi = data[len(data)//2::] * 1000. # mRad
phiE = None # abs err
raE = None # rel err
if error is not None:
if type(error) is float:
raE = np.ones(len(data)//2) * error
phiE = np.ones(len(data)//2) * error
else:
raE = error[0:len(data)//2]
phiE = error[len(data)//2::]
if labels is None:
labels = [r'$\varrho_a$', r'$\varphi_a$']
label = kwargs.pop('label', 'Data')
style = dict(pg.frameworks.modelling.DEFAULT_STYLES.get(
label, pg.frameworks.modelling.DEFAULT_STYLES['Default']))
style.update(kwargs)
super(VESCModelling, self).drawData(a1, ra, error=raE,
label=labels[0], **style)
style['color'] = 'C2'
a2.semilogy(phi, self.ab2, label=labels[1], **style)
if phiE is not None:
a2.errorbar(phi, self.ab2,
xerr=phiE,
**DEFAULT_STYLES.get('Error',
DEFAULT_STYLES['Default']),
barsabove=True,
label='_nolegend_'
)
a2.set_ylim(max(self.ab2), min(self.ab2))
a2.set_xlabel('Apparent neg. phase (mRad)', color='C2')
        a2.set_ylabel('AB/2 (m)')
a2.legend()
a2.grid(True)
class VESManager(MethodManager1d):
r"""Vertical electrical sounding (VES) manager class.
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> from pygimli.physics import VESManager
>>> ab2 = np.logspace(np.log10(1.5), np.log10(100), 32)
>>> mn2 = 1.0
    >>> # 3 layers with 100, 5 and 20 Ohmm
>>> # and layer thickness of 4, 6, 10 m
>>> # over a Halfspace of 800 Ohmm
>>> synthModel = pg.cat([4., 6., 10.], [100., 5., 20., 800.])
>>> ves = VESManager()
>>> ra, err = ves.simulate(synthModel, ab2=ab2, mn2=mn2, noiseLevel=0.01)
>>> ax = ves.showData(ra, error=err)
>>> # _= ves.invert(ra, err, nLayer=4, showProgress=0, verbose=0)
>>> # ax = ves.showModel(synthModel)
>>> # ax = ves.showResult(ax=ax)
>>> pg.wait()
"""
def __init__(self, **kwargs):
"""Constructor
Parameters
----------
complex : bool
Accept complex resistivities.
Attributes
----------
complex : bool
Accept complex resistivities.
"""
self._complex = kwargs.pop('complex', False)
super(VESManager, self).__init__(**kwargs)
self.inv.setDeltaChiStop(1)
self.dataTrans = None
self.rhoaTrans = pg.trans.TransLog()
self.phiaTrans = pg.trans.TransLin()
@property
def complex(self):
return self._complex
@complex.setter
def complex(self, c):
self._complex = c
self.reinitForwardOperator()
def createForwardOperator(self, **kwargs):
"""Create Forward Operator.
Create Forward Operator based on complex attribute.
"""
if self.complex:
return VESCModelling(**kwargs)
else:
return VESModelling(**kwargs)
def simulate(self, model, ab2=None, mn2=None, **kwargs):
"""Simulate measurement data."""
if ab2 is not None and mn2 is not None:
self._fw.fop.setDataSpace(ab2=ab2, mn2=mn2)
return super(VESManager, self).simulate(model, **kwargs)
def preErrorCheck(self, err, dataVals=None):
"""Called before the validity check of the error values."""
err = np.atleast_1d(err)
if self.complex:
if len(err) == 2:
nData = len(dataVals) // 2
err = pg.cat(np.ones(nData)*err[0],
np.abs(err[1] / dataVals[nData:]))
else:
if len(err) == 1:
                err = np.ones(len(dataVals))*err[0]
return err
def invert(self, data=None, err=None, ab2=None, mn2=None, **kwargs):
"""Invert measured data.
Parameters
----------
Keyword Arguments
----------------
**kwargs
Additional kwargs inherited from %(MethodManager1d.invert) and
%(Inversion.run)
Returns
-------
model : pg.Vector
inversion result
"""
if ab2 is not None and mn2 is not None:
self.fop.setDataSpace(ab2=ab2, mn2=mn2)
if data is not None:
if self.complex:
nData = len(data)//2
self.dataTrans = pg.trans.TransCumulative()
self.dataTrans.add(self.rhoaTrans, nData)
self.dataTrans.add(self.phiaTrans, nData)
else:
self.dataTrans = pg.trans.TransLog()
self.inv.dataTrans = self.dataTrans
if 'layerLimits' not in kwargs:
kwargs['layerLimits'] = [min(self.fop.mn2)/5,
max(self.fop.ab2)/2]
if 'paraLimits' in kwargs and self.complex:
pL = kwargs['paraLimits'][1]
kwargs['paraLimits'][1] = [pL[0]/1000, pL[1]/1000]
return super(VESManager, self).invert(data=data, err=err, **kwargs)
def loadData(self, fileName, **kwargs):
""" Load simple data matrix
"""
mat = np.loadtxt(fileName)
if len(mat[0]) == 4:
self.fop.setDataSpace(ab2=mat[:, 0], mn2=mat[:, 1])
return mat.T
if len(mat[0]) == 6:
self.complex = True
self.fop.setDataSpace(ab2=mat[:, 0], mn2=mat[:, 1])
return (mat[:, 0], mat[:, 1],
np.array(pg.cat(mat[:, 2], mat[:, 4])),
np.array(pg.cat(mat[:, 3], mat[:, 5])))
def exportData(self, fileName, data=None, error=None):
"""Export data into simple ascii matrix.
        Useful?
"""
mn2 = np.abs((self.fop.am - self.fop.an) / 2.)
ab2 = (self.fop.am + self.fop.bm) / 2.
mat = None
if data is None:
data = self.inv.dataVals
if error is None:
error = self.inv.errorVals
if self.complex:
nData = len(data)//2
mat = np.array([ab2, mn2,
data[:nData], error[:nData],
data[nData:], error[nData:]
]).T
np.savetxt(fileName, mat,
header=r'ab/2\tmn/2\trhoa\terr\tphia\terrphi')
else:
mat = np.array([ab2, mn2, data, error]).T
np.savetxt(fileName, mat, header=r'ab/2\tmn/2\trhoa\terr')
def VESManagerApp():
"""Call VESManager as console app"""
parser = VESManager.createArgParser(dataSuffix='ves')
options = parser.parse_args()
verbose = not options.quiet
if verbose:
print("VES Manager console application.")
print(options._get_kwargs())
mgr = VESManager(verbose=verbose, debug=pg.debug())
ab2, mn2, ra, err = mgr.loadData(options.dataFileName)
mgr.showData(ra, err)
mgr.invert(ra, err, ab2, mn2,
maxIter=options.maxIter,
lam=options.lam,
)
mgr.showResultAndFit()
pg.wait()
if __name__ == '__main__':
VESManagerApp()
|
|
#
# @file TestReaction_newSetters.py
# @brief Reaction unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestReaction_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestReaction_newSetters(unittest.TestCase):
global R
R = None
def setUp(self):
self.R = libsbml.Reaction(1,2)
if (self.R == None):
pass
pass
def tearDown(self):
_dummyList = [ self.R ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addProduct1(self):
m = libsbml.Reaction(2,2)
p = libsbml.SpeciesReference(2,2)
p1 = libsbml.SpeciesReference(2,2)
p1.setSpecies( "k")
p1.setId( "k1")
i = m.addProduct(p)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
p.setSpecies( "k")
p.setId( "k1")
i = m.addProduct(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumProducts() == 1 )
i = m.addProduct(p1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumProducts() == 1 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addProduct2(self):
m = libsbml.Reaction(2,2)
p = libsbml.SpeciesReference(2,1)
p.setSpecies( "k")
i = m.addProduct(p)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumProducts() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addProduct3(self):
m = libsbml.Reaction(2,2)
p = libsbml.SpeciesReference(1,2)
p.setSpecies( "k")
i = m.addProduct(p)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumProducts() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addProduct4(self):
m = libsbml.Reaction(2,2)
p = None
i = m.addProduct(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumProducts() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addReactant1(self):
m = libsbml.Reaction(2,2)
p = libsbml.SpeciesReference(2,2)
p1 = libsbml.SpeciesReference(2,2)
p1.setSpecies( "k")
p1.setId( "k1")
i = m.addReactant(p)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
p.setSpecies( "k")
p.setId( "k1")
i = m.addReactant(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumReactants() == 1 )
i = m.addReactant(p1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumReactants() == 1 )
_dummyList = [ p1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addReactant2(self):
m = libsbml.Reaction(2,2)
p = libsbml.SpeciesReference(2,1)
p.setSpecies( "k")
i = m.addReactant(p)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumReactants() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addReactant3(self):
m = libsbml.Reaction(2,2)
p = libsbml.SpeciesReference(1,2)
p.setSpecies( "k")
i = m.addReactant(p)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumReactants() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_addReactant4(self):
m = libsbml.Reaction(2,2)
p = None
i = m.addReactant(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumReactants() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_createKineticLaw(self):
r = libsbml.Reaction(2,2)
kl = r.createKineticLaw()
self.assert_( r.isSetKineticLaw() == True )
self.assert_( (kl).getLevel() == 2 )
self.assert_( (kl).getVersion() == 2 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_createProduct(self):
m = libsbml.Reaction(2,2)
p = m.createProduct()
self.assert_( m.getNumProducts() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_createReactant(self):
m = libsbml.Reaction(2,2)
p = m.createReactant()
self.assert_( m.getNumReactants() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_setFast1(self):
i = self.R.setFast(True)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.R.getFast() == True )
self.assertEqual( True, self.R.isSetFast() )
i = self.R.setFast(False)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.R.getFast() == False )
self.assertEqual( True, self.R.isSetFast() )
i = self.R.unsetFast()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.R.getFast() == False )
self.assertEqual( False, self.R.isSetFast() )
pass
def test_Reaction_setFast2(self):
R1 = libsbml.Reaction(2,4)
i = R1.unsetFast()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( R1.getFast() == False )
self.assertEqual( False, R1.isSetFast() )
pass
def test_Reaction_setId1(self):
i = self.R.setId( "1cell")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, self.R.isSetId() )
pass
def test_Reaction_setId2(self):
i = self.R.setId( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.R.isSetId() )
self.assert_(( "cell" == self.R.getId() ))
i = self.R.setId("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.R.isSetId() )
pass
def test_Reaction_setKineticLaw1(self):
kl = libsbml.KineticLaw(2,1)
kl.setMath(libsbml.parseFormula("1"))
i = self.R.setKineticLaw(kl)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assertEqual( False, self.R.isSetKineticLaw() )
_dummyList = [ kl ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_setKineticLaw2(self):
kl = libsbml.KineticLaw(1,1)
kl.setMath(libsbml.parseFormula("1"))
i = self.R.setKineticLaw(kl)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assertEqual( False, self.R.isSetKineticLaw() )
_dummyList = [ kl ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_setKineticLaw3(self):
kl = libsbml.KineticLaw(1,2)
kl.setMath(libsbml.parseFormula("1"))
i = self.R.setKineticLaw(kl)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.R.isSetKineticLaw() )
_dummyList = [ kl ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_setKineticLaw4(self):
i = self.R.setKineticLaw(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.R.isSetKineticLaw() )
i = self.R.unsetKineticLaw()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.R.isSetKineticLaw() )
pass
def test_Reaction_setName1(self):
i = self.R.setName( "cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.R.isSetName() )
i = self.R.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.R.isSetName() )
pass
def test_Reaction_setName2(self):
p = libsbml.Reaction(2,2)
i = p.setName( "1cell")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, p.isSetName() )
i = p.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, p.isSetName() )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_setName3(self):
p = libsbml.Reaction(2,2)
i = p.setName("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, p.isSetName() )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
pass
def test_Reaction_setReversible1(self):
i = self.R.setReversible(True)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.R.getReversible() == True )
i = self.R.setReversible(False)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.R.getReversible() == False )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestReaction_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
|
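# Type-tag table: maps CTP/FTDC field type names onto primitive type tags
# ("string", "char", "int", "double"), presumably consumed by struct
# definitions or generated API bindings elsewhere.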
TThostFtdcTraderIDType = "string"
TThostFtdcInvestorIDType = "string"
TThostFtdcBrokerIDType = "string"
TThostFtdcBrokerAbbrType = "string"
TThostFtdcBrokerNameType = "string"
TThostFtdcExchangeInstIDType = "string"
TThostFtdcOrderRefType = "string"
TThostFtdcParticipantIDType = "string"
TThostFtdcUserIDType = "string"
TThostFtdcPasswordType = "string"
TThostFtdcClientIDType = "string"
TThostFtdcInstrumentIDType = "string"
TThostFtdcInstrumentCodeType = "string"
TThostFtdcMarketIDType = "string"
TThostFtdcProductNameType = "string"
TThostFtdcExchangeIDType = "string"
TThostFtdcExchangeNameType = "string"
TThostFtdcExchangeAbbrType = "string"
TThostFtdcExchangeFlagType = "string"
TThostFtdcMacAddressType = "string"
TThostFtdcSystemIDType = "string"
TThostFtdcExchangePropertyType = "char"
TThostFtdcDateType = "string"
TThostFtdcTimeType = "string"
TThostFtdcLongTimeType = "string"
TThostFtdcInstrumentNameType = "string"
TThostFtdcSettlementGroupIDType = "string"
TThostFtdcOrderSysIDType = "string"
TThostFtdcTradeIDType = "string"
TThostFtdcCommandTypeType = "string"
TThostFtdcIPAddressType = "string"
TThostFtdcIPPortType = "int"
TThostFtdcProductInfoType = "string"
TThostFtdcProtocolInfoType = "string"
TThostFtdcBusinessUnitType = "string"
TThostFtdcDepositSeqNoType = "string"
TThostFtdcIdentifiedCardNoType = "string"
TThostFtdcIdCardTypeType = "char"
TThostFtdcOrderLocalIDType = "string"
TThostFtdcUserNameType = "string"
TThostFtdcPartyNameType = "string"
TThostFtdcErrorMsgType = "string"
TThostFtdcFieldNameType = "string"
TThostFtdcFieldContentType = "string"
TThostFtdcSystemNameType = "string"
TThostFtdcContentType = "string"
TThostFtdcInvestorRangeType = "char"
TThostFtdcDepartmentRangeType = "char"
TThostFtdcDataSyncStatusType = "char"
TThostFtdcBrokerDataSyncStatusType = "char"
TThostFtdcExchangeConnectStatusType = "char"
TThostFtdcTraderConnectStatusType = "char"
TThostFtdcFunctionCodeType = "char"
TThostFtdcBrokerFunctionCodeType = "char"
TThostFtdcOrderActionStatusType = "char"
TThostFtdcOrderStatusType = "char"
TThostFtdcOrderSubmitStatusType = "char"
TThostFtdcPositionDateType = "char"
TThostFtdcPositionDateTypeType = "char"
TThostFtdcTradingRoleType = "char"
TThostFtdcProductClassType = "char"
TThostFtdcInstLifePhaseType = "char"
TThostFtdcDirectionType = "char"
TThostFtdcPositionTypeType = "char"
TThostFtdcPosiDirectionType = "char"
TThostFtdcSysSettlementStatusType = "char"
TThostFtdcRatioAttrType = "char"
TThostFtdcHedgeFlagType = "char"
TThostFtdcBillHedgeFlagType = "char"
TThostFtdcClientIDTypeType = "char"
TThostFtdcOrderPriceTypeType = "char"
TThostFtdcOffsetFlagType = "char"
TThostFtdcForceCloseReasonType = "char"
TThostFtdcOrderTypeType = "char"
TThostFtdcTimeConditionType = "char"
TThostFtdcVolumeConditionType = "char"
TThostFtdcContingentConditionType = "char"
TThostFtdcActionFlagType = "char"
TThostFtdcTradingRightType = "char"
TThostFtdcOrderSourceType = "char"
TThostFtdcTradeTypeType = "char"
TThostFtdcPriceSourceType = "char"
TThostFtdcInstrumentStatusType = "char"
TThostFtdcInstStatusEnterReasonType = "char"
TThostFtdcOrderActionRefType = "int"
TThostFtdcInstallCountType = "int"
TThostFtdcInstallIDType = "int"
TThostFtdcErrorIDType = "int"
TThostFtdcSettlementIDType = "int"
TThostFtdcVolumeType = "int"
TThostFtdcFrontIDType = "int"
TThostFtdcSessionIDType = "int"
TThostFtdcSequenceNoType = "int"
TThostFtdcCommandNoType = "int"
TThostFtdcMillisecType = "int"
TThostFtdcVolumeMultipleType = "int"
TThostFtdcTradingSegmentSNType = "int"
TThostFtdcRequestIDType = "int"
TThostFtdcYearType = "int"
TThostFtdcMonthType = "int"
TThostFtdcBoolType = "int"
TThostFtdcPriceType = "double"
TThostFtdcCombOffsetFlagType = "string"
TThostFtdcCombHedgeFlagType = "string"
TThostFtdcRatioType = "double"
TThostFtdcMoneyType = "double"
TThostFtdcLargeVolumeType = "double"
TThostFtdcSequenceSeriesType = "int"
TThostFtdcCommPhaseNoType = "int"
TThostFtdcSequenceLabelType = "string"
TThostFtdcUnderlyingMultipleType = "double"
TThostFtdcPriorityType = "int"
TThostFtdcContractCodeType = "string"
TThostFtdcCityType = "string"
TThostFtdcIsStockType = "string"
TThostFtdcChannelType = "string"
TThostFtdcAddressType = "string"
TThostFtdcZipCodeType = "string"
TThostFtdcTelephoneType = "string"
TThostFtdcFaxType = "string"
TThostFtdcMobileType = "string"
TThostFtdcEMailType = "string"
TThostFtdcMemoType = "string"
TThostFtdcCompanyCodeType = "string"
TThostFtdcWebsiteType = "string"
TThostFtdcTaxNoType = "string"
TThostFtdcBatchStatusType = "char"
TThostFtdcPropertyIDType = "string"
TThostFtdcPropertyNameType = "string"
TThostFtdcLicenseNoType = "string"
TThostFtdcAgentIDType = "string"
TThostFtdcAgentNameType = "string"
TThostFtdcAgentGroupIDType = "string"
TThostFtdcAgentGroupNameType = "string"
TThostFtdcReturnStyleType = "char"
TThostFtdcReturnPatternType = "char"
TThostFtdcReturnLevelType = "char"
TThostFtdcReturnStandardType = "char"
TThostFtdcMortgageTypeType = "char"
TThostFtdcInvestorSettlementParamIDType = "char"
TThostFtdcExchangeSettlementParamIDType = "char"
TThostFtdcSystemParamIDType = "char"
TThostFtdcTradeParamIDType = "char"
TThostFtdcSettlementParamValueType = "string"
TThostFtdcCounterIDType = "string"
TThostFtdcInvestorGroupNameType = "string"
TThostFtdcBrandCodeType = "string"
TThostFtdcWarehouseType = "string"
TThostFtdcProductDateType = "string"
TThostFtdcGradeType = "string"
TThostFtdcClassifyType = "string"
TThostFtdcPositionType = "string"
TThostFtdcYieldlyType = "string"
TThostFtdcWeightType = "string"
TThostFtdcSubEntryFundNoType = "int"
TThostFtdcFileIDType = "char"
TThostFtdcFileNameType = "string"
TThostFtdcFileTypeType = "char"
TThostFtdcFileFormatType = "char"
TThostFtdcFileUploadStatusType = "char"
TThostFtdcTransferDirectionType = "char"
TThostFtdcUploadModeType = "string"
TThostFtdcAccountIDType = "string"
TThostFtdcBankFlagType = "string"
TThostFtdcBankAccountType = "string"
TThostFtdcOpenNameType = "string"
TThostFtdcOpenBankType = "string"
TThostFtdcBankNameType = "string"
TThostFtdcPublishPathType = "string"
TThostFtdcOperatorIDType = "string"
TThostFtdcMonthCountType = "int"
TThostFtdcAdvanceMonthArrayType = "string"
TThostFtdcDateExprType = "string"
TThostFtdcInstrumentIDExprType = "string"
TThostFtdcInstrumentNameExprType = "string"
TThostFtdcSpecialCreateRuleType = "char"
TThostFtdcBasisPriceTypeType = "char"
TThostFtdcProductLifePhaseType = "char"
TThostFtdcDeliveryModeType = "char"
TThostFtdcLogLevelType = "string"
TThostFtdcProcessNameType = "string"
TThostFtdcOperationMemoType = "string"
TThostFtdcFundIOTypeType = "char"
TThostFtdcFundTypeType = "char"
TThostFtdcFundDirectionType = "char"
TThostFtdcFundStatusType = "char"
TThostFtdcBillNoType = "string"
TThostFtdcBillNameType = "string"
TThostFtdcPublishStatusType = "char"
TThostFtdcEnumValueIDType = "string"
TThostFtdcEnumValueTypeType = "string"
TThostFtdcEnumValueLabelType = "string"
TThostFtdcEnumValueResultType = "string"
TThostFtdcSystemStatusType = "char"
TThostFtdcSettlementStatusType = "char"
TThostFtdcRangeIntTypeType = "string"
TThostFtdcRangeIntFromType = "string"
TThostFtdcRangeIntToType = "string"
TThostFtdcFunctionIDType = "string"
TThostFtdcFunctionValueCodeType = "string"
TThostFtdcFunctionNameType = "string"
TThostFtdcRoleIDType = "string"
TThostFtdcRoleNameType = "string"
TThostFtdcDescriptionType = "string"
TThostFtdcCombineIDType = "string"
TThostFtdcCombineTypeType = "string"
TThostFtdcInvestorTypeType = "char"
TThostFtdcBrokerTypeType = "char"
TThostFtdcRiskLevelType = "char"
TThostFtdcFeeAcceptStyleType = "char"
TThostFtdcPasswordTypeType = "char"
TThostFtdcAlgorithmType = "char"
TThostFtdcIncludeCloseProfitType = "char"
TThostFtdcAllWithoutTradeType = "char"
TThostFtdcCommentType = "string"
TThostFtdcVersionType = "string"
TThostFtdcTradeCodeType = "string"
TThostFtdcTradeDateType = "string"
TThostFtdcTradeTimeType = "string"
TThostFtdcTradeSerialType = "string"
TThostFtdcTradeSerialNoType = "int"
TThostFtdcFutureIDType = "string"
TThostFtdcBankIDType = "string"
TThostFtdcBankBrchIDType = "string"
TThostFtdcBankBranchIDType = "string"
TThostFtdcOperNoType = "string"
TThostFtdcDeviceIDType = "string"
TThostFtdcRecordNumType = "string"
TThostFtdcFutureAccountType = "string"
TThostFtdcFuturePwdFlagType = "char"
TThostFtdcTransferTypeType = "char"
TThostFtdcFutureAccPwdType = "string"
TThostFtdcCurrencyCodeType = "string"
TThostFtdcRetCodeType = "string"
TThostFtdcRetInfoType = "string"
TThostFtdcTradeAmtType = "string"
TThostFtdcUseAmtType = "string"
TThostFtdcFetchAmtType = "string"
TThostFtdcTransferValidFlagType = "char"
TThostFtdcCertCodeType = "string"
TThostFtdcReasonType = "char"
TThostFtdcFundProjectIDType = "string"
TThostFtdcSexType = "char"
TThostFtdcProfessionType = "string"
TThostFtdcNationalType = "string"
TThostFtdcProvinceType = "string"
TThostFtdcRegionType = "string"
TThostFtdcCountryType = "string"
TThostFtdcLicenseNOType = "string"
TThostFtdcCompanyTypeType = "string"
TThostFtdcBusinessScopeType = "string"
TThostFtdcCapitalCurrencyType = "string"
TThostFtdcUserTypeType = "char"
TThostFtdcBranchIDType = "string"
TThostFtdcRateTypeType = "char"
TThostFtdcNoteTypeType = "char"
TThostFtdcSettlementStyleType = "char"
TThostFtdcBrokerDNSType = "string"
TThostFtdcSentenceType = "string"
TThostFtdcSettlementBillTypeType = "char"
TThostFtdcUserRightTypeType = "char"
TThostFtdcMarginPriceTypeType = "char"
TThostFtdcBillGenStatusType = "char"
TThostFtdcAlgoTypeType = "char"
TThostFtdcHandlePositionAlgoIDType = "char"
TThostFtdcFindMarginRateAlgoIDType = "char"
TThostFtdcHandleTradingAccountAlgoIDType = "char"
TThostFtdcPersonTypeType = "char"
TThostFtdcQueryInvestorRangeType = "char"
TThostFtdcInvestorRiskStatusType = "char"
TThostFtdcLegIDType = "int"
TThostFtdcLegMultipleType = "int"
TThostFtdcImplyLevelType = "int"
TThostFtdcClearAccountType = "string"
TThostFtdcOrganNOType = "string"
TThostFtdcClearbarchIDType = "string"
TThostFtdcUserEventTypeType = "char"
TThostFtdcUserEventInfoType = "string"
TThostFtdcCloseStyleType = "char"
TThostFtdcStatModeType = "char"
TThostFtdcParkedOrderStatusType = "char"
TThostFtdcParkedOrderIDType = "string"
TThostFtdcParkedOrderActionIDType = "string"
TThostFtdcVirDealStatusType = "char"
TThostFtdcOrgSystemIDType = "char"
TThostFtdcVirTradeStatusType = "char"
TThostFtdcVirBankAccTypeType = "char"
TThostFtdcVirementStatusType = "char"
TThostFtdcVirementAvailAbilityType = "char"
TThostFtdcVirementTradeCodeType = "char"
TThostFtdcPhotoTypeNameType = "string"
TThostFtdcPhotoTypeIDType = "string"
TThostFtdcPhotoNameType = "string"
TThostFtdcTopicIDType = "int"
TThostFtdcReportTypeIDType = "string"
TThostFtdcCharacterIDType = "string"
TThostFtdcAMLParamIDType = "string"
TThostFtdcAMLInvestorTypeType = "string"
TThostFtdcAMLIdCardTypeType = "string"
TThostFtdcAMLTradeDirectType = "string"
TThostFtdcAMLTradeModelType = "string"
TThostFtdcAMLParamIDType = "string"
TThostFtdcAMLOpParamValueType = "double"
TThostFtdcAMLCustomerCardTypeType = "string"
TThostFtdcAMLInstitutionNameType = "string"
TThostFtdcAMLDistrictIDType = "string"
TThostFtdcAMLRelationShipType = "string"
TThostFtdcAMLInstitutionTypeType = "string"
TThostFtdcAMLInstitutionIDType = "string"
TThostFtdcAMLAccountTypeType = "string"
TThostFtdcAMLTradingTypeType = "string"
TThostFtdcAMLTransactClassType = "string"
TThostFtdcAMLCapitalIOType = "string"
TThostFtdcAMLSiteType = "string"
TThostFtdcAMLCapitalPurposeType = "string"
TThostFtdcAMLReportTypeType = "string"
TThostFtdcAMLSerialNoType = "string"
TThostFtdcAMLStatusType = "string"
TThostFtdcAMLGenStatusType = "char"
TThostFtdcAMLSeqCodeType = "string"
TThostFtdcAMLFileNameType = "string"
TThostFtdcAMLMoneyType = "double"
TThostFtdcAMLFileAmountType = "int"
TThostFtdcCFMMCKeyType = "string"
TThostFtdcCFMMCTokenType = "string"
TThostFtdcCFMMCKeyKindType = "char"
TThostFtdcAMLReportNameType = "string"
TThostFtdcIndividualNameType = "string"
TThostFtdcCurrencyIDType = "string"
TThostFtdcCustNumberType = "string"
TThostFtdcOrganCodeType = "string"
TThostFtdcOrganNameType = "string"
TThostFtdcSuperOrganCodeType = "string"
TThostFtdcSubBranchIDType = "string"
TThostFtdcSubBranchNameType = "string"
TThostFtdcBranchNetCodeType = "string"
TThostFtdcBranchNetNameType = "string"
TThostFtdcOrganFlagType = "string"
TThostFtdcBankCodingForFutureType = "string"
TThostFtdcBankReturnCodeType = "string"
TThostFtdcPlateReturnCodeType = "string"
TThostFtdcBankSubBranchIDType = "string"
TThostFtdcFutureBranchIDType = "string"
TThostFtdcReturnCodeType = "string"
TThostFtdcOperatorCodeType = "string"
TThostFtdcClearDepIDType = "string"
TThostFtdcClearBrchIDType = "string"
TThostFtdcClearNameType = "string"
TThostFtdcBankAccountNameType = "string"
TThostFtdcInvDepIDType = "string"
TThostFtdcInvBrchIDType = "string"
TThostFtdcMessageFormatVersionType = "string"
TThostFtdcDigestType = "string"
TThostFtdcAuthenticDataType = "string"
TThostFtdcPasswordKeyType = "string"
TThostFtdcFutureAccountNameType = "string"
TThostFtdcMobilePhoneType = "string"
TThostFtdcFutureMainKeyType = "string"
TThostFtdcFutureWorkKeyType = "string"
TThostFtdcFutureTransKeyType = "string"
TThostFtdcBankMainKeyType = "string"
TThostFtdcBankWorkKeyType = "string"
TThostFtdcBankTransKeyType = "string"
TThostFtdcBankServerDescriptionType = "string"
TThostFtdcAddInfoType = "string"
TThostFtdcDescrInfoForReturnCodeType = "string"
TThostFtdcCountryCodeType = "string"
TThostFtdcSerialType = "int"
TThostFtdcPlateSerialType = "int"
TThostFtdcBankSerialType = "string"
TThostFtdcCorrectSerialType = "int"
TThostFtdcFutureSerialType = "int"
TThostFtdcApplicationIDType = "int"
TThostFtdcBankProxyIDType = "int"
TThostFtdcFBTCoreIDType = "int"
TThostFtdcServerPortType = "int"
TThostFtdcRepealedTimesType = "int"
TThostFtdcRepealTimeIntervalType = "int"
TThostFtdcTotalTimesType = "int"
TThostFtdcFBTRequestIDType = "int"
TThostFtdcTIDType = "int"
TThostFtdcTradeAmountType = "double"
TThostFtdcCustFeeType = "double"
TThostFtdcFutureFeeType = "double"
TThostFtdcSingleMaxAmtType = "double"
TThostFtdcSingleMinAmtType = "double"
TThostFtdcTotalAmtType = "double"
TThostFtdcCertificationTypeType = "char"
TThostFtdcFileBusinessCodeType = "char"
TThostFtdcCashExchangeCodeType = "char"
TThostFtdcYesNoIndicatorType = "char"
TThostFtdcBanlanceTypeType = "char"
TThostFtdcGenderType = "char"
TThostFtdcFeePayFlagType = "char"
TThostFtdcPassWordKeyTypeType = "char"
TThostFtdcFBTPassWordTypeType = "char"
TThostFtdcFBTEncryModeType = "char"
TThostFtdcBankRepealFlagType = "char"
TThostFtdcBrokerRepealFlagType = "char"
TThostFtdcInstitutionTypeType = "char"
TThostFtdcLastFragmentType = "char"
TThostFtdcBankAccStatusType = "char"
TThostFtdcMoneyAccountStatusType = "char"
TThostFtdcManageStatusType = "char"
TThostFtdcSystemTypeType = "char"
TThostFtdcTxnEndFlagType = "char"
TThostFtdcProcessStatusType = "char"
TThostFtdcCustTypeType = "char"
TThostFtdcFBTTransferDirectionType = "char"
TThostFtdcOpenOrDestroyType = "char"
TThostFtdcAvailabilityFlagType = "char"
TThostFtdcOrganTypeType = "char"
TThostFtdcOrganLevelType = "char"
TThostFtdcProtocalIDType = "char"
TThostFtdcConnectModeType = "char"
TThostFtdcSyncModeType = "char"
TThostFtdcBankAccTypeType = "char"
TThostFtdcFutureAccTypeType = "char"
TThostFtdcOrganStatusType = "char"
TThostFtdcCCBFeeModeType = "char"
TThostFtdcCommApiTypeType = "char"
TThostFtdcServiceIDType = "int"
TThostFtdcServiceLineNoType = "int"
TThostFtdcServiceNameType = "string"
TThostFtdcLinkStatusType = "char"
TThostFtdcCommApiPointerType = "int"
TThostFtdcPwdFlagType = "char"
TThostFtdcSecuAccTypeType = "char"
TThostFtdcTransferStatusType = "char"
TThostFtdcSponsorTypeType = "char"
TThostFtdcReqRspTypeType = "char"
TThostFtdcFBTUserEventTypeType = "char"
TThostFtdcBankIDByBankType = "string"
TThostFtdcBankOperNoType = "string"
TThostFtdcBankCustNoType = "string"
TThostFtdcDBOPSeqNoType = "int"
TThostFtdcTableNameType = "string"
TThostFtdcPKNameType = "string"
TThostFtdcPKValueType = "string"
TThostFtdcDBOperationType = "char"
TThostFtdcSyncFlagType = "char"
TThostFtdcTargetIDType = "string"
TThostFtdcSyncTypeType = "char"
TThostFtdcFBETimeType = "string"
TThostFtdcFBEBankNoType = "string"
TThostFtdcFBECertNoType = "string"
TThostFtdcExDirectionType = "char"
TThostFtdcFBEBankAccountType = "string"
TThostFtdcFBEBankAccountNameType = "string"
TThostFtdcFBEAmtType = "double"
TThostFtdcFBEBusinessTypeType = "string"
TThostFtdcFBEPostScriptType = "string"
TThostFtdcFBERemarkType = "string"
TThostFtdcExRateType = "double"
TThostFtdcFBEResultFlagType = "char"
TThostFtdcFBERtnMsgType = "string"
TThostFtdcFBEExtendMsgType = "string"
TThostFtdcFBEBusinessSerialType = "string"
TThostFtdcFBESystemSerialType = "string"
TThostFtdcFBETotalExCntType = "int"
TThostFtdcFBEExchStatusType = "char"
TThostFtdcFBEFileFlagType = "char"
TThostFtdcFBEAlreadyTradeType = "char"
TThostFtdcFBEOpenBankType = "string"
TThostFtdcFBEUserEventTypeType = "char"
TThostFtdcFBEFileNameType = "string"
TThostFtdcFBEBatchSerialType = "string"
TThostFtdcFBEReqFlagType = "char"
TThostFtdcNotifyClassType = "char"
TThostFtdcRiskNofityInfoType = "string"
TThostFtdcForceCloseSceneIdType = "string"
TThostFtdcForceCloseTypeType = "char"
TThostFtdcInstrumentIDsType = "string"
TThostFtdcRiskNotifyMethodType = "char"
TThostFtdcRiskNotifyStatusType = "char"
TThostFtdcRiskUserEventType = "char"
TThostFtdcParamIDType = "int"
TThostFtdcParamNameType = "string"
TThostFtdcParamValueType = "string"
TThostFtdcConditionalOrderSortTypeType = "char"
TThostFtdcSendTypeType = "char"
TThostFtdcClientIDStatusType = "char"
TThostFtdcIndustryIDType = "string"
TThostFtdcQuestionIDType = "string"
TThostFtdcQuestionContentType = "string"
TThostFtdcOptionIDType = "string"
TThostFtdcOptionContentType = "string"
TThostFtdcQuestionTypeType = "char"
TThostFtdcProcessIDType = "string"
TThostFtdcSeqNoType = "int"
TThostFtdcUOAProcessStatusType = "string"
TThostFtdcProcessTypeType = "string"
TThostFtdcBusinessTypeType = "char"
TThostFtdcCfmmcReturnCodeType = "char"
TThostFtdcExReturnCodeType = "int"
TThostFtdcClientTypeType = "char"
TThostFtdcExchangeIDTypeType = "char"
TThostFtdcExClientIDTypeType = "char"
TThostFtdcClientClassifyType = "string"
TThostFtdcUOAOrganTypeType = "string"
TThostFtdcUOACountryCodeType = "string"
TThostFtdcAreaCodeType = "string"
TThostFtdcFuturesIDType = "string"
TThostFtdcCffmcDateType = "string"
TThostFtdcCffmcTimeType = "string"
TThostFtdcNocIDType = "string"
TThostFtdcUpdateFlagType = "char"
TThostFtdcApplyOperateIDType = "char"
TThostFtdcApplyStatusIDType = "char"
TThostFtdcSendMethodType = "char"
TThostFtdcEventTypeType = "string"
TThostFtdcEventModeType = "char"
TThostFtdcUOAAutoSendType = "char"
TThostFtdcQueryDepthType = "int"
TThostFtdcDataCenterIDType = "int"
TThostFtdcFlowIDType = "char"
TThostFtdcCheckLevelType = "char"
TThostFtdcCheckNoType = "int"
TThostFtdcCheckStatusType = "char"
TThostFtdcUsedStatusType = "char"
TThostFtdcRateTemplateNameType = "string"
TThostFtdcPropertyStringType = "string"
TThostFtdcBankAcountOriginType = "char"
TThostFtdcMonthBillTradeSumType = "char"
TThostFtdcFBTTradeCodeEnumType = "char"
TThostFtdcRateTemplateIDType = "string"
TThostFtdcRiskRateType = "string"
TThostFtdcTimestampType = "int"
TThostFtdcInvestorIDRuleNameType = "string"
TThostFtdcInvestorIDRuleExprType = "string"
TThostFtdcLastDriftType = "int"
TThostFtdcLastSuccessType = "int"
TThostFtdcAuthKeyType = "string"
TThostFtdcSerialNumberType = "string"
TThostFtdcOTPTypeType = "char"
TThostFtdcOTPVendorsIDType = "string"
TThostFtdcOTPVendorsNameType = "string"
TThostFtdcOTPStatusType = "char"
TThostFtdcBrokerUserTypeType = "char"
TThostFtdcFutureTypeType = "char"
TThostFtdcFundEventTypeType = "char"
TThostFtdcAccountSourceTypeType = "char"
TThostFtdcCodeSourceTypeType = "char"
TThostFtdcUserRangeType = "char"
TThostFtdcTimeSpanType = "string"
TThostFtdcImportSequenceIDType = "string"
TThostFtdcByGroupType = "char"
TThostFtdcTradeSumStatModeType = "char"
TThostFtdcComTypeType = "int"
TThostFtdcUserProductIDType = "string"
TThostFtdcUserProductNameType = "string"
TThostFtdcUserProductMemoType = "string"
TThostFtdcCSRCCancelFlagType = "string"
TThostFtdcCSRCDateType = "string"
TThostFtdcCSRCInvestorNameType = "string"
TThostFtdcCSRCOpenInvestorNameType = "string"
TThostFtdcCSRCInvestorIDType = "string"
TThostFtdcCSRCIdentifiedCardNoType = "string"
TThostFtdcCSRCClientIDType = "string"
TThostFtdcCSRCBankFlagType = "string"
TThostFtdcCSRCBankAccountType = "string"
TThostFtdcCSRCOpenNameType = "string"
TThostFtdcCSRCMemoType = "string"
TThostFtdcCSRCTimeType = "string"
TThostFtdcCSRCTradeIDType = "string"
TThostFtdcCSRCExchangeInstIDType = "string"
TThostFtdcCSRCMortgageNameType = "string"
TThostFtdcCSRCReasonType = "string"
TThostFtdcIsSettlementType = "string"
TThostFtdcCSRCMoneyType = "double"
TThostFtdcCSRCPriceType = "double"
TThostFtdcCSRCOptionsTypeType = "string"
TThostFtdcCSRCStrikePriceType = "double"
TThostFtdcCSRCTargetProductIDType = "string"
TThostFtdcCSRCTargetInstrIDType = "string"
TThostFtdcCommModelNameType = "string"
TThostFtdcCommModelMemoType = "string"
TThostFtdcExprSetModeType = "char"
TThostFtdcRateInvestorRangeType = "char"
TThostFtdcAgentBrokerIDType = "string"
TThostFtdcDRIdentityIDType = "int"
TThostFtdcDRIdentityNameType = "string"
TThostFtdcDBLinkIDType = "string"
TThostFtdcSyncDataStatusType = "char"
TThostFtdcTradeSourceType = "char"
TThostFtdcFlexStatModeType = "char"
TThostFtdcByInvestorRangeType = "char"
TThostFtdcSRiskRateType = "string"
TThostFtdcSequenceNo12Type = "int"
TThostFtdcPropertyInvestorRangeType = "char"
TThostFtdcFileStatusType = "char"
TThostFtdcFileGenStyleType = "char"
TThostFtdcSysOperModeType = "char"
TThostFtdcSysOperTypeType = "char"
TThostFtdcCSRCDataQueyTypeType = "char"
TThostFtdcFreezeStatusType = "char"
TThostFtdcStandardStatusType = "char"
TThostFtdcCSRCFreezeStatusType = "string"
TThostFtdcRightParamTypeType = "char"
TThostFtdcRightTemplateIDType = "string"
TThostFtdcRightTemplateNameType = "string"
TThostFtdcDataStatusType = "char"
TThostFtdcAMLCheckStatusType = "char"
TThostFtdcAmlDateTypeType = "char"
TThostFtdcAmlCheckLevelType = "char"
TThostFtdcAmlCheckFlowType = "string"
TThostFtdcDataTypeType = "string"
TThostFtdcExportFileTypeType = "char"
TThostFtdcSettleManagerTypeType = "char"
TThostFtdcSettleManagerIDType = "string"
TThostFtdcSettleManagerNameType = "string"
TThostFtdcSettleManagerLevelType = "char"
TThostFtdcSettleManagerGroupType = "char"
TThostFtdcCheckResultMemoType = "string"
TThostFtdcFunctionUrlType = "string"
TThostFtdcAuthInfoType = "string"
TThostFtdcAuthCodeType = "string"
TThostFtdcLimitUseTypeType = "char"
TThostFtdcDataResourceType = "char"
TThostFtdcMarginTypeType = "char"
TThostFtdcActiveTypeType = "char"
TThostFtdcMarginRateTypeType = "char"
TThostFtdcBackUpStatusType = "char"
TThostFtdcInitSettlementType = "char"
TThostFtdcReportStatusType = "char"
TThostFtdcSaveStatusType = "char"
TThostFtdcSettArchiveStatusType = "char"
TThostFtdcCTPTypeType = "char"
TThostFtdcToolIDType = "string"
TThostFtdcToolNameType = "string"
TThostFtdcCloseDealTypeType = "char"
TThostFtdcMortgageFundUseRangeType = "char"
TThostFtdcCurrencyUnitType = "double"
TThostFtdcExchangeRateType = "double"
TThostFtdcSpecProductTypeType = "char"
TThostFtdcFundMortgageTypeType = "char"
TThostFtdcAccountSettlementParamIDType = "char"
TThostFtdcCurrencyNameType = "string"
TThostFtdcCurrencySignType = "string"
TThostFtdcFundMortDirectionType = "char"
TThostFtdcBusinessClassType = "char"
TThostFtdcSwapSourceTypeType = "char"
TThostFtdcCurrExDirectionType = "char"
TThostFtdcCurrencySwapStatusType = "char"
TThostFtdcCurrExchCertNoType = "string"
TThostFtdcBatchSerialNoType = "string"
TThostFtdcReqFlagType = "char"
TThostFtdcResFlagType = "char"
TThostFtdcPageControlType = "string"
TThostFtdcRecordCountType = "int"
TThostFtdcCurrencySwapMemoType = "string"
TThostFtdcExStatusType = "char"
TThostFtdcClientRegionType = "char"
TThostFtdcWorkPlaceType = "string"
TThostFtdcBusinessPeriodType = "string"
TThostFtdcWebSiteType = "string"
TThostFtdcUOAIdCardTypeType = "string"
TThostFtdcClientModeType = "string"
TThostFtdcInvestorFullNameType = "string"
TThostFtdcUOABrokerIDType = "string"
TThostFtdcUOAZipCodeType = "string"
TThostFtdcUOAEMailType = "string"
TThostFtdcOldCityType = "string"
TThostFtdcCorporateIdentifiedCardNoType = "string"
TThostFtdcHasBoardType = "char"
TThostFtdcStartModeType = "char"
TThostFtdcTemplateTypeType = "char"
TThostFtdcLoginModeType = "char"
TThostFtdcPromptTypeType = "char"
TThostFtdcLedgerManageIDType = "string"
TThostFtdcInvestVarietyType = "string"
TThostFtdcBankAccountTypeType = "string"
TThostFtdcLedgerManageBankType = "string"
TThostFtdcCffexDepartmentNameType = "string"
TThostFtdcCffexDepartmentCodeType = "string"
TThostFtdcHasTrusteeType = "char"
TThostFtdcCSRCMemo1Type = "string"
TThostFtdcAssetmgrCFullNameType = "string"
TThostFtdcAssetmgrApprovalNOType = "string"
TThostFtdcAssetmgrMgrNameType = "string"
TThostFtdcAmTypeType = "char"
TThostFtdcCSRCAmTypeType = "string"
TThostFtdcCSRCFundIOTypeType = "char"
TThostFtdcCusAccountTypeType = "char"
TThostFtdcCSRCNationalType = "string"
TThostFtdcCSRCSecAgentIDType = "string"
TThostFtdcLanguageTypeType = "char"
TThostFtdcAmAccountType = "string"
TThostFtdcAssetmgrClientTypeType = "char"
TThostFtdcAssetmgrTypeType = "char"
TThostFtdcUOMType = "string"
TThostFtdcSHFEInstLifePhaseType = "string"
TThostFtdcSHFEProductClassType = "string"
TThostFtdcPriceDecimalType = "string"
TThostFtdcInTheMoneyFlagType = "string"
TThostFtdcCheckInstrTypeType = "char"
TThostFtdcDeliveryTypeType = "char"
TThostFtdcBigMoneyType = "double"
TThostFtdcMaxMarginSideAlgorithmType = "char"
TThostFtdcDAClientTypeType = "char"
TThostFtdcCombinInstrIDType = "string"
TThostFtdcCombinSettlePriceType = "string"
TThostFtdcDCEPriorityType = "int"
TThostFtdcTradeGroupIDType = "int"
TThostFtdcIsCheckPrepaType = "int"
TThostFtdcUOAAssetmgrTypeType = "char"
TThostFtdcDirectionEnType = "char"
TThostFtdcOffsetFlagEnType = "char"
TThostFtdcHedgeFlagEnType = "char"
TThostFtdcFundIOTypeEnType = "char"
TThostFtdcFundTypeEnType = "char"
TThostFtdcFundDirectionEnType = "char"
TThostFtdcFundMortDirectionEnType = "char"
TThostFtdcSwapBusinessTypeType = "string"
TThostFtdcOptionsTypeType = "char"
TThostFtdcStrikeModeType = "char"
TThostFtdcStrikeTypeType = "char"
TThostFtdcApplyTypeType = "char"
TThostFtdcGiveUpDataSourceType = "char"
TThostFtdcExecOrderSysIDType = "string"
TThostFtdcExecResultType = "char"
TThostFtdcStrikeSequenceType = "int"
TThostFtdcStrikeTimeType = "string"
TThostFtdcCombinationTypeType = "char"
TThostFtdcOptionRoyaltyPriceTypeType = "char"
TThostFtdcBalanceAlgorithmType = "char"
TThostFtdcActionTypeType = "char"
TThostFtdcForQuoteStatusType = "char"
TThostFtdcValueMethodType = "char"
TThostFtdcExecOrderPositionFlagType = "char"
TThostFtdcExecOrderCloseFlagType = "char"
TThostFtdcProductTypeType = "char"
TThostFtdcCZCEUploadFileNameType = "char"
TThostFtdcDCEUploadFileNameType = "char"
TThostFtdcSHFEUploadFileNameType = "char"
TThostFtdcCFFEXUploadFileNameType = "char"
TThostFtdcCombDirectionType = "char"
TThostFtdcStrikeOffsetTypeType = "char"
TThostFtdcReserveOpenAccStasType = "char"
TThostFtdcLoginRemarkType = "string"
TThostFtdcInvestUnitIDType = "string"
TThostFtdcBulletinIDType = "int"
TThostFtdcNewsTypeType = "string"
TThostFtdcNewsUrgencyType = "char"
TThostFtdcAbstractType = "string"
TThostFtdcComeFromType = "string"
TThostFtdcURLLinkType = "string"
TThostFtdcLongIndividualNameType = "string"
TThostFtdcLongFBEBankAccountNameType = "string"
TThostFtdcDateTimeType = "string"
TThostFtdcWeakPasswordSourceType = "char"
TThostFtdcRandomStringType = "string"
TThostFtdcOptSelfCloseFlagType = "char"
TThostFtdcBizTypeType = "char"
TThostFtdcAppTypeType = "char"
TThostFtdcAppIDType = "string"
TThostFtdcSystemInfoLenType = "int"
TThostFtdcAdditionalInfoLenType = "int"
TThostFtdcClientSystemInfoType = "string"
TThostFtdcAdditionalInfoType = "string"
TThostFtdcBase64ClientSystemInfoType = "string"
TThostFtdcBase64AdditionalInfoType = "string"
TThostFtdcCurrentAuthMethodType = "int"
TThostFtdcCaptchaInfoLenType = "int"
TThostFtdcCaptchaInfoType = "string"
TThostFtdcUserTextSeqType = "int"
TThostFtdcHandshakeDataType = "string"
TThostFtdcHandshakeDataLenType = "int"
TThostFtdcCryptoKeyVersionType = "string"
TThostFtdcRsaKeyVersionType = "int"
TThostFtdcSoftwareProviderIDType = "string"
TThostFtdcCollectTimeType = "string"
TThostFtdcQueryFreqType = "int"
TThostFtdcResponseValueType = "char"
TThostFtdcOTCTradeTypeType = "char"
TThostFtdcMatchTypeType = "char"
TThostFtdcOTCTraderIDType = "string"
TThostFtdcRiskValueType = "double"
TThostFtdcIDBNameType = "string"
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import time
from conary.lib import digestlib
from conary.lib import util
from rpath_tools.client import utils
logger = logging.getLogger(__name__)
class Uuid(object):
def __init__(self, uuidFile=None):
self.uuidFile = uuidFile
self._uuid = None
@property
def uuid(self):
if self._uuid is None:
self._uuid = self.read()
return self._uuid
def read(self):
return None
    @classmethod
    def _readFile(cls, path):
        return file(path).readline().strip()
    @classmethod
    def _writeFile(cls, path, contents):
util.mkdirChain(os.path.dirname(path))
file(path, "w").write(contents)
@classmethod
def asString(cls, data):
"""Generate a UUID out of the data"""
assert len(data) == 16
h = "%02x"
fmt = '-'.join([h * 4, h * 2, h * 2, h * 2, h * 6])
return fmt % tuple(ord(x) for x in data)
class GeneratedUuid(Uuid):
def read(self):
uuid = self._generateUuid()
if self.uuidFile:
if not os.path.exists(self.uuidFile):
uuid = self._generateUuid()
self._writeFile(self.uuidFile, uuid)
else:
uuid = self._readFile(self.uuidFile)
return uuid
@classmethod
def _generateUuid(cls):
data = file("/dev/urandom").read(16)
return cls.asString(data)
class LocalUuid(Uuid):
def __init__(self, uuidFile, oldDir, deviceName=None):
self.oldDirPath = os.path.join(os.path.dirname(uuidFile), oldDir)
Uuid.__init__(self, uuidFile)
self._targetSystemId = None
self.deviceName = deviceName
@classmethod
def _readProcVersion(cls):
try:
version = file("/proc/version").read()
except IOError:
return None
return version
@classmethod
def _readInstanceIdFromEC2(cls):
try:
from amiconfig import instancedata
except ImportError:
return None
return instancedata.InstanceData().getInstanceId()
@classmethod
def _getEC2InstanceId(cls):
"""
Return the EC2 instance ID if the system is running in EC2
Return None otherwise.
"""
if not utils.runningInEC2():
return None
return cls._readInstanceIdFromEC2()
@property
def ec2InstanceId(self):
if self._targetSystemId is None:
self._targetSystemId = self._getEC2InstanceId()
return self._targetSystemId
@property
def targetSystemId(self):
return self.ec2InstanceId
def read(self):
instanceId = self.ec2InstanceId
if instanceId is not None:
sha = digestlib.sha1(instanceId)
retuuid = GeneratedUuid.asString(sha.digest()[:16])
else:
dmidecodeUuid = self._getDmidecodeUuid().lower()
retuuid = dmidecodeUuid
if os.path.exists(self.uuidFile):
persistedUuid = self._readFile(self.uuidFile)
if persistedUuid.lower() != retuuid:
self._writeDmidecodeUuid(retuuid)
else:
self._writeDmidecodeUuid(retuuid)
return retuuid
def _getUuidFromMac(self):
"""
Use the mac address from the system to hash a uuid.
"""
# Read mac address of self.deviceName
if utils.runningInEC2():
self.deviceName = 'eth0'
mac = None
if os.path.exists('/sys/class/net'):
if not self.deviceName:
deviceList = sorted( [ x for x in os.listdir('/sys/class/net')
if x != 'lo' ] )
if deviceList:
self.deviceName = deviceList[0]
# Only read sysfs if a device was found; otherwise fall through to the
# ifconfig fallback below.
if self.deviceName:
    mac = open('/sys/class/net/%s/address' % self.deviceName).read().strip()
if not mac:
# Legacy code
if os.path.exists('/sbin/ifconfig'):
logger.warn("No sysfs, falling back to ifconfig command.")
cmd = ['/sbin/ifconfig']
p = subprocess.Popen(cmd, stdout = subprocess.PIPE)
sts = p.wait()
if sts != 0:
raise Exception("Unable to run ifconfig to find mac address"
" for local uuid generation")
lines = p.stdout.read().strip()
# Work around for empty deviceName bug
deviceList = None
if not self.deviceName:
deviceList = sorted([ x.split()[0] for x in lines.split('\n')
if 'lo' not in x and 'HWaddr' in x ])
if deviceList:
self.deviceName = deviceList[0]
matcher = re.compile(r'^%s.*HWaddr\W(.*)$' % self.deviceName)
for line in lines.split('\n'):
match = matcher.match(line)
if match:
mac = match.groups()[0].strip()
if not mac:
raise Exception("Unable to find mac address for "
"local uuid generation")
mac = mac.lower()
if len(mac) > 16:
mac = mac[-16:]
elif len(mac) < 16:
mac = mac + '0'*(16-len(mac))
return self.asString(mac)
def _getDmidecodeUuid(self):
if not os.access("/dev/mem", os.R_OK):
raise Exception("Must run as root")
try:
import dmidecode
except ImportError:
logger.warn("Can't import dmidecode, falling back to dmidecode command.")
return self._getDmidecodeUuidCommand()
try:
return dmidecode.system()['0x0001']['data']['UUID']
except Exception:
# Depending on the target type, various Exceptions can be raised,
# so just handle any exception.
# kvm - AttributeError
# xen - RuntimeError
logger.warn("Can't use dmidecode library, falling back to mac address")
return self._getUuidFromMac()
def _getDmidecodeUuidCommand(self):
try:
dmidecode = "/usr/sbin/dmidecode"
cmd = [ dmidecode, "-s", "system-uuid" ]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE)
sts = p.wait()
if sts != 0:
raise Exception("Unable to extract system-uuid from dmidecode")
uuid = p.stdout.readline().strip()
if not uuid:
raise Exception("Unable to extract system-uuid from dmidecode")
return uuid
except Exception:
logger.warn("Can't use dmidecode command, falling back to mac address")
return self._getUuidFromMac()
def _writeDmidecodeUuid(self, uuid):
destFilePath = os.path.join(self.oldDirPath, "%.1f" % time.time())
self._writeFile(destFilePath, uuid)
self._writeFile(self.uuidFile, uuid)
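# Usage sketch (illustrative only; the file paths are hypothetical): both classes
# expose a lazily computed ``uuid`` property. GeneratedUuid persists a random UUID
# to a file, while LocalUuid derives one from EC2 metadata, dmidecode (needs root)
# or the MAC address, recording each value it writes under a timestamped file in
# ``oldDir``.
if __name__ == '__main__':
    generated = GeneratedUuid(uuidFile='/var/lib/rpath-tools/generated-uuid')
    local = LocalUuid(uuidFile='/var/lib/rpath-tools/local-uuid',
                      oldDir='old-registrations')
    print generated.uuid
    print local.uuid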
|
|
import functools
import operator
import logging
from django.contrib import messages
from django.contrib.auth import logout as django_logout, login as django_login
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.db.models import Q
from django.http import Http404
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, redirect, resolve_url
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_POST
from django.views.generic import DetailView, FormView, ListView
from braces.views import LoginRequiredMixin
from configuration.configuration import QuestionnaireConfiguration
from questionnaire.models import Questionnaire, STATUSES
from questionnaire.utils import query_questionnaires, get_list_values
from questionnaire.view_utils import get_paginator, get_pagination_parameters
from .client import remote_user_client
from .conf import settings
from .forms import WocatAuthenticationForm
from .models import User
logger = logging.getLogger(__name__)
class LoginView(FormView):
"""
This view handles the login form and authenticates users.
"""
form_class = WocatAuthenticationForm
template_name = 'login.html'
success_url = settings.ACCOUNTS_LOGIN_SUCCESS_URL
@method_decorator(never_cache)
@method_decorator(sensitive_post_parameters('password'))
def dispatch(self, *args, **kwargs):
if hasattr(self.request, 'user') and self.request.user.is_authenticated():
return redirect(self.get_success_url())
return super(LoginView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
# Add next_url to context to show notification in login form.
context = super().get_context_data(**kwargs)
context.update({
'next_url': self.request.GET.get('next'),
})
return context
def form_valid(self, form):
# Put the user on the request, and add a welcome message.
user = form.get_user()
django_login(self.request, user)
messages.info(self.request, _('Welcome {}').format(user.firstname))
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""
If this user has logged in before, check if the account was deactivated
when relaunching the wocat website. If so, return a redirect to the
reactivation view on the new wocat website.
"""
has_user = User.objects.filter(email=form.cleaned_data.get('username'))
if has_user.exists() and has_user.count() == 1:
user_info = remote_user_client.get_user_information(has_user[0].pk)
if user_info and not user_info.get('is_active', True):
return HttpResponseRedirect(settings.REACTIVATE_WOCAT_ACCOUNT_URL)
remote_addr = getattr(self.request, 'META', {}).get('REMOTE_ADDR')
if remote_addr:
logger.warning('Invalid login attempt: %s' % remote_addr)
return super().form_invalid(form)
def get_success_url(self):
# Explicitly passed ?next= url takes precedence.
redirect_to = self.request.GET.get('next') or reverse(self.success_url)
# Redirect UNCCD focal points to the list filtered with Questionnaires
# from their country. Only if no "next" is set.
if self.request.GET.get('next') is None:
unccd_countries = self.request.user.get_unccd_countries()
if unccd_countries:
country_keyword = unccd_countries[0].keyword
redirect_to = '{}?{}'.format(
reverse('wocat:questionnaire_list'),
QuestionnaireConfiguration.get_country_filter(
country_keyword)
)
# Prevent redirecting to other/invalid hosts - i.e. prevent xsrf
if not is_safe_url(url=redirect_to, host=self.request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
class ProfileView(LoginRequiredMixin, DetailView):
"""
Display the user's questionnaires (and, in the future, notifications).
Questionnaires are loaded from the template (asynchronously).
"""
template_name = 'questionnaires.html'
def get_object(self, queryset=None):
return self.request.user
def get_questionnaires(self):
"""
Fetch questionnaires for current user.
"""
return query_questionnaires(
request=self.request, configuration_code='wocat',
only_current=False, limit=None
)
def get_status_list(self) -> dict:
"""
Fetch all (distinct) statuses that at least one questionnaire of the
current user has.
"""
questionnaires = self.get_questionnaires()
statuses = questionnaires.order_by(
'status'
).distinct(
'status'
).values_list(
'status', flat=True
)
status_choices = dict(STATUSES) # cast to dict for easier access.
return {status: _(status_choices[status]) for status in statuses}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['statuses'] = self.get_status_list()
return context
class UserDetailView(DetailView):
"""
Show some information about the requested user:
- information (name)
- focal point countries
- public questionnaires
"""
context_object_name = 'detail_user'
model = User
template_name = 'details.html'
def get_object(self, queryset=None):
obj = super().get_object(queryset=queryset)
# Update the user details
user_info = remote_user_client.get_user_information(obj.id)
remote_user_client.update_user(obj, user_info)
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
unccd_countries_list = []
unccd_countries = self.object.get_unccd_countries()
for country in unccd_countries:
unccd_countries_list.append((
country,
'{}?{}'.format(
reverse('wocat:questionnaire_list'),
QuestionnaireConfiguration.get_country_filter(
country.keyword))
))
context['unccd_countries'] = unccd_countries_list
return context
class QuestionnaireListMixin(ListView):
"""
Mixin for paginated questionnaires.
"""
template_name = 'questionnaire_status_list.html'
per_page = 3 # use qcats default paginator
@property
def status(self):
raise NotImplementedError
def get_filter_user(self):
raise NotImplementedError
def get_queryset(self):
"""
Fetch all questionnaires from the current user with the requested
status.
"""
return query_questionnaires(
request=self.request, configuration_code='wocat',
only_current=False, limit=None, **self.get_filter_user()
).filter(
status=self.status
)
def get_context_data(self, **kwargs) -> dict:
"""
Provide context data in qcat's default way. Pagination happens in the
parent's get_context_data method.
"""
context = super().get_context_data(**kwargs)
questionnaires, paginator = get_paginator(
objects=context['object_list'],
page=self.request.GET.get('page', 1),
limit=self.per_page
)
context['list_values'] = get_list_values(
questionnaire_objects=questionnaires, status_filter=Q()
)
context.update(**get_pagination_parameters(
self.request, paginator, questionnaires
))
return context
class PublicQuestionnaireListView(QuestionnaireListMixin):
"""
Get public questionnaires the user defined in the url.
"""
@property
def status(self):
return settings.QUESTIONNAIRE_PUBLIC
def get_filter_user(self):
user = get_object_or_404(User, id=self.kwargs['user_id'])
return {'user': user}
class QuestionnaireStatusListView(LoginRequiredMixin, QuestionnaireListMixin):
"""
Display all questionnaires for the requested status. Results are paginated.
"""
@property
def status(self):
"""
Validate status from request.
"""
try:
status = int(self.request.GET.get('status'))
except (TypeError, ValueError):
raise Http404()
if status not in dict(STATUSES).keys():
raise Http404()
return status
def get_filter_user(self):
"""
If no user is set, questionnaires are fetched according to permissions.
This is the specified behaviour for all statuses (except 'public'), as
questionnaires that require some kind of action should be listed.
For published questionnaires, only the questionnaires that the user has
worked on must be shown - so the user, not the permissions (roles) are
important.
"""
return {'user': self.request.user if self.status is settings.QUESTIONNAIRE_PUBLIC else None} # noqa
class QuestionnaireSearchView(LoginRequiredMixin, ListView):
"""
Search questionnaires by name or compiler, this is only allowed for staff
members.
The same logic is used for both ajax-response for the autocomplete-element
and classic list view.
"""
template_name = 'questionnaire_search.html'
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not self.request.user.is_staff:
raise Http404
return response
def get(self, request, *args, **kwargs):
if self.request.is_ajax():
return JsonResponse(list(self.get_json_data()), safe=False)
else:
return super().get(request, *args, **kwargs)
def get_paginate_by(self, queryset):
return 10 if self.request.is_ajax() else 20
def get_query_filters(self):
search_terms = self.request.GET.get('term', '')
for term in search_terms.split(' '):
name_lookup_params = {
'questiongroup': 'qg_name',
'lookup_by': 'string',
'value': term,
}
country_lookup_params = {
'questiongroup': 'qg_location',
'lookup_by': 'key_value',
'key': 'country',
'value': 'country_%s' % term,
}
yield Q(
Q(questionnairemembership__user__firstname__icontains=term) |
Q(questionnairemembership__user__lastname__icontains=term) |
Q(data__qs_data=name_lookup_params) |
Q(data__qs_data=country_lookup_params)
)
def get_queryset(self):
return Questionnaire.with_status.not_deleted().filter(
functools.reduce(operator.and_, self.get_query_filters())
).distinct()
def get_json_data(self):
"""
Structure as required for frontend.
"""
questionnaires = self.get_queryset()[:self.get_paginate_by(None)]
for questionnaire in questionnaires:
yield {
'name': questionnaire.get_name(),
'url': questionnaire.get_absolute_url(),
'compilers': ', '.join(
[compiler['name'] for compiler in questionnaire.compilers]
),
'country': ', '.join(questionnaire.get_countries()),
'status': questionnaire.get_status_display(),
'id': questionnaire.id,
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
questionnaires, paginator = get_paginator(
objects=self.get_queryset(),
page=self.request.GET.get('page', 1),
limit=self.get_paginate_by(None)
)
context['list_values'] = get_list_values(
questionnaire_objects=questionnaires, status_filter=Q()
)
context.update(**get_pagination_parameters(
self.request, paginator, questionnaires
))
return context
def logout(request):
"""
Log the user out. The actual logout is handled by the authentication
backend as specified in the settings (``settings.AUTH_LOGIN_FORM``).
Args:
``request`` (django.http.HttpRequest): The request object.
Returns:
``HttpResponse``. A rendered Http Response.
"""
django_logout(request)
return HttpResponseRedirect(reverse('home'))
def user_search(request):
"""
JSON view to search for users. The search is handled by the
authentication backend as specified in the settings
(``settings.AUTH_LOGIN_FORM``).
Args:
``request`` (django.http.HttpRequest): The request object with
optional GET parameter ``name``.
Returns:
``JsonResponse``. A rendered JSON response.
"""
search = remote_user_client.search_users(name=request.GET.get('name', ''))
if not search:
search = {
'success': False,
'message': 'Error: The user database cannot be queried right now.'
}
return JsonResponse(search)
@require_POST
@login_required
def user_update(request):
"""
JSON view to create or update a user. The user is queried through
the authentication backend as specified in the settings
(``settings.AUTH_LOGIN_FORM``).
Args:
``request`` (django.http.HttpRequest): The request object with
POST parameter ``uid`` (the user ID).
Returns:
``JsonResponse``. A rendered JSON response.
"""
ret = {
'success': False,
}
user_uid = request.POST.get('uid')
if user_uid is None:
ret['message'] = 'No user ID (uid) provided.'
return JsonResponse(ret)
# Try to find the user in the authentication DB
user_info = remote_user_client.get_user_information(user_uid)
if not user_info:
ret['message'] = ('No user with ID {} found in the authentication '
                  'database.'.format(user_uid))
return JsonResponse(ret)
# Update (or insert) the user details in the local database
try:
user, created = User.objects.get_or_create(pk=user_uid)
except IntegrityError:
ret['message'] = 'Duplicate email address "{}"'.format(
user_info.get('username'))
return JsonResponse(ret)
remote_user_client.update_user(user, user_info)
ret = {
'name': user.get_display_name(),
'success': True,
}
return JsonResponse(ret)
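# Hypothetical URL wiring (illustrative only; the patterns and names below are
# assumptions, not taken from the project): the class-based views above are hooked
# up via ``as_view()`` and the function views are referenced directly. In a real
# project this would live in a separate ``urls.py``.
from django.conf.urls import url
urlpatterns = [
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', logout, name='logout'),
    url(r'^questionnaires/$', ProfileView.as_view(), name='account_questionnaires'),
    url(r'^questionnaires/status/$', QuestionnaireStatusListView.as_view(),
        name='questionnaires_status_list'),
    url(r'^questionnaires/public/(?P<user_id>\d+)/$',
        PublicQuestionnaireListView.as_view(), name='questionnaires_public_list'),
    url(r'^questionnaires/search/$', QuestionnaireSearchView.as_view(),
        name='staff_questionnaires_search'),
    url(r'^user/(?P<pk>\d+)/$', UserDetailView.as_view(), name='user_details'),
    url(r'^user/search/$', user_search, name='user_search'),
    url(r'^user/update/$', user_update, name='user_update'),
]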
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import datetime
try:
import simplejson as json
except ImportError:
import json
from mock import Mock
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import assertRaisesRegex
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack_identity import AUTH_TOKEN_EXPIRES_GRACE_SECONDS
from libcloud.common.openstack_identity import get_class_for_auth_version
from libcloud.common.openstack_identity import OpenStackServiceCatalog
from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection
from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection
from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection_OIDC_access_token
from libcloud.common.openstack_identity import OpenStackIdentityUser
from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver
from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection_VOMS
from libcloud.test import unittest
from libcloud.test import MockHttp
from libcloud.test.secrets import OPENSTACK_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.compute.test_openstack import OpenStackMockHttp
from libcloud.test.compute.test_openstack import OpenStack_2_0_MockHttp
class OpenStackIdentityConnectionTestCase(unittest.TestCase):
def setUp(self):
OpenStackBaseConnection.auth_url = None
OpenStackBaseConnection.conn_class = OpenStackMockHttp
def test_auth_url_is_correctly_assembled(self):
tuples = [
('1.0', OpenStackMockHttp),
('1.1', OpenStackMockHttp),
('2.0', OpenStack_2_0_MockHttp),
('2.0_apikey', OpenStack_2_0_MockHttp),
('2.0_password', OpenStack_2_0_MockHttp)
]
APPEND = 0
NOTAPPEND = 1
auth_urls = [
('https://auth.api.example.com', APPEND, ''),
('https://auth.api.example.com/', NOTAPPEND, '/'),
('https://auth.api.example.com/foo/bar', NOTAPPEND, '/foo/bar'),
('https://auth.api.example.com/foo/bar/', NOTAPPEND, '/foo/bar/')
]
actions = {
'1.0': '/v1.0',
'1.1': '/v1.1/auth',
'2.0': '/v2.0/tokens',
'2.0_apikey': '/v2.0/tokens',
'2.0_password': '/v2.0/tokens'
}
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
for (auth_version, mock_http_class) in tuples:
for (url, should_append_default_path, expected_path) in auth_urls:
connection = \
self._get_mock_connection(mock_http_class=mock_http_class,
auth_url=url)
auth_url = connection.auth_url
cls = get_class_for_auth_version(auth_version=auth_version)
osa = cls(auth_url=auth_url,
user_id=user_id,
key=key,
parent_conn=connection)
try:
osa = osa.authenticate()
except Exception:
pass
if (should_append_default_path == APPEND):
expected_path = actions[auth_version]
self.assertEqual(osa.action, expected_path)
def test_basic_authentication(self):
tuples = [
('1.0', OpenStackMockHttp),
('1.1', OpenStackMockHttp),
('2.0', OpenStack_2_0_MockHttp),
('2.0_apikey', OpenStack_2_0_MockHttp),
('2.0_password', OpenStack_2_0_MockHttp)
]
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
for (auth_version, mock_http_class) in tuples:
connection = \
self._get_mock_connection(mock_http_class=mock_http_class)
auth_url = connection.auth_url
cls = get_class_for_auth_version(auth_version=auth_version)
osa = cls(auth_url=auth_url, user_id=user_id, key=key,
parent_conn=connection)
self.assertEqual(osa.urls, {})
self.assertIsNone(osa.auth_token)
self.assertIsNone(osa.auth_user_info)
osa = osa.authenticate()
self.assertTrue(len(osa.urls) >= 1)
self.assertTrue(osa.auth_token is not None)
if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']:
self.assertTrue(osa.auth_token_expires is not None)
if auth_version in ['2.0', '2.0_apikey', '2.0_password']:
self.assertTrue(osa.auth_user_info is not None)
def test_token_expiration_and_force_reauthentication(self):
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
connection = self._get_mock_connection(OpenStack_2_0_MockHttp)
auth_url = connection.auth_url
yesterday = datetime.datetime.today() - datetime.timedelta(1)
tomorrow = datetime.datetime.today() + datetime.timedelta(1)
osa = OpenStackIdentity_2_0_Connection(auth_url=auth_url,
user_id=user_id,
key=key,
parent_conn=connection)
mocked_auth_method = Mock(wraps=osa._authenticate_2_0_with_body)
osa._authenticate_2_0_with_body = mocked_auth_method
# Force re-auth, expired token
osa.auth_token = None
osa.auth_token_expires = yesterday
count = 5
for i in range(0, count):
osa.authenticate(force=True)
self.assertEqual(mocked_auth_method.call_count, count)
# No force reauth, expired token
osa.auth_token = None
osa.auth_token_expires = yesterday
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
osa.authenticate(force=False)
self.assertEqual(mocked_auth_method.call_count, 1)
# No force reauth, valid / non-expired token
osa.auth_token = None
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
osa.authenticate(force=False)
if i == 0:
osa.auth_token_expires = tomorrow
self.assertEqual(mocked_auth_method.call_count, 1)
# No force reauth, valid / non-expired token which is about to expire in
# less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS
soon = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1)
osa.auth_token = None
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
if i == 0:
osa.auth_token_expires = soon
osa.authenticate(force=False)
self.assertEqual(mocked_auth_method.call_count, 1)
def _get_mock_connection(self, mock_http_class, auth_url=None):
OpenStackBaseConnection.conn_class = mock_http_class
if auth_url is None:
auth_url = "https://auth.api.example.com"
OpenStackBaseConnection.auth_url = auth_url
connection = OpenStackBaseConnection(*OPENSTACK_PARAMS)
connection._ex_force_base_url = "https://www.foo.com"
connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS)
return connection
class OpenStackIdentity_2_0_ConnectionTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_2_0_MockHttp
mock_cls.type = None
OpenStackIdentity_2_0_Connection.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_2_0_Connection(auth_url='http://none',
user_id='test',
key='test',
tenant_name='test',
proxy_url='http://proxy:8080',
timeout=10)
self.auth_instance.auth_token = 'mock'
self.assertEqual(self.auth_instance.proxy_url, 'http://proxy:8080')
def test_list_projects(self):
result = self.auth_instance.list_projects()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'a')
self.assertEqual(result[0].name, 'test')
self.assertEqual(result[0].description, 'test project')
self.assertTrue(result[0].enabled)
class OpenStackIdentity_3_0_ConnectionTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
tenant_name='test',
proxy_url='http://proxy:8080',
timeout=10)
self.auth_instance.auth_token = 'mock'
self.assertEqual(self.auth_instance.proxy_url, 'http://proxy:8080')
def test_token_scope_argument(self):
# Invalid token_scope value
expected_msg = 'Invalid value for "token_scope" argument: foo'
assertRaisesRegex(self, ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='foo')
# Missing tenant_name
expected_msg = 'Must provide tenant_name and domain_name argument'
assertRaisesRegex(self, ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='project')
# Missing domain_name
expected_msg = 'Must provide domain_name argument'
assertRaisesRegex(self, ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='domain',
domain_name=None)
# Scope to project all ok
OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
token_scope='project',
tenant_name='test',
domain_name='Default')
# Scope to domain
OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
token_scope='domain',
tenant_name=None,
domain_name='Default')
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test_user_id',
key='test_key',
token_scope='project',
tenant_name="test_tenant",
domain_name='test_domain',
proxy_url='http://proxy:8080',
timeout=10)
auth.authenticate()
self.assertEqual(auth.proxy_url, 'http://proxy:8080')
def test_list_supported_versions(self):
OpenStackIdentity_3_0_MockHttp.type = 'v3'
versions = self.auth_instance.list_supported_versions()
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].version, 'v2.0')
self.assertEqual(versions[0].url,
'http://192.168.18.100:5000/v2.0/')
self.assertEqual(versions[1].version, 'v3.0')
self.assertEqual(versions[1].url,
'http://192.168.18.100:5000/v3/')
def test_list_domains(self):
domains = self.auth_instance.list_domains()
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0].id, 'default')
self.assertEqual(domains[0].name, 'Default')
self.assertTrue(domains[0].enabled)
def test_list_projects(self):
projects = self.auth_instance.list_projects()
self.assertEqual(len(projects), 4)
self.assertEqual(projects[0].id, 'a')
self.assertEqual(projects[0].domain_id, 'default')
self.assertTrue(projects[0].enabled)
self.assertEqual(projects[0].description, 'Test project')
def test_list_users(self):
users = self.auth_instance.list_users()
self.assertEqual(len(users), 12)
self.assertEqual(users[0].id, 'a')
self.assertEqual(users[0].domain_id, 'default')
self.assertEqual(users[0].enabled, True)
self.assertEqual(users[0].email, 'openstack-test@localhost')
def test_list_roles(self):
roles = self.auth_instance.list_roles()
self.assertEqual(len(roles), 2)
self.assertEqual(roles[1].id, 'b')
self.assertEqual(roles[1].name, 'admin')
def test_list_user_projects(self):
user = self.auth_instance.list_users()[0]
projects = self.auth_instance.list_user_projects(user=user)
self.assertEqual(len(projects), 0)
def test_list_user_domain_roles(self):
user = self.auth_instance.list_users()[0]
domain = self.auth_instance.list_domains()[0]
roles = self.auth_instance.list_user_domain_roles(domain=domain,
user=user)
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0].name, 'admin')
def test_get_domain(self):
domain = self.auth_instance.get_domain(domain_id='default')
self.assertEqual(domain.name, 'Default')
def test_get_user(self):
user = self.auth_instance.get_user(user_id='a')
self.assertEqual(user.id, 'a')
self.assertEqual(user.domain_id, 'default')
self.assertEqual(user.enabled, True)
self.assertEqual(user.email, 'openstack-test@localhost')
def test_get_user_without_email(self):
user = self.auth_instance.get_user(user_id='b')
self.assertEqual(user.id, 'b')
self.assertEqual(user.name, 'userwithoutemail')
self.assertIsNone(user.email)
def test_get_user_without_enabled(self):
user = self.auth_instance.get_user(user_id='c')
self.assertEqual(user.id, 'c')
self.assertEqual(user.name, 'userwithoutenabled')
self.assertIsNone(user.enabled)
def test_create_user(self):
user = self.auth_instance.create_user(email='test2@localhost', password='test1',
name='test2', domain_id='default')
self.assertEqual(user.id, 'c')
self.assertEqual(user.name, 'test2')
def test_enable_user(self):
user = self.auth_instance.list_users()[0]
result = self.auth_instance.enable_user(user=user)
self.assertTrue(isinstance(result, OpenStackIdentityUser))
def test_disable_user(self):
user = self.auth_instance.list_users()[0]
result = self.auth_instance.disable_user(user=user)
self.assertTrue(isinstance(result, OpenStackIdentityUser))
def test_grant_domain_role_to_user(self):
domain = self.auth_instance.list_domains()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.grant_domain_role_to_user(domain=domain,
role=role,
user=user)
self.assertTrue(result)
def test_revoke_domain_role_from_user(self):
domain = self.auth_instance.list_domains()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.revoke_domain_role_from_user(domain=domain,
role=role,
user=user)
self.assertTrue(result)
def test_grant_project_role_to_user(self):
project = self.auth_instance.list_projects()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.grant_project_role_to_user(project=project,
role=role,
user=user)
self.assertTrue(result)
def test_revoke_project_role_from_user(self):
project = self.auth_instance.list_projects()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.revoke_project_role_from_user(project=project,
role=role,
user=user)
self.assertTrue(result)
class OpenStackIdentity_3_0_Connection_OIDC_access_token_federation_projectsTests(
unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_federation_projects_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection_OIDC_access_token.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
tenant_name='oidc',
proxy_url='http://proxy:8080',
timeout=10)
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
token_scope='project',
tenant_name="oidc",
proxy_url='http://proxy:8080',
timeout=10)
auth.authenticate()
class OpenStackIdentity_3_0_Connection_OIDC_access_tokenTests(
unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection_OIDC_access_token.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
tenant_name='oidc',
domain_name='project_name2')
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
token_scope='project',
tenant_name="oidc",
domain_name='project_name2')
auth.authenticate()
class OpenStackIdentity_2_0_Connection_VOMSTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_2_0_Connection_VOMSMockHttp
mock_cls.type = None
OpenStackIdentity_2_0_Connection_VOMS.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
user_id=None,
key='/tmp/proxy.pem',
tenant_name='VO')
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
user_id=None,
key='/tmp/proxy.pem',
token_scope='test',
tenant_name="VO")
auth.authenticate()
class OpenStackServiceCatalogTestCase(unittest.TestCase):
fixtures = ComputeFileFixtures('openstack')
def test_parsing_auth_v1_1(self):
data = self.fixtures.load('_v1_1__auth.json')
data = json.loads(data)
service_catalog = data['auth']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='1.0')
entries = catalog.get_entries()
self.assertEqual(len(entries), 3)
entry = [e for e in entries if e.service_type == 'cloudFilesCDN'][0]
self.assertEqual(entry.service_type, 'cloudFilesCDN')
self.assertIsNone(entry.service_name)
self.assertEqual(len(entry.endpoints), 2)
self.assertEqual(entry.endpoints[0].region, 'ORD')
self.assertEqual(entry.endpoints[0].url,
'https://cdn2.clouddrive.com/v1/MossoCloudFS')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
self.assertEqual(entry.endpoints[1].region, 'LON')
self.assertEqual(entry.endpoints[1].endpoint_type, 'external')
def test_parsing_auth_v2(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
entries = catalog.get_entries()
self.assertEqual(len(entries), 9)
entry = [e for e in entries if e.service_name == 'cloudServers'][0]
self.assertEqual(entry.service_type, 'compute')
self.assertEqual(entry.service_name, 'cloudServers')
self.assertEqual(len(entry.endpoints), 1)
self.assertIsNone(entry.endpoints[0].region)
self.assertEqual(entry.endpoints[0].url,
'https://servers.api.rackspacecloud.com/v1.0/1337')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
def test_parsing_auth_v3(self):
data = self.fixtures.load('_v3__auth.json')
data = json.loads(data)
service_catalog = data['token']['catalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='3.x')
entries = catalog.get_entries()
self.assertEqual(len(entries), 6)
entry = [e for e in entries if e.service_type == 'volume'][0]
self.assertEqual(entry.service_type, 'volume')
self.assertIsNone(entry.service_name)
self.assertEqual(len(entry.endpoints), 3)
self.assertEqual(entry.endpoints[0].region, 'regionOne')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
self.assertEqual(entry.endpoints[1].region, 'regionOne')
self.assertEqual(entry.endpoints[1].endpoint_type, 'admin')
self.assertEqual(entry.endpoints[2].region, 'regionOne')
self.assertEqual(entry.endpoints[2].endpoint_type, 'internal')
def test_get_public_urls(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
public_urls = catalog.get_public_urls(service_type='object-store')
expected_urls = ['https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111',
'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111']
self.assertEqual(public_urls, expected_urls)
def test_get_regions(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
regions = catalog.get_regions(service_type='object-store')
self.assertEqual(regions, ['LON', 'ORD'])
regions = catalog.get_regions(service_type='invalid')
self.assertEqual(regions, [])
def test_get_service_types(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
service_types = catalog.get_service_types()
self.assertEqual(service_types, ['compute', 'image', 'network',
'object-store', 'rax:object-cdn',
'volumev2'])
service_types = catalog.get_service_types(region='ORD')
self.assertEqual(service_types, ['rax:object-cdn'])
def test_get_service_names(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
service_names = catalog.get_service_names()
self.assertEqual(service_names, ['cinderv2', 'cloudFiles',
'cloudFilesCDN', 'cloudServers',
'cloudServersOpenStack',
'cloudServersPreprod',
'glance',
'neutron',
'nova'])
service_names = catalog.get_service_names(service_type='compute')
self.assertEqual(service_names, ['cloudServers',
'cloudServersOpenStack',
'cloudServersPreprod',
'nova'])
class OpenStackIdentity_2_0_MockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v2')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tenants(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v2_0_tenants.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
class OpenStackIdentity_3_0_MockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v3')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v3(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_versions.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_domains.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_projects(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_projects.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_auth_tokens(self, method, url, body, headers):
if method == 'POST':
status = httplib.OK
data = json.loads(body)
if 'password' in data['auth']['identity']:
if data['auth']['identity']['password']['user']['domain']['name'] != 'test_domain' or \
data['auth']['scope']['project']['domain']['name'] != 'test_domain':
status = httplib.UNAUTHORIZED
body = ComputeFileFixtures('openstack').load('_v3__auth.json')
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (status, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users(self, method, url, body, headers):
if method == 'GET':
# list users
body = self.fixtures.load('v3_users.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == 'POST':
# create user
body = self.fixtures.load('v3_create_user.json')
return (httplib.CREATED, body, self.json_content_headers,
httplib.responses[httplib.CREATED])
raise NotImplementedError()
def _v3_users_a(self, method, url, body, headers):
if method == 'GET':
# look up a user
body = self.fixtures.load('v3_users_a.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
if method == 'PATCH':
# enable / disable user
body = self.fixtures.load('v3_users_a.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users_b(self, method, url, body, headers):
if method == 'GET':
# look up a user
body = self.fixtures.load('v3_users_b.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users_c(self, method, url, body, headers):
if method == 'GET':
# look up a user
body = self.fixtures.load('v3_users_c.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_roles(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_roles.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains_default_users_a_roles_a(self, method, url, body, headers):
if method == 'PUT':
# grant domain role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'DELETE':
# revoke domain role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError()
def _v3_projects_a_users_a_roles_a(self, method, url, body, headers):
if method == 'PUT':
# grant project role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'DELETE':
# revoke project role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError()
def _v3_domains_default(self, method, url, body, headers):
if method == 'GET':
# get domain
body = self.fixtures.load('v3_domains_default.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users_a_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = self.fixtures.load('v3_users_a_projects.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains_default_users_a_roles(self, method, url, body, headers):
if method == 'GET':
# get user domain roles
body = self.fixtures.load('v3_domains_default_users_a_roles.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_OS_FEDERATION_identity_providers_idp_protocols_oidc_auth(self, method, url, body, headers):
if method == 'GET':
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_OS_FEDERATION_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"projects": [{"id": "project_id", "name": "project_name"},
{"id": "project_id2", "name": "project_name2"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_auth_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"projects": [{"id": "project_id", "name": "project_name"},
{"id": "project_id2", "name": "project_name2"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
class OpenStackIdentity_3_0_federation_projects_MockHttp(OpenStackIdentity_3_0_MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v3')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v3_OS_FEDERATION_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"projects": [{"id": "project_id"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_auth_projects(self, method, url, body, headers):
return (httplib.INTERNAL_SERVER_ERROR, body, self.json_content_headers,
httplib.responses[httplib.INTERNAL_SERVER_ERROR])
class OpenStackIdentity_2_0_Connection_VOMSMockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v2')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tokens(self, method, url, body, headers):
if method == 'POST':
status = httplib.UNAUTHORIZED
data = json.loads(body)
if 'voms' in data['auth'] and data['auth']['voms'] is True:
status = httplib.OK
body = ComputeFileFixtures('openstack').load('_v2_0__auth.json')
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (status, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v2_0_tenants(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"tenant": [{"name": "tenant_name"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
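# Usage sketch (illustrative only; the endpoint and credentials are placeholders):
# outside of these mocked tests, OpenStackIdentity_3_0_Connection is driven the same
# way -- construct it with an auth URL, credentials and a token scope, then call
# authenticate() and inspect the resulting token.
def _example_authenticate():
    conn = OpenStackIdentity_3_0_Connection(
        auth_url='https://keystone.example.com:5000',
        user_id='demo', key='secret',
        token_scope='project', tenant_name='demo', domain_name='Default')
    conn.authenticate()
    return conn.auth_token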
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
"""
Title: OCR model for reading Captchas
Author: [A_K_Nain](https://twitter.com/A_K_Nain)
Date created: 2020/06/14
Last modified: 2020/06/26
Description: How to implement an OCR model using CNNs, RNNs and CTC loss.
"""
"""
## Introduction
This example demonstrates a simple OCR model built with the Functional API. Apart from
combining CNN and RNN, it also illustrates how you can instantiate a new layer
and use it as an "Endpoint layer" for implementing CTC loss. For a detailed
guide to layer subclassing, please check out
[this page](https://keras.io/guides/making_new_layers_and_models_via_subclassing/)
in the developer guides.
"""
"""
## Setup
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from collections import Counter
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Load the data: [Captcha Images](https://www.kaggle.com/fournierp/captcha-version-2-images)
Let's download the data.
"""
"""shell
curl -LO https://github.com/AakashKumarNain/CaptchaCracker/raw/master/captcha_images_v2.zip
unzip -qq captcha_images_v2.zip
"""
"""
The dataset contains 1040 captcha files as `png` images. The label for each sample is a string,
the name of the file (minus the file extension).
We will map each character in the string to an integer for training the model. Similarly,
we will need to map the predictions of the model back to strings. For this purpose
we will maintain two lookup tables (`StringLookup` layers), mapping characters to integers
and integers back to characters, respectively.
"""
# Path to the data directory
data_dir = Path("./captcha_images_v2/")
# Get list of all the images
images = sorted(list(map(str, list(data_dir.glob("*.png")))))
labels = [img.split(os.path.sep)[-1].split(".png")[0] for img in images]
characters = set(char for label in labels for char in label)
print("Number of images found: ", len(images))
print("Number of labels found: ", len(labels))
print("Number of unique characters: ", len(characters))
print("Characters present: ", characters)
# Batch size for training and validation
batch_size = 16
# Desired image dimensions
img_width = 200
img_height = 50
# Factor by which the image is going to be downsampled
# by the convolutional blocks. We will be using two
# convolution blocks, and each block will have
# a pooling layer which downsamples the features by a factor of 2.
# Hence, the total downsampling factor will be 4.
downsample_factor = 4
# Maximum length of any captcha in the dataset
max_length = max([len(label) for label in labels])
"""
## Preprocessing
"""
# Mapping characters to integers
char_to_num = layers.StringLookup(vocabulary=list(characters), mask_token=None)
# Mapping integers back to original characters
num_to_char = layers.StringLookup(
vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)
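# Quick sanity check of the two lookup layers (an illustrative addition, not part of
# the original example): a label from the dataset should survive a round trip through
# char_to_num and num_to_char unchanged.
_sample_label = labels[0]
_sample_ids = char_to_num(tf.strings.unicode_split(_sample_label, input_encoding="UTF-8"))
print(
    _sample_label,
    "->",
    _sample_ids.numpy(),
    "->",
    tf.strings.reduce_join(num_to_char(_sample_ids)).numpy().decode("utf-8"),
)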
def split_data(images, labels, train_size=0.9, shuffle=True):
# 1. Get the total size of the dataset
size = len(images)
# 2. Make an indices array and shuffle it, if required
indices = np.arange(size)
if shuffle:
np.random.shuffle(indices)
# 3. Get the size of training samples
train_samples = int(size * train_size)
# 4. Split data into training and validation sets
x_train, y_train = images[indices[:train_samples]], labels[indices[:train_samples]]
x_valid, y_valid = images[indices[train_samples:]], labels[indices[train_samples:]]
return x_train, x_valid, y_train, y_valid
# Splitting data into training and validation sets
x_train, x_valid, y_train, y_valid = split_data(np.array(images), np.array(labels))
def encode_single_sample(img_path, label):
# 1. Read image
img = tf.io.read_file(img_path)
# 2. Decode and convert to grayscale
img = tf.io.decode_png(img, channels=1)
# 3. Convert to float32 in [0, 1] range
img = tf.image.convert_image_dtype(img, tf.float32)
# 4. Resize to the desired size
img = tf.image.resize(img, [img_height, img_width])
# 5. Transpose the image because we want the time
# dimension to correspond to the width of the image.
img = tf.transpose(img, perm=[1, 0, 2])
# 6. Map the characters in label to numbers
label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8"))
# 7. Return a dict as our model is expecting two inputs
return {"image": img, "label": label}
"""
## Create `Dataset` objects
"""
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = (
train_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
validation_dataset = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))
validation_dataset = (
validation_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
"""
## Visualize the data
"""
_, ax = plt.subplots(4, 4, figsize=(10, 5))
for batch in train_dataset.take(1):
images = batch["image"]
labels = batch["label"]
for i in range(16):
img = (images[i] * 255).numpy().astype("uint8")
label = tf.strings.reduce_join(num_to_char(labels[i])).numpy().decode("utf-8")
ax[i // 4, i % 4].imshow(img[:, :, 0].T, cmap="gray")
ax[i // 4, i % 4].set_title(label)
ax[i // 4, i % 4].axis("off")
plt.show()
"""
## Model
"""
class CTCLayer(layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.backend.ctc_batch_cost
def call(self, y_true, y_pred):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
loss = self.loss_fn(y_true, y_pred, input_length, label_length)
self.add_loss(loss)
# At test time, just return the computed predictions
return y_pred
def build_model():
# Inputs to the model
input_img = layers.Input(
shape=(img_width, img_height, 1), name="image", dtype="float32"
)
labels = layers.Input(name="label", shape=(None,), dtype="float32")
# First conv block
x = layers.Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv1",
)(input_img)
x = layers.MaxPooling2D((2, 2), name="pool1")(x)
# Second conv block
x = layers.Conv2D(
64,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv2",
)(x)
x = layers.MaxPooling2D((2, 2), name="pool2")(x)
# We have used two max-pooling layers with pool size and strides of 2.
# Hence, the downsampled feature maps are 4x smaller. The number of
# filters in the last layer is 64. Reshape accordingly before
# passing the output to the RNN part of the model.
new_shape = ((img_width // 4), (img_height // 4) * 64)
x = layers.Reshape(target_shape=new_shape, name="reshape")(x)
x = layers.Dense(64, activation="relu", name="dense1")(x)
x = layers.Dropout(0.2)(x)
# RNNs
x = layers.Bidirectional(layers.LSTM(128, return_sequences=True, dropout=0.25))(x)
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, dropout=0.25))(x)
# Output layer
x = layers.Dense(
len(char_to_num.get_vocabulary()) + 1, activation="softmax", name="dense2"
)(x)
# Add CTC layer for calculating CTC loss at each step
output = CTCLayer(name="ctc_loss")(labels, x)
# Define the model
model = keras.models.Model(
inputs=[input_img, labels], outputs=output, name="ocr_model_v1"
)
# Optimizer
opt = keras.optimizers.Adam()
# Compile the model and return
model.compile(optimizer=opt)
return model
# Get the model
model = build_model()
model.summary()
"""
## Training
"""
epochs = 100
early_stopping_patience = 10
# Add early stopping
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=early_stopping_patience, restore_best_weights=True
)
# Train the model
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=epochs,
callbacks=[early_stopping],
)
"""
## Inference
"""
# Get the prediction model by extracting layers till the output layer
prediction_model = keras.models.Model(
model.get_layer(name="image").input, model.get_layer(name="dense2").output
)
prediction_model.summary()
# A utility function to decode the output of the network
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search
results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][
:, :max_length
]
# Iterate over the results and get back the text
output_text = []
for res in results:
res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8")
output_text.append(res)
return output_text
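# As the comment above notes, `keras.backend.ctc_decode` also supports beam search.
# A hypothetical variant for harder tasks (not used anywhere in this example) could
# look like this:
def decode_batch_predictions_beam(pred, beam_width=50):
    input_len = np.ones(pred.shape[0]) * pred.shape[1]
    # greedy=False switches ctc_decode to beam search; top_paths=1 keeps only the
    # best hypothesis for each sample.
    results = keras.backend.ctc_decode(
        pred, input_length=input_len, greedy=False, beam_width=beam_width, top_paths=1
    )[0][0][:, :max_length]
    return [
        tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8")
        for res in results
    ]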
# Let's check results on some validation samples
for batch in validation_dataset.take(1):
batch_images = batch["image"]
batch_labels = batch["label"]
preds = prediction_model.predict(batch_images)
pred_texts = decode_batch_predictions(preds)
orig_texts = []
for label in batch_labels:
label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
orig_texts.append(label)
_, ax = plt.subplots(4, 4, figsize=(15, 5))
for i in range(len(pred_texts)):
img = (batch_images[i, :, :, 0] * 255).numpy().astype(np.uint8)
img = img.T
title = f"Prediction: {pred_texts[i]}"
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(title)
ax[i // 4, i % 4].axis("off")
plt.show()
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded HTTP server."""
import cgi
import errno
import logging
import os
import socket
import time
import urlparse
from tornado import httputil
from tornado import ioloop
from tornado import iostream
from tornado import stack_context
try:
import fcntl
except ImportError:
if os.name == 'nt':
from tornado import win32_support as fcntl
else:
raise
try:
import ssl # Python 2.6+
except ImportError:
ssl = None
try:
import multiprocessing # Python 2.6+
except ImportError:
multiprocessing = None
def _cpu_count():
if multiprocessing is not None:
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except ValueError:
pass
logging.error("Could not detect number of processors; "
"running with one process")
return 1
class HTTPServer(object):
"""A non-blocking, single-threaded HTTP server.
A server is defined by a request callback that takes an HTTPRequest
instance as an argument and writes a valid HTTP response with
request.write(). request.finish() finishes the request (but does not
necessarily close the connection in the case of HTTP/1.1 keep-alive
requests). A simple example server that echoes back the URI you
requested:
import httpserver
import ioloop
def handle_request(request):
message = "You requested %s\n" % request.uri
request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
len(message), message))
request.finish()
http_server = httpserver.HTTPServer(handle_request)
http_server.listen(8888)
ioloop.IOLoop.instance().start()
HTTPServer is a very basic connection handler. Beyond parsing the
HTTP request body and headers, the only HTTP semantics implemented
in HTTPServer is HTTP/1.1 keep-alive connections. We do not, however,
implement chunked encoding, so the request callback must provide a
Content-Length header or implement chunked encoding for HTTP/1.1
requests for the server to run correctly for HTTP/1.1 clients. If
the request handler is unable to do this, you can provide the
no_keep_alive argument to the HTTPServer constructor, which will
ensure the connection is closed on every request no matter what HTTP
version the client is using.
If xheaders is True, we support the X-Real-Ip and X-Scheme headers,
which override the remote IP and HTTP scheme for all requests. These
headers are useful when running Tornado behind a reverse proxy or
load balancer.
HTTPServer can serve HTTPS (SSL) traffic with Python 2.6+ and OpenSSL.
To make this server serve SSL traffic, send the ssl_options dictionary
argument with the arguments required for the ssl.wrap_socket() method,
including "certfile" and "keyfile":
        HTTPServer(application, ssl_options={
"certfile": os.path.join(data_dir, "mydomain.crt"),
"keyfile": os.path.join(data_dir, "mydomain.key"),
})
By default, listen() runs in a single thread in a single process. You
can utilize all available CPUs on this machine by calling bind() and
start() instead of listen():
http_server = httpserver.HTTPServer(handle_request)
http_server.bind(8888)
http_server.start(0) # Forks multiple sub-processes
ioloop.IOLoop.instance().start()
start(0) detects the number of CPUs on this machine and "pre-forks" that
number of child processes so that we have one Tornado process per CPU,
all with their own IOLoop. You can also pass in the specific number of
child processes you want to run with if you want to override this
auto-detection.
"""
def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
xheaders=False, ssl_options=None):
"""Initializes the server with the given request callback.
If you use pre-forking/start() instead of the listen() method to
start your server, you should not pass an IOLoop instance to this
constructor. Each pre-forked child process will create its own
IOLoop instance after the forking process.
"""
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.io_loop = io_loop
self.xheaders = xheaders
self.ssl_options = ssl_options
self._socket = None
self._started = False
def listen(self, port, address=""):
"""Binds to the given port and starts the server in a single process.
This method is a shortcut for:
server.bind(port, address)
server.start(1)
"""
self.bind(port, address)
self.start(1)
def bind(self, port, address=""):
"""Binds this server to the given port on the given IP address.
To start the server, call start(). If you want to run this server
in a single process, you can call listen() as a shortcut to the
sequence of bind() and start() calls.
"""
assert not self._socket
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
flags = fcntl.fcntl(self._socket.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self._socket.fileno(), fcntl.F_SETFD, flags)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.setblocking(0)
self._socket.bind((address, port))
self._socket.listen(128)
def start(self, num_processes=1):
"""Starts this server in the IOLoop.
By default, we run the server in this process and do not fork any
additional child process.
If num_processes is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If num_processes is given and > 1, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the debug=True option to tornado.web.Application).
When using multiple processes, no IOLoops can be created or
referenced until after the call to HTTPServer.start(n).
"""
assert not self._started
self._started = True
if num_processes is None or num_processes <= 0:
num_processes = _cpu_count()
if num_processes > 1 and ioloop.IOLoop.initialized():
logging.error("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start()")
num_processes = 1
if num_processes > 1:
logging.info("Pre-forking %d server processes", num_processes)
for i in range(num_processes):
if os.fork() == 0:
import random
from binascii import hexlify
try:
# If available, use the same method as
# random.py
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
# Include the pid to avoid initializing two
# processes to the same value
                        seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
self.io_loop = ioloop.IOLoop.instance()
self.io_loop.add_handler(
self._socket.fileno(), self._handle_events,
ioloop.IOLoop.READ)
return
os.waitpid(-1, 0)
else:
if not self.io_loop:
self.io_loop = ioloop.IOLoop.instance()
self.io_loop.add_handler(self._socket.fileno(),
self._handle_events,
ioloop.IOLoop.READ)
def stop(self):
self.io_loop.remove_handler(self._socket.fileno())
self._socket.close()
def _handle_events(self, fd, events):
while True:
try:
connection, address = self._socket.accept()
except socket.error, e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
if self.ssl_options is not None:
assert ssl, "Python 2.6+ and OpenSSL required for SSL"
try:
connection = ssl.wrap_socket(connection,
server_side=True,
do_handshake_on_connect=False,
**self.ssl_options)
except ssl.SSLError, err:
if err.args[0] == ssl.SSL_ERROR_EOF:
return connection.close()
else:
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return connection.close()
else:
raise
try:
if self.ssl_options is not None:
stream = iostream.SSLIOStream(connection, io_loop=self.io_loop)
else:
stream = iostream.IOStream(connection, io_loop=self.io_loop)
HTTPConnection(stream, address, self.request_callback,
self.no_keep_alive, self.xheaders)
except:
logging.error("Error in connection callback", exc_info=True)
class _BadRequestException(Exception):
"""Exception class for malformed HTTP requests."""
pass
class HTTPConnection(object):
"""Handles a connection to an HTTP client, executing HTTP requests.
We parse HTTP headers and bodies, and execute the request callback
    until the HTTP connection is closed.
"""
def __init__(self, stream, address, request_callback, no_keep_alive=False,
xheaders=False):
self.stream = stream
self.address = address
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self._request = None
self._request_finished = False
# Save stack context here, outside of any request. This keeps
# contexts from one request from leaking into the next.
self._header_callback = stack_context.wrap(self._on_headers)
self.stream.read_until("\r\n\r\n", self._header_callback)
def write(self, chunk):
assert self._request, "Request closed"
if not self.stream.closed():
self.stream.write(chunk, self._on_write_complete)
def finish(self):
assert self._request, "Request closed"
self._request_finished = True
if not self.stream.writing():
self._finish_request()
def _on_write_complete(self):
if self._request_finished:
self._finish_request()
def _finish_request(self):
if self.no_keep_alive:
disconnect = True
else:
connection_header = self._request.headers.get("Connection")
if self._request.supports_http_1_1():
disconnect = connection_header == "close"
elif ("Content-Length" in self._request.headers
or self._request.method in ("HEAD", "GET")):
disconnect = connection_header != "Keep-Alive"
else:
disconnect = True
self._request = None
self._request_finished = False
if disconnect:
self.stream.close()
return
self.stream.read_until("\r\n\r\n", self._header_callback)
def _on_headers(self, data):
try:
eol = data.find("\r\n")
start_line = data[:eol]
try:
method, uri, version = start_line.split(" ")
except ValueError:
raise _BadRequestException("Malformed HTTP request line")
if not version.startswith("HTTP/"):
raise _BadRequestException("Malformed HTTP version in HTTP Request-Line")
headers = httputil.HTTPHeaders.parse(data[eol:])
self._request = HTTPRequest(
connection=self, method=method, uri=uri, version=version,
headers=headers, remote_ip=self.address[0])
content_length = headers.get("Content-Length")
if content_length:
content_length = int(content_length)
if content_length > self.stream.max_buffer_size:
raise _BadRequestException("Content-Length too long")
if headers.get("Expect") == "100-continue":
self.stream.write("HTTP/1.1 100 (Continue)\r\n\r\n")
self.stream.read_bytes(content_length, self._on_request_body)
return
self.request_callback(self._request)
except _BadRequestException, e:
logging.info("Malformed HTTP request from %s: %s",
self.address[0], e)
self.stream.close()
return
def _on_request_body(self, data):
self._request.body = data
content_type = self._request.headers.get("Content-Type", "")
if self._request.method in ("POST", "PUT"):
if content_type.startswith("application/x-www-form-urlencoded"):
arguments = cgi.parse_qs(self._request.body)
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values:
self._request.arguments.setdefault(name, []).extend(
values)
elif content_type.startswith("multipart/form-data"):
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
self._parse_mime_body(v, data)
break
else:
logging.warning("Invalid multipart/form-data")
self.request_callback(self._request)
def _parse_mime_body(self, boundary, data):
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith('"') and boundary.endswith('"'):
boundary = boundary[1:-1]
if data.endswith("\r\n"):
footer_length = len(boundary) + 6
else:
footer_length = len(boundary) + 4
parts = data[:-footer_length].split("--" + boundary + "\r\n")
for part in parts:
if not part: continue
eoh = part.find("\r\n\r\n")
if eoh == -1:
logging.warning("multipart/form-data missing headers")
continue
headers = httputil.HTTPHeaders.parse(part[:eoh])
name_header = headers.get("Content-Disposition", "")
if not name_header.startswith("form-data;") or \
not part.endswith("\r\n"):
logging.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
name_values = {}
for name_part in name_header[10:].split(";"):
name, name_value = name_part.strip().split("=", 1)
name_values[name] = name_value.strip('"').decode("utf-8")
if not name_values.get("name"):
logging.warning("multipart/form-data value missing name")
continue
name = name_values["name"]
if name_values.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
self._request.files.setdefault(name, []).append(dict(
filename=name_values["filename"], body=value,
content_type=ctype))
else:
self._request.arguments.setdefault(name, []).append(value)
class HTTPRequest(object):
"""A single HTTP request.
GET/POST arguments are available in the arguments property, which
    maps argument names to lists of values (to support multiple values
    for individual names). Names and values are always unicode.
File uploads are available in the files property, which maps file
names to list of files. Each file is a dictionary of the form
{"filename":..., "content_type":..., "body":...}. The content_type
comes from the provided HTTP header and should not be trusted
outright given that it can be easily forged.
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
"""
def __init__(self, method, uri, version="HTTP/1.0", headers=None,
body=None, remote_ip=None, protocol=None, host=None,
files=None, connection=None):
self.method = method
self.uri = uri
self.version = version
self.headers = headers or httputil.HTTPHeaders()
self.body = body or ""
if connection and connection.xheaders:
# Squid uses X-Forwarded-For, others use X-Real-Ip
self.remote_ip = self.headers.get(
"X-Real-Ip", self.headers.get("X-Forwarded-For", remote_ip))
# AWS uses X-Forwarded-Proto
self.protocol = self.headers.get(
"X-Scheme", self.headers.get("X-Forwarded-Proto", protocol))
if self.protocol not in ("http", "https"):
self.protocol = "http"
else:
self.remote_ip = remote_ip
if protocol:
self.protocol = protocol
elif connection and isinstance(connection.stream,
iostream.SSLIOStream):
self.protocol = "https"
else:
self.protocol = "http"
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.files = files or {}
self.connection = connection
self._start_time = time.time()
self._finish_time = None
scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
self.path = path
self.query = query
arguments = cgi.parse_qs(query)
self.arguments = {}
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values: self.arguments[name] = values
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics"""
return self.version == "HTTP/1.1"
def write(self, chunk):
"""Writes the given chunk to the response stream."""
assert isinstance(chunk, str)
self.connection.write(chunk)
def finish(self):
"""Finishes this HTTP request on the open connection."""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer must have been constructed
with cert_reqs set in ssl_options, e.g.:
server = HTTPServer(app,
ssl_options=dict(
certfile="foo.crt",
keyfile="foo.key",
cert_reqs=ssl.CERT_REQUIRED,
ca_certs="cacert.crt"))
The return value is a dictionary, see SSLSocket.getpeercert() in
the standard library for more details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert()
except ssl.SSLError:
return None
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip",
"body")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
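# --- Illustrative usage sketch (not part of the original module) ---
# A minimal HTTPS echo server wired up from the classes above, following the
# HTTPServer docstring. The certificate/key file names are placeholders; use
# real paths, and call start(0) instead of start(1) to pre-fork per CPU.
if __name__ == "__main__":
    def handle_request(request):
        message = "You requested %s\n" % request.uri
        request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
            len(message), message))
        request.finish()
    demo_server = HTTPServer(
        handle_request,
        xheaders=True,  # honor X-Real-Ip / X-Scheme when behind a proxy
        ssl_options={
            "certfile": "mydomain.crt",  # placeholder
            "keyfile": "mydomain.key",   # placeholder
        })
    demo_server.bind(8888)
    demo_server.start(1)
    ioloop.IOLoop.instance().start()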
|
|
#! /usr/bin/env python
#
# video_play.py -- video playback example with Ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
This example shows how Ginga has a fast enough refresh rate to play smooth
video. I was able to play HD video at 30 fps on a 2010-era computer with no
skips.
Caveats:
1. There is no sound. This is due to the lack of a decent python module
that can read video files and provide _both_ audio and video streams.
2. Currently, it expects an OpenCV readable file as a command line parameter.
Only formats supported by OpenCV can be used (typically JPEG encoded).
Requirements:
To run this example you will need the OpenCV bindings for Python installed.
This module lets us access the video stream of a video file frame-by-frame.
Usage:
    $ video_play.py [log options] <video file>
"""
from __future__ import print_function
import sys, os
import time
import logging, logging.handlers
import threading
import numpy
import ginga.util.six as six
if six.PY2:
import Queue
else:
import queue as Queue
import ginga.toolkit as ginga_toolkit
from ginga import AstroImage
from ginga import RGBImage
from ginga import AutoCuts, RGBMap
from ginga.misc import log, Task
try:
import cv, cv2
except ImportError:
print("You need to install the OpenCV python module to run this example")
sys.exit(1)
from ginga import trcalc
trcalc.use('opencv')
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class GingaVision(object):
def __init__(self, logger, ev_quit, options):
super(GingaVision, self).__init__()
self.logger = logger
self.ev_quit = ev_quit
from ginga.gw import Widgets, Viewers, GwHelp, GwMain
self.card = 'default'
# playback rate; changed when we know the actual rate
self.fps = 30
self.playback_rate = 1.0 / self.fps
# Use an AstroImage, not RGBImage for now because we get a
# different default (faster) scaling algorithm
self.pimage = AstroImage.AstroImage()
self.pdata = None
self.app = Widgets.Application(logger=logger)
self.app.add_callback('shutdown', self.quit)
self.top = self.app.make_window("Ginga example2")
self.top.add_callback('close', lambda *args: self.quit())
thread_pool = Task.ThreadPool(2, logger, ev_quit=ev_quit)
thread_pool.startall()
self.main = GwMain.GwMain(logger=logger, ev_quit=ev_quit,
app=self.app, thread_pool=thread_pool)
vbox = Widgets.VBox()
vbox.set_border_width(2)
vbox.set_spacing(1)
fi = Viewers.CanvasView(logger=logger)
fi.enable_autocuts('off')
fi.set_autocut_params('histogram')
fi.enable_autozoom('off')
fi.cut_levels(0, 255)
fi.defer_redraw = False
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
self.fitsimage = fi
# these options are needed for correct panning with this type of image
fi._invertY = False
fi._originUpper = False
# <-- Some optimizations to smooth playback at decent FPS
fi.set_redraw_lag(self.playback_rate)
#fi.set_redraw_lag(0.0)
# PassThruRGBMapper is the most efficient mapper
#rgbmap = RGBMap.NonColorMapper(self.logger)
rgbmap = RGBMap.PassThruRGBMapper(self.logger)
fi.set_rgbmap(rgbmap)
# Clip cuts assumes data does not need to be scaled in cut levels--
# only clipped
fi.set_autocuts(AutoCuts.Clip(logger=self.logger))
# <-- end optimizations
bd = fi.get_bindings()
bd.enable_all(True)
fi.set_desired_size(512, 512)
iw = Viewers.GingaViewerWidget(viewer=fi)
vbox.add_widget(iw, stretch=1)
hbox = Widgets.HBox()
hbox.set_margins(4, 2, 4, 2)
wopen = Widgets.Button("Open File")
#wopen.clicked.connect(self.open_file)
wquit = Widgets.Button("Quit")
wquit.add_callback('activated', lambda *args: self.quit())
for w in (wopen, wquit):
hbox.add_widget(w, stretch=0)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
self.top.set_widget(vbox)
self.top.set_title("Video Example Viewer")
def quit(self):
self.logger.info("quit called")
self.top.delete()
self.ev_quit.set()
def set_playback_rate(self, fps):
self.fps = fps
self.playback_rate = 1.0 / self.fps
self.fitsimage.set_redraw_lag(self.playback_rate)
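        # For example, a 25 fps video gives a playback_rate (and redraw lag)
        # of 0.04 s per frame; capture_video() below sleeps for whatever is
        # left of that budget after reading and drawing each frame.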
def show_frame(self, img):
self.logger.debug("updating image")
try:
if (self.pdata is None) or (img.shape != self.pdata.shape):
self.pdata = numpy.copy(img)
self.pimage.set_data(self.pdata)
self.main.gui_call(self.fitsimage.set_image, self.pimage)
else:
#self.pimage.set_data(img)
self.pdata[::] = img[::]
self.main.gui_call(self.fitsimage.redraw)
except Exception as e:
self.logger.error("Error unpacking packet: %s" % (
str(e)))
def capture_video(self, device):
self.logger.info("capture video loop starting...")
cap = cv2.VideoCapture(device)
# Get width and height of frames and resize window
width = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)
height = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
self.logger.info("Video is %dx%d resolution" % (width, height))
bd = 50
self.main.gui_do(self.top.resize, width+bd, height+bd)
# Get the frame rate
fps = cap.get(cv.CV_CAP_PROP_FPS)
if fps is not None:
if not numpy.isnan(fps) and float(fps) >= 1.0:
self.logger.info("Video rate is %d fps" % (fps))
self.set_playback_rate(fps)
# Get the frame count
num_frames = cap.get(cv.CV_CAP_PROP_FRAME_COUNT)
self.logger.info("There are %d frames" % (num_frames))
# video frames seem to be returned with blue channel in LSByte
self.pimage.set_order('BGR')
frame = 0
while not self.ev_quit.isSet():
start_time = time.time()
self.logger.debug("capture frame")
frame += 1
f, img = cap.read()
self.logger.debug("frame %d: capture time: %.4f" % (
frame, time.time() - start_time))
split_time = time.time()
if img is not None:
self.show_frame(img)
end_time = time.time()
self.logger.debug("redraw time %.4f sec" % (end_time-split_time))
elapsed_time = end_time - start_time
sleep_time = self.playback_rate - elapsed_time
if sleep_time < 0:
self.logger.warning("underrun %.4f sec" % (-sleep_time))
else:
sleep_time = max(sleep_time, 0.0)
self.logger.debug("sleeping for %.4f sec" % (sleep_time))
time.sleep(sleep_time)
#cv2.waitKey(1)
self.logger.info("capture video loop terminating...")
def main(options, args):
logger = log.get_logger("example2", options=options)
if options.toolkit is None:
logger.error("Please choose a GUI toolkit with -t option")
# decide our toolkit, then import
ginga_toolkit.use(options.toolkit)
# event for synchronizing exit of all threads
ev_quit = threading.Event()
gv = GingaVision(logger, ev_quit, options)
gv.top.resize(670, 540)
gv.top.show()
gv.top.raise_()
# start video capture thread
if len(args) > 0:
filename = args[0]
else:
# default video input device
filename = 0
gv.main.nongui_do(gv.capture_video, filename)
gv.main.mainloop()
logger.info("program terminating...")
sys.exit(0)
if __name__ == '__main__':
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--port", dest="port", metavar="NUM",
type='int', default=23099,
help="Port to use for receiving data")
optprs.add_option("--other", dest="other", metavar="HOST",
help="Host to communicate with")
optprs.add_option("-t", "--toolkit", dest="toolkit", metavar="NAME",
default='qt',
help="Choose GUI toolkit (gtk|qt)")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
log.addlogopts(optprs)
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
#END
|
|
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import re
SEMVER_SPEC_VERSION = '2.0.0'
class _R(object):
def __init__(self, i):
self.i = i
def __call__(self):
v = self.i
self.i += 1
return v
def value(self):
return self.i
class Extendlist(list):
def __setitem__(self, i, v):
try:
list.__setitem__(self, i, v)
except IndexError:
if len(self) == i:
self.append(v)
else:
raise
def list_get(xs, i):
try:
return xs[i]
except IndexError:
return None
R = _R(0)
src = Extendlist()
regexp = {}
# The following Regular Expressions can be used for tokenizing,
# validating, and parsing SemVer version strings.
# ## Numeric Identifier
# A single `0`, or a non-zero digit followed by zero or more digits.
NUMERICIDENTIFIER = R()
src[NUMERICIDENTIFIER] = '0|[1-9]\\d*'
NUMERICIDENTIFIERLOOSE = R()
src[NUMERICIDENTIFIERLOOSE] = '[0-9]+'
# ## Non-numeric Identifier
# Zero or more digits, followed by a letter or hyphen, and then zero or
# more letters, digits, or hyphens.
NONNUMERICIDENTIFIER = R()
src[NONNUMERICIDENTIFIER] = '\\d*[a-zA-Z-][a-zA-Z0-9-]*'
# ## Main Version
# Three dot-separated numeric identifiers.
MAINVERSION = R()
src[MAINVERSION] = ('(' + src[NUMERICIDENTIFIER] + ')\\.' +
'(' + src[NUMERICIDENTIFIER] + ')\\.' +
'(' + src[NUMERICIDENTIFIER] + ')')
MAINVERSIONLOOSE = R()
src[MAINVERSIONLOOSE] = ('(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' +
'(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' +
'(' + src[NUMERICIDENTIFIERLOOSE] + ')')
# ## Pre-release Version Identifier
# A numeric identifier, or a non-numeric identifier.
PRERELEASEIDENTIFIER = R()
src[PRERELEASEIDENTIFIER] = ('(?:' + src[NUMERICIDENTIFIER] +
'|' + src[NONNUMERICIDENTIFIER] + ')')
PRERELEASEIDENTIFIERLOOSE = R()
src[PRERELEASEIDENTIFIERLOOSE] = ('(?:' + src[NUMERICIDENTIFIERLOOSE] +
'|' + src[NONNUMERICIDENTIFIER] + ')')
# ## Pre-release Version
# Hyphen, followed by one or more dot-separated pre-release version
# identifiers.
PRERELEASE = R()
src[PRERELEASE] = ('(?:-(' + src[PRERELEASEIDENTIFIER] +
'(?:\\.' + src[PRERELEASEIDENTIFIER] + ')*))')
PRERELEASELOOSE = R()
src[PRERELEASELOOSE] = ('(?:-?(' + src[PRERELEASEIDENTIFIERLOOSE] +
'(?:\\.' + src[PRERELEASEIDENTIFIERLOOSE] + ')*))')
# ## Build Metadata Identifier
# Any combination of digits, letters, or hyphens.
BUILDIDENTIFIER = R()
src[BUILDIDENTIFIER] = '[0-9A-Za-z-]+'
# ## Build Metadata
# Plus sign, followed by one or more period-separated build metadata
# identifiers.
BUILD = R()
src[BUILD] = ('(?:\\+(' + src[BUILDIDENTIFIER] +
'(?:\\.' + src[BUILDIDENTIFIER] + ')*))')
# ## Full Version String
# A main version, followed optionally by a pre-release version and
# build metadata.
# Note that the only major, minor, patch, and pre-release sections of
# the version string are capturing groups. The build metadata is not a
# capturing group, because it should not ever be used in version
# comparison.
FULL = R()
FULLPLAIN = ('v?' + src[MAINVERSION] + src[PRERELEASE] + '?' + src[BUILD] + '?')
src[FULL] = '^' + FULLPLAIN + '$'
# like full, but allows v1.2.3 and =1.2.3, which people do sometimes.
# also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty
# common in the npm registry.
LOOSEPLAIN = ('[v=\\s]*' + src[MAINVERSIONLOOSE] +
src[PRERELEASELOOSE] + '?' +
src[BUILD] + '?')
LOOSE = R()
src[LOOSE] = '^' + LOOSEPLAIN + '$'
GTLT = R()
src[GTLT] = '((?:<|>)?=?)'
# Something like "2.*" or "1.2.x".
# Note that "x.x" is a valid xRange identifier, meaning "any version"
# Only the first item is strictly required.
XRANGEIDENTIFIERLOOSE = R()
src[XRANGEIDENTIFIERLOOSE] = src[NUMERICIDENTIFIERLOOSE] + '|x|X|\\*'
XRANGEIDENTIFIER = R()
src[XRANGEIDENTIFIER] = src[NUMERICIDENTIFIER] + '|x|X|\\*'
XRANGEPLAIN = R()
src[XRANGEPLAIN] = ('[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' +
'(?:\\.(' + src[XRANGEIDENTIFIER] + ')' +
'(?:\\.(' + src[XRANGEIDENTIFIER] + ')' +
'(?:(' + src[PRERELEASE] + ')' +
')?)?)?')
XRANGEPLAINLOOSE = R()
src[XRANGEPLAINLOOSE] = ('[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' +
'(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' +
'(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' +
'(?:(' + src[PRERELEASELOOSE] + ')' +
')?)?)?')
# >=2.x, for example, means >=2.0.0-0
# <1.x would be the same as "<1.0.0-0", though.
XRANGE = R()
src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'
XRANGELOOSE = R()
src[XRANGELOOSE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAINLOOSE] + '$'
# Tilde ranges.
# Meaning is "reasonably at or greater than"
LONETILDE = R()
src[LONETILDE] = '(?:~>?)'
TILDETRIM = R()
src[TILDETRIM] = '(\\s*)' + src[LONETILDE] + '\\s+'
regexp[TILDETRIM] = re.compile(src[TILDETRIM], re.M)
tildeTrimReplace = r'\1~'
TILDE = R()
src[TILDE] = '^' + src[LONETILDE] + src[XRANGEPLAIN] + '$'
TILDELOOSE = R()
src[TILDELOOSE] = ('^' + src[LONETILDE] + src[XRANGEPLAINLOOSE] + '$')
# Caret ranges.
# Meaning is "at least and backwards compatible with"
LONECARET = R()
src[LONECARET] = '(?:\\^)'
CARETTRIM = R()
src[CARETTRIM] = '(\\s*)' + src[LONECARET] + '\\s+'
regexp[CARETTRIM] = re.compile(src[CARETTRIM], re.M)
caretTrimReplace = r'\1^'
CARET = R()
src[CARET] = '^' + src[LONECARET] + src[XRANGEPLAIN] + '$'
CARETLOOSE = R()
src[CARETLOOSE] = '^' + src[LONECARET] + src[XRANGEPLAINLOOSE] + '$'
# A simple gt/lt/eq thing, or just "" to indicate "any version"
COMPARATORLOOSE = R()
src[COMPARATORLOOSE] = '^' + src[GTLT] + '\\s*(' + LOOSEPLAIN + ')$|^$'
COMPARATOR = R()
src[COMPARATOR] = '^' + src[GTLT] + '\\s*(' + FULLPLAIN + ')$|^$'
# An expression to strip any whitespace between the gtlt and the thing
# it modifies, so that `> 1.2.3` ==> `>1.2.3`
COMPARATORTRIM = R()
src[COMPARATORTRIM] = ('(\\s*)' + src[GTLT] +
'\\s*(' + LOOSEPLAIN + '|' + src[XRANGEPLAIN] + ')')
# this one has to use the /g flag
regexp[COMPARATORTRIM] = re.compile(src[COMPARATORTRIM], re.M)
comparatorTrimReplace = r'\1\2\3'
# Something like `1.2.3 - 1.2.4`
# Note that these all use the loose form, because they'll be
# checked against either the strict or loose comparator form
# later.
HYPHENRANGE = R()
src[HYPHENRANGE] = ('^\\s*(' + src[XRANGEPLAIN] + ')' +
'\\s+-\\s+' +
'(' + src[XRANGEPLAIN] + ')' +
'\\s*$')
HYPHENRANGELOOSE = R()
src[HYPHENRANGELOOSE] = ('^\\s*(' + src[XRANGEPLAINLOOSE] + ')' +
'\\s+-\\s+' +
'(' + src[XRANGEPLAINLOOSE] + ')' +
'\\s*$')
# Star ranges basically just allow anything at all.
STAR = R()
src[STAR] = '(<|>)?=?\\s*\\*'
# Version-name recovery, for convenience.
RECOVERYVERSIONNAME = R()
src[RECOVERYVERSIONNAME] = ('v?({n})(?:\\.({n}))?{pre}?'.format(n=src[NUMERICIDENTIFIER], pre=src[PRERELEASELOOSE]))
# Compile to actual regexp objects.
# All are flag-free, unless they were created above with a flag.
for i in range(R.value()):
logger.debug("genregxp %s %s", i, src[i])
if i not in regexp:
regexp[i] = re.compile(src[i])
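# Illustrative example (not part of the original module): the compiled FULL
# pattern captures major, minor, patch, pre-release and build metadata, e.g.
#   regexp[FULL].search("1.2.3-alpha.1+build.5").groups()
#   -> ('1', '2', '3', 'alpha.1', 'build.5')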
def parse(version, loose):
if loose:
r = regexp[LOOSE]
else:
r = regexp[FULL]
m = r.search(version)
if m:
return semver(version, loose)
else:
return None
def valid(version, loose):
    v = parse(version, loose)
    if v and v.version:
        return v
    else:
        return None
def clean(version, loose):
s = parse(version, loose)
if s:
return s.version
else:
return None
NUMERIC = re.compile(r"^\d+$")
def semver(version, loose):
if isinstance(version, SemVer):
if version.loose == loose:
return version
else:
version = version.version
elif not isinstance(version, str): # xxx:
raise ValueError("Invalid Version: {}".format(version))
"""
if (!(this instanceof SemVer))
return new SemVer(version, loose);
"""
return SemVer(version, loose)
make_semver = semver
class SemVer(object):
def __init__(self, version, loose):
logger.debug("SemVer %s, %s", version, loose)
self.loose = loose
self.raw = version
m = regexp[LOOSE if loose else FULL].search(version.strip())
if not m:
if not loose:
raise ValueError("Invalid Version: {}".format(version))
m = regexp[RECOVERYVERSIONNAME].search(version.strip())
self.major = int(m.group(1)) if m.group(1) else 0
self.minor = int(m.group(2)) if m.group(2) else 0
self.patch = 0
if not m.group(3):
self.prerelease = []
else:
self.prerelease = [(int(id) if NUMERIC.search(id) else id)
for id in m.group(3).split(".")]
else:
# these are actually numbers
self.major = int(m.group(1))
self.minor = int(m.group(2))
self.patch = int(m.group(3))
# numberify any prerelease numeric ids
if not m.group(4):
self.prerelease = []
else:
self.prerelease = [(int(id) if NUMERIC.search(id) else id)
for id in m.group(4).split(".")]
if m.group(5):
self.build = m.group(5).split(".")
else:
self.build = []
self.format() # xxx:
def format(self):
self.version = "{}.{}.{}".format(self.major, self.minor, self.patch)
if len(self.prerelease) > 0:
self.version += ("-{}".format(".".join(str(v) for v in self.prerelease)))
return self.version
def __repr__(self):
return "<SemVer {!r} >".format(self)
def __str__(self):
return self.version
def compare(self, other):
logger.debug('SemVer.compare %s %s %s', self.version, self.loose, other)
if not isinstance(other, SemVer):
other = make_semver(other, self.loose)
result = self.compare_main(other) or self.compare_pre(other)
logger.debug("compare result %s", result)
return result
def compare_main(self, other):
if not isinstance(other, SemVer):
other = make_semver(other, self.loose)
return (compare_identifiers(str(self.major), str(other.major)) or
compare_identifiers(str(self.minor), str(other.minor)) or
compare_identifiers(str(self.patch), str(other.patch)))
def compare_pre(self, other):
if not isinstance(other, SemVer):
other = make_semver(other, self.loose)
# NOT having a prerelease is > having one
is_self_more_than_zero = len(self.prerelease) > 0
is_other_more_than_zero = len(other.prerelease) > 0
if not is_self_more_than_zero and is_other_more_than_zero:
return 1
elif is_self_more_than_zero and not is_other_more_than_zero:
return -1
elif not is_self_more_than_zero and not is_other_more_than_zero:
return 0
i = 0
while True:
a = list_get(self.prerelease, i)
b = list_get(other.prerelease, i)
logger.debug("prerelease compare %s: %s %s", i, a, b)
i += 1
if a is None and b is None:
return 0
elif b is None:
return 1
elif a is None:
return -1
elif a == b:
continue
else:
return compare_identifiers(str(a), str(b))
def inc(self, release):
self._inc(release)
i = -1
while len(self.prerelease) > 1 and self.prerelease[i] == 0:
self.prerelease.pop()
self.format()
return self
def _inc(self, release):
logger.debug("inc release %s %s", self.prerelease, release)
if release == 'premajor':
self._inc("major")
self._inc("pre")
elif release == "preminor":
self._inc("minor")
self._inc("pre")
elif release == "prepatch":
self._inc("patch")
self._inc("pre")
elif release == 'prerelease':
if len(self.prerelease) == 0:
self._inc("patch")
self._inc("pre")
elif release == "major":
self.major += 1
self.minor = -1
self.minor += 1
self.patch = 0
self.prerelease = []
elif release == "minor":
self.minor += 1
self.patch = 0
self.prerelease = []
elif release == "patch":
# If this is not a pre-release version, it will increment the patch.
# If it is a pre-release it will bump up to the same patch version.
# 1.2.0-5 patches to 1.2.0
# 1.2.0 patches to 1.2.1
if len(self.prerelease) == 0:
self.patch += 1
self.prerelease = []
elif release == "pre":
            # This probably shouldn't be used publicly.
# 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction.
logger.debug("inc prerelease %s", self.prerelease)
if len(self.prerelease) == 0:
self.prerelease = [0]
else:
i = len(self.prerelease) - 1
while i >= 0:
if isinstance(self.prerelease[i], int):
self.prerelease[i] += 1
                        i = -2
i -= 1
if i == -1: # didn't increment anything
self.prerelease.append(0)
else:
raise ValueError('invalid increment argument: {}'.format(release))
return self
def inc(version, release, loose): # wow!
try:
return make_semver(version, loose).inc(release).version
except Exception as e:
logger.debug(e, exc_info=5)
return None
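# Illustrative behaviour of inc() (not part of the original module):
#   inc("1.2.3", "minor", False)      -> "1.3.0"
#   inc("1.2.3", "prerelease", False) -> "1.2.4-0"
#   inc("1.2.3", "premajor", False)   -> "2.0.0-0"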
def compare_identifiers(a, b):
anum = NUMERIC.search(a)
bnum = NUMERIC.search(b)
if anum and bnum:
a = int(a)
b = int(b)
if anum and not bnum:
return -1
elif bnum and not anum:
return 1
elif a < b:
return -1
elif a > b:
return 1
else:
return 0
def rcompare_identifiers(a, b):
return compare_identifiers(b, a)
def compare(a, b, loose):
return make_semver(a, loose).compare(b)
def compare_loose(a, b):
return compare(a, b, True)
def rcompare(a, b, loose):
return compare(b, a, loose)
def sort(list, loose):
list.sort(lambda a, b: compare(a, b, loose))
return list
def rsort(list, loose):
list.sort(lambda a, b: rcompare(a, b, loose))
return list
def gt(a, b, loose):
return compare(a, b, loose) > 0
def lt(a, b, loose):
return compare(a, b, loose) < 0
def eq(a, b, loose):
return compare(a, b, loose) == 0
def neq(a, b, loose):
return compare(a, b, loose) != 0
def gte(a, b, loose):
return compare(a, b, loose) >= 0
def lte(a, b, loose):
return compare(a, b, loose) <= 0
def cmp(a, op, b, loose):
logger.debug("cmp: %s", op)
if op == "===":
return a == b
elif op == "!==":
return a != b
elif op == "" or op == "=" or op == "==":
return eq(a, b, loose)
elif op == "!=":
return neq(a, b, loose)
elif op == ">":
return gt(a, b, loose)
elif op == ">=":
return gte(a, b, loose)
elif op == "<":
return lt(a, b, loose)
elif op == "<=":
return lte(a, b, loose)
else:
raise ValueError("Invalid operator: {}".format(op))
def comparator(comp, loose):
if isinstance(comp, Comparator):
if(comp.loose == loose):
return comp
else:
comp = comp.value
# if (!(this instanceof Comparator))
# return new Comparator(comp, loose)
return Comparator(comp, loose)
make_comparator = comparator
ANY = object()
class Comparator(object):
semver = None
def __init__(self, comp, loose):
logger.debug("comparator: %s %s", comp, loose)
self.loose = loose
self.parse(comp)
if self.semver == ANY:
self.value = ""
else:
self.value = self.operator + self.semver.version
def parse(self, comp):
if self.loose:
r = regexp[COMPARATORLOOSE]
else:
r = regexp[COMPARATOR]
logger.debug("parse comp=%s", comp)
m = r.search(comp)
if m is None:
raise ValueError("Invalid comparator: {}".format(comp))
self.operator = m.group(1)
# if it literally is just '>' or '' then allow anything.
if m.group(2) is None:
self.semver = ANY
else:
self.semver = semver(m.group(2), self.loose)
# <1.2.3-rc DOES allow 1.2.3-beta (has prerelease)
# >=1.2.3 DOES NOT allow 1.2.3-beta
# <=1.2.3 DOES allow 1.2.3-beta
# However, <1.2.3 does NOT allow 1.2.3-beta,
# even though `1.2.3-beta < 1.2.3`
# The assumption is that the 1.2.3 version has something you
# *don't* want, so we push the prerelease down to the minimum.
            if self.operator == '<' and len(self.semver.prerelease) == 0:
self.semver.prerelease = ["0"]
self.semver.format()
logger.debug("Comparator.parse semver %s", self.semver)
def __repr__(self):
return '<SemVer Comparator "{}">'.format(self)
def __str__(self):
return self.value
def test(self, version):
logger.debug('Comparator, test %s, %s', version, self.loose)
if self.semver == ANY:
return True
else:
return cmp(version, self.operator, self.semver, self.loose)
def make_range(range_, loose):
if isinstance(range_, Range) and range_.loose == loose:
return range_
# if (!(this instanceof Range))
# return new Range(range, loose);
return Range(range_, loose)
class Range(object):
def __init__(self, range_, loose):
self.loose = loose
# First, split based on boolean or ||
self.raw = range_
xs = [self.parse_range(r.strip()) for r in re.split(r"\s*\|\|\s*", range_)]
        self.set = [r for r in xs if len(r) > 0]
if not len(self.set):
raise ValueError("Invalid SemVer Range: {}".format(range_))
self.format()
def __repr__(self):
return '<SemVer Range "{}">'.format(self.range)
def format(self):
self.range = "||".join([" ".join(c.value for c in comps).strip() for comps in self.set]).strip()
logger.debug("Range format %s", self.range)
return self.range
def __str__(self):
return self.range
def parse_range(self, range_):
loose = self.loose
logger.debug('range %s %s', range_, loose)
# `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4`
if loose:
hr = regexp[HYPHENRANGELOOSE]
else:
hr = regexp[HYPHENRANGE]
range_ = hr.sub(hyphen_replace, range_,)
logger.debug('hyphen replace %s', range_)
# `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5`
range_ = regexp[COMPARATORTRIM].sub(comparatorTrimReplace, range_)
logger.debug('comparator trim %s, %s', range_, regexp[COMPARATORTRIM])
# `~ 1.2.3` => `~1.2.3`
range_ = regexp[TILDETRIM].sub(tildeTrimReplace, range_)
# `^ 1.2.3` => `^1.2.3`
range_ = regexp[CARETTRIM].sub(caretTrimReplace, range_)
# normalize spaces
range_ = " ".join(re.split("\s+", range_))
# At this point, the range is completely trimmed and
# ready to be split into comparators.
if loose:
comp_re = regexp[COMPARATORLOOSE]
else:
comp_re = regexp[COMPARATOR]
set_ = re.split("\s+", ' '.join([parse_comparator(comp, loose) for comp in range_.split(" ")]))
if self.loose:
# in loose mode, throw out any that are not valid comparators
set_ = [comp for comp in set_ if comp_re.search(comp)]
set_ = [make_comparator(comp, loose) for comp in set_]
return set_
def test(self, version):
if version is None: # xxx
return False
for e in self.set:
if test_set(e, version):
return True
return False
# Mostly just for testing and legacy API reasons
def to_comparators(range_, loose):
return [" ".join([c.value for c in comp]).strip().split(" ")
for comp in make_range(range_, loose).set]
# comprised of xranges, tildes, stars, and gtlt's at this point.
# already replaced the hyphen ranges
# turn into a set of JUST comparators.
def parse_comparator(comp, loose):
logger.debug('comp %s', comp)
comp = replace_carets(comp, loose)
logger.debug('caret %s', comp)
comp = replace_tildes(comp, loose)
logger.debug('tildes %s', comp)
comp = replace_xranges(comp, loose)
logger.debug('xrange %s', comp)
comp = replace_stars(comp, loose)
logger.debug('stars %s', comp)
return comp
def is_x(id):
return id is None or id == "" or id.lower() == "x" or id == "*"
# ~, ~> --> * (any, kinda silly)
# ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0
# ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0
# ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0
# ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0
# ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0
def replace_tildes(comp, loose):
return " ".join([replace_tilde(c, loose)
for c in re.split("\s+", comp.strip())])
def replace_tilde(comp, loose):
if loose:
r = regexp[TILDELOOSE]
else:
r = regexp[TILDE]
def repl(mob):
_ = mob.group(0)
M, m, p, pr, _ = mob.groups()
logger.debug("tilde %s %s %s %s %s %s", comp, _, M, m, p, pr)
if is_x(M):
ret = ""
elif is_x(m):
ret = '>=' + M + '.0.0-0 <' + str(int(M) + 1) + '.0.0-0'
elif is_x(p):
# ~1.2 == >=1.2.0- <1.3.0-
ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + str(int(m) + 1) + '.0-0'
elif pr:
logger.debug("replaceTilde pr %s", pr)
if (pr[0] != "-"):
pr = '-' + pr
ret = '>=' + M + '.' + m + '.' + p + pr +' <' + M + '.' + str(int(m) + 1) + '.0-0'
else:
# ~1.2.3 == >=1.2.3-0 <1.3.0-0
ret = '>=' + M + '.' + m + '.' + p + '-0' +' <' + M + '.' + str(int(m) + 1) + '.0-0'
logger.debug('tilde return, %s', ret)
return ret
return r.sub(repl, comp)
# ^ --> * (any, kinda silly)
# ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0
# ^2.0, ^2.0.x --> >=2.0.0 <3.0.0
# ^1.2, ^1.2.x --> >=1.2.0 <2.0.0
# ^1.2.3 --> >=1.2.3 <2.0.0
# ^1.2.0 --> >=1.2.0 <2.0.0
def replace_carets(comp, loose):
return " ".join([replace_caret(c, loose)
for c in re.split("\s+", comp.strip())])
def replace_caret(comp, loose):
if loose:
r = regexp[CARETLOOSE]
else:
r = regexp[CARET]
def repl(mob):
m0 = mob.group(0)
M, m, p, pr, _ = mob.groups()
logger.debug("caret %s %s %s %s %s %s", comp, m0, M, m, p, pr)
if is_x(M):
ret = ""
elif is_x(m):
ret = '>=' + M + '.0.0-0 <' + str((int(M) + 1)) + '.0.0-0'
elif is_x(p):
if M == "0":
ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + str((int(m) + 1)) + '.0-0'
else:
ret = '>=' + M + '.' + m + '.0-0 <' + str(int(M) + 1) + '.0.0-0'
elif pr:
logger.debug('replaceCaret pr %s', pr)
if pr[0] != "-":
pr = "-" + pr
if M == "0":
if m == "0":
ret = '=' + M + '.' + m + '.' + (p or "") + pr
else:
ret = '>=' + M + '.' + m + '.' + (p or "") + pr +' <' + M + '.' + str(int(m) + 1) + '.0-0'
else:
ret = '>=' + M + '.' + m + '.' + (p or "") + pr + ' <' + str(int(M) + 1) + '.0.0-0'
else:
if M == "0":
if m == "0":
ret = '=' + M + '.' + m + '.' + (p or "")
else:
ret = '>=' + M + '.' + m + '.' + (p or "") + '-0' + ' <' + M + '.' + str((int(m) + 1)) + '.0-0'
else:
ret = '>=' + M + '.' + m + '.' + (p or "") + '-0' +' <' + str(int(M) + 1) + '.0.0-0'
logger.debug('caret return %s', ret)
return ret
return r.sub(repl, comp)
def replace_xranges(comp, loose):
logger.debug('replaceXRanges %s %s', comp, loose)
return " ".join([replace_xrange(c, loose)
for c in re.split("\s+", comp.strip())])
def replace_xrange(comp, loose):
comp = comp.strip()
if loose:
r = regexp[XRANGELOOSE]
else:
r = regexp[XRANGE]
def repl(mob):
ret = mob.group(0)
gtlt, M, m, p, pr, _ = mob.groups()
logger.debug("xrange %s %s %s %s %s %s %s", comp, ret, gtlt, M, m, p, pr)
xM = is_x(M)
xm = xM or is_x(m)
xp = xm or is_x(p)
any_x = xp
if gtlt == "=" and any_x:
gtlt = ""
logger.debug("xrange gtlt=%s any_x=%s", gtlt, any_x)
if gtlt and any_x:
# replace X with 0, and then append the -0 min-prerelease
if xM:
M = 0
if xm:
m = 0
if xp:
p = 0
if gtlt == ">":
# >1 => >=2.0.0-0
# >1.2 => >=1.3.0-0
# >1.2.3 => >= 1.2.4-0
gtlt = ">="
if xM:
# not change
pass
elif xm:
M = int(M) + 1
m = 0
p = 0
elif xp:
m = int(m) + 1
p = 0
ret = gtlt + str(M) + '.' + str(m) + '.' + str(p) + '-0'
elif xM:
# allow any
ret = "*"
elif xm:
# append '-0' onto the version, otherwise
# '1.x.x' matches '2.0.0-beta', since the tag
# *lowers* the version value
ret = '>=' + M + '.0.0-0 <' + str(int(M) + 1) + '.0.0-0'
elif xp:
ret = '>=' + M + '.' + m + '.0-0 <' + M + '.' + str(int(m) + 1) + '.0-0'
logger.debug('xRange return %s', ret)
return ret
return r.sub(repl, comp)
# Because * is AND-ed with everything else in the comparator,
# and '' means "any version", just remove the *s entirely.
def replace_stars(comp, loose):
logger.debug('replaceStars %s %s', comp, loose)
# Looseness is ignored here. star is always as loose as it gets!
return regexp[STAR].sub("", comp.strip())
# This function is passed to string.replace(re[HYPHENRANGE])
# M, m, patch, prerelease, build
# 1.2 - 3.4.5 => >=1.2.0-0 <=3.4.5
# 1.2.3 - 3.4 => >=1.2.0-0 <3.5.0-0 Any 3.4.x will do
# 1.2 - 3.4 => >=1.2.0-0 <3.5.0-0
def hyphen_replace(mob):
from_, fM, fm, fp, fpr, fb, to, tM, tm, tp, tpr, tb = mob.groups()
if is_x(fM):
from_ = ""
elif is_x(fm):
from_ = '>=' + fM + '.0.0-0'
elif is_x(fp):
from_ = '>=' + fM + '.' + fm + '.0-0'
else:
from_ = ">=" + from_
if is_x(tM):
to = ""
elif is_x(tm):
to = '<' + str(int(tM) + 1) + '.0.0-0'
elif is_x(tp):
to = '<' + tM + '.' + str(int(tm) + 1) + '.0-0'
elif tpr:
to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr
else:
to = '<=' + to
return (from_ + ' ' + to).strip()
def test_set(set_, version):
for e in set_:
if not e.test(version):
return False
return True
def satisfies(version, range_, loose):
try:
range_ = make_range(range_, loose)
except Exception as e:
return False
return range_.test(version)
def max_satisfying(versions, range_, loose):
xs = [version for version in versions if satisfies(version, range_, loose)]
if len(xs) <= 0:
return None
selected = xs[0]
for x in xs[1:]:
try:
if rcompare(selected, x, loose) == 1:
selected = x
except ValueError:
            logger.warning("{} is an invalid version".format(x))
return selected
def valid_range(range_, loose):
try:
# Return '*' instead of '' so that truthiness works.
# This will throw if it's invalid anyway
return make_range(range_, loose).range or "*"
except:
return None
# Determine if version is less than all the versions possible in the range
def ltr(version, range_, loose):
return outside(version, range_, "<", loose)
# Determine if version is greater than all the versions possible in the range.
def rtr(version, range_, loose):
return outside(version, range_, ">", loose)
def outside(version, range_, hilo, loose):
version = make_semver(version, loose)
range_ = make_range(range_, loose)
if hilo == ">":
gtfn = gt
ltefn = lte
ltfn = lt
comp = ">"
ecomp = ">="
elif hilo == "<":
gtfn = lt
ltefn = gte
ltfn = gt
comp = "<"
ecomp = "<="
else:
raise ValueError("Must provide a hilo val of '<' or '>'")
    # If it satisfies the range it is not outside
if satisfies(version, range_, loose):
return False
# From now on, variable terms are as if we're in "gtr" mode.
# but note that everything is flipped for the "ltr" function.
for comparators in range_.set:
high = None
low = None
for comparator in comparators:
high = high or comparator
low = low or comparator
if gtfn(comparator.semver, high.semver, loose):
high = comparator
elif ltfn(comparator.semver, low.semver, loose):
low = comparator
        # If the edge version comparator has an operator then our version
        # isn't outside it
if high.operator == comp or high.operator == ecomp:
return False
# If the lowest version comparator has an operator and our version
# is less than it then it isn't higher than the range
        if (not low.operator or low.operator == comp) and ltefn(version, low.semver, loose):
return False
        elif low.operator == ecomp and ltfn(version, low.semver, loose):
return False
return True
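# --- Illustrative self-check (not part of the original module) ---
# A small demo of the public helpers above; the version strings and ranges
# are arbitrary examples chosen only for this demonstration.
if __name__ == "__main__":
    assert clean("v1.2.3", True) == "1.2.3"
    assert satisfies("1.2.5", "~1.2.3", False)
    assert not satisfies("1.3.0", "~1.2.3", False)
    assert max_satisfying(["1.2.3", "1.2.4", "1.3.0"], "~1.2.0", False) == "1.2.4"
    assert inc("1.2.3", "minor", False) == "1.3.0"
    print("all semver demo checks passed")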
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import calendar
import datetime
import functools
from email.utils import formatdate
from hashlib import sha1, sha256
import hmac
from io import BytesIO
import logging
from operator import itemgetter
import time
from botocore.compat import(
encodebytes, ensure_unicode, HTTPHeaders, json, parse_qs, quote,
six, unquote, urlsplit, urlunsplit, HAS_CRT, MD5_AVAILABLE
)
from botocore.exceptions import NoCredentialsError
from botocore.utils import normalize_url_path, percent_encode_sequence
logger = logging.getLogger(__name__)
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'
SIGNED_HEADERS_BLACKLIST = [
'expect',
'user-agent',
'x-amzn-trace-id',
]
UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD'
def _host_from_url(url):
# Given URL, derive value for host header. Ensure that value:
# 1) is lowercase
# 2) excludes port, if it was the default port
# 3) excludes userinfo
url_parts = urlsplit(url)
host = url_parts.hostname # urlsplit's hostname is always lowercase
default_ports = {
'http': 80,
'https': 443
}
if url_parts.port is not None:
if url_parts.port != default_ports.get(url_parts.scheme):
host = '%s:%d' % (host, url_parts.port)
return host
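# Illustrative examples (not part of the original module):
#   _host_from_url('https://EXAMPLE.com:443/path')  -> 'example.com'
#   _host_from_url('https://example.com:8443/path') -> 'example.com:8443'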
def _get_body_as_dict(request):
# For query services, request.data is form-encoded and is already a
# dict, but for other services such as rest-json it could be a json
# string or bytes. In those cases we attempt to load the data as a
# dict.
data = request.data
if isinstance(data, six.binary_type):
data = json.loads(data.decode('utf-8'))
elif isinstance(data, six.string_types):
data = json.loads(data)
return data
class BaseSigner(object):
REQUIRES_REGION = False
def add_auth(self, request):
raise NotImplementedError("add_auth")
class SigV2Auth(BaseSigner):
"""
Sign a request with Signature V2.
"""
def __init__(self, credentials):
self.credentials = credentials
def calc_signature(self, request, params):
logger.debug("Calculating signature using v2 auth.")
split = urlsplit(request.url)
path = split.path
if len(path) == 0:
path = '/'
string_to_sign = '%s\n%s\n%s\n' % (request.method,
split.netloc,
path)
lhmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
pairs = []
for key in sorted(params):
# Any previous signature should not be a part of this
# one, so we skip that particular key. This prevents
# issues during retries.
if key == 'Signature':
continue
value = six.text_type(params[key])
pairs.append(quote(key.encode('utf-8'), safe='') + '=' +
quote(value.encode('utf-8'), safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
logger.debug('String to sign: %s', string_to_sign)
lhmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8')
return (qs, b64)
def add_auth(self, request):
# The auth handler is the last thing called in the
# preparation phase of a prepared request.
# Because of this we have to parse the query params
# from the request body so we can update them with
# the sigv2 auth params.
if self.credentials is None:
raise NoCredentialsError()
if request.data:
# POST
params = request.data
else:
# GET
params = request.params
params['AWSAccessKeyId'] = self.credentials.access_key
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
if self.credentials.token:
params['SecurityToken'] = self.credentials.token
qs, signature = self.calc_signature(request, params)
params['Signature'] = signature
return request
class SigV3Auth(BaseSigner):
def __init__(self, credentials):
self.credentials = credentials
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError()
if 'Date' in request.headers:
del request.headers['Date']
request.headers['Date'] = formatdate(usegmt=True)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
new_hmac.update(request.headers['Date'].encode('utf-8'))
encoded_signature = encodebytes(new_hmac.digest()).strip()
signature = ('AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s' %
(self.credentials.access_key, 'HmacSHA256',
encoded_signature.decode('utf-8')))
if 'X-Amzn-Authorization' in request.headers:
del request.headers['X-Amzn-Authorization']
request.headers['X-Amzn-Authorization'] = signature
class SigV4Auth(BaseSigner):
"""
Sign a request with Signature V4.
"""
REQUIRES_REGION = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
        # We initialize these values here so the unit tests can have
        # valid values. But these will get overridden in ``add_auth``
        # later for real requests.
self._region_name = region_name
self._service_name = service_name
def _sign(self, key, msg, hex=False):
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
header_map = HTTPHeaders()
for name, value in request.headers.items():
lname = name.lower()
if lname not in SIGNED_HEADERS_BLACKLIST:
header_map[lname] = value
if 'host' not in header_map:
# TODO: We should set the host ourselves, instead of relying on our
# HTTP client to set it for us.
header_map['host'] = _host_from_url(request.url)
return header_map
def canonical_query_string(self, request):
# The query string can come from two parts. One is the
# params attribute of the request. The other is from the request
# url (in which case we have to re-split the url into its components
# and parse out the query string component).
if request.params:
return self._canonical_query_string_params(request.params)
else:
return self._canonical_query_string_url(urlsplit(request.url))
def _canonical_query_string_params(self, params):
# [(key, value), (key2, value2)]
key_val_pairs = []
for key in params:
value = str(params[key])
key_val_pairs.append((quote(key, safe='-_.~'),
quote(value, safe='-_.~')))
sorted_key_vals = []
# Sort by the URI-encoded key names, and in the case of
# repeated keys, then sort by the value.
for key, value in sorted(key_val_pairs):
sorted_key_vals.append('%s=%s' % (key, value))
canonical_query_string = '&'.join(sorted_key_vals)
return canonical_query_string
def _canonical_query_string_url(self, parts):
canonical_query_string = ''
if parts.query:
# [(key, value), (key2, value2)]
key_val_pairs = []
for pair in parts.query.split('&'):
key, _, value = pair.partition('=')
key_val_pairs.append((key, value))
sorted_key_vals = []
# Sort by the URI-encoded key names, and in the case of
# repeated keys, then sort by the value.
for key, value in sorted(key_val_pairs):
sorted_key_vals.append('%s=%s' % (key, value))
canonical_query_string = '&'.join(sorted_key_vals)
return canonical_query_string
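    # Illustrative example (not part of the original module): a raw query
    # string of 'b=2&a=1%20x' canonicalizes to 'a=1%20x&b=2' -- pairs are
    # sorted by the URI-encoded key and values are kept as they appeared.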
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
headers = []
sorted_header_names = sorted(set(headers_to_sign))
for key in sorted_header_names:
value = ','.join(self._header_value(v) for v in
headers_to_sign.get_all(key))
headers.append('%s:%s' % (key, ensure_unicode(value)))
return '\n'.join(headers)
def _header_value(self, value):
# From the sigv4 docs:
# Lowercase(HeaderName) + ':' + Trimall(HeaderValue)
#
# The Trimall function removes excess white space before and after
# values, and converts sequential spaces to a single space.
return ' '.join(value.split())
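    # Illustrative example (not part of the original module):
    #   _header_value('  multiple   spaces  here ') -> 'multiple spaces here'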
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]
l = sorted(l)
return ';'.join(l)
def payload(self, request):
if not self._should_sha256_sign_payload(request):
# When payload signing is disabled, we use this static string in
# place of the payload checksum.
return UNSIGNED_PAYLOAD
request_body = request.body
if request_body and hasattr(request_body, 'seek'):
position = request_body.tell()
read_chunksize = functools.partial(request_body.read,
PAYLOAD_BUFFER)
checksum = sha256()
for chunk in iter(read_chunksize, b''):
checksum.update(chunk)
hex_checksum = checksum.hexdigest()
request_body.seek(position)
return hex_checksum
elif request_body:
# The request serialization has ensured that
# request.body is a bytes() type.
return sha256(request_body).hexdigest()
else:
return EMPTY_SHA256_HASH
def _should_sha256_sign_payload(self, request):
# Payloads will always be signed over insecure connections.
if not request.url.startswith('https'):
return True
# Certain operations may have payload signing disabled by default.
# Since we don't have access to the operation model, we pass in this
# bit of metadata through the request context.
return request.context.get('payload_signing_enabled', True)
def canonical_request(self, request):
cr = [request.method.upper()]
path = self._normalize_url_path(urlsplit(request.url).path)
cr.append(path)
cr.append(self.canonical_query_string(request))
headers_to_sign = self.headers_to_sign(request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
if 'X-Amz-Content-SHA256' in request.headers:
body_checksum = request.headers['X-Amz-Content-SHA256']
else:
body_checksum = self.payload(request)
cr.append(body_checksum)
return '\n'.join(cr)
def _normalize_url_path(self, path):
normalized_path = quote(normalize_url_path(path), safe='/~')
return normalized_path
def scope(self, request):
scope = [self.credentials.access_key]
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def credential_scope(self, request):
scope = []
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def string_to_sign(self, request, canonical_request):
"""
        Return the canonical StringToSign: the algorithm name, the request
        timestamp, the credential scope, and the SHA256 hex digest of the
        canonical request, joined by newlines.
"""
sts = ['AWS4-HMAC-SHA256']
sts.append(request.context['timestamp'])
sts.append(self.credential_scope(request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
def signature(self, string_to_sign, request):
key = self.credentials.secret_key
k_date = self._sign(('AWS4' + key).encode('utf-8'),
request.context['timestamp'][0:8])
k_region = self._sign(k_date, self._region_name)
k_service = self._sign(k_region, self._service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError()
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
# This could be a retry. Make sure the previous
# authorization header is removed first.
self._modify_request_before_signing(request)
canonical_request = self.canonical_request(request)
logger.debug("Calculating signature using v4 auth.")
logger.debug('CanonicalRequest:\n%s', canonical_request)
string_to_sign = self.string_to_sign(request, canonical_request)
logger.debug('StringToSign:\n%s', string_to_sign)
signature = self.signature(string_to_sign, request)
logger.debug('Signature:\n%s', signature)
self._inject_signature_to_request(request, signature)
def _inject_signature_to_request(self, request, signature):
        auth_parts = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]
        headers_to_sign = self.headers_to_sign(request)
        auth_parts.append(
            'SignedHeaders=%s' % self.signed_headers(headers_to_sign))
        auth_parts.append('Signature=%s' % signature)
        request.headers['Authorization'] = ', '.join(auth_parts)
return request
def _modify_request_before_signing(self, request):
if 'Authorization' in request.headers:
del request.headers['Authorization']
self._set_necessary_date_headers(request)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
if not request.context.get('payload_signing_enabled', True):
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = UNSIGNED_PAYLOAD
def _set_necessary_date_headers(self, request):
# The spec allows for either the Date _or_ the X-Amz-Date value to be
# used so we check both. If there's a Date header, we use the date
# header. Otherwise we use the X-Amz-Date header.
if 'Date' in request.headers:
del request.headers['Date']
datetime_timestamp = datetime.datetime.strptime(
request.context['timestamp'], SIGV4_TIMESTAMP)
request.headers['Date'] = formatdate(
int(calendar.timegm(datetime_timestamp.timetuple())))
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
else:
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
request.headers['X-Amz-Date'] = request.context['timestamp']
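# Illustrative sketch, not part of botocore: the SigV4 signing-key derivation
# that SigV4Auth.signature() performs above, spelled out with hmac/hashlib
# directly.  The date stamp is the YYYYMMDD prefix of the request timestamp;
# all argument values here are caller-supplied examples.
def _example_derive_sigv4_signature(secret_key, date_stamp, region_name,
                                    service_name, string_to_sign):
    import hmac
    from hashlib import sha256

    def _hmac_sha256(key, msg):
        return hmac.new(key, msg.encode('utf-8'), sha256).digest()

    k_date = _hmac_sha256(('AWS4' + secret_key).encode('utf-8'), date_stamp)
    k_region = _hmac_sha256(k_date, region_name)
    k_service = _hmac_sha256(k_region, service_name)
    k_signing = _hmac_sha256(k_service, 'aws4_request')
    # The final signature is hex-encoded, matching _sign(..., hex=True).
    return hmac.new(k_signing, string_to_sign.encode('utf-8'),
                    sha256).hexdigest()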
class S3SigV4Auth(SigV4Auth):
def _modify_request_before_signing(self, request):
super(S3SigV4Auth, self)._modify_request_before_signing(request)
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = self.payload(request)
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
# The explicit configuration takes precedence over any implicit
# configuration.
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
# We require that both content-md5 be present and https be enabled
# to implicitly disable body signing. The combination of TLS and
# content-md5 is sufficiently secure and durable for us to be
# confident in the request without body signing.
if not request.url.startswith('https') or \
'Content-MD5' not in request.headers:
return True
# If the input is streaming we disable body signing by default.
if request.context.get('has_streaming_input', False):
return False
# If the S3-specific checks had no results, delegate to the generic
# checks.
return super(S3SigV4Auth, self)._should_sha256_sign_payload(request)
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
class SigV4QueryAuth(SigV4Auth):
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, service_name, region_name,
expires=DEFAULT_EXPIRES):
super(SigV4QueryAuth, self).__init__(credentials, service_name,
region_name)
self._expires = expires
def _modify_request_before_signing(self, request):
# We automatically set this header, so if it's the auto-set value we
# want to get rid of it since it doesn't make sense for presigned urls.
content_type = request.headers.get('content-type')
blacklisted_content_type = (
'application/x-www-form-urlencoded; charset=utf-8'
)
if content_type == blacklisted_content_type:
del request.headers['content-type']
# Note that we're not including X-Amz-Signature.
        # From the docs: "The Canonical Query String must include all the query
        # parameters from the preceding table except for X-Amz-Signature."
signed_headers = self.signed_headers(self.headers_to_sign(request))
auth_params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': self.scope(request),
'X-Amz-Date': request.context['timestamp'],
'X-Amz-Expires': self._expires,
'X-Amz-SignedHeaders': signed_headers,
}
if self.credentials.token is not None:
auth_params['X-Amz-Security-Token'] = self.credentials.token
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
        # parse_qs makes each value a list, but in our case we know we won't
        # have repeated keys, so each value is a single-element list that we
        # can convert back to a scalar value.
query_dict = dict(
[(k, v[0]) for k, v in
parse_qs(url_parts.query, keep_blank_values=True).items()])
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
        # You can't mix the two types of params together, i.e. you can't just
        # keep doing:
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
operation_params = ''
if request.data:
# We also need to move the body params into the query string. To
# do this, we first have to convert it to a dict.
query_dict.update(_get_body_as_dict(request))
request.data = ''
if query_dict:
operation_params = percent_encode_sequence(query_dict) + '&'
new_query_string = (operation_params +
percent_encode_sequence(auth_params))
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _inject_signature_to_request(self, request, signature):
# Rather than calculating an "Authorization" header, for the query
        # param auth, we just append an 'X-Amz-Signature' param to the end
# of the query string.
request.url += '&X-Amz-Signature=%s' % signature
class S3SigV4QueryAuth(SigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
    version 4, i.e. a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
def payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
        # payload. Instead, you use a constant string "UNSIGNED-PAYLOAD"."
return UNSIGNED_PAYLOAD
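# Illustrative usage sketch, not part of botocore: generating an S3 presigned
# URL with the query-string signer above.  AWSRequest and Credentials are
# botocore helpers; the bucket, key, and region values are placeholders.
def _example_s3_presigned_url(access_key, secret_key):
    from botocore.awsrequest import AWSRequest
    from botocore.credentials import Credentials

    request = AWSRequest(
        method='GET',
        url='https://examplebucket.s3.amazonaws.com/test.txt')
    signer = S3SigV4QueryAuth(
        Credentials(access_key, secret_key), 's3', 'us-east-1', expires=3600)
    signer.add_auth(request)
    # request.url now carries the X-Amz-Algorithm, X-Amz-Credential,
    # X-Amz-Date, X-Amz-Expires, X-Amz-SignedHeaders and X-Amz-Signature
    # query parameters and can be fetched with any HTTP client.
    return request.url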
class S3SigV4PostAuth(SigV4Auth):
"""
    Presigns an S3 POST request.
Implementation doc here:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
"""
def add_auth(self, request):
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'
fields['x-amz-credential'] = self.scope(request)
fields['x-amz-date'] = request.context['timestamp']
conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})
conditions.append({'x-amz-credential': self.scope(request)})
conditions.append({'x-amz-date': request.context['timestamp']})
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['x-amz-signature'] = self.signature(fields['policy'], request)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
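# Illustrative sketch, not part of botocore: the presigned-POST 'policy' field
# produced above is simply a base64-encoded JSON document.  Decoding it back
# shows the conditions that S3 will enforce on the upload.
def _example_decode_post_policy(policy_b64):
    import base64
    import json
    return json.loads(base64.b64decode(policy_b64).decode('utf-8'))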
class HmacV1Auth(BaseSigner):
# List of Query String Arguments of Interest
QSAOfInterest = ['accelerate', 'acl', 'cors', 'defaultObjectAcl',
'location', 'logging', 'partNumber', 'policy',
'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore', 'storageClass', 'notification',
'replication', 'requestPayment', 'analytics', 'metrics',
'inventory', 'select', 'select-type']
def __init__(self, credentials, service_name=None, region_name=None):
self.credentials = credentials
def sign_string(self, string_to_sign):
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha1)
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).strip().decode('utf-8')
def canonical_standard_headers(self, headers):
interesting_headers = ['content-md5', 'content-type', 'date']
hoi = []
if 'Date' in headers:
del headers['Date']
headers['Date'] = self._get_date()
for ih in interesting_headers:
found = False
for key in headers:
lk = key.lower()
if headers[key] is not None and lk == ih:
hoi.append(headers[key].strip())
found = True
if not found:
hoi.append('')
return '\n'.join(hoi)
def canonical_custom_headers(self, headers):
hoi = []
custom_headers = {}
for key in headers:
lk = key.lower()
if headers[key] is not None:
if lk.startswith('x-amz-'):
custom_headers[lk] = ','.join(v.strip() for v in
headers.get_all(key))
sorted_header_keys = sorted(custom_headers.keys())
for key in sorted_header_keys:
hoi.append("%s:%s" % (key, custom_headers[key]))
return '\n'.join(hoi)
def unquote_v(self, nv):
"""
TODO: Do we need this?
"""
if len(nv) == 1:
return nv
else:
return (nv[0], unquote(nv[1]))
def canonical_resource(self, split, auth_path=None):
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
# NOTE:
# The path in the canonical resource should always be the
# full path including the bucket name, even for virtual-hosting
# style addressing. The ``auth_path`` keeps track of the full
# path for the canonical resource and would be passed in if
# the client was using virtual-hosting style.
if auth_path is not None:
buf = auth_path
else:
buf = split.path
if split.query:
qsa = split.query.split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [self.unquote_v(a) for a in qsa
if a[0] in self.QSAOfInterest]
if len(qsa) > 0:
qsa.sort(key=itemgetter(0))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def canonical_string(self, method, split, headers, expires=None,
auth_path=None):
cs = method.upper() + '\n'
cs += self.canonical_standard_headers(headers) + '\n'
custom_headers = self.canonical_custom_headers(headers)
if custom_headers:
cs += custom_headers + '\n'
cs += self.canonical_resource(split, auth_path=auth_path)
return cs
def get_signature(self, method, split, headers, expires=None,
auth_path=None):
if self.credentials.token:
del headers['x-amz-security-token']
headers['x-amz-security-token'] = self.credentials.token
string_to_sign = self.canonical_string(method,
split,
headers,
auth_path=auth_path)
logger.debug('StringToSign:\n%s', string_to_sign)
return self.sign_string(string_to_sign)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
logger.debug("Calculating signature using hmacv1 auth.")
split = urlsplit(request.url)
logger.debug('HTTP request method: %s', request.method)
signature = self.get_signature(request.method, split,
request.headers,
auth_path=request.auth_path)
self._inject_signature(request, signature)
def _get_date(self):
return formatdate(usegmt=True)
def _inject_signature(self, request, signature):
if 'Authorization' in request.headers:
            # We have to do this because request.headers is not a
            # normal dictionary.  It has the (unintuitive) behavior
            # of aggregating repeated assignments to the same key.
            # For example:
            # headers['foo'] = 'a'; headers['foo'] = 'b'
            # list(headers) will show ['foo', 'foo'].
del request.headers['Authorization']
request.headers['Authorization'] = (
"AWS %s:%s" % (self.credentials.access_key, signature))
class HmacV1QueryAuth(HmacV1Auth):
"""
    Generates a presigned request for S3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
#RESTAuthenticationQueryStringAuth
"""
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, expires=DEFAULT_EXPIRES):
self.credentials = credentials
self._expires = expires
def _get_date(self):
return str(int(time.time() + int(self._expires)))
def _inject_signature(self, request, signature):
query_dict = {}
query_dict['AWSAccessKeyId'] = self.credentials.access_key
query_dict['Signature'] = signature
for header_key in request.headers:
lk = header_key.lower()
# For query string requests, Expires is used instead of the
# Date header.
if header_key == 'Date':
query_dict['Expires'] = request.headers['Date']
# We only want to include relevant headers in the query string.
# These can be anything that starts with x-amz, is Content-MD5,
# or is Content-Type.
elif lk.startswith('x-amz-') or lk in ['content-md5',
'content-type']:
query_dict[lk] = request.headers[lk]
# Combine all of the identified headers into an encoded
# query string
new_query_string = percent_encode_sequence(query_dict)
# Create a new url with the presigned url.
p = urlsplit(request.url)
if p[3]:
# If there was a pre-existing query string, we should
# add that back before injecting the new query string.
new_query_string = '%s&%s' % (p[3], new_query_string)
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
class HmacV1PostAuth(HmacV1Auth):
"""
    Generates a presigned POST for S3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html
"""
def add_auth(self, request):
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['AWSAccessKeyId'] = self.credentials.access_key
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['signature'] = self.sign_string(fields['policy'])
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
AUTH_TYPE_MAPS = {
'v2': SigV2Auth,
'v3': SigV3Auth,
'v3https': SigV3Auth,
's3': HmacV1Auth,
's3-query': HmacV1QueryAuth,
's3-presign-post': HmacV1PostAuth,
's3v4-presign-post': S3SigV4PostAuth,
}
# Define v4 signers depending on whether the CRT is present
if HAS_CRT:
from botocore.crt.auth import CRT_AUTH_TYPE_MAPS
AUTH_TYPE_MAPS.update(CRT_AUTH_TYPE_MAPS)
else:
AUTH_TYPE_MAPS.update({
'v4': SigV4Auth,
'v4-query': SigV4QueryAuth,
's3v4': S3SigV4Auth,
's3v4-query': S3SigV4QueryAuth,
})
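# Illustrative sketch, not part of botocore: clients resolve a signer class
# from AUTH_TYPE_MAPS by the signature version configured for a service.
# The constructor arguments below assume the non-CRT signers defined in this
# module; the service and region values are placeholders.
def _example_resolve_signer(credentials, signature_version='s3v4',
                            service_name='s3', region_name='us-east-1'):
    signer_cls = AUTH_TYPE_MAPS[signature_version]
    return signer_cls(credentials, service_name, region_name)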
|
|
import os
import json
import unittest
from copy import deepcopy
from unittest import TestCase
from random import randint
from elasticsearch import Elasticsearch
from mme_server.schemas import validate_request, validate_response, ValidationError
EXAMPLE_REQUEST = {
'patient': {
'id': '1',
'label': 'patient 1',
'contact': {
'name': 'First Last',
'institution': 'Contact Institution',
'href': 'mailto:first.last@example.com',
},
'ageOfOnset': 'HP:0003577',
'inheritanceMode': 'HP:0000006',
'features': [
{
'id': 'HP:0000252',
'label': 'Microcephaly',
},
{
'id': 'HP:0000522',
'label': 'Alacrima',
'ageOfOnset': 'HP:0003593',
},
],
'genomicFeatures': [{
"gene": {
"id": "EFTUD2",
},
"type": {
"id": "SO:0001587",
"label": "STOPGAIN",
},
"variant": {
"alternateBases": "A",
"assembly": "GRCh37",
"end": 42929131,
"referenceBases": "G",
"referenceName": "17",
"start": 42929130,
},
"zygosity": 1,
}],
'disorders': [{
"id": "MIM:610536",
}],
}
}
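# Illustrative helper, not part of the test suite proper: the schema helpers
# imported above raise ValidationError for malformed queries, which is what
# the MatchRequestTests below rely on.
def _example_check_request(data):
    try:
        validate_request(data)
    except ValidationError as error:
        return 'invalid: %s' % error
    return 'valid'
# e.g. _example_check_request(EXAMPLE_REQUEST) -> 'valid'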
class ElasticSearchTests(TestCase):
@classmethod
def setUpClass(cls):
cls.es = Elasticsearch()
    # unittest backwards compatibility with Python 2.x
try:
assertCountEqual = TestCase.assertCountEqual
except AttributeError:
assertCountEqual = TestCase.assertItemsEqual
def test_patient_indexed(self):
record = self.es.get(index='patients', id='P0001135')
self.assertTrue(record['found'])
self.assertCountEqual(record['_source']['gene'], ['ENSG00000151092']) # NGLY1
def test_hpo_indexed(self):
term = self.es.get(index='vocabularies', doc_type='hpo', id='HP:0000252')
self.assertTrue(term['found'])
doc = term['_source']
self.assertEqual(doc['name'], ['Microcephaly'])
self.assertAlmostEqual(len(doc['alt_id']), 4, delta=1)
self.assertIn('small head', [term.lower() for term in doc['synonym']])
self.assertCountEqual(doc['is_a'], ['HP:0040195', 'HP:0007364'])
self.assertAlmostEqual(len(doc['term_category']), 19, delta=2)
def test_gene_filter(self):
query = {
'query': {
'filtered': {
'filter': {
'term': {
'gene': 'ENSG00000151092', # NGLY1
}
}
}
}
}
results = self.es.search(index='patients', body=query)
self.assertEqual(results['hits']['total'], 8, "Expected 8 cases with NGLY1 gene")
def test_phenotype_filter(self):
query = {
'query': {
'filtered': {
'filter': {
'term': {
'phenotype': 'HP:0000118'
}
}
}
}
}
results = self.es.search(index='patients', body=query)
self.assertEqual(results['hits']['total'], 50, "Expected 50 cases with some phenotypic abnormality")
def test_fuzzy_search(self):
query = {
'query': {
'bool': {
'should': [
{'match': {'phenotype': 'HP:0001250'}}, # Seizures
                        {'match': {'phenotype': 'HP:0006852'}},  # Episodic hypotonia
{'match': {'phenotype': 'HP:0011675'}}, # Arrhythmia
{'match': {'phenotype': 'HP:0003312'}}, # Abnormal vertebra
{'match': {'gene': 'GPX4'}},
]
}
}
}
results = self.es.search(index='patients', body=query)
hits = results['hits']['hits']
# Most similar patient from test dataset
self.assertEqual(hits[0]['_id'], 'P0001058')
class DatastoreTests(TestCase):
@classmethod
def setUpClass(cls):
from mme_server.server import app
from mme_server.backend import get_backend
with app.app_context():
cls.backend = get_backend()
cls.vocabularies = cls.backend.get_manager('vocabularies')
def test_get_term(self):
# Lookup term using alias
term = self.vocabularies.get_term(id='HP:0001366')
self.assertEqual(term['id'], 'HP:0000252')
self.assertEqual(term['name'], ['Microcephaly'])
self.assertEqual(len(term['is_a']), 2)
self.assertAlmostEqual(len(term['term_category']), 20, delta=5)
class MatchRequestTests(TestCase):
def setUp(self):
self.request = deepcopy(EXAMPLE_REQUEST)
def assertValidRequest(self, data):
validate_request(data)
def assertNotValidRequest(self, data):
self.assertRaises(ValidationError, validate_request, data)
def test_query_schema_empty(self):
self.request['patient'] = {}
self.assertNotValidRequest(self.request)
def test_query_schema_no_contact(self):
self.request['patient']['contact'] = {}
self.assertNotValidRequest(self.request)
def test_query_schema_invalid_href_uri(self):
self.request['patient']['contact']['href'] = 'first.last@example.com'
self.assertNotValidRequest(self.request)
def test_query_schema_no_id(self):
self.request['patient'].pop('id')
self.assertNotValidRequest(self.request)
def test_query_schema_no_phenotype_or_genotype(self):
self.request['patient']['features'] = []
self.request['patient'].pop('genomicFeatures')
self.assertNotValidRequest(self.request)
def test_query_schema_complete(self):
self.assertValidRequest(self.request)
def test_query_schema_extra_fields_allowed(self):
self.request['patient']['_foo'] = 'bar'
self.assertValidRequest(self.request)
self.request['patient']['foo'] = 'bar'
self.assertValidRequest(self.request)
class FlaskTests(unittest.TestCase):
def setUp(self):
from mme_server.server import app
from mme_server.cli import add_server
self.client = app.test_client()
self.data = json.dumps(EXAMPLE_REQUEST)
self.auth_token = 'mysecretauthtoken'
self.test_server_id = 'test_server_{}'.format(randint(0, 1000000))
add_server(self.test_server_id, 'in', key=self.auth_token)
self.accept_header = ('Accept', 'application/vnd.ga4gh.matchmaker.v1.0+json')
self.content_type_header = ('Content-Type', 'application/json')
self.auth_token_header = ('X-Auth-Token', self.auth_token)
self.headers = [
self.accept_header,
self.content_type_header,
self.auth_token_header,
]
def tearDown(self):
from mme_server.cli import remove_server
remove_server(self.test_server_id, 'in')
def assertValidResponse(self, data):
validate_response(data)
def test_match_request(self):
response = self.client.post('/v1/match', data=self.data, headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['Content-Type'], 'application/vnd.ga4gh.matchmaker.v1.0+json')
self.assertValidResponse(json.loads(response.get_data(as_text=True)))
def test_accept_header_required(self):
headers = self.headers
headers.remove(self.accept_header)
response = self.client.post('/v1/match', data=self.data, headers=headers)
self.assertEqual(response.status_code, 406)
def test_content_type_required(self):
headers = self.headers
headers.remove(self.content_type_header)
response = self.client.post('/v1/match', data=self.data, headers=headers)
self.assertEqual(response.status_code, 415)
def test_invalid_query(self):
response = self.client.post('/v1/match', data='{}', headers=self.headers)
self.assertEqual(response.status_code, 422)
self.assertTrue(json.loads(response.get_data(as_text=True))['message'])
def test_unauthenticated(self):
self.headers.remove(self.auth_token_header)
response = self.client.post('/v1/match', data='{}', headers=self.headers)
self.assertEqual(response.status_code, 401)
self.assertTrue(json.loads(response.get_data(as_text=True))['message'])
def test_add_server_with_blank_key(self):
from mme_server.cli import add_server
add_server(self.test_server_id, 'out', key='', base_url='https://example.com/')
class EndToEndTests(unittest.TestCase):
def setUp(self):
from mme_server.cli import main
self.auth_token = 'mysecretauthtoken'
self.test_server_id = 'test_server_{}'.format(randint(0, 1000000))
main(['clients', 'add', self.test_server_id, '--key={}'.format(self.auth_token)])
self.accept_header = ('Accept', 'application/vnd.ga4gh.matchmaker.v1.0+json')
self.content_type_header = ('Content-Type', 'application/json')
self.auth_token_header = ('X-Auth-Token', self.auth_token)
self.headers = [
self.accept_header,
self.content_type_header,
self.auth_token_header,
]
def tearDown(self):
from mme_server.cli import main
main(['clients', 'rm', self.test_server_id])
def test_query(self):
from mme_server.server import app
self.client = app.test_client()
self.data = json.dumps(EXAMPLE_REQUEST)
response = self.client.post('/v1/match', data=self.data, headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['Content-Type'], 'application/vnd.ga4gh.matchmaker.v1.0+json')
response_data = json.loads(response.get_data(as_text=True))
validate_response(response_data)
self.assertEqual(len(response_data['results']), 5)
@unittest.skipUnless('MME_TEST_QUICKSTART' in os.environ, 'Not testing quickstart data loading')
def test_quickstart(self):
from mme_server import main
# Index all data
main(['quickstart'])
self.test_query()
if __name__ == '__main__':
unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import EventHubManagementClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class EventHubManagementClient(MultiApiClientMixin, _SDKClient):
"""Azure Event Hubs client for managing Event Hubs Cluster, IPFilter Rules and VirtualNetworkRules resources.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2021-11-01'
_PROFILE_TAG = "azure.mgmt.eventhub.EventHubManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'regions': '2017-04-01',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
api_version=None, # type: Optional[str]
base_url=None, # type: Optional[str]
profile=KnownProfiles.default, # type: KnownProfiles
**kwargs # type: Any
):
if not base_url:
base_url = 'https://management.azure.com'
self._config = EventHubManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(EventHubManagementClient, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-08-01: :mod:`v2015_08_01.models<azure.mgmt.eventhub.v2015_08_01.models>`
* 2017-04-01: :mod:`v2017_04_01.models<azure.mgmt.eventhub.v2017_04_01.models>`
* 2018-01-01-preview: :mod:`v2018_01_01_preview.models<azure.mgmt.eventhub.v2018_01_01_preview.models>`
* 2021-01-01-preview: :mod:`v2021_01_01_preview.models<azure.mgmt.eventhub.v2021_01_01_preview.models>`
* 2021-06-01-preview: :mod:`v2021_06_01_preview.models<azure.mgmt.eventhub.v2021_06_01_preview.models>`
* 2021-11-01: :mod:`v2021_11_01.models<azure.mgmt.eventhub.v2021_11_01.models>`
"""
if api_version == '2015-08-01':
from .v2015_08_01 import models
return models
elif api_version == '2017-04-01':
from .v2017_04_01 import models
return models
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview import models
return models
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview import models
return models
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview import models
return models
elif api_version == '2021-11-01':
from .v2021_11_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def clusters(self):
"""Instance depends on the API version:
* 2018-01-01-preview: :class:`ClustersOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ClustersOperations>`
* 2021-06-01-preview: :class:`ClustersOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.ClustersOperations>`
* 2021-11-01: :class:`ClustersOperations<azure.mgmt.eventhub.v2021_11_01.operations.ClustersOperations>`
"""
api_version = self._get_api_version('clusters')
if api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import ClustersOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import ClustersOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import ClustersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'clusters'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def configuration(self):
"""Instance depends on the API version:
* 2018-01-01-preview: :class:`ConfigurationOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ConfigurationOperations>`
* 2021-06-01-preview: :class:`ConfigurationOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.ConfigurationOperations>`
* 2021-11-01: :class:`ConfigurationOperations<azure.mgmt.eventhub.v2021_11_01.operations.ConfigurationOperations>`
"""
api_version = self._get_api_version('configuration')
if api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import ConfigurationOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import ConfigurationOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import ConfigurationOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'configuration'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def consumer_groups(self):
"""Instance depends on the API version:
* 2015-08-01: :class:`ConsumerGroupsOperations<azure.mgmt.eventhub.v2015_08_01.operations.ConsumerGroupsOperations>`
* 2017-04-01: :class:`ConsumerGroupsOperations<azure.mgmt.eventhub.v2017_04_01.operations.ConsumerGroupsOperations>`
* 2018-01-01-preview: :class:`ConsumerGroupsOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ConsumerGroupsOperations>`
* 2021-01-01-preview: :class:`ConsumerGroupsOperations<azure.mgmt.eventhub.v2021_01_01_preview.operations.ConsumerGroupsOperations>`
* 2021-06-01-preview: :class:`ConsumerGroupsOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.ConsumerGroupsOperations>`
* 2021-11-01: :class:`ConsumerGroupsOperations<azure.mgmt.eventhub.v2021_11_01.operations.ConsumerGroupsOperations>`
"""
api_version = self._get_api_version('consumer_groups')
if api_version == '2015-08-01':
from .v2015_08_01.operations import ConsumerGroupsOperations as OperationClass
elif api_version == '2017-04-01':
from .v2017_04_01.operations import ConsumerGroupsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import ConsumerGroupsOperations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import ConsumerGroupsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import ConsumerGroupsOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import ConsumerGroupsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'consumer_groups'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def disaster_recovery_configs(self):
"""Instance depends on the API version:
* 2017-04-01: :class:`DisasterRecoveryConfigsOperations<azure.mgmt.eventhub.v2017_04_01.operations.DisasterRecoveryConfigsOperations>`
* 2018-01-01-preview: :class:`DisasterRecoveryConfigsOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.DisasterRecoveryConfigsOperations>`
* 2021-01-01-preview: :class:`DisasterRecoveryConfigsOperations<azure.mgmt.eventhub.v2021_01_01_preview.operations.DisasterRecoveryConfigsOperations>`
* 2021-06-01-preview: :class:`DisasterRecoveryConfigsOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.DisasterRecoveryConfigsOperations>`
* 2021-11-01: :class:`DisasterRecoveryConfigsOperations<azure.mgmt.eventhub.v2021_11_01.operations.DisasterRecoveryConfigsOperations>`
"""
api_version = self._get_api_version('disaster_recovery_configs')
if api_version == '2017-04-01':
from .v2017_04_01.operations import DisasterRecoveryConfigsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import DisasterRecoveryConfigsOperations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import DisasterRecoveryConfigsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import DisasterRecoveryConfigsOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import DisasterRecoveryConfigsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'disaster_recovery_configs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def event_hubs(self):
"""Instance depends on the API version:
* 2015-08-01: :class:`EventHubsOperations<azure.mgmt.eventhub.v2015_08_01.operations.EventHubsOperations>`
* 2017-04-01: :class:`EventHubsOperations<azure.mgmt.eventhub.v2017_04_01.operations.EventHubsOperations>`
* 2018-01-01-preview: :class:`EventHubsOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.EventHubsOperations>`
* 2021-01-01-preview: :class:`EventHubsOperations<azure.mgmt.eventhub.v2021_01_01_preview.operations.EventHubsOperations>`
* 2021-06-01-preview: :class:`EventHubsOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.EventHubsOperations>`
* 2021-11-01: :class:`EventHubsOperations<azure.mgmt.eventhub.v2021_11_01.operations.EventHubsOperations>`
"""
api_version = self._get_api_version('event_hubs')
if api_version == '2015-08-01':
from .v2015_08_01.operations import EventHubsOperations as OperationClass
elif api_version == '2017-04-01':
from .v2017_04_01.operations import EventHubsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import EventHubsOperations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import EventHubsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import EventHubsOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import EventHubsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'event_hubs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def namespaces(self):
"""Instance depends on the API version:
* 2015-08-01: :class:`NamespacesOperations<azure.mgmt.eventhub.v2015_08_01.operations.NamespacesOperations>`
* 2017-04-01: :class:`NamespacesOperations<azure.mgmt.eventhub.v2017_04_01.operations.NamespacesOperations>`
* 2018-01-01-preview: :class:`NamespacesOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.NamespacesOperations>`
* 2021-01-01-preview: :class:`NamespacesOperations<azure.mgmt.eventhub.v2021_01_01_preview.operations.NamespacesOperations>`
* 2021-06-01-preview: :class:`NamespacesOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.NamespacesOperations>`
* 2021-11-01: :class:`NamespacesOperations<azure.mgmt.eventhub.v2021_11_01.operations.NamespacesOperations>`
"""
api_version = self._get_api_version('namespaces')
if api_version == '2015-08-01':
from .v2015_08_01.operations import NamespacesOperations as OperationClass
elif api_version == '2017-04-01':
from .v2017_04_01.operations import NamespacesOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import NamespacesOperations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import NamespacesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import NamespacesOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import NamespacesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'namespaces'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2015-08-01: :class:`Operations<azure.mgmt.eventhub.v2015_08_01.operations.Operations>`
* 2017-04-01: :class:`Operations<azure.mgmt.eventhub.v2017_04_01.operations.Operations>`
* 2018-01-01-preview: :class:`Operations<azure.mgmt.eventhub.v2018_01_01_preview.operations.Operations>`
* 2021-01-01-preview: :class:`Operations<azure.mgmt.eventhub.v2021_01_01_preview.operations.Operations>`
* 2021-06-01-preview: :class:`Operations<azure.mgmt.eventhub.v2021_06_01_preview.operations.Operations>`
* 2021-11-01: :class:`Operations<azure.mgmt.eventhub.v2021_11_01.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2015-08-01':
from .v2015_08_01.operations import Operations as OperationClass
elif api_version == '2017-04-01':
from .v2017_04_01.operations import Operations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import Operations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import Operations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import Operations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2018-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.PrivateEndpointConnectionsOperations>`
* 2021-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.eventhub.v2021_01_01_preview.operations.PrivateEndpointConnectionsOperations>`
* 2021-06-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.PrivateEndpointConnectionsOperations>`
* 2021-11-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.eventhub.v2021_11_01.operations.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import PrivateEndpointConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_link_resources(self):
"""Instance depends on the API version:
* 2018-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.PrivateLinkResourcesOperations>`
* 2021-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.eventhub.v2021_01_01_preview.operations.PrivateLinkResourcesOperations>`
* 2021-06-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.eventhub.v2021_06_01_preview.operations.PrivateLinkResourcesOperations>`
* 2021-11-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.eventhub.v2021_11_01.operations.PrivateLinkResourcesOperations>`
"""
api_version = self._get_api_version('private_link_resources')
if api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-01-01-preview':
from .v2021_01_01_preview.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-06-01-preview':
from .v2021_06_01_preview.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2021-11-01':
from .v2021_11_01.operations import PrivateLinkResourcesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def regions(self):
"""Instance depends on the API version:
* 2017-04-01: :class:`RegionsOperations<azure.mgmt.eventhub.v2017_04_01.operations.RegionsOperations>`
* 2018-01-01-preview: :class:`RegionsOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.RegionsOperations>`
"""
api_version = self._get_api_version('regions')
if api_version == '2017-04-01':
from .v2017_04_01.operations import RegionsOperations as OperationClass
elif api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import RegionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'regions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def schema_registry(self):
"""Instance depends on the API version:
* 2021-11-01: :class:`SchemaRegistryOperations<azure.mgmt.eventhub.v2021_11_01.operations.SchemaRegistryOperations>`
"""
api_version = self._get_api_version('schema_registry')
if api_version == '2021-11-01':
from .v2021_11_01.operations import SchemaRegistryOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'schema_registry'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
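# Illustrative usage sketch, not part of the generated client: it assumes the
# azure-identity package is installed and that the selected API version
# exposes a NamespacesOperations.list() operation.
def _example_list_namespace_names(subscription_id):
    from azure.identity import DefaultAzureCredential
    credential = DefaultAzureCredential()
    with EventHubManagementClient(credential, subscription_id) as client:
        return [namespace.name for namespace in client.namespaces.list()]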
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from webdriver_pages import settings
from webdriver_pages.settings import Behaviors, ContentTypes
class PrefsTest(pyauto.PyUITest):
"""TestCase for Preferences."""
INFOBAR_TYPE = 'rph_infobar'
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Interact with the browser and hit <enter> to dump prefs... ')
self.pprint(self.GetPrefsInfo().Prefs())
def testSessionRestore(self):
"""Test session restore preference."""
url1 = 'http://www.google.com/'
url2 = 'http://news.google.com/'
self.NavigateToURL(url1)
self.AppendTab(pyauto.GURL(url2))
num_tabs = self.GetTabCount()
# Set pref to restore session on startup.
self.SetPrefs(pyauto.kRestoreOnStartup, 1)
logging.debug('Setting %s to 1' % pyauto.kRestoreOnStartup)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 1)
self.assertEqual(num_tabs, self.GetTabCount())
self.ActivateTab(0)
self.assertEqual(url1, self.GetActiveTabURL().spec())
self.ActivateTab(1)
self.assertEqual(url2, self.GetActiveTabURL().spec())
def testNavigationStateOnSessionRestore(self):
"""Verify navigation state is preserved on session restore."""
urls = ('http://www.google.com/',
'http://news.google.com/',
'http://dev.chromium.org/',)
for url in urls:
self.NavigateToURL(url)
self.TabGoBack()
self.assertEqual(self.GetActiveTabURL().spec(), urls[-2])
self.SetPrefs(pyauto.kRestoreOnStartup, 1) # set pref to restore session
self.RestartBrowser(clear_profile=False)
# Verify that navigation state (forward/back state) is restored.
self.TabGoBack()
self.assertEqual(self.GetActiveTabURL().spec(), urls[0])
for i in (-2, -1):
      self.TabGoForward()
self.assertEqual(self.GetActiveTabURL().spec(), urls[i])
def testSessionRestoreURLs(self):
"""Verify restore URLs preference."""
url1 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
url2 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
# Set pref to restore given URLs on startup
self.SetPrefs(pyauto.kRestoreOnStartup, 4) # 4 is for restoring URLs
self.SetPrefs(pyauto.kURLsToRestoreOnStartup, [url1, url2])
self.RestartBrowser(clear_profile=False)
# Verify
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 4)
self.assertEqual(2, self.GetTabCount())
self.ActivateTab(0)
self.assertEqual(url1, self.GetActiveTabURL().spec())
self.ActivateTab(1)
self.assertEqual(url2, self.GetActiveTabURL().spec())
def testSessionRestoreShowBookmarkBar(self):
"""Verify restore for bookmark bar visibility."""
assert not self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar)
self.SetPrefs(pyauto.kShowBookmarkBar, True)
self.assertEqual(True, self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
self.RestartBrowser(clear_profile=False)
self.assertEqual(True, self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
self.assertTrue(self.GetBookmarkBarVisibility())
def testDownloadDirPref(self):
"""Verify download dir pref."""
test_dir = os.path.join(self.DataDir(), 'downloads')
file_url = self.GetFileURLForPath(os.path.join(test_dir, 'a_zip_file.zip'))
download_dir = self.GetDownloadDirectory().value()
new_dl_dir = os.path.join(download_dir, 'My+Downloads Folder')
downloaded_pkg = os.path.join(new_dl_dir, 'a_zip_file.zip')
    if os.path.exists(new_dl_dir):
      shutil.rmtree(new_dl_dir)
os.makedirs(new_dl_dir)
# Set pref to download in new_dl_dir
self.SetPrefs(pyauto.kDownloadDefaultDirectory, new_dl_dir)
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
self.assertTrue(os.path.exists(downloaded_pkg))
shutil.rmtree(new_dl_dir, ignore_errors=True) # cleanup
def testToolbarButtonsPref(self):
"""Verify toolbar buttons prefs."""
# Assert defaults first
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kShowHomeButton))
self.SetPrefs(pyauto.kShowHomeButton, True)
self.RestartBrowser(clear_profile=False)
self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kShowHomeButton))
def testNetworkPredictionEnabledPref(self):
"""Verify DNS prefetching pref."""
# Assert default
self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kNetworkPredictionEnabled))
self.SetPrefs(pyauto.kNetworkPredictionEnabled, False)
self.RestartBrowser(clear_profile=False)
self.assertFalse(self.GetPrefsInfo().Prefs(
pyauto.kNetworkPredictionEnabled))
def testHomepagePrefs(self):
"""Verify homepage prefs."""
# "Use the New Tab page"
self.SetPrefs(pyauto.kHomePageIsNewTabPage, True)
logging.debug('Setting %s to 1' % pyauto.kHomePageIsNewTabPage)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage),
True)
# "Open this page"
url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
self.SetPrefs(pyauto.kHomePage, url)
self.SetPrefs(pyauto.kHomePageIsNewTabPage, False)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePage), url)
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage))
# TODO(nirnimesh): Actually verify that homepage loads.
# This requires telling pyauto *not* to set about:blank as homepage.
def testGeolocationPref(self):
"""Verify geolocation pref.
Checks for the geolocation infobar.
"""
# GetBrowserInfo() call seems to fail later on in this test. Call it early.
# crbug.com/89000
branding = self.GetBrowserInfo()['properties']['branding']
url = self.GetFileURLForPath(os.path.join( # triggers geolocation
self.DataDir(), 'geolocation', 'geolocation_on_load.html'))
self.assertEqual(3, # default state
self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
self.NavigateToURL(url)
self.assertTrue(self.WaitForInfobarCount(1))
self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])
# Disable geolocation
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 2)
self.assertEqual(2,
self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
self.ReloadTab()
# Fails on Win7/Vista Chromium bots. crbug.com/89000
if (self.IsWin7() or self.IsWinVista()) and branding == 'Chromium':
return
behavior = self._driver.execute_async_script(
'triggerGeoWithCallback(arguments[arguments.length - 1]);')
self.assertEqual(
behavior, Behaviors.BLOCK,
msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
def testUnderTheHoodPref(self):
"""Verify the security preferences for Under the Hood.
    These settings are enabled by default."""
pref_list = [pyauto.kNetworkPredictionEnabled, pyauto.kSafeBrowsingEnabled,
pyauto.kAlternateErrorPagesEnabled,
pyauto.kSearchSuggestEnabled, pyauto.kShowOmniboxSearchHint]
for pref in pref_list:
# Verify the default value
self.assertEqual(self.GetPrefsInfo().Prefs(pref), True)
self.SetPrefs(pref, False)
self.RestartBrowser(clear_profile=False)
for pref in pref_list:
self.assertEqual(self.GetPrefsInfo().Prefs(pref), False)
def testJavaScriptEnableDisable(self):
"""Verify enabling disabling javascript prefs work """
self.assertTrue(
self.GetPrefsInfo().Prefs(pyauto.kWebKitJavascriptEnabled))
url = self.GetFileURLForDataPath(
os.path.join('javaScriptTitle.html'))
title1 = 'Title from script javascript enabled'
self.NavigateToURL(url)
self.assertEqual(title1, self.GetActiveTabTitle())
self.SetPrefs(pyauto.kWebKitJavascriptEnabled, False)
title = 'This is html title'
self.NavigateToURL(url)
self.assertEqual(title, self.GetActiveTabTitle())
def testHaveLocalStatePrefs(self):
"""Verify that we have some Local State prefs."""
self.assertTrue(self.GetLocalStatePrefsInfo())
def testAllowSelectedGeoTracking(self):
"""Verify hostname pattern and behavior for allowed tracking."""
# Default location tracking option "Ask me".
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('accept', infobar_index=0) # Allow tracking.
# Get the hostname pattern (e.g. http://127.0.0.1:57622).
hostname_pattern = (
'/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
self.assertEqual(
# Allow the hostname.
{hostname_pattern+','+hostname_pattern: {'geolocation': 1}},
self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def testDismissedInfobarSavesNoEntry(self):
"""Verify dismissing infobar does not save an exception entry."""
# Default location tracking option "Ask me".
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetFileURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('dismiss', infobar_index=0)
self.assertEqual(
{}, self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def testGeolocationBlockedWhenTrackingDenied(self):
"""Verify geolocations is blocked when tracking is denied.
The test verifies the blocked hostname pattern entry on the Geolocations
exceptions page.
"""
# Ask for permission when site wants to track.
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('cancel', infobar_index=0) # Deny tracking.
behavior = self._driver.execute_async_script(
'triggerGeoWithCallback(arguments[arguments.length - 1]);')
self.assertEqual(
behavior, Behaviors.BLOCK,
msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
# Get the hostname pattern (e.g. http://127.0.0.1:57622).
hostname_pattern = (
'/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
self.assertEqual(
# Block the hostname.
{hostname_pattern+','+hostname_pattern: {'geolocation': 2}},
self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def _CheckForVisibleImage(self, tab_index=0, windex=0):
"""Checks whether or not an image is visible on the webpage.
Args:
tab_index: Tab index. Defaults to 0 (first tab).
windex: Window index. Defaults to 0 (first window).
Returns:
      True if at least one image is loaded, False otherwise.
"""
    # Checks whether an image is loaded by checking its natural area (width
    # and height). If the area is non-zero the image is visible; if it is
    # zero the image is not loaded, since Chrome zeros |naturalWidth| and
    # |naturalHeight| for images that fail to load.
script = """
for (i=0; i < document.images.length; i++) {
if ((document.images[i].naturalWidth != 0) &&
(document.images[i].naturalHeight != 0)) {
window.domAutomationController.send(true);
}
}
window.domAutomationController.send(false);
"""
return self.ExecuteJavascript(script, windex=windex, tab_index=tab_index)
def testImageContentSettings(self):
"""Verify image content settings show or hide images."""
url = self.GetHttpURLForDataPath('settings', 'image_page.html')
self.NavigateToURL(url)
self.assertTrue(self._CheckForVisibleImage(),
msg='No visible images found.')
# Set to block all images from loading.
self.SetPrefs(pyauto.kDefaultContentSettings, {'images': 2})
self.NavigateToURL(url)
self.assertFalse(self._CheckForVisibleImage(),
msg='At least one visible image found.')
def testImagesNotBlockedInIncognito(self):
"""Verify images are not blocked in Incognito mode."""
url = self.GetHttpURLForDataPath('settings', 'image_page.html')
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 1, 0)
self.assertTrue(self._CheckForVisibleImage(windex=1),
msg='No visible images found in Incognito mode.')
def testBlockImagesForHostname(self):
"""Verify images blocked for defined hostname pattern."""
url = 'http://www.google.com'
page = settings.ManageExceptionsPage.FromNavigation(
self._driver, ContentTypes.IMAGES)
pattern, behavior = (url, Behaviors.BLOCK)
# Add an exception BLOCK for hostname pattern 'www.google.com'.
page.AddNewException(pattern, behavior)
self.NavigateToURL(url)
self.assertFalse(self._CheckForVisibleImage(),
msg='At least one visible image found.')
def testAllowImagesForHostname(self):
"""Verify images allowed for defined hostname pattern."""
url = 'http://www.google.com'
page = settings.ManageExceptionsPage.FromNavigation(
self._driver, ContentTypes.IMAGES)
pattern, behavior = (url, Behaviors.ALLOW)
# Add an exception ALLOW for hostname pattern 'www.google.com'.
page.AddNewException(pattern, behavior)
self.NavigateToURL(url)
self.assertTrue(self._CheckForVisibleImage(),
msg='No visible images found.')
def testProtocolHandlerRegisteredCorrectly(self):
"""Verify sites that ask to be default handlers registers correctly."""
url = self.GetHttpURLForDataPath('settings', 'protocol_handler.html')
self.NavigateToURL(url)
# Returns a dictionary with the custom handler.
asked_handler_dict = self._driver.execute_script(
'return registerCustomHandler()')
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self._driver.find_element_by_id('test_protocol').click()
self.assertTrue(
self._driver.execute_script(
'return doesQueryConformsToProtocol("%s", "%s")'
% (asked_handler_dict['query_key'],
asked_handler_dict['query_value'])),
msg='Protocol did not register correctly.')
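# Illustrative sketch (not part of the PyAuto suite above): the geolocation
# tests expect kContentSettingsPatternPairs entries keyed by
# '<primary pattern>,<secondary pattern>' and mapping to a per-type setting
# value (1 == allow, 2 == block, matching the assertions above). The helper
# below is hypothetical and only documents that shape.
def _ExampleGeolocationException(hostname_pattern, allow=True):
  """Build the exception dict the geolocation tests compare against."""
  setting = 1 if allow else 2
  return {hostname_pattern + ',' + hostname_pattern: {'geolocation': setting}}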
if __name__ == '__main__':
pyauto_functional.Main()
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from xml.etree import ElementTree as etree
from tempest.common import http
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
CONF = config.CONF
class AccountClient(rest_client.RestClient):
def __init__(self, auth_provider):
super(AccountClient, self).__init__(auth_provider)
self.service = CONF.object_storage.catalog_type
    def create_account(self, data=None,
                       params=None,
                       metadata=None,
                       remove_metadata=None,
                       metadata_prefix='X-Account-Meta-',
                       remove_metadata_prefix='X-Remove-Account-Meta-'):
        """Create an account."""
        metadata = metadata or {}
        remove_metadata = remove_metadata or {}
        url = ''
if params:
url += '?%s' % urllib.urlencode(params)
headers = {}
for key in metadata:
headers[metadata_prefix + key] = metadata[key]
for key in remove_metadata:
headers[remove_metadata_prefix + key] = remove_metadata[key]
resp, body = self.put(url, data, headers)
return resp, body
def delete_account(self, data=None, params=None):
"""Delete an account."""
url = ''
if params:
if 'bulk-delete' in params:
url += 'bulk-delete&'
url = '?%s%s' % (url, urllib.urlencode(params))
resp, body = self.delete(url, headers={}, body=data)
return resp, body
def list_account_metadata(self):
"""
HEAD on the storage URL
Returns all account metadata headers
"""
resp, body = self.head('')
return resp, body
def create_account_metadata(self, metadata,
metadata_prefix='X-Account-Meta-'):
"""Creates an account metadata entry."""
headers = {}
for key in metadata:
headers[metadata_prefix + key] = metadata[key]
resp, body = self.post('', headers=headers, body=None)
return resp, body
def delete_account_metadata(self, metadata,
metadata_prefix='X-Remove-Account-Meta-'):
"""
Deletes an account metadata entry.
"""
headers = {}
for item in metadata:
headers[metadata_prefix + item] = metadata[item]
resp, body = self.post('', headers=headers, body=None)
return resp, body
def create_and_delete_account_metadata(
self,
create_metadata=None,
delete_metadata=None,
create_metadata_prefix='X-Account-Meta-',
delete_metadata_prefix='X-Remove-Account-Meta-'):
"""
Creates and deletes an account metadata entry.
"""
headers = {}
for key in create_metadata:
headers[create_metadata_prefix + key] = create_metadata[key]
for key in delete_metadata:
headers[delete_metadata_prefix + key] = delete_metadata[key]
resp, body = self.post('', headers=headers, body=None)
return resp, body
def list_account_containers(self, params=None):
"""
GET on the (base) storage URL
Given valid X-Auth-Token, returns a list of all containers for the
account.
Optional Arguments:
limit=[integer value N]
Limits the number of results to at most N values
DEFAULT: 10,000
marker=[string value X]
            Given string value X, return container names greater in value
than the specified marker.
DEFAULT: No Marker
format=[string value, either 'json' or 'xml']
Specify either json or xml to return the respective serialized
response.
DEFAULT: Python-List returned in response body
"""
url = '?%s' % urllib.urlencode(params) if params else ''
resp, body = self.get(url, headers={})
if params and params.get('format') == 'json':
body = json.loads(body)
elif params and params.get('format') == 'xml':
body = etree.fromstring(body)
else:
body = body.strip().splitlines()
return resp, body
def list_extensions(self):
self.skip_path()
try:
resp, body = self.get('info')
finally:
self.reset_path()
body = json.loads(body)
return resp, body
class AccountClientCustomizedHeader(rest_client.RestClient):
# TODO(andreaf) This class is now redundant, to be removed in next patch
def __init__(self, auth_provider):
super(AccountClientCustomizedHeader, self).__init__(
auth_provider)
# Overwrites json-specific header encoding in rest_client.RestClient
self.service = CONF.object_storage.catalog_type
self.format = 'json'
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
self.http_obj = http.ClosingHttp()
if headers is None:
headers = {}
elif extra_headers:
try:
headers.update(self.get_headers())
except (ValueError, TypeError):
headers = {}
# Authorize the request
req_url, req_headers, req_body = self.auth_provider.auth_request(
method=method, url=url, headers=headers, body=body,
filters=self.filters
)
# use original body
resp, resp_body = self.http_obj.request(req_url, method,
headers=req_headers,
body=req_body)
self._log_request(method, req_url, resp)
if resp.status == 401 or resp.status == 403:
raise exceptions.Unauthorized()
return resp, resp_body
def list_account_containers(self, params=None, metadata=None):
"""
GET on the (base) storage URL
Given a valid X-Auth-Token, returns a list of all containers for the
account.
Optional Arguments:
limit=[integer value N]
Limits the number of results to at most N values
DEFAULT: 10,000
marker=[string value X]
            Given string value X, return container names greater in value
than the specified marker.
DEFAULT: No Marker
format=[string value, either 'json' or 'xml']
Specify either json or xml to return the respective serialized
response.
DEFAULT: Python-List returned in response body
"""
url = '?format=%s' % self.format
if params:
            url += '&%s' % urllib.urlencode(params)
headers = {}
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.get(url, headers=headers)
return resp, body
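# Illustrative sketch (not part of tempest): how the metadata dictionaries
# passed to the account helpers above map onto Swift request headers. The
# prefixes are the defaults used by AccountClient; the sample keys are made
# up for demonstration.
def _example_account_metadata_headers():
    metadata = {'Book': 'MobyDick'}
    remove_metadata = {'Subject': ''}
    headers = {}
    for key in metadata:
        headers['X-Account-Meta-' + key] = metadata[key]
    for key in remove_metadata:
        headers['X-Remove-Account-Meta-' + key] = remove_metadata[key]
    # -> {'X-Account-Meta-Book': 'MobyDick', 'X-Remove-Account-Meta-Subject': ''}
    return headers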
|
|
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import uuid
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from novaclient.v1_1 import floating_ip_pools
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class NetworkClientTestCase(test.APITestCase):
def test_networkclient_no_neutron(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(False)
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.nova.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager)
def test_networkclient_neutron(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.neutronclient = self.stub_neutronclient()
self.neutronclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.neutron.SecurityGroupManager)
def test_networkclient_neutron_with_nova_security_group(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.neutronclient = self.stub_neutronclient()
self.neutronclient.list_extensions().AndReturn({'extensions': []})
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager)
class NetworkApiNovaTestBase(test.APITestCase):
def setUp(self):
super(NetworkApiNovaTestBase, self).setUp()
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(False)
class NetworkApiNovaSecurityGroupTests(NetworkApiNovaTestBase):
def test_server_update_security_groups(self):
all_secgroups = self.security_groups.list()
added_secgroup = all_secgroups[2]
rm_secgroup = all_secgroups[0]
cur_secgroups_raw = [{'id': sg.id, 'name': sg.name,
'rules': []}
for sg in all_secgroups[0:2]]
cur_secgroups_ret = {'security_groups': cur_secgroups_raw}
new_sg_ids = [sg.id for sg in all_secgroups[1:3]]
instance_id = self.servers.first().id
novaclient = self.stub_novaclient()
novaclient.security_groups = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.client = self.mox.CreateMockAnything()
novaclient.security_groups.list().AndReturn(all_secgroups)
url = '/servers/%s/os-security-groups' % instance_id
novaclient.client.get(url).AndReturn((200, cur_secgroups_ret))
novaclient.servers.add_security_group(instance_id, added_secgroup.name)
novaclient.servers.remove_security_group(instance_id, rm_secgroup.name)
self.mox.ReplayAll()
api.network.server_update_security_groups(
self.request, instance_id, new_sg_ids)
class NetworkApiNovaFloatingIpTests(NetworkApiNovaTestBase):
def test_floating_ip_pools_list(self):
pool_names = ['pool1', 'pool2']
pools = [floating_ip_pools.FloatingIPPool(
None, {'name': pool}) for pool in pool_names]
novaclient = self.stub_novaclient()
novaclient.floating_ip_pools = self.mox.CreateMockAnything()
novaclient.floating_ip_pools.list().AndReturn(pools)
self.mox.ReplayAll()
ret = api.network.floating_ip_pools_list(self.request)
self.assertEqual(pool_names, [p.name for p in ret])
def test_floating_ip_list(self):
fips = self.api_floating_ips.list()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.list().AndReturn(fips)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_list(self.request)
for r, e in zip(ret, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(e, attr), getattr(r, attr))
self.assertEqual(e.instance_id, r.port_id)
exp_instance_type = 'compute' if e.instance_id else None
self.assertEqual(exp_instance_type, r.instance_type)
def test_floating_ip_get(self):
fip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.get(fip.id).AndReturn(fip)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip.id)
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(fip, attr), getattr(ret, attr))
self.assertEqual(fip.instance_id, ret.port_id)
self.assertEqual(fip.instance_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
def test_floating_ip_allocate(self):
pool_name = 'fip_pool'
fip = [fip for fip in self.api_floating_ips.list()
if not fip.instance_id][0]
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.create(pool=pool_name).AndReturn(fip)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_allocate(self.request, pool_name)
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(fip, attr), getattr(ret, attr))
self.assertIsNone(ret.port_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_release(self):
fip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.delete(fip.id)
self.mox.ReplayAll()
api.network.tenant_floating_ip_release(self.request, fip.id)
def test_floating_ip_associate(self):
server = api.nova.Server(self.servers.first(), self.request)
floating_ip = self.floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip)
novaclient.servers.add_floating_ip(server.id, floating_ip.ip) \
.AndReturn(server)
self.mox.ReplayAll()
api.network.floating_ip_associate(self.request,
floating_ip.id,
server.id)
def test_floating_ip_disassociate(self):
server = api.nova.Server(self.servers.first(), self.request)
floating_ip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip)
novaclient.servers.remove_floating_ip(server.id, floating_ip.ip) \
.AndReturn(server)
self.mox.ReplayAll()
api.network.floating_ip_disassociate(self.request,
floating_ip.id)
def test_floating_ip_target_list(self):
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list().AndReturn(servers)
self.mox.ReplayAll()
targets = api.network.floating_ip_target_list(self.request)
for target, server in zip(targets, servers):
self.assertEqual(server.id, target.id)
self.assertEqual('%s (%s)' % (server.name, server.id), target.name)
def test_floating_ip_target_get_by_instance(self):
self.mox.ReplayAll()
instance_id = self.servers.first().id
ret = api.network.floating_ip_target_get_by_instance(self.request,
instance_id)
self.assertEqual(instance_id, ret)
class NetworkApiNeutronTestBase(test.APITestCase):
def setUp(self):
super(NetworkApiNeutronTestBase, self).setUp()
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.qclient = self.stub_neutronclient()
class NetworkApiNeutronTests(NetworkApiNeutronTestBase):
def _get_expected_addresses(self, server, no_fip_expected=True):
server_ports = self.ports.filter(device_id=server.id)
addresses = collections.defaultdict(list)
for p in server_ports:
net_name = self.networks.get(id=p['network_id']).name
for ip in p.fixed_ips:
addresses[net_name].append(
{'version': 4,
'addr': ip['ip_address'],
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'fixed'})
if no_fip_expected:
continue
fips = self.q_floating_ips.filter(port_id=p['id'])
if not fips:
continue
# Only one FIP should match.
fip = fips[0]
addresses[net_name].append(
{'version': 4,
'addr': fip.floating_ip_address,
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'floating'})
return addresses
def _check_server_address(self, res_server_data, no_fip_expected=False):
expected_addresses = self._get_expected_addresses(res_server_data,
no_fip_expected)
self.assertEqual(len(expected_addresses),
len(res_server_data.addresses))
for net, addresses in expected_addresses.items():
self.assertIn(net, res_server_data.addresses)
self.assertEqual(addresses, res_server_data.addresses[net])
def _test_servers_update_addresses(self, router_enabled=True):
tenant_id = self.request.user.tenant_id
servers = copy.deepcopy(self.servers.list())
server_ids = [server.id for server in servers]
server_ports = [p for p in self.api_ports.list()
if p['device_id'] in server_ids]
server_port_ids = [p['id'] for p in server_ports]
if router_enabled:
assoc_fips = [fip for fip in self.api_q_floating_ips.list()
if fip['port_id'] in server_port_ids]
server_network_ids = [p['network_id'] for p in server_ports]
server_networks = [net for net in self.api_networks.list()
if net['id'] in server_network_ids]
self.qclient.list_ports(device_id=server_ids) \
.AndReturn({'ports': server_ports})
if router_enabled:
self.qclient.list_floatingips(tenant_id=tenant_id,
port_id=server_port_ids) \
.AndReturn({'floatingips': assoc_fips})
self.qclient.list_ports(tenant_id=tenant_id) \
.AndReturn({'ports': self.api_ports.list()})
self.qclient.list_networks(id=set(server_network_ids)) \
.AndReturn({'networks': server_networks})
self.qclient.list_subnets() \
.AndReturn({'subnets': self.api_subnets.list()})
self.mox.ReplayAll()
api.network.servers_update_addresses(self.request, servers)
self.assertEqual(self.servers.count(), len(servers))
self.assertEqual([server.id for server in self.servers.list()],
[server.id for server in servers])
no_fip_expected = not router_enabled
# server[0] has one fixed IP and one floating IP
        # if the router extension is enabled.
self._check_server_address(servers[0], no_fip_expected)
        # The expected addresses are computed above; examine the result
        # manually once as well.
addrs = servers[0].addresses['net1']
if router_enabled:
self.assertEqual(2, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
self.assertEqual('floating', addrs[1]['OS-EXT-IPS:type'])
else:
self.assertEqual(1, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
# server[1] has one fixed IP.
self._check_server_address(servers[1], no_fip_expected)
# manual check.
addrs = servers[1].addresses['net2']
self.assertEqual(1, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
# server[2] has no corresponding ports in neutron_data,
# so it should be an empty dict.
self.assertFalse(servers[2].addresses)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_servers_update_addresses(self):
self._test_servers_update_addresses()
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_servers_update_addresses_router_disabled(self):
self._test_servers_update_addresses(router_enabled=False)
class NetworkApiNeutronSecurityGroupTests(NetworkApiNeutronTestBase):
def setUp(self):
super(NetworkApiNeutronSecurityGroupTests, self).setUp()
self.qclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
self.sg_dict = dict([(sg['id'], sg['name']) for sg
in self.api_q_secgroups.list()])
def _cmp_sg_rule(self, exprule, retrule):
self.assertEqual(exprule['id'], retrule.id)
self.assertEqual(exprule['security_group_id'],
retrule.parent_group_id)
self.assertEqual(exprule['direction'],
retrule.direction)
self.assertEqual(exprule['ethertype'],
retrule.ethertype)
self.assertEqual(exprule['port_range_min'],
retrule.from_port)
self.assertEqual(exprule['port_range_max'],
                         retrule.to_port)
if (exprule['remote_ip_prefix'] is None and
exprule['remote_group_id'] is None):
expcidr = ('::/0' if exprule['ethertype'] == 'IPv6'
else '0.0.0.0/0')
else:
expcidr = exprule['remote_ip_prefix']
self.assertEqual(expcidr, retrule.ip_range.get('cidr'))
self.assertEqual(self.sg_dict.get(exprule['remote_group_id']),
retrule.group.get('name'))
def _cmp_sg(self, exp_sg, ret_sg):
self.assertEqual(exp_sg['id'], ret_sg.id)
self.assertEqual(exp_sg['name'], ret_sg.name)
exp_rules = exp_sg['security_group_rules']
self.assertEqual(len(exp_rules), len(ret_sg.rules))
for (exprule, retrule) in itertools.izip(exp_rules, ret_sg.rules):
self._cmp_sg_rule(exprule, retrule)
def test_security_group_list(self):
sgs = self.api_q_secgroups.list()
tenant_id = self.request.user.tenant_id
# use deepcopy to ensure self.api_q_secgroups is not modified.
self.qclient.list_security_groups(tenant_id=tenant_id) \
.AndReturn({'security_groups': copy.deepcopy(sgs)})
self.mox.ReplayAll()
rets = api.network.security_group_list(self.request)
self.assertEqual(len(sgs), len(rets))
for (exp, ret) in itertools.izip(sgs, rets):
self._cmp_sg(exp, ret)
def test_security_group_get(self):
secgroup = self.api_q_secgroups.first()
sg_ids = set([secgroup['id']] +
[rule['remote_group_id'] for rule
in secgroup['security_group_rules']
if rule['remote_group_id']])
related_sgs = [sg for sg in self.api_q_secgroups.list()
if sg['id'] in sg_ids]
# use deepcopy to ensure self.api_q_secgroups is not modified.
self.qclient.show_security_group(secgroup['id']) \
.AndReturn({'security_group': copy.deepcopy(secgroup)})
self.qclient.list_security_groups(id=sg_ids, fields=['id', 'name']) \
.AndReturn({'security_groups': related_sgs})
self.mox.ReplayAll()
ret = api.network.security_group_get(self.request, secgroup['id'])
self._cmp_sg(secgroup, ret)
def test_security_group_create(self):
secgroup = self.api_q_secgroups.list()[1]
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description'],
'tenant_id': self.request.user.project_id}}
self.qclient.create_security_group(body) \
.AndReturn({'security_group': copy.deepcopy(secgroup)})
self.mox.ReplayAll()
ret = api.network.security_group_create(self.request, secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
def test_security_group_update(self):
secgroup = self.api_q_secgroups.list()[1]
secgroup = copy.deepcopy(secgroup)
secgroup['name'] = 'newname'
secgroup['description'] = 'new description'
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description']}}
self.qclient.update_security_group(secgroup['id'], body) \
.AndReturn({'security_group': secgroup})
self.mox.ReplayAll()
ret = api.network.security_group_update(self.request,
secgroup['id'],
secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
def test_security_group_delete(self):
secgroup = self.api_q_secgroups.first()
self.qclient.delete_security_group(secgroup['id'])
self.mox.ReplayAll()
api.network.security_group_delete(self.request, secgroup['id'])
def test_security_group_rule_create(self):
sg_rule = [r for r in self.api_q_secgroup_rules.list()
if r['protocol'] == 'tcp' and r['remote_ip_prefix']][0]
sg_id = sg_rule['security_group_id']
secgroup = [sg for sg in self.api_q_secgroups.list()
if sg['id'] == sg_id][0]
post_rule = copy.deepcopy(sg_rule)
del post_rule['id']
del post_rule['tenant_id']
post_body = {'security_group_rule': post_rule}
self.qclient.create_security_group_rule(post_body) \
.AndReturn({'security_group_rule': copy.deepcopy(sg_rule)})
self.qclient.list_security_groups(id=set([sg_id]),
fields=['id', 'name']) \
.AndReturn({'security_groups': [copy.deepcopy(secgroup)]})
self.mox.ReplayAll()
ret = api.network.security_group_rule_create(
self.request, sg_rule['security_group_id'],
sg_rule['direction'], sg_rule['ethertype'], sg_rule['protocol'],
sg_rule['port_range_min'], sg_rule['port_range_max'],
sg_rule['remote_ip_prefix'], sg_rule['remote_group_id'])
self._cmp_sg_rule(sg_rule, ret)
def test_security_group_rule_delete(self):
sg_rule = self.api_q_secgroup_rules.first()
self.qclient.delete_security_group_rule(sg_rule['id'])
self.mox.ReplayAll()
api.network.security_group_rule_delete(self.request, sg_rule['id'])
def _get_instance(self, cur_sg_ids):
instance_port = [p for p in self.api_ports.list()
if p['device_owner'].startswith('compute:')][0]
instance_id = instance_port['device_id']
        # Emulate an instance with two ports.
instance_ports = []
for _i in range(2):
p = copy.deepcopy(instance_port)
p['id'] = str(uuid.uuid4())
p['security_groups'] = cur_sg_ids
instance_ports.append(p)
return (instance_id, instance_ports)
def test_server_security_groups(self):
cur_sg_ids = [sg['id'] for sg in self.api_q_secgroups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports(device_id=instance_id) \
.AndReturn({'ports': instance_ports})
secgroups = copy.deepcopy(self.api_q_secgroups.list())
self.qclient.list_security_groups(id=set(cur_sg_ids)) \
.AndReturn({'security_groups': secgroups})
self.mox.ReplayAll()
api.network.server_security_groups(self.request, instance_id)
def test_server_update_security_groups(self):
cur_sg_ids = [self.api_q_secgroups.first()['id']]
new_sg_ids = [sg['id'] for sg in self.api_q_secgroups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports(device_id=instance_id) \
.AndReturn({'ports': instance_ports})
for p in instance_ports:
body = {'port': {'security_groups': new_sg_ids}}
self.qclient.update_port(p['id'], body=body).AndReturn({'port': p})
self.mox.ReplayAll()
api.network.server_update_security_groups(
self.request, instance_id, new_sg_ids)
def test_security_group_backend(self):
self.mox.ReplayAll()
self.assertEqual('neutron',
api.network.security_group_backend(self.request))
class NetworkApiNeutronFloatingIpTests(NetworkApiNeutronTestBase):
def setUp(self):
super(NetworkApiNeutronFloatingIpTests, self).setUp()
self.qclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_floating_ip_supported(self):
self.mox.ReplayAll()
self.assertTrue(api.network.floating_ip_supported(self.request))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_floating_ip_supported_false(self):
self.mox.ReplayAll()
self.assertFalse(api.network.floating_ip_supported(self.request))
def test_floating_ip_pools_list(self):
search_opts = {'router:external': True}
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
self.qclient.list_networks(**search_opts) \
.AndReturn({'networks': ext_nets})
self.mox.ReplayAll()
rets = api.network.floating_ip_pools_list(self.request)
for attr in ['id', 'name']:
self.assertEqual([p[attr] for p in ext_nets],
[getattr(p, attr) for p in rets])
def test_floating_ip_list(self):
fips = self.api_q_floating_ips.list()
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_floatingips(**filters) \
.AndReturn({'floatingips': fips})
self.qclient.list_ports(**filters) \
.AndReturn({'ports': self.api_ports.list()})
self.mox.ReplayAll()
rets = api.network.tenant_floating_ip_list(self.request)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(exp[attr], getattr(ret, attr))
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_list_all_tenants(self):
fips = self.api_q_floating_ips.list()
self.qclient.list_floatingips().AndReturn({'floatingips': fips})
self.qclient.list_ports().AndReturn({'ports': self.api_ports.list()})
self.mox.ReplayAll()
        # The all_tenants option of the floating IP list is specific to
        # api.neutron, so we call api.neutron.FloatingIpManager directly;
        # NetworkClient itself is not needed for this test. However, setUp()
        # in the base class configures mox to expect a call to
        # api.base.is_service_enabled(), so we still instantiate
        # NetworkClient to keep mox.VerifyAll() from complaining.
api.network.NetworkClient(self.request)
fip_manager = api.neutron.FloatingIpManager(self.request)
rets = fip_manager.list(all_tenants=True)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(getattr(ret, attr), exp[attr])
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def _test_floating_ip_get_associated(self, assoc_port, exp_instance_type):
fip = self.api_q_floating_ips.list()[1]
self.qclient.show_floatingip(fip['id']).AndReturn({'floatingip': fip})
self.qclient.show_port(assoc_port['id']) \
.AndReturn({'port': assoc_port})
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertEqual(assoc_port['device_id'], ret.instance_id)
self.assertEqual(exp_instance_type, ret.instance_type)
def test_floating_ip_get_associated(self):
assoc_port = self.api_ports.list()[1]
self._test_floating_ip_get_associated(assoc_port, 'compute')
def test_floating_ip_get_associated_with_loadbalancer_vip(self):
assoc_port = copy.deepcopy(self.api_ports.list()[1])
assoc_port['device_owner'] = 'neutron:LOADBALANCER'
assoc_port['device_id'] = str(uuid.uuid4())
assoc_port['name'] = 'vip-' + str(uuid.uuid4())
self._test_floating_ip_get_associated(assoc_port, 'loadbalancer')
def test_floating_ip_get_unassociated(self):
fip = self.api_q_floating_ips.list()[0]
self.qclient.show_floatingip(fip['id']).AndReturn({'floatingip': fip})
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_allocate(self):
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
ext_net = ext_nets[0]
fip = self.api_q_floating_ips.first()
self.qclient.create_floatingip(
{'floatingip': {'floating_network_id': ext_net['id'],
'tenant_id': self.request.user.project_id}}) \
.AndReturn({'floatingip': fip})
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_allocate(self.request,
ext_net['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_release(self):
fip = self.api_q_floating_ips.first()
self.qclient.delete_floatingip(fip['id'])
self.mox.ReplayAll()
api.network.tenant_floating_ip_release(self.request, fip['id'])
def test_floating_ip_associate(self):
fip = self.api_q_floating_ips.list()[1]
assoc_port = self.api_ports.list()[1]
ip_address = assoc_port['fixed_ips'][0]['ip_address']
target_id = '%s_%s' % (assoc_port['id'], ip_address)
params = {'port_id': assoc_port['id'],
'fixed_ip_address': ip_address}
self.qclient.update_floatingip(fip['id'],
{'floatingip': params})
self.mox.ReplayAll()
api.network.floating_ip_associate(self.request, fip['id'], target_id)
def test_floating_ip_disassociate(self):
fip = self.api_q_floating_ips.list()[1]
self.qclient.update_floatingip(fip['id'],
{'floatingip': {'port_id': None}})
self.mox.ReplayAll()
api.network.floating_ip_disassociate(self.request, fip['id'])
def _get_target_id(self, port):
param = {'id': port['id'],
'addr': port['fixed_ips'][0]['ip_address']}
return '%(id)s_%(addr)s' % param
def _get_target_name(self, port):
param = {'svrid': port['device_id'],
'addr': port['fixed_ips'][0]['ip_address']}
return 'server_%(svrid)s: %(addr)s' % param
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_lb': True})
def test_floating_ip_target_list(self):
ports = self.api_ports.list()
# Port on the first subnet is connected to a router
        # attached to an external network in neutron_data.
subnet_id = self.subnets.first().id
target_ports = [(self._get_target_id(p),
self._get_target_name(p)) for p in ports
if (not p['device_owner'].startswith('network:') and
subnet_id in [ip['subnet_id']
for ip in p['fixed_ips']])]
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_ports(**filters).AndReturn({'ports': ports})
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
search_opts = {'project_id': self.request.user.tenant_id}
novaclient.servers.list(True, search_opts).AndReturn(servers)
search_opts = {'router:external': True}
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
self.qclient.list_networks(**search_opts) \
.AndReturn({'networks': ext_nets})
self.qclient.list_routers().AndReturn({'routers':
self.api_routers.list()})
self.qclient.list_vips().AndReturn({'vips': self.vips.list()})
self.mox.ReplayAll()
rets = api.network.floating_ip_target_list(self.request)
self.assertEqual(len(target_ports), len(rets))
for ret, exp in zip(rets, target_ports):
self.assertEqual(exp[0], ret.id)
self.assertEqual(exp[1], ret.name)
def test_floating_ip_target_get_by_instance(self):
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == '1']
search_opts = {'device_id': '1'}
self.qclient.list_ports(**search_opts).AndReturn({'ports': candidates})
self.mox.ReplayAll()
ret = api.network.floating_ip_target_get_by_instance(self.request, '1')
self.assertEqual(self._get_target_id(candidates[0]), ret)
def test_target_floating_ip_port_by_instance(self):
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == '1']
search_opts = {'device_id': '1'}
self.qclient.list_ports(**search_opts).AndReturn({'ports': candidates})
self.mox.ReplayAll()
ret = api.network.floating_ip_target_list_by_instance(self.request,
'1')
self.assertEqual(self._get_target_id(candidates[0]), ret[0])
self.assertEqual(len(candidates), len(ret))
def test_floating_ip_target_get_by_instance_with_preloaded_target(self):
target_list = [{'name': 'name11', 'id': 'id11', 'instance_id': 'vm1'},
{'name': 'name21', 'id': 'id21', 'instance_id': 'vm2'},
{'name': 'name22', 'id': 'id22', 'instance_id': 'vm2'}]
self.mox.ReplayAll()
ret = api.network.floating_ip_target_get_by_instance(
self.request, 'vm2', target_list)
self.assertEqual('id21', ret)
def test_target_floating_ip_port_by_instance_with_preloaded_target(self):
target_list = [{'name': 'name11', 'id': 'id11', 'instance_id': 'vm1'},
{'name': 'name21', 'id': 'id21', 'instance_id': 'vm2'},
{'name': 'name22', 'id': 'id22', 'instance_id': 'vm2'}]
self.mox.ReplayAll()
ret = api.network.floating_ip_target_list_by_instance(
self.request, 'vm2', target_list)
self.assertEqual(['id21', 'id22'], ret)
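# Illustrative sketch (not part of the Horizon test suite): the floating-IP
# association targets asserted above are identified as '<port-id>_<fixed-ip>'
# and named 'server_<instance-id>: <fixed-ip>', mirroring _get_target_id()
# and _get_target_name(). The port dict below is made up for demonstration.
def _example_floating_ip_target():
    port = {'id': 'port-1',
            'device_id': 'server-1',
            'fixed_ips': [{'ip_address': '10.0.0.5'}]}
    target_id = '%s_%s' % (port['id'], port['fixed_ips'][0]['ip_address'])
    target_name = 'server_%s: %s' % (port['device_id'],
                                     port['fixed_ips'][0]['ip_address'])
    # ('port-1_10.0.0.5', 'server_server-1: 10.0.0.5')
    return target_id, target_name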
|
|
# Most of this code was selectively copied from developmentseed/landsat-util
# https://github.com/developmentseed/landsat-util
# Minor modifications by Jwely@Github
import json
import time
import requests
import geocoder
import re
API_URL = 'https://api.developmentseed.org/landsat'
# Geocoding confidence scores,
# from https://github.com/DenisCarriere/geocoder/blob/master/docs/features/Confidence%20Score.md
geocode_confidences = {
10: 0.25,
9: 0.5,
8: 1.,
7: 5.,
6: 7.5,
5: 10.,
4: 15.,
3: 20.,
2: 25.,
1: 99999.,
# 0: unable to locate at all
}
def geocode(address, required_precision_km=1.):
""" Identifies the coordinates of an address
:param address:
the address to be geocoded
    :type address:
String
:param required_precision_km:
the maximum permissible geographic uncertainty for the geocoding
:type required_precision_km:
float
:returns:
dict
"""
geocoded = geocoder.google(address)
precision_km = geocode_confidences[geocoded.confidence]
if precision_km <= required_precision_km:
(lon, lat) = geocoded.geometry['coordinates']
return {'lat': lat, 'lon': lon}
else:
raise ValueError("Address could not be precisely located")
def three_digit(number):
""" Add 0s to inputs that their length is less than 3.
:param number:
The number to convert
:type number:
int
:returns:
String
"""
number = str(number)
if len(number) == 1:
return u'00%s' % number
elif len(number) == 2:
return u'0%s' % number
else:
return number
def create_paired_list(value):
""" Create a list of paired items from a string.
:param value:
the format must be 003,003,004,004 (commas with no space)
:type value:
String
:returns:
List
:example:
create_paired_list('003,003,004,004')
[['003','003'], ['004', '004']]
"""
if isinstance(value, list):
value = ",".join(value)
    array = re.split(r'\D+', value)
# Make sure the elements in the list are even and pairable
if len(array) % 2 == 0:
new_array = [list(array[i:i + 2]) for i in range(0, len(array), 2)]
return new_array
else:
        raise ValueError('The string should include pairs and be formatted. '
'The format must be 003,003,004,004 (commas with '
'no space)')
class Search(object):
""" The search class """
def __init__(self):
self.api_url = API_URL
def search(self, paths_rows=None, lat=None, lon=None, address=None, start_date=None, end_date=None, cloud_min=None,
cloud_max=None, limit=1, geojson=False):
"""
The main method of Search class. It searches Development Seed's Landsat API.
:param paths_rows:
A string in this format: "003,003,004,004". Must be in pairs and separated by comma.
:type paths_rows:
String
:param lat:
The latitude
:type lat:
String, float, integer
:param lon:
            The longitude
:type lon:
String, float, integer
:param address:
The address
:type address:
String
:param start_date:
Date string. format: YYYY-MM-DD
:type start_date:
String
:param end_date:
date string. format: YYYY-MM-DD
:type end_date:
String
:param cloud_min:
float specifying the minimum percentage. e.g. 4.3
:type cloud_min:
float
:param cloud_max:
float specifying the maximum percentage. e.g. 78.9
:type cloud_max:
float
:param limit:
            integer specifying the maximum number of results to return.
:type limit:
integer
:param geojson:
boolean specifying whether to return a geojson object
:type geojson:
boolean
:returns:
dict
:example:
s = Search()
            s.search(paths_rows='003,003', start_date='2014-01-01',
                     end_date='2014-06-01')
{
'status': u'SUCCESS',
'total_returned': 1,
'total': 1,
                'limit': 1,
'results': [
{
'sat_type': u'L8',
'sceneID': u'LC80030032014142LGN00',
'date': u'2014-05-22',
'path': u'003',
'thumbnail': u'http://....../landsat_8/2014/003/003/LC80030032014142LGN00.jpg',
'cloud': 33.36,
                        'row': u'003'
}
]
}
"""
search_string = self.query_builder(paths_rows, lat, lon, address, start_date, end_date, cloud_min, cloud_max)
# Have to manually build the URI to bypass requests URI encoding
# The api server doesn't accept encoded URIs
r = requests.get('%s?search=%s&limit=%s' % (self.api_url, search_string, limit))
r_dict = json.loads(r.text)
result = {}
if 'error' in r_dict:
result['status'] = u'error'
result['code'] = r_dict['error']['code']
result['message'] = r_dict['error']['message']
elif 'meta' in r_dict:
if geojson:
result = {
'type': 'FeatureCollection',
'features': []
}
for r in r_dict['results']:
feature = {
'type': 'Feature',
'properties': {
'sceneID': r['sceneID'],
'row': three_digit(r['row']),
'path': three_digit(r['path']),
'thumbnail': r['browseURL'],
'date': r['acquisitionDate'],
'cloud': r['cloudCoverFull']
},
'geometry': {
'type': 'Polygon',
'coordinates': [
[
[r['upperLeftCornerLongitude'], r['upperLeftCornerLatitude']],
[r['lowerLeftCornerLongitude'], r['lowerLeftCornerLatitude']],
[r['lowerRightCornerLongitude'], r['lowerRightCornerLatitude']],
[r['upperRightCornerLongitude'], r['upperRightCornerLatitude']],
[r['upperLeftCornerLongitude'], r['upperLeftCornerLatitude']]
]
]
}
}
result['features'].append(feature)
else:
result['status'] = u'SUCCESS'
result['total'] = r_dict['meta']['results']['total']
result['limit'] = r_dict['meta']['results']['limit']
result['total_returned'] = len(r_dict['results'])
result['results'] = [{'sceneID': i['sceneID'],
'sat_type': u'L8',
'path': three_digit(i['path']),
'row': three_digit(i['row']),
'thumbnail': i['browseURL'],
'date': i['acquisitionDate'],
'cloud': i['cloudCoverFull']}
for i in r_dict['results']]
return result
def query_builder(self, paths_rows=None, lat=None, lon=None, address=None, start_date=None, end_date=None,
cloud_min=None, cloud_max=None):
""" Builds the proper search syntax (query) for Landsat API.
:param paths_rows:
A string in this format: "003,003,004,004". Must be in pairs and separated by comma.
:type paths_rows:
String
:param lat:
The latitude
:type lat:
String, float, integer
:param lon:
            The longitude
:type lon:
String, float, integer
:param address:
The address
:type address:
String
:param start_date:
Date string. format: YYYY-MM-DD
:type start_date:
String
:param end_date:
date string. format: YYYY-MM-DD
:type end_date:
String
:param cloud_min:
float specifying the minimum percentage. e.g. 4.3
:type cloud_min:
float
:param cloud_max:
float specifying the maximum percentage. e.g. 78.9
:type cloud_max:
float
:returns:
String
"""
query = []
or_string = ''
and_string = ''
search_string = ''
if paths_rows:
            # Converting rows and paths to a paired list.
new_array = create_paired_list(paths_rows)
paths_rows = ['(%s)' % self.row_path_builder(i[0], i[1]) for i in new_array]
or_string = '+OR+'.join(map(str, paths_rows))
if start_date and end_date:
query.append(self.date_range_builder(start_date, end_date))
elif start_date:
query.append(self.date_range_builder(start_date, '2100-01-01'))
elif end_date:
query.append(self.date_range_builder('2009-01-01', end_date))
if cloud_min and cloud_max:
query.append(self.cloud_cover_prct_range_builder(cloud_min, cloud_max))
elif cloud_min:
query.append(self.cloud_cover_prct_range_builder(cloud_min, '100'))
elif cloud_max:
query.append(self.cloud_cover_prct_range_builder('-1', cloud_max))
if address:
query.append(self.address_builder(address))
elif (lat is not None) and (lon is not None):
query.append(self.lat_lon_builder(lat, lon))
if query:
and_string = '+AND+'.join(map(str, query))
if and_string and or_string:
search_string = and_string + '+AND+(' + or_string + ')'
else:
search_string = or_string + and_string
return search_string
@staticmethod
def row_path_builder(path='', row=''):
"""
Builds row and path query.
:param path:
Landsat path. Must be three digits
:type path:
String
:param row:
Landsat row. Must be three digits
:type row:
String
:returns:
String
"""
return 'path:%s+AND+row:%s' % (path, row)
@staticmethod
def date_range_builder(start='2013-02-11', end=None):
"""
Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String
"""
if not end:
end = time.strftime('%Y-%m-%d')
return 'acquisitionDate:[%s+TO+%s]' % (start, end)
@staticmethod
def cloud_cover_prct_range_builder(min=0, max=100):
"""
Builds cloud cover percentage range query.
:param min:
float specifying the minimum percentage. Default is 0
:type min:
float
:param max:
float specifying the maximum percentage. Default is 100
:type max:
float
:returns:
String
"""
return 'cloudCoverFull:[%s+TO+%s]' % (min, max)
def address_builder(self, address):
""" Builds lat and lon query from a geocoded address.
:param address:
The address
:type address:
String
:returns:
String
"""
geocoded = geocode(address)
return self.lat_lon_builder(**geocoded)
@staticmethod
def lat_lon_builder(lat=0, lon=0):
""" Builds lat and lon query.
:param lat:
The latitude. Default is 0
:type lat:
float
:param lon:
            The longitude. Default is 0
:type lon:
float
:returns:
String
"""
return ('upperLeftCornerLatitude:[%s+TO+1000]+AND+lowerRightCornerLatitude:[-1000+TO+%s]'
'+AND+lowerLeftCornerLongitude:[-1000+TO+%s]+AND+upperRightCornerLongitude:[%s+TO+1000]'
% (lat, lat, lon, lon))
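# Illustrative sketch (not part of landsat-util): the kind of search string
# query_builder() assembles from the helpers above for one path/row pair and
# a date range. The inputs are sample values only.
def _example_search_string():
    s = Search()
    query = s.query_builder(paths_rows='003,003',
                            start_date='2014-01-01',
                            end_date='2014-06-01')
    # The date clause and the parenthesised OR-joined path/row clause are
    # joined with '+AND+', giving:
    # 'acquisitionDate:[2014-01-01+TO+2014-06-01]+AND+((path:003+AND+row:003))'
    return query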
if __name__ == "__main__":
s = Search()
tiles = s.search(paths_rows="015,033", start_date="2016-01-01", end_date="2016-01-31", limit=1000)
for tile in tiles['results']:
print(tile)
|
|
import json
from rest_framework import status
from rest_framework.test import APITestCase
class SiteTest(APITestCase):
fixtures = [
'dcim',
'ipam',
'extras',
]
standard_fields = [
'id',
'name',
'slug',
'tenant',
'facility',
'asn',
'physical_address',
'shipping_address',
'comments',
'count_prefixes',
'count_vlans',
'count_racks',
'count_devices',
'count_circuits'
]
nested_fields = [
'id',
'name',
'slug'
]
rack_fields = [
'id',
'name',
'facility_id',
'display_name',
'site',
'group',
'tenant',
'role',
'type',
'width',
'u_height',
'comments'
]
graph_fields = [
'name',
'embed_url',
'embed_link',
]
def test_get_list(self, endpoint='/api/dcim/sites/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
def test_get_detail(self, endpoint='/api/dcim/sites/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
def test_get_site_list_rack(self, endpoint='/api/dcim/sites/1/racks/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in json.loads(response.content):
self.assertEqual(
sorted(i.keys()),
sorted(self.rack_fields),
)
# Check Nested Serializer.
self.assertEqual(
sorted(i.get('site').keys()),
sorted(self.nested_fields),
)
def test_get_site_list_graphs(self, endpoint='/api/dcim/sites/1/graphs/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in json.loads(response.content):
self.assertEqual(
sorted(i.keys()),
sorted(self.graph_fields),
)
class RackTest(APITestCase):
fixtures = [
'dcim',
'ipam'
]
nested_fields = [
'id',
'name',
'facility_id',
'display_name'
]
standard_fields = [
'id',
'name',
'facility_id',
'display_name',
'site',
'group',
'tenant',
'role',
'type',
'width',
'u_height',
'comments'
]
detail_fields = [
'id',
'name',
'facility_id',
'display_name',
'site',
'group',
'tenant',
'role',
'type',
'width',
'u_height',
'comments',
'front_units',
'rear_units'
]
def test_get_list(self, endpoint='/api/dcim/racks/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(i.get('site').keys()),
sorted(SiteTest.nested_fields),
)
def test_get_detail(self, endpoint='/api/dcim/racks/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.detail_fields),
)
self.assertEqual(
sorted(content.get('site').keys()),
sorted(SiteTest.nested_fields),
)
class ManufacturersTest(APITestCase):
fixtures = [
'dcim',
'ipam'
]
standard_fields = [
'id',
'name',
'slug',
]
nested_fields = standard_fields
def test_get_list(self, endpoint='/api/dcim/manufacturers/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
def test_get_detail(self, endpoint='/api/dcim/manufacturers/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
class DeviceTypeTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = [
'id',
'manufacturer',
'model',
'slug',
'part_number',
'u_height',
'is_full_depth',
'is_console_server',
'is_pdu',
'is_network_device',
]
nested_fields = [
'id',
'manufacturer',
'model',
'slug'
]
def test_get_list(self, endpoint='/api/dcim/device-types/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
def test_detail_list(self, endpoint='/api/dcim/device-types/1/'):
# TODO: details returns list view.
# response = self.client.get(endpoint)
# content = json.loads(response.content)
# self.assertEqual(response.status_code, status.HTTP_200_OK)
# self.assertEqual(
# sorted(content.keys()),
# sorted(self.standard_fields),
# )
# self.assertEqual(
# sorted(content.get('manufacturer').keys()),
# sorted(ManufacturersTest.nested_fields),
# )
pass
class DeviceRolesTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = ['id', 'name', 'slug', 'color']
nested_fields = ['id', 'name', 'slug']
def test_get_list(self, endpoint='/api/dcim/device-roles/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
def test_get_detail(self, endpoint='/api/dcim/device-roles/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
class PlatformsTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = ['id', 'name', 'slug', 'rpc_client']
nested_fields = ['id', 'name', 'slug']
def test_get_list(self, endpoint='/api/dcim/platforms/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
def test_get_detail(self, endpoint='/api/dcim/platforms/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
class DeviceTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = [
'id',
'name',
'display_name',
'device_type',
'device_role',
'tenant',
'platform',
'serial',
'asset_tag',
'rack',
'position',
'face',
'parent_device',
'status',
'primary_ip',
'primary_ip4',
'primary_ip6',
'comments',
]
nested_fields = ['id', 'name', 'display_name']
def test_get_list(self, endpoint='/api/dcim/devices/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for device in content:
self.assertEqual(
sorted(device.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(device.get('device_type')),
sorted(DeviceTypeTest.nested_fields),
)
self.assertEqual(
sorted(device.get('device_role')),
sorted(DeviceRolesTest.nested_fields),
)
if device.get('platform'):
self.assertEqual(
sorted(device.get('platform')),
sorted(PlatformsTest.nested_fields),
)
self.assertEqual(
sorted(device.get('rack')),
sorted(RackTest.nested_fields),
)
def test_get_list_flat(self, endpoint='/api/dcim/devices/?format=json_flat'):
flat_fields = [
'asset_tag',
'comments',
'device_role_id',
'device_role_name',
'device_role_slug',
'device_type_id',
'device_type_manufacturer_id',
'device_type_manufacturer_name',
'device_type_manufacturer_slug',
'device_type_model',
'device_type_slug',
'display_name',
'face',
'id',
'name',
'parent_device',
'platform_id',
'platform_name',
'platform_slug',
'position',
'primary_ip_address',
'primary_ip_family',
'primary_ip_id',
'primary_ip4_address',
'primary_ip4_family',
'primary_ip4_id',
'primary_ip6',
'rack_display_name',
'rack_facility_id',
'rack_id',
'rack_name',
'serial',
'status',
'tenant',
]
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
device = content[0]
self.assertEqual(
sorted(device.keys()),
sorted(flat_fields),
)
def test_get_detail(self, endpoint='/api/dcim/devices/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
class ConsoleServerPortsTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = ['id', 'device', 'name', 'connected_console']
nested_fields = ['id', 'device', 'name']
def test_get_list(self, endpoint='/api/dcim/devices/9/console-server-ports/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for console_port in content:
self.assertEqual(
sorted(console_port.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(console_port.get('device')),
sorted(DeviceTest.nested_fields),
)
class ConsolePortsTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = ['id', 'device', 'name', 'cs_port', 'connection_status']
nested_fields = ['id', 'device', 'name']
def test_get_list(self, endpoint='/api/dcim/devices/1/console-ports/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for console_port in content:
self.assertEqual(
sorted(console_port.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(console_port.get('device')),
sorted(DeviceTest.nested_fields),
)
self.assertEqual(
sorted(console_port.get('cs_port')),
sorted(ConsoleServerPortsTest.nested_fields),
)
def test_get_detail(self, endpoint='/api/dcim/console-ports/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(content.get('device')),
sorted(DeviceTest.nested_fields),
)
class PowerPortsTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = ['id', 'device', 'name', 'power_outlet', 'connection_status']
nested_fields = ['id', 'device', 'name']
def test_get_list(self, endpoint='/api/dcim/devices/1/power-ports/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(i.get('device')),
sorted(DeviceTest.nested_fields),
)
def test_get_detail(self, endpoint='/api/dcim/power-ports/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(content.get('device')),
sorted(DeviceTest.nested_fields),
)
class PowerOutletsTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = ['id', 'device', 'name', 'connected_port']
nested_fields = ['id', 'device', 'name']
def test_get_list(self, endpoint='/api/dcim/devices/11/power-outlets/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(i.get('device')),
sorted(DeviceTest.nested_fields),
)
class InterfaceTest(APITestCase):
fixtures = ['dcim', 'ipam', 'extras']
standard_fields = [
'id',
'device',
'name',
'form_factor',
'mac_address',
'mgmt_only',
'description',
'is_connected'
]
nested_fields = ['id', 'device', 'name']
detail_fields = [
'id',
'device',
'name',
'form_factor',
'mac_address',
'mgmt_only',
'description',
'is_connected',
'connected_interface'
]
connection_fields = [
'id',
'interface_a',
'interface_b',
'connection_status',
]
def test_get_list(self, endpoint='/api/dcim/devices/1/interfaces/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(self.standard_fields),
)
self.assertEqual(
sorted(i.get('device')),
sorted(DeviceTest.nested_fields),
)
def test_get_detail(self, endpoint='/api/dcim/interfaces/1/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.detail_fields),
)
self.assertEqual(
sorted(content.get('device')),
sorted(DeviceTest.nested_fields),
)
def test_get_graph_list(self, endpoint='/api/dcim/interfaces/1/graphs/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for i in content:
self.assertEqual(
sorted(i.keys()),
sorted(SiteTest.graph_fields),
)
def test_get_interface_connections(self, endpoint='/api/dcim/interface-connections/4/'):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.connection_fields),
)
class RelatedConnectionsTest(APITestCase):
fixtures = ['dcim', 'ipam']
standard_fields = [
'device',
'console-ports',
'power-ports',
'interfaces',
]
def test_get_list(self, endpoint=(
'/api/dcim/related-connections/'
'?peer-device=test1-edge1&peer-interface=xe-0/0/3')):
response = self.client.get(endpoint)
content = json.loads(response.content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
sorted(content.keys()),
sorted(self.standard_fields),
)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
from collections import OrderedDict
from .Map import Map
import acq4.util.DatabaseGui as DatabaseGui
from . import MapCtrlTemplate
from acq4.Manager import logMsg, logExc
import acq4.pyqtgraph as pg
import os
class DBCtrl(Qt.QWidget):
"""GUI for reading and writing to the database."""
def __init__(self, host, identity):
Qt.QWidget.__init__(self)
self.host = host ## host is the parent Photostim object
self.dbIdentity = identity
## DB tables we will be using {owner: defaultTableName}
tables = OrderedDict([
(self.dbIdentity+'.maps', 'Photostim_maps'),
(self.dbIdentity+'.sites', 'Photostim_sites'),
(self.dbIdentity+'.events', 'Photostim_events')
])
self.maps = []
self.layout = Qt.QVBoxLayout()
self.layout.setContentsMargins(0,0,0,0)
self.layout.setSpacing(0)
self.setLayout(self.layout)
self.dbgui = DatabaseGui.DatabaseGui(dm=host.dataManager(), tables=tables)
self.layout.addWidget(self.dbgui)
for name in ['getTableName', 'getDb']:
setattr(self, name, getattr(self.dbgui, name))
#self.scanTree = TreeWidget.TreeWidget()
#self.layout.addWidget(self.scanTree)
self.ui = MapCtrlTemplate.Ui_Form()
self.mapWidget = Qt.QWidget()
self.ui.setupUi(self.mapWidget)
self.layout.addWidget(self.mapWidget)
self.ui.scanTree.setAcceptDrops(False)
self.ui.scanTree.setDragEnabled(False)
labels = list(Map.mapFields.keys())[2:]
self.ui.mapTable.setHeaderLabels(labels)
self.ui.mapTable.itemChanged.connect(self.mapItemChanged)
self.ui.newMapBtn.clicked.connect(self.newMapClicked)
self.ui.loadMapBtn.clicked.connect(self.loadMapClicked)
self.ui.delMapBtn.clicked.connect(self.delMapClicked)
self.ui.addScanBtn.clicked.connect(self.addScanClicked)
self.ui.removeScanBtn.clicked.connect(self.removeScanClicked)
self.ui.clearDBSpotBtn.clicked.connect(self.clearDBSpot)
self.ui.storeDBSpotBtn.clicked.connect(self.storeDBSpot)
self.ui.clearDBScanBtn.clicked.connect(self.clearDBScan)
self.ui.storeDBScanBtn.clicked.connect(self.storeDBScan)
self.ui.rewriteSpotPosBtn.clicked.connect(self.rewriteSpotPosClicked)
self.ui.scanTree.itemChanged.connect(self.scanTreeItemChanged)
def scanLoaded(self, scan):
## Scan has been loaded, add a new item into the scanTree
item = ScanTreeItem(scan)
self.ui.scanTree.addTopLevelItem(item)
def scanTreeItemChanged(self, item, col):
item.changed(col)
def newMap(self, rec=None):
m = Map(self.host, rec)
self.maps.append(m)
item = m.item
self.ui.mapTable.addTopLevelItem(item)
self.ui.mapTable.setCurrentItem(item)
def mapItemChanged(self, item, col):
self.writeMapRecord(item.map)
def writeMapRecord(self, map):
dbui = self.host.getElement('Database')
db = dbui.getDb()
if db is None:
return
ident = self.dbIdentity+'.maps'
table = dbui.getTableName(ident)
rec = map.getRecord()
#if cell is not None:
#pt, rid = db.addDir(cell)
#rec['cell'] = rid
if rec['cell'] is None:
return
cell = rec['cell']
#fields = db.describeData(rec)
#fields['cell'] = 'int'
db.checkTable(table, ident, Map.mapFields, create=True)
if map.rowID is None:
db.insert(table, rec)
map.rowID = db.lastInsertRow()
else:
rec['rowid'] = map.rowID
db.insert(table, rec, replaceOnConflict=True)
def deleteMap(self, map):
item = map.item
self.ui.mapTable.takeTopLevelItem(self.ui.mapTable.indexOfTopLevelItem(item))
rowID = map.rowID
if rowID is None:
return
dbui = self.host.getElement('Database')
db = dbui.getDb()
if db is None:
raise Exception("No DB Loaded.")
ident = self.dbIdentity+'.maps'
table = dbui.getTableName(ident)
if db.tableOwner(table) != ident:
raise Exception("Table %s not owned by %s" % (table, ident))
db.delete(table, where={'rowid':rowID})
self.host.unregisterMap(map)
def listMaps(self, cells):
"""List all maps associated with the file handle for each cell in a list"""
self.ui.mapTable.clear()
self.maps = []
for cell in cells:
dbui = self.host.getElement('Database')
db = dbui.getDb()
if db is None:
                logMsg("No database loaded in Data Manager.", msgType='error')
                return
ident = self.dbIdentity+'.maps'
table = dbui.getTableName(ident)
if not db.hasTable(table):
return
if db.tableOwner(table) != ident:
raise Exception("Table %s not owned by %s" % (table, ident))
#row = db.getDirRowID(cell)
#if row is None:
#return
maps = db.select(table, ['rowid','*'], where={'cell': cell})
#print maps
for rec in maps:
scans = []
for rowid in rec['scans']:
if isinstance(rowid, tuple):
fh = db.getDir(rowid[0], rowid[1]) ## single-spot maps specify the Protocol table instead
else:
fh = db.getDir('ProtocolSequence', rowid) ## NOTE: single-spot maps use a different table!
scans.append((fh, rowid))
rec['scans'] = scans
self.newMap(rec)
def loadMap(self, map):
## turn scan stubs into real scans
map.loadStubs()
self.host.registerMap(map)
def newMapClicked(self):
## Create a new map in the database
try:
self.newMap()
self.ui.newMapBtn.success("OK.")
except:
self.ui.newMapBtn.failure("Error.")
raise
def loadMapClicked(self):
try:
map = self.selectedMap()
self.loadMap(map)
self.ui.loadMapBtn.success("OK.")
except:
self.ui.loadMapBtn.failure("Error.")
raise
def delMapClicked(self):
try:
map = self.selectedMap()
self.deleteMap(map)
self.ui.addScanBtn.success("Deleted.")
except:
self.ui.addScanBtn.failure("Error.")
raise
#def getSelectedScanFromScanTree(self):
#"""Needs to return a list of scans."""
#if self.scanTree.currentItem().childCount() == 0:
#scan = self.scanTree.currentItem().scan
#return [scan]
#else:
#scans = []
#for i in range(self.scanTree.currentItem().childCount()):
#scan = self.scanTree.currentItem().child(i).scan
#scans.append(scan)
#return scans
def addScanClicked(self):
try:
#scan = self.getSelectedScanFromScanTree()
scans = self.selectedScans()
for scan in scans:
map = self.selectedMap()
map.addScans([scan])
self.writeMapRecord(map)
map.rebuildPlot()
self.ui.addScanBtn.success("OK.")
except:
self.ui.addScanBtn.failure("Error.")
raise
def removeScanClicked(self):
try:
item = self.ui.mapTable.currentItem()
scan = item.scan
map = item.parent().map
map.removeScan(scan)
self.writeMapRecord(map)
map.rebuildPlot()
self.ui.removeScanBtn.success("OK.")
except:
self.ui.removeScanBtn.failure("Error.")
raise
def clearDBSpot(self):
## remove all events referencing this spot
## remove stats for this spot
#raise Exception("Clearing spot data from a db is not yet implemented.")
### This clearly needs to change because it only works with the default tables -- but I wasn't sure how to get the right table names
dbui = self.host.getElement('Database')
db = dbui.getDb()
spot = self.host.selectedSpot
dh = spot.data().name(relativeTo=db.baseDir())
protocolID = db('Select rowid, Dir from DirTable_Protocol where Dir="%s"' %dh)
if len(protocolID) > 0:
protocolID = protocolID[0]['rowid']
else:
return
db('Delete from Photostim_events where ProtocolDir=%i' %protocolID)
db('Delete from Photostim_sites where ProtocolDir=%i' %protocolID)
#db('Delete from DirTable_Protocol where Dir="%s"' %dh)## don't delete the protocol, because other things like atlas tables reference the protocol, only delete from tables we own
print("Removed data for %s" %dh)
def storeDBSpot(self):
try:
self.host.storeDBSpot()
self.ui.storeDBSpotBtn.success("Stored.")
except:
self.ui.storeDBSpotBtn.failure("Error.")
raise
def selectedMap(self):
item = self.ui.mapTable.currentItem()
if item is None:
raise Exception("No map selected.")
if not hasattr(item, 'map'):
item = item.parent()
return item.map
def selectedScans(self):
items = self.ui.scanTree.selectedItems()
#item = self.ui.scanTree.currentItem()
return [item.scan for item in items]
def clearDBScan(self):
try:
scans = self.selectedScans()
if len(scans) == 0:
raise Exception("No scans selected.")
for scan in scans:
scan.clearFromDB()
#self.host.clearDBScan(scan)
self.ui.clearDBScanBtn.success("Cleared.")
except:
self.ui.clearDBScanBtn.failure("Error.")
raise
def storeDBScan(self):
try:
scans = self.selectedScans()
if len(scans) == 0:
raise Exception("No scans selected.")
with pg.ProgressDialog('Storing scan data to DB..', maximum=len(scans)) as dlg:
for scan in scans:
scan.storeToDB()
#self.host.storeDBScan(scan)
dlg += 1
if dlg.wasCanceled():
raise Exception('Store canceled by user')
self.ui.scanTree.clearSelection() ## We do this because it is too easy to forget to select the correct set of data before clicking store.
self.ui.storeDBScanBtn.success("Stored.")
except:
self.ui.storeDBScanBtn.failure("Error.")
raise
def rewriteSpotPosClicked(self):
## Recompute spot locations for selected scan and write to DB
try:
            scans = self.selectedScans()  ## DBCtrl defines selectedScans(); there is no selectedScan() method
            if len(scans) == 0:
                raise Exception("No scan selected.")
            self.host.rewriteSpotPositions(scans[0])
self.ui.rewriteSpotPosBtn.success("Stored.")
except:
self.ui.rewriteSpotPosBtn.failure("Error.")
raise
class ScanTreeItem(pg.TreeWidgetItem):
def __init__(self, scan):
pg.TreeWidgetItem.__init__(self, [scan.name(), '', '', ''])
scan.scanTreeItem = self
self.scan = scan
self.setChecked(1, True)
self.eventWidget = SaveLockWidget()
self.statWidget = SaveLockWidget()
self.setWidget(2, self.eventWidget)
self.setWidget(3, self.statWidget)
self.scanLockChanged(scan)
self.scanStorageChanged(scan)
self.eventWidget.sigLockClicked.connect(self.eventLockClicked)
self.statWidget.sigLockClicked.connect(self.statLockClicked)
scan.sigLockStateChanged.connect(self.scanLockChanged)
scan.sigStorageStateChanged.connect(self.scanStorageChanged)
scan.sigItemVisibilityChanged.connect(self.scanItemVisibilityChanged)
def changed(self, col):
## when scan items are checked/unchecked, show/hide the canvasItem
checked = self.checkState(col) == Qt.Qt.Checked
if col == 1:
self.scan.canvasItem().setVisible(checked)
def scanLockChanged(self, scan):
## scan has been locked/unlocked (or newly loaded), update the indicator in the scanTree
item = scan.scanTreeItem
ev, st = scan.getLockState()
self.eventWidget.setLocked(ev)
self.statWidget.setLocked(st)
def eventLockClicked(self):
ev, st = self.scan.getLockState()
self.scan.lockEvents(not ev)
def statLockClicked(self):
ev, st = self.scan.getLockState()
self.scan.lockStats(not st)
def scanStorageChanged(self, scan):
ev, st = self.scan.getStorageState()
#print "set saved:", ev, st
self.eventWidget.setSaved(ev)
self.statWidget.setSaved(st)
def scanItemVisibilityChanged(self, scan):
cItem = scan.canvasItem()
checked = self.checkState(1) == Qt.Qt.Checked
vis = cItem.isVisible()
if vis == checked:
return
self.setCheckState(1, Qt.Qt.Checked if vis else Qt.Qt.Unchecked)
class SaveLockWidget(Qt.QWidget):
sigLockClicked = Qt.Signal(object) # self, lock
def __init__(self):
Qt.QWidget.__init__(self)
self.layout = Qt.QHBoxLayout()
self.setLayout(self.layout)
self.saveLabel = Qt.QLabel()
self.saveLabel.setScaledContents(True)
self.lockBtn = Qt.QPushButton()
self.lockBtn.setFixedWidth(20)
self.lockBtn.setFixedHeight(20)
self.saveLabel.setFixedWidth(20)
self.saveLabel.setFixedHeight(20)
self.layout.setSpacing(0)
self.layout.setContentsMargins(0,0,0,0)
self.layout.addWidget(self.lockBtn)
self.layout.addWidget(self.saveLabel)
self.setFixedWidth(40)
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'icons'))
images = [os.path.join(path, x) for x in ['locked.png', 'unlocked.png', 'saved.png', 'unsaved.png']]
self.images = [Qt.QPixmap(img) for img in images]
self.icons = [Qt.QIcon(img) for img in self.images[:2]]
if any([img.width() == 0 for img in self.images]):
raise Exception("Could not load icons:", images)
self.setSaved(False)
self.setLocked(False)
self.lockBtn.clicked.connect(self.lockClicked)
def lockClicked(self):
self.sigLockClicked.emit(self)
def setLocked(self, locked):
self.lockBtn.setIcon(self.icons[0 if locked else 1])
def setSaved(self, saved):
self.saveLabel.setPixmap(self.images[2 if saved else 3])
|
|
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
could be written with ``gen`` as::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
For functions that do not return ``Futures``, `Task` works with any
function that takes a ``callback`` keyword argument (most Tornado functions
can be used in either style, although the ``Future`` style is preferred
since it is both shorter and provides better exception handling)::
@gen.coroutine
def get(self):
yield gen.Task(AsyncHTTPClient().fetch, "http://example.com")
You can also yield a list or dict of ``Futures`` and/or ``Tasks``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. versionchanged:: 3.2
Dict support added.
For more complicated interfaces, `Task` can be split into two parts:
`Callback` and `Wait`::
class GenAsyncHandler2(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=(yield gen.Callback("key")))
response = yield gen.Wait("key")
do_something_with_response(response)
self.render("template.html")
The ``key`` argument to `Callback` and `Wait` allows for multiple
asynchronous operations to be started at different times and proceed
in parallel: yield several callbacks with different keys, then wait
for them once all the async operations have started.
The result of a `Wait` or `Task` yield expression depends on how the callback
was run. If it was called with no arguments, the result is ``None``. If
it was called with one argument, the result is that argument. If it was
called with more than one argument or any keyword arguments, the result
is an `Arguments` object, which is a named tuple ``(args, kwargs)``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado import stack_context
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
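    A minimal sketch of this callback convention (added for illustration;
    ``do_fetch`` is not part of Tornado)::
        @gen.engine
        def do_fetch(url, callback):
            response = yield gen.Task(AsyncHTTPClient().fetch, url)
            callback(response)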
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
future.add_done_callback(final_callback)
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
return future
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
Applications do not normally need to use this class, but it may be
subclassed to provide additional yielding behavior.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`."""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
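    For example (illustrative; ``func1`` and ``func2`` stand for any
    callback-style functions)::
        func1(callback=(yield gen.Callback("key1")))
        func2(callback=(yield gen.Callback("key2")))
        result1, result2 = yield gen.WaitAll(["key1", "key2"])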
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
class Task(YieldPoint):
"""Runs a single asynchronous operation.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
A `Task` is equivalent to a `Callback`/`Wait` pair (with a unique
key generated automatically)::
result = yield gen.Task(func, args)
func(args, callback=(yield gen.Callback(key)))
result = yield gen.Wait(key)
"""
def __init__(self, func, *args, **kwargs):
assert "callback" not in kwargs
self.args = args
self.kwargs = kwargs
self.func = func
def start(self, runner):
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.kwargs["callback"] = runner.result_callback(self.key)
self.func(*self.args, **self.kwargs)
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result = self.future.result()
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``Tasks`` or other ``YieldPoints`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints``.
"""
def __init__(self, children):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result = (i.get_result() for i in self.children)
if self.keys is not None:
return dict(zip(self.keys, result))
else:
return list(result)
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
Currently only supports Futures, not other `YieldPoint` classes.
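    A minimal usage sketch (illustrative; ``fetch_url`` stands for any
    function returning a `.Future`)::
        import datetime
        @gen.coroutine
        def fetch_or_none(url):
            try:
                result = yield gen.with_timeout(datetime.timedelta(seconds=5),
                                                fetch_url(url))
            except gen.TimeoutError:
                result = None
            raise gen.Return(result)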
.. versionadded:: 3.3
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
timeout_handle = io_loop.add_timeout(
timeout,
lambda: result.set_exception(TimeoutError("Timeout")))
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 3.3
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
try:
value = future.result()
except Exception:
self.had_exception = True
yielded = self.gen.throw(*sys.exc_info())
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
if isinstance(yielded, (list, dict)):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
elif is_future(yielded):
self.future = yielded
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
else:
self.future = TracebackFuture()
self.future.set_exception(BadYieldError(
"yielded unknown object %r" % (yielded,)))
return True
def result_callback(self, key):
def inner(*args, **kwargs):
if kwargs or len(args) > 1:
result = Arguments(args, kwargs)
elif args:
result = args[0]
else:
result = None
self.set_result(key, result)
return stack_context.wrap(inner)
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
|
|
#todo
#zip up the files (duh)
#scp it to the remote machine
#ssh to remote machine
#blow away old location
#unzip and redeploy it over there
#drop in the build number and revision number?
#get username, password, hostname, location from environmental variables
import os
import sys
import tarfile
import gzip
import logging
import select
import shutil
try:
import paramiko
except:
#commenting out this log because we don't actually use it and it's confusing to see on the build server.
pass
#logging.error("Paramiko not installed - you need it for remote ssh deployment")
import socket
import subprocess
from subprocess import PIPE
debug = True
hostport = 22
def make_archive(path, target_filename):
"""for a given path, generate the tarball for distribution"""
print "Making archive %s : %s" % (path, target_filename)
tar = tarfile.open(target_filename, "w:gz")
tar.add(path)
tar.close()
print "archive created successfully"
def run(t, cmd):
'Open channel on transport, run command, capture output and return'
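    # Illustrative call sequence (the transport is assumed to be connected as in
    # do_deploy() below):
    #   transport = paramiko.Transport((hostname, hostport))
    #   transport.connect(username=username, password=password)
    #   print run(transport, 'uname -a')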
global debug
out = ''
if debug: print 'DEBUG: Running cmd:', cmd
chan = t.open_session()
chan.setblocking(1)
try:
chan.exec_command(cmd)
    except paramiko.SSHException as e:
        print "Error running remote command:", e
sys.exit(1)
### Read when data is available
while True:
r,w,e = select.select([chan,], [], [])
if chan in r:
try:
x = chan.recv(1024)
if len(x) == 0:
print "EOF"
break;
out += x
except socket.timeout:
pass
if debug: print 'DEBUG: cmd results:', out
chan.close()
return out
def do_local_deploy(target_abs_path, target_deploy_path, build_number, revision_number):
#make the archive
#git revision numbers are nasty looking: b9cfdebe87d6-05/27/2009 16:03:43, so let's escape them
revision_number = revision_number.replace(' ', '_')
revision_number = revision_number.replace(':', '')
revision_number = revision_number.replace('/', '-')
#chdir to where this deploy is
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#create the archive in the root directory where both rapidsms and commcarehq reside
archive_to_deploy = os.path.join('../../../','deploy-rev%s.tar.gz' % (revision_number))
#get the basedir that these reside in. this could be arbitrary
    basedir = os.path.basename(os.path.abspath('../../')) # e.g. 49120312421 or commcare-hq
curdir = os.getcwd()
    #go down to that level to actually make the archive
print "cwd: " + os.getcwd()
os.chdir('../../../')
print "archive to deploy: " + archive_to_deploy
print "cwd: " + os.getcwd()
make_archive(basedir,os.path.basename(archive_to_deploy))
print "chdir back to original directory"
os.chdir(curdir)
print "cwd: " + os.getcwd()
archive_filename = os.path.basename(archive_to_deploy)
print "*************************"
print "Finished archiving. Transporting file: " + archive_filename + " to: " + target_abs_path
shutil.move(archive_to_deploy, target_abs_path + archive_filename)
p = subprocess.Popen(['/var/django-sites/builds/rsdeploy.sh', 'deploy-rev%s' % (revision_number), basedir, target_deploy_path], shell=False, stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE)
p.stdin.flush()
p.stdin.close()
output = p.stdout.read()
error = p.stderr.read()
print "Command output: " + output
print "Command Errors: " + error
def do_deploy(hostname, username, password, target_abs_path, target_deploy_path, build_number, revision_number):
#we are starting in commcare-hq/utilities/build because we are operating off of the build.xml
#make the archive
#git revision numbers are nasty looking: b9cfdebe87d6-05/27/2009 16:03:43, so let's escape them
revision_number = revision_number.replace(' ', '_')
revision_number = revision_number.replace(':', '')
revision_number = revision_number.replace('/', '-')
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#create the archive in the root directory where both rapidsms and commcarehq reside
archive_to_deploy = os.path.join('../../../','deploy-rev%s.tar.gz' % (revision_number))
#get the basedir that these reside in. this could be arbitrary
    basedir = os.path.basename(os.path.abspath('../../')) # e.g. 49120312421 or commcare-hq
curdir = os.getcwd()
    #go down to that level to actually make the archive
print "cwd: " + os.getcwd()
os.chdir('../../../')
print "archive to deploy: " + archive_to_deploy
print "cwd: " + os.getcwd()
make_archive(basedir,os.path.basename(archive_to_deploy))
os.chdir(curdir)
print "cwd: " + os.getcwd()
sys.stdout = os.fdopen(1, 'w', 0)
if debug:
print 'DEBUG: Writing log to ssh-cmd.log'
paramiko.util.log_to_file('ssh-cmd.log')
### Open SSH transport
transport = paramiko.Transport((hostname, hostport))
transport.connect(username=username, password=password)
print "starting sftp session"
sftp = paramiko.SFTPClient.from_transport(transport)
archive_filename = os.path.basename(archive_to_deploy)
print "transporting file: " + archive_filename + " to: " + target_abs_path
sftp.put(archive_to_deploy,target_abs_path + archive_filename)
sftp.close()
print "sftp file transferred, remoting in to deploy archive"
print run(transport,'/var/django-sites/builds/rsdeploy.sh deploy-rev%s %s %s' % (revision_number,basedir, target_deploy_path))
# #print run(transport, 'cd %s' %(target_abs_path))
# print run(transport,'sudo /etc/init.d/apache2 stop')
#
# print run(transport, 'rm -rf %s/%s' % (target_abs_path,target_deploy_path))
# print run(transport,'gunzip %s/%s' % (target_abs_path+"/builds",basename))
# print run(transport,'tar -xf %s/%s' % (target_abs_path+"/builds",basename[0:-3]))
#
## print run(transport,'echo CCHQ_BUILD_DATE=\\"`date`\\" >> %s/projects/cchq_main/settings.py' % (basedir))
## print run(transport,'echo CCHQ_BUILD_NUMBER=%s >> %s/projects/cchq_main/settings.py' % (build_number,basedir))
## print run(transport,'echo CCHQ_REVISION_NUMBER=%s >> %s/projects/cchq_main/settings.py' % (revision_number,basedir))
#
#
# print run(transport,'touch %s/projects/cchq_main/media/version.txt' % (basedir))
# print run(transport,'echo CCHQ_BUILD_DATE=\\"`date`\\" >> %s/projects/cchq_main/media/version.txt' % (basedir))
# print run(transport,'echo CCHQ_BUILD_NUMBER=%s >> %s/projects/cchq_main/media/version.txt' % (build_number,basedir))
# print run(transport,'echo CCHQ_REVISION_NUMBER=%s >> %s/projects/cchq_main/media/version.txt' % (revision_number,basedir))
#
# print run(transport,'rm -rf %s/projects/cchq_main/%s' % (basedir, 'xform-data'))
# print run(transport,'rm -rf %s/projects/cchq_main/%s' % (basedir, 'media'))
#
# print run(transport,'mkdir %s/projects/cchq_main/%s' % (basedir, 'xform-data'))
# print run(transport,'mkdir %s/projects/cchq_main/%s' % (basedir, 'media'))
# #print run(transport,'mkdir %s/projects/cchq_main/%s' % (basedir, 'schemas'))
#
# print run(transport,'chmod 777 %s/projects/cchq_main/' % (basedir))
# print run(transport,'chmod -R 777 %s/projects/cchq_main/' % (basedir))
# print run(transport,'chmod 777 %s/projects/cchq_main/cchq.db' % (basedir))
#
# print run(transport,'ln -s /usr/lib/python2.5/site-packages/django/contrib/admin/media/ %s' % (basedir + "/projects/cchq_main/media/admin-media"))
#
# print run(transport,'mv %s %s/%s' % (basedir,target_abs_path,target_deploy_path))
# print run(transport,'cd %s/%s/projects/cchq_main;python manage.py reset_db --noinput;python manage.py syncdb --noinput;python manage.py graph_models -a -g -o media/fullgraph.png' % (target_abs_path,target_deploy_path))
# print run(transport,'gzip %s' % (target_abs_path+"/builds/"+basename[0:-3]))
#
#
# print run(transport,'sudo /etc/init.d/apache2 start')
try:
transport.close()
except:
pass
print "Finished deployment"
if __name__ == "__main__":
try:
hostname = os.environ['DEPLOY_HOST']
username = os.environ['DEPLOY_USERNAME']
password = os.environ['DEPLOY_PASSWORD']
target_abs_path = os.environ['DEPLOY_ABS_PATH']
target_url_path = os.environ['DEPLOY_URL_PATH']
build_number= os.environ['BUILD_NUMBER']
revision_number= os.environ['REVISION_NUMBER']
except:
#no environmental variables, check to see if the arguments are there in the cmdline
if len(sys.argv) != 8:
print """\tUsage:
deploy.py
<remote host>
<remote username>
<remote password>
                <remote deploy dir> - '/var/django-sites/builds/' # trailing slash is necessary!
                <remote deploypath> - 'commcarehq_test' # underscores only; the database name and directory name will be based on this
<build number>
<revision number>
"""
sys.exit(1)
else:
hostname = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
target_abs_path = sys.argv[4]
target_url_path = sys.argv[5]
build_number= sys.argv[6]
revision_number= sys.argv[7]
#do_deploy(hostname, username, password, target_abs_path,target_url_path,build_number,revision_number)
do_local_deploy(target_abs_path,target_url_path,build_number,revision_number)
|
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates profile dictionaries for Autofill.
Used to test autofill.AutofillTest.FormFillLatencyAfterSubmit.
Can be used as a stand alone script with -h to print out help text by running:
python autofill_dataset_generator.py -h
"""
import codecs
import logging
from optparse import OptionParser
import os
import random
import re
import sys
class NullHandler(logging.Handler):
def emit(self, record):
pass
class DatasetGenerator(object):
"""Generates a dataset of dictionaries.
The lists (such as address_construct, city_construct) define the way the
corresponding field is generated. They accomplish this by specifying a
list of function-args lists.
"""
address_construct = [
[ random.randint, 1, 10000],
[ None, u'foobar'],
[ random.choice, [ u'St', u'Ave', u'Ln', u'Ct', ]],
[ random.choice, [ u'#1', u'#2', u'#3', ]],
]
city_construct = [
[ random.choice, [ u'San Jose', u'San Francisco', u'Sacramento',
u'Los Angeles', ]],
]
  state_construct = [
    [ None, u'CA']
  ]
  # GenerateCountry() below expects a country_construct list; it is defined here
  # so that method works, even though the default dataset maps
  # ADDRESS_HOME_COUNTRY to a constant string instead.
  country_construct = [
    [ None, u'United States']
  ]
# These zip codes are now matched to the corresponding cities in
# city_construct.
zip_construct = [ u'95110', u'94109', u'94203', u'90120']
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
log_handlers = {'StreamHandler': None}
def __init__(self, output_filename=None, logging_level=None):
"""Constructs dataset generator object.
Creates 'fields' data member which is a list of pair (two values) lists.
These pairs are comprised of a field key e.g. u'NAME_FIRST' and a
generator method e.g. self.GenerateNameFirst which will generate the value.
If we want the value to always be the same e.g. u'John' we can use this
instead of a method. We can even use None keyword which will give
a value of u''.
'output_pattern' for one field would have been: "{u'NAME_FIRST': u'%s',}"
which is ready to accept a value for the 'NAME_FIRST' field key once
this value is generated.
'output_pattern' is used in 'GenerateNextDict()' to generate the next
dict line.
Args:
output_filename: specified filename of generated dataset to be saved.
Default value is None and no saving takes place.
logging_level: set verbosity levels, default is None.
"""
if logging_level:
if not self.log_handlers['StreamHandler']:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
self.log_handlers['StreamHandler'] = console
self.logger.addHandler(console)
self.logger.setLevel(logging_level)
else:
if self.log_handlers['StreamHandler']:
self.logger.removeHandler(self.log_handlers['StreamHandler'])
self.log_handlers['StreamHandler'] = None
self.output_filename = output_filename
self.dict_no = 0
self.fields = [
[u'NAME_FIRST', self.GenerateNameFirst],
[u'NAME_MIDDLE', None],
[u'NAME_LAST', None],
[u'EMAIL_ADDRESS', self.GenerateEmail],
[u'COMPANY_NAME', None],
[u'ADDRESS_HOME_LINE1', self.GenerateAddress],
[u'ADDRESS_HOME_LINE2', None],
[u'ADDRESS_HOME_CITY', self.GenerateCity],
[u'ADDRESS_HOME_STATE', self.GenerateState],
[u'ADDRESS_HOME_ZIP', self.GenerateZip],
[u'ADDRESS_HOME_COUNTRY', u'United States'],
[u'PHONE_HOME_WHOLE_NUMBER', None],
]
self.next_dict = {}
# Using implicit line joining does not work well in this case as each line
# has to be strings and not function calls that may return strings.
self.output_pattern = u'{\'' + \
u', '.join([u'u"%s" : u"%%s"' % key for key, method in self.fields]) + \
u',}'
def _GenerateField(self, field_construct):
"""Generates each field in each dictionary.
Args:
field_construct: it is a list of lists.
The first value (index 0) of each containing list is a function or None.
The remaining values are the args. If function is None then arg is just
returned.
Example 1: zip_construct = [[ None, u'95110']]. There is one
containing list only and function here is None and arg is u'95110'.
This just returns u'95110'.
Example 2: address_construct = [ [ random.randint, 1, 10000],
[ None, u'foobar'] ] This has two containing lists and it will return
the result of:
random.randint(1, 10000) + ' ' + u'foobar'
which could be u'7832 foobar'
"""
parts = []
for function_and_args in field_construct:
function = function_and_args[0]
args = function_and_args[1:]
if not function:
function = lambda x: x
parts.append(str(function(*args)))
return (' ').join(parts)
def GenerateAddress(self):
"""Uses _GenerateField() and address_construct to gen a random address.
Returns:
A random address.
"""
return self._GenerateField(self.address_construct)
def GenerateCity(self):
"""Uses _GenerateField() and city_construct to gen a random city.
Returns:
A random city.
"""
return self._GenerateField(self.city_construct)
def GenerateState(self):
"""Uses _GenerateField() and state_construct to generate a state.
Returns:
A state.
"""
return self._GenerateField(self.state_construct)
def GenerateZip(self):
"""Uses zip_construct and generated cities to return a matched zip code.
Returns:
A zip code matched to the corresponding city.
"""
city_selected = self.next_dict['ADDRESS_HOME_CITY'][0]
index = self.city_construct[0][1].index(city_selected)
return self.zip_construct[index]
def GenerateCountry(self):
"""Uses _GenerateField() and country_construct to generate a country.
Returns:
A country.
"""
return self._GenerateField(self.country_construct)
def GenerateNameFirst(self):
"""Generates a numerical first name.
The name is the number of the current dict.
i.e. u'1', u'2', u'3'
Returns:
A numerical first name.
"""
return u'%s' % self.dict_no
def GenerateEmail(self):
"""Generates an email that corresponds to the first name.
i.e. u'1@example.com', u'2@example.com', u'3@example.com'
Returns:
An email address that corresponds to the first name.
"""
return u'%s@example.com' % self.dict_no
def GenerateNextDict(self):
"""Generates next dictionary of the dataset.
Returns:
The output dictionary.
"""
self.dict_no += 1
self.next_dict = {}
for key, method_or_value in self.fields:
if not method_or_value:
self.next_dict[key] = ['']
elif type(method_or_value) in [str, unicode]:
self.next_dict[key] = ['%s' % method_or_value]
else:
self.next_dict[key] = [method_or_value()]
return self.next_dict
def GenerateDataset(self, num_of_dict_to_generate=10):
"""Generates a list of dictionaries.
Args:
num_of_dict_to_generate: The number of dictionaries to be generated.
Default value is 10.
Returns:
The dictionary list.
"""
random.seed(0) # All randomly generated values are reproducible.
if self.output_filename:
output_file = codecs.open(
self.output_filename, mode='wb', encoding='utf-8-sig')
else:
output_file = None
try:
list_of_dict = []
if output_file:
output_file.write('[')
output_file.write(os.linesep)
while self.dict_no < num_of_dict_to_generate:
output_dict = self.GenerateNextDict()
list_of_dict.append(output_dict)
output_line = self.output_pattern % tuple(
[output_dict[key] for key, method in self.fields])
if output_file:
output_file.write(output_line)
output_file.write(os.linesep)
self.logger.info(
'%d: [%s]' % (self.dict_no, output_line.encode(sys.stdout.encoding,
'ignore')))
if output_file:
output_file.write(']')
output_file.write(os.linesep)
self.logger.info('%d dictionaries generated SUCCESSFULLY!', self.dict_no)
self.logger.info('--- FINISHED ---')
return list_of_dict
finally:
if output_file:
output_file.close()
def main():
parser = OptionParser()
parser.add_option(
'-o', '--output', dest='output_filename', default='',
help='write output to FILE [optional]', metavar='FILE')
parser.add_option(
'-d', '--dict', type='int', dest='dict_no', metavar='DICT_NO', default=10,
help='DICT_NO: number of dictionaries to be generated [default: %default]')
parser.add_option(
'-l', '--log_level', dest='log_level', default='debug',
metavar='LOG_LEVEL',
help='LOG_LEVEL: "debug", "info", "warning" or "error" [default: %default]')
(options, args) = parser.parse_args()
if args:
parser.print_help()
return 1
options.log_level = options.log_level.lower()
if options.log_level not in ['debug', 'info', 'warning', 'error']:
parser.error('Wrong log_level argument.')
parser.print_help()
else:
if options.log_level == 'debug':
options.log_level = logging.DEBUG
elif options.log_level == 'info':
options.log_level = logging.INFO
elif options.log_level == 'warning':
options.log_level = logging.WARNING
elif options.log_level == 'error':
options.log_level = logging.ERROR
gen = DatasetGenerator(options.output_filename, options.log_level)
gen.GenerateDataset(options.dict_no)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
import logging
from PyQt4.QtGui import (
QGraphicsItem, QGraphicsPathItem, QGraphicsWidget, QGraphicsTextItem,
QGraphicsDropShadowEffect, QPainterPath, QPainterPathStroker,
QPolygonF, QColor, QPen
)
from PyQt4.QtCore import (
Qt, QPointF, QSizeF, QRectF, QLineF, QEvent, qVersion
)
from PyQt4.QtCore import pyqtSignal as Signal
from PyQt4.QtCore import pyqtProperty as Property
from .graphicspathobject import GraphicsPathObject
log = logging.getLogger(__name__)
class Annotation(QGraphicsWidget):
"""Base class for annotations in the canvas scheme.
"""
def __init__(self, parent=None, **kwargs):
QGraphicsWidget.__init__(self, parent, **kwargs)
if qVersion() < "4.7":
geometryChanged = Signal()
def setGeometry(self, rect):
QGraphicsWidget.setGeometry(self, rect)
self.geometryChanged.emit()
class GraphicsTextEdit(QGraphicsTextItem):
"""
QGraphicsTextItem subclass defining an additional placeholderText
property (text displayed when no text is set).
"""
def __init__(self, *args, **kwargs):
QGraphicsTextItem.__init__(self, *args, **kwargs)
self.__placeholderText = ""
def setPlaceholderText(self, text):
"""
Set the placeholder text. This is shown when the item has no text,
i.e when `toPlainText()` returns an empty string.
"""
if self.__placeholderText != text:
self.__placeholderText = text
if not self.toPlainText():
self.update()
def placeholderText(self):
"""
Return the placeholder text.
"""
return str(self.__placeholderText)
placeholderText_ = Property(str, placeholderText, setPlaceholderText,
doc="Placeholder text")
def paint(self, painter, option, widget=None):
QGraphicsTextItem.paint(self, painter, option, widget)
# Draw placeholder text if necessary
if not (self.toPlainText() and self.toHtml()) and \
self.__placeholderText and \
not (self.hasFocus() and \
self.textInteractionFlags() & Qt.TextEditable):
brect = self.boundingRect()
painter.setFont(self.font())
metrics = painter.fontMetrics()
text = metrics.elidedText(self.__placeholderText, Qt.ElideRight,
brect.width())
color = self.defaultTextColor()
color.setAlpha(min(color.alpha(), 150))
painter.setPen(QPen(color))
painter.drawText(brect, Qt.AlignTop | Qt.AlignLeft, text)
class TextAnnotation(Annotation):
"""Text annotation item for the canvas scheme.
"""
editingFinished = Signal()
"""Emitted when the editing is finished (i.e. the item loses focus)."""
textEdited = Signal()
"""Emitted when the edited text changes."""
def __init__(self, parent=None, **kwargs):
Annotation.__init__(self, parent, **kwargs)
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemIsSelectable)
self.setFocusPolicy(Qt.ClickFocus)
self.__textMargins = (2, 2, 2, 2)
rect = self.geometry().translated(-self.pos())
self.__framePathItem = QGraphicsPathItem(self)
self.__framePathItem.setPen(QPen(Qt.NoPen))
self.__textItem = GraphicsTextEdit(self)
self.__textItem.setPlaceholderText(self.tr("Enter text here"))
self.__textItem.setPos(2, 2)
self.__textItem.setTextWidth(rect.width() - 4)
self.__textItem.setTabChangesFocus(True)
self.__textItem.setTextInteractionFlags(Qt.NoTextInteraction)
self.__textItem.setFont(self.font())
self.__textInteractionFlags = Qt.NoTextInteraction
layout = self.__textItem.document().documentLayout()
layout.documentSizeChanged.connect(self.__onDocumentSizeChanged)
self.__updateFrame()
def adjustSize(self):
"""Resize to a reasonable size.
"""
self.__textItem.setTextWidth(-1)
self.__textItem.adjustSize()
size = self.__textItem.boundingRect().size()
left, top, right, bottom = self.textMargins()
geom = QRectF(self.pos(), size + QSizeF(left + right, top + bottom))
self.setGeometry(geom)
def setFramePen(self, pen):
"""Set the frame pen. By default Qt.NoPen is used (i.e. the frame
is not shown).
"""
self.__framePathItem.setPen(pen)
def framePen(self):
"""Return the frame pen.
"""
return self.__framePathItem.pen()
def setFrameBrush(self, brush):
"""Set the frame brush.
"""
self.__framePathItem.setBrush(brush)
def frameBrush(self):
"""Return the frame brush.
"""
return self.__framePathItem.brush()
def setPlainText(self, text):
"""Set the annotation plain text.
"""
self.__textItem.setPlainText(text)
def toPlainText(self):
return self.__textItem.toPlainText()
def setHtml(self, text):
"""Set the annotation rich text.
"""
self.__textItem.setHtml(text)
def toHtml(self):
return self.__textItem.toHtml()
def setDefaultTextColor(self, color):
"""Set the default text color.
"""
self.__textItem.setDefaultTextColor(color)
def defaultTextColor(self):
return self.__textItem.defaultTextColor()
def setTextMargins(self, left, top, right, bottom):
"""Set the text margins.
"""
margins = (left, top, right, bottom)
if self.__textMargins != margins:
self.__textMargins = margins
self.__textItem.setPos(left, top)
self.__textItem.setTextWidth(
max(self.geometry().width() - left - right, 0)
)
def textMargins(self):
"""Return the text margins.
"""
return self.__textMargins
def document(self):
"""Return the QTextDocument instance used internally.
"""
return self.__textItem.document()
def setTextCursor(self, cursor):
self.__textItem.setTextCursor(cursor)
def textCursor(self):
return self.__textItem.textCursor()
def setTextInteractionFlags(self, flags):
self.__textInteractionFlags = flags
def textInteractionFlags(self):
return self.__textInteractionFlags
def setDefaultStyleSheet(self, stylesheet):
self.document().setDefaultStyleSheet(stylesheet)
def mouseDoubleClickEvent(self, event):
Annotation.mouseDoubleClickEvent(self, event)
if event.buttons() == Qt.LeftButton and \
self.__textInteractionFlags & Qt.TextEditable:
self.startEdit()
def startEdit(self):
"""Start the annotation text edit process.
"""
self.__textItem.setTextInteractionFlags(self.__textInteractionFlags)
self.__textItem.setFocus(Qt.MouseFocusReason)
# Install event filter to find out when the text item loses focus.
self.__textItem.installSceneEventFilter(self)
self.__textItem.document().contentsChanged.connect(
self.textEdited
)
def endEdit(self):
"""End the annotation edit.
"""
self.__textItem.setTextInteractionFlags(Qt.NoTextInteraction)
self.__textItem.removeSceneEventFilter(self)
self.__textItem.document().contentsChanged.disconnect(
self.textEdited
)
cursor = self.__textItem.textCursor()
cursor.clearSelection()
self.__textItem.setTextCursor(cursor)
self.editingFinished.emit()
def __onDocumentSizeChanged(self, size):
# The size of the text document has changed. Expand the text
# control rect's height if the text no longer fits inside.
try:
rect = self.geometry()
_, top, _, bottom = self.textMargins()
if rect.height() < (size.height() + bottom + top):
rect.setHeight(size.height() + bottom + top)
self.setGeometry(rect)
except Exception:
log.error("error in __onDocumentSizeChanged",
exc_info=True)
def __updateFrame(self):
rect = self.geometry()
rect.moveTo(0, 0)
path = QPainterPath()
path.addRect(rect)
self.__framePathItem.setPath(path)
def resizeEvent(self, event):
width = event.newSize().width()
left, _, right, _ = self.textMargins()
self.__textItem.setTextWidth(max(width - left - right, 0))
self.__updateFrame()
QGraphicsWidget.resizeEvent(self, event)
def sceneEventFilter(self, obj, event):
if obj is self.__textItem and event.type() == QEvent.FocusOut:
self.__textItem.focusOutEvent(event)
self.endEdit()
return True
return Annotation.sceneEventFilter(self, obj, event)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemSelectedHasChanged:
if self.isSelected():
self.setFramePen(QPen(Qt.DashDotLine))
else:
self.setFramePen(QPen(Qt.NoPen))
return Annotation.itemChange(self, change, value)
def changeEvent(self, event):
if event.type() == QEvent.FontChange:
self.__textItem.setFont(self.font())
Annotation.changeEvent(self, event)
class ArrowItem(GraphicsPathObject):
#: Arrow Style
Plain, Concave = 1, 2
def __init__(self, parent=None, line=None, lineWidth=4, **kwargs):
GraphicsPathObject.__init__(self, parent, **kwargs)
if line is None:
line = QLineF(0, 0, 10, 0)
self.__line = line
self.__lineWidth = lineWidth
self.__arrowStyle = ArrowItem.Plain
self.__updateArrowPath()
def setLine(self, line):
"""Set the baseline of the arrow (:class:`QLineF`).
"""
if self.__line != line:
self.__line = QLineF(line)
self.__updateArrowPath()
def line(self):
"""Return the baseline of the arrow.
"""
return QLineF(self.__line)
def setLineWidth(self, lineWidth):
"""Set the width of the arrow.
"""
if self.__lineWidth != lineWidth:
self.__lineWidth = lineWidth
self.__updateArrowPath()
def lineWidth(self):
"""Return the width of the arrow.
"""
return self.__lineWidth
def setArrowStyle(self, style):
"""Set the arrow style (`ArrowItem.Plain` or `ArrowItem.Concave`)
"""
if self.__arrowStyle != style:
self.__arrowStyle = style
self.__updateArrowPath()
def arrowStyle(self):
"""Return the arrow style
"""
return self.__arrowStyle
def __updateArrowPath(self):
if self.__arrowStyle == ArrowItem.Plain:
path = arrow_path_plain(self.__line, self.__lineWidth)
else:
path = arrow_path_concave(self.__line, self.__lineWidth)
self.setPath(path)
def arrow_path_plain(line, width):
"""
Return an :class:`QPainterPath` of a plain looking arrow.
"""
path = QPainterPath()
p1, p2 = line.p1(), line.p2()
if p1 == p2:
return path
baseline = QLineF(line)
# Require some minimum length.
baseline.setLength(max(line.length() - width * 3, width * 3))
path.moveTo(baseline.p1())
path.lineTo(baseline.p2())
stroker = QPainterPathStroker()
stroker.setWidth(width)
path = stroker.createStroke(path)
arrow_head_len = width * 4
arrow_head_angle = 50
line_angle = line.angle() - 180
angle_1 = line_angle - arrow_head_angle / 2.0
angle_2 = line_angle + arrow_head_angle / 2.0
points = [p2,
p2 + QLineF.fromPolar(arrow_head_len, angle_1).p2(),
p2 + QLineF.fromPolar(arrow_head_len, angle_2).p2(),
p2]
poly = QPolygonF(points)
path_head = QPainterPath()
path_head.addPolygon(poly)
path = path.united(path_head)
return path
def arrow_path_concave(line, width):
"""
Return a :class:`QPainterPath` of a pretty looking arrow.
"""
path = QPainterPath()
p1, p2 = line.p1(), line.p2()
if p1 == p2:
return path
baseline = QLineF(line)
# Require some minimum length.
baseline.setLength(max(line.length() - width * 3, width * 3))
start, end = baseline.p1(), baseline.p2()
mid = (start + end) / 2.0
normal = QLineF.fromPolar(1.0, baseline.angle() + 90).p2()
path.moveTo(start)
path.lineTo(start + (normal * width / 4.0))
path.quadTo(mid + (normal * width / 4.0),
end + (normal * width / 1.5))
path.lineTo(end - (normal * width / 1.5))
path.quadTo(mid - (normal * width / 4.0),
start - (normal * width / 4.0))
path.closeSubpath()
arrow_head_len = width * 4
arrow_head_angle = 50
line_angle = line.angle() - 180
angle_1 = line_angle - arrow_head_angle / 2.0
angle_2 = line_angle + arrow_head_angle / 2.0
points = [p2,
p2 + QLineF.fromPolar(arrow_head_len, angle_1).p2(),
baseline.p2(),
p2 + QLineF.fromPolar(arrow_head_len, angle_2).p2(),
p2]
poly = QPolygonF(points)
path_head = QPainterPath()
path_head.addPolygon(poly)
path = path.united(path_head)
return path
class ArrowAnnotation(Annotation):
def __init__(self, parent=None, line=None, **kwargs):
Annotation.__init__(self, parent, **kwargs)
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemIsSelectable)
self.setFocusPolicy(Qt.ClickFocus)
if line is None:
line = QLineF(0, 0, 20, 0)
self.__line = line
self.__color = QColor(Qt.red)
self.__arrowItem = ArrowItem(self)
self.__arrowItem.setLine(line)
self.__arrowItem.setBrush(self.__color)
self.__arrowItem.setPen(QPen(Qt.NoPen))
self.__arrowItem.setArrowStyle(ArrowItem.Concave)
self.__arrowItem.setLineWidth(5)
self.__shadow = QGraphicsDropShadowEffect(
blurRadius=5, offset=QPointF(1.0, 2.0),
)
self.__arrowItem.setGraphicsEffect(self.__shadow)
self.__shadow.setEnabled(True)
self.__autoAdjustGeometry = True
def setAutoAdjustGeometry(self, autoAdjust):
"""
        If set to `True`, the geometry will be adjusted whenever the arrow
        is changed with `setLine`. Otherwise the geometry of the item is
        only updated so the `line` lies within the `geometry()` rect
        (i.e. it only grows). True by default.
"""
self.__autoAdjustGeometry = autoAdjust
if autoAdjust:
self.adjustGeometry()
def autoAdjustGeometry(self):
"""
Should the geometry of the item be adjusted automatically when
`setLine` is called.
"""
return self.__autoAdjustGeometry
def setLine(self, line):
"""
Set the arrow base line (a `QLineF` in object coordinates).
"""
if self.__line != line:
self.__line = line
# local item coordinate system
geom = self.geometry().translated(-self.pos())
if geom.isNull() and not line.isNull():
geom = QRectF(0, 0, 1, 1)
arrow_shape = arrow_path_concave(line, self.lineWidth())
arrow_rect = arrow_shape.boundingRect()
if not (geom.contains(arrow_rect)):
geom = geom.united(arrow_rect)
if self.__autoAdjustGeometry:
# Shrink the geometry if required.
geom = geom.intersected(arrow_rect)
# topLeft can move changing the local coordinates.
diff = geom.topLeft()
line = QLineF(line.p1() - diff, line.p2() - diff)
self.__arrowItem.setLine(line)
self.__line = line
# parent item coordinate system
geom.translate(self.pos())
self.setGeometry(geom)
def line(self):
"""
Return the arrow base line (`QLineF` in object coordinates).
"""
return QLineF(self.__line)
def setColor(self, color):
"""
Set arrow brush color.
"""
if self.__color != color:
self.__color = QColor(color)
self.__updateBrush()
def color(self):
"""
Return the arrow brush color.
"""
return QColor(self.__color)
def setLineWidth(self, lineWidth):
"""
Set the arrow line width.
"""
self.__arrowItem.setLineWidth(lineWidth)
def lineWidth(self):
"""
Return the arrow line width.
"""
return self.__arrowItem.lineWidth()
def adjustGeometry(self):
"""
Adjust the widget geometry to exactly fit the arrow inside
while preserving the arrow path scene geometry.
"""
# local system coordinate
geom = self.geometry().translated(-self.pos())
line = self.__line
arrow_rect = self.__arrowItem.shape().boundingRect()
if geom.isNull() and not line.isNull():
geom = QRectF(0, 0, 1, 1)
if not (geom.contains(arrow_rect)):
geom = geom.united(arrow_rect)
geom = geom.intersected(arrow_rect)
diff = geom.topLeft()
line = QLineF(line.p1() - diff, line.p2() - diff)
geom.translate(self.pos())
self.setGeometry(geom)
self.setLine(line)
def shape(self):
arrow_shape = self.__arrowItem.shape()
return self.mapFromItem(self.__arrowItem, arrow_shape)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemSelectedHasChanged:
self.__updateBrush()
return Annotation.itemChange(self, change, value)
def __updateBrush(self):
"""
Update the arrow brush.
"""
if self.isSelected():
color = self.__color.darker(150)
else:
color = self.__color
self.__arrowItem.setBrush(color)
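# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
# place a TextAnnotation and an ArrowAnnotation on a plain QGraphicsScene.
# It assumes PyQt4 is installed and, because of the relative import above,
# that the module is run as part of its package (e.g. ``python -m ...``).
if __name__ == "__main__":
    from PyQt4.QtGui import QApplication, QGraphicsScene, QGraphicsView
    app = QApplication([])
    scene = QGraphicsScene()
    # A framed text note; adjustSize() fits the frame to the entered text.
    text = TextAnnotation()
    text.setPlainText("A note on the canvas")
    text.adjustSize()
    scene.addItem(text)
    # A concave arrow; the base line is given in item coordinates.
    arrow = ArrowAnnotation()
    arrow.setLine(QLineF(10, 60, 140, 100))
    scene.addItem(arrow)
    view = QGraphicsView(scene)
    view.show()
    app.exec_()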
|
|
"""
wsproto/handshake
~~~~~~~~~~~~~~~~~~
An implementation of WebSocket handshakes.
"""
from collections import deque
from typing import (
cast,
Deque,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Union,
)
import h11
from .connection import Connection, ConnectionState, ConnectionType
from .events import AcceptConnection, Event, RejectConnection, RejectData, Request
from .extensions import Extension
from .typing import Headers
from .utilities import (
generate_accept_token,
generate_nonce,
LocalProtocolError,
normed_header_dict,
RemoteProtocolError,
split_comma_header,
)
# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake
WEBSOCKET_VERSION = b"13"
class H11Handshake:
"""A Handshake implementation for HTTP/1.1 connections."""
def __init__(self, connection_type: ConnectionType) -> None:
self.client = connection_type is ConnectionType.CLIENT
self._state = ConnectionState.CONNECTING
if self.client:
self._h11_connection = h11.Connection(h11.CLIENT)
else:
self._h11_connection = h11.Connection(h11.SERVER)
self._connection: Optional[Connection] = None
self._events: Deque[Event] = deque()
self._initiating_request: Optional[Request] = None
self._nonce: Optional[bytes] = None
@property
def state(self) -> ConnectionState:
return self._state
@property
def connection(self) -> Optional[Connection]:
"""Return the established connection.
        This returns the :class:`Connection` once the handshake has
        completed, or ``None`` if the connection has not yet been
        established.
        :rtype: Optional[Connection]
"""
return self._connection
def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
"""Initiate an upgrade connection.
        This should be used if the request has already been received and
        parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path.
"""
if self.client:
raise LocalProtocolError(
"Cannot initiate an upgrade connection when acting as the client"
)
upgrade_request = h11.Request(method=b"GET", target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request))
def send(self, event: Event) -> bytes:
"""Send an event to the remote.
This will return the bytes to send based on the event or raise
a LocalProtocolError if the event is not valid given the
state.
:returns: Data to send to the WebSocket peer.
:rtype: bytes
"""
data = b""
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError(
f"Event {event} cannot be sent during the handshake"
)
return data
def receive_data(self, data: Optional[bytes]) -> None:
"""Receive data from the remote.
A list of events that the remote peer triggered by sending
this data can be retrieved with :meth:`events`.
:param bytes data: Data received from the WebSocket peer.
"""
self._h11_connection.receive_data(data or b"")
while True:
try:
event = self._h11_connection.next_event()
except h11.RemoteProtocolError:
raise RemoteProtocolError(
"Bad HTTP message", event_hint=RejectConnection()
)
if (
isinstance(event, h11.ConnectionClosed)
or event is h11.NEED_DATA
or event is h11.PAUSED
):
break
if self.client:
if isinstance(event, h11.InformationalResponse):
if event.status_code == 101:
self._events.append(self._establish_client_connection(event))
else:
self._events.append(
RejectConnection(
headers=list(event.headers),
status_code=event.status_code,
has_body=False,
)
)
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Response):
self._state = ConnectionState.REJECTING
self._events.append(
RejectConnection(
headers=list(event.headers),
status_code=event.status_code,
has_body=True,
)
)
elif isinstance(event, h11.Data):
self._events.append(
RejectData(data=event.data, body_finished=False)
)
elif isinstance(event, h11.EndOfMessage):
self._events.append(RejectData(data=b"", body_finished=True))
self._state = ConnectionState.CLOSED
else:
if isinstance(event, h11.Request):
self._events.append(self._process_connection_request(event))
def events(self) -> Generator[Event, None, None]:
"""Return a generator that provides any events that have been generated
by protocol activity.
        :returns: a generator that yields handshake events.
"""
while self._events:
yield self._events.popleft()
# Server mode methods
def _process_connection_request( # noqa: MC0001
self, event: h11.Request
) -> Request:
if event.method != b"GET":
raise RemoteProtocolError(
"Request method must be GET", event_hint=RejectConnection()
)
connection_tokens = None
extensions: List[str] = []
host = None
key = None
subprotocols: List[str] = []
upgrade = b""
version = None
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
elif name == b"host":
host = value.decode("ascii")
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
extensions = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-key":
key = value
elif name == b"sec-websocket-protocol":
subprotocols = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-version":
version = value
elif name == b"upgrade":
upgrade = value
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if version != WEBSOCKET_VERSION:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'",
event_hint=RejectConnection(
headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)],
status_code=426 if version else 400,
),
)
if key is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
if host is None:
raise RemoteProtocolError(
"Missing header, 'Host'", event_hint=RejectConnection()
)
self._initiating_request = Request(
extensions=extensions,
extra_headers=headers,
host=host,
subprotocols=subprotocols,
target=event.target.decode("ascii"),
)
return self._initiating_request
def _accept(self, event: AcceptConnection) -> bytes:
# _accept is always called after _process_connection_request.
assert self._initiating_request is not None
request_headers = normed_header_dict(self._initiating_request.extra_headers)
nonce = request_headers[b"sec-websocket-key"]
accept_token = generate_accept_token(nonce)
headers = [
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Accept", accept_token),
]
if event.subprotocol is not None:
if event.subprotocol not in self._initiating_request.subprotocols:
raise LocalProtocolError(f"unexpected subprotocol {event.subprotocol}")
headers.append(
(b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii"))
)
if event.extensions:
accepts = server_extensions_handshake(
cast(Sequence[str], self._initiating_request.extensions),
event.extensions,
)
if accepts:
headers.append((b"Sec-WebSocket-Extensions", accepts))
response = h11.InformationalResponse(
status_code=101, headers=headers + event.extra_headers
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
event.extensions,
)
self._state = ConnectionState.OPEN
return self._h11_connection.send(response) or b""
def _reject(self, event: RejectConnection) -> bytes:
if self.state != ConnectionState.CONNECTING:
raise LocalProtocolError(
"Connection cannot be rejected in state %s" % self.state
)
headers = list(event.headers)
if not event.has_body:
headers.append((b"content-length", b"0"))
response = h11.Response(status_code=event.status_code, headers=headers)
data = self._h11_connection.send(response) or b""
self._state = ConnectionState.REJECTING
if not event.has_body:
data += self._h11_connection.send(h11.EndOfMessage()) or b""
self._state = ConnectionState.CLOSED
return data
def _send_reject_data(self, event: RejectData) -> bytes:
if self.state != ConnectionState.REJECTING:
raise LocalProtocolError(
f"Cannot send rejection data in state {self.state}"
)
data = self._h11_connection.send(h11.Data(data=event.data)) or b""
if event.body_finished:
data += self._h11_connection.send(h11.EndOfMessage()) or b""
self._state = ConnectionState.CLOSED
return data
# Client mode methods
def _initiate_connection(self, request: Request) -> bytes:
self._initiating_request = request
self._nonce = generate_nonce()
headers = [
(b"Host", request.host.encode("ascii")),
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Key", self._nonce),
(b"Sec-WebSocket-Version", WEBSOCKET_VERSION),
]
if request.subprotocols:
headers.append(
(
b"Sec-WebSocket-Protocol",
(", ".join(request.subprotocols)).encode("ascii"),
)
)
if request.extensions:
offers: Dict[str, Union[str, bool]] = {}
for e in request.extensions:
assert isinstance(e, Extension)
offers[e.name] = e.offer()
extensions = []
for name, params in offers.items():
bname = name.encode("ascii")
if isinstance(params, bool):
if params:
extensions.append(bname)
else:
extensions.append(b"%s; %s" % (bname, params.encode("ascii")))
if extensions:
headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions)))
upgrade = h11.Request(
method=b"GET",
target=request.target.encode("ascii"),
headers=headers + request.extra_headers,
)
return self._h11_connection.send(upgrade) or b""
def _establish_client_connection(
self, event: h11.InformationalResponse
) -> AcceptConnection: # noqa: MC0001
# _establish_client_connection is always called after _initiate_connection.
assert self._initiating_request is not None
assert self._nonce is not None
accept = None
connection_tokens = None
accepts: List[str] = []
subprotocol = None
upgrade = b""
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
accepts = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-accept":
accept = value
continue # Skip appending to headers
elif name == b"sec-websocket-protocol":
subprotocol = value.decode("ascii")
continue # Skip appending to headers
elif name == b"upgrade":
upgrade = value
continue # Skip appending to headers
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
accept_token = generate_accept_token(self._nonce)
if accept != accept_token:
raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection())
if subprotocol is not None:
if subprotocol not in self._initiating_request.subprotocols:
raise RemoteProtocolError(
f"unrecognized subprotocol {subprotocol}",
event_hint=RejectConnection(),
)
extensions = client_extensions_handshake(
accepts, cast(Sequence[Extension], self._initiating_request.extensions)
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
extensions,
self._h11_connection.trailing_data[0],
)
self._state = ConnectionState.OPEN
return AcceptConnection(
extensions=extensions, extra_headers=headers, subprotocol=subprotocol
)
def __repr__(self) -> str:
return "{}(client={}, state={})".format(
self.__class__.__name__, self.client, self.state
)
def server_extensions_handshake(
requested: Iterable[str], supported: List[Extension]
) -> Optional[bytes]:
"""Agree on the extensions to use returning an appropriate header value.
This returns None if there are no agreed extensions
"""
accepts: Dict[str, Union[bool, bytes]] = {}
for offer in requested:
name = offer.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
accept = extension.accept(offer)
if isinstance(accept, bool):
if accept:
accepts[extension.name] = True
elif accept is not None:
accepts[extension.name] = accept.encode("ascii")
if accepts:
extensions: List[bytes] = []
for name, params in accepts.items():
name_bytes = name.encode("ascii")
if isinstance(params, bool):
assert params
extensions.append(name_bytes)
else:
if params == b"":
extensions.append(b"%s" % (name_bytes))
else:
extensions.append(b"%s; %s" % (name_bytes, params))
return b", ".join(extensions)
return None
def client_extensions_handshake(
accepted: Iterable[str], supported: Sequence[Extension]
) -> List[Extension]:
    # This raises RemoteProtocolError if the accepted extension is not
# supported.
extensions = []
for accept in accepted:
name = accept.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
extension.finalize(accept)
extensions.append(extension)
break
else:
raise RemoteProtocolError(
f"unrecognized extension {name}", event_hint=RejectConnection()
)
return extensions
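# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
# drive a complete client/server handshake in memory with the classes and
# events defined/imported above. Because this module uses relative imports,
# run it as part of the package, e.g. ``python -m wsproto.handshake``.
if __name__ == "__main__":
    client = H11Handshake(ConnectionType.CLIENT)
    server = H11Handshake(ConnectionType.SERVER)
    # The client builds the HTTP/1.1 upgrade request bytes.
    request_bytes = client.send(Request(host="example.com", target="/ws"))
    # The server parses them and accepts once it sees the Request event.
    response_bytes = b""
    server.receive_data(request_bytes)
    for handshake_event in server.events():
        if isinstance(handshake_event, Request):
            response_bytes = server.send(AcceptConnection())
    # The client consumes the 101 response and yields an AcceptConnection.
    client.receive_data(response_bytes)
    print(list(client.events()))
    print(client.state, server.state)  # both should be ConnectionState.OPEN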
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import calendar
from flask import Flask, jsonify, render_template, request, abort, redirect, url_for, make_response
from flask.json import JSONEncoder
from datetime import datetime
import time
import json
import threading
import random
import string
import os
from . import config
from .models import Pokemon, Gym, Pokestop
from .scan import ScanMetrics, Scanner
from .utils import get_locale
log = logging.getLogger(__name__)
class Pogom(Flask):
def __init__(self, scan_config, *args, **kwargs):
super(Pogom, self).__init__(*args, **kwargs)
self.scan_config = scan_config
self.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
self.json_encoder = CustomJSONEncoder
self.route('/', methods=['GET'])(self.fullmap)
self.route('/heatmap-data', methods=['GET'])(self.heatmap_data)
self.route('/map-data', methods=['GET'])(self.map_data)
self.route('/cover', methods=['GET'])(self.cover)
self.route('/location', methods=['POST'])(self.add_location)
self.route('/location', methods=['DELETE'])(self.delete_location)
self.route('/stats', methods=['GET'])(self.stats)
self.route('/config', methods=['GET'])(self.get_config_site)
self.route('/config', methods=['POST'])(self.post_config_site)
self.route('/login', methods=['GET', 'POST'])(self.login)
self.route('/locale', methods=['GET'])(self.locale)
    def is_authenticated(self):
        if not config.get('CONFIG_PASSWORD', None):
            return True
        return request.cookies.get("auth") == config['AUTH_KEY']
def fullmap(self):
# if 'search_thread' not in [t.name for t in threading.enumerate()]:
if (not config.get('GOOGLEMAPS_KEY', None) or
not config.get('ACCOUNTS', None)):
return redirect(url_for('get_config_site'))
return render_template('map.html',
scan_locations=json.dumps(self.scan_config.SCAN_LOCATIONS.values()),
gmaps_key=config['GOOGLEMAPS_KEY'],
is_authenticated=self.is_authenticated())
def login(self):
if self.is_authenticated():
return redirect(url_for('get_config_site'))
if request.method == "GET":
return render_template('login.html')
        if request.form.get('password', None) == config.get('CONFIG_PASSWORD', None):
            resp = make_response(redirect(url_for('get_config_site')))
            resp.set_cookie('auth', config['AUTH_KEY'])
            return resp
        # Wrong password: show the login form again instead of returning None.
        return render_template('login.html')
def heatmap_data(self):
        return jsonify(Pokemon.get_heat_stats())
def get_config_site(self):
if not self.is_authenticated():
return redirect(url_for('login'))
return render_template(
'config.html',
locale=config.get('LOCALE', ''),
locales_available=config.get('LOCALES_AVAILABLE', []),
gmaps_key=config.get('GOOGLEMAPS_KEY', None),
accounts=config.get('ACCOUNTS', []),
password=config.get('CONFIG_PASSWORD', None))
def post_config_site(self):
if not self.is_authenticated():
return redirect(url_for('login'))
config['LOCALE'] = request.form.get('locale', 'en')
config['GOOGLEMAPS_KEY'] = request.form.get('gmapsKey', '')
pw = request.form.get('configPassword', None)
pw_changed = (pw != config.get('CONFIG_PASSWORD', None))
if pw_changed:
config['CONFIG_PASSWORD'] = pw
config['AUTH_KEY'] = ''.join(random.choice(string.lowercase) for _ in range(32))
accounts_str = request.form.get('accounts', None)
usernames_before = set([])
for account in config.get('ACCOUNTS', []):
usernames_before.add(account['username'])
usernames = set([])
accounts_parsed = []
if accounts_str:
for a in accounts_str.splitlines():
a = a.split(":")
if (len(a) == 2) and (a[0].strip() not in usernames):
accounts_parsed.append({'username': a[0].strip(), 'password': a[1].strip()})
usernames.add(a[0].strip())
config['ACCOUNTS'] = accounts_parsed
self.scan_config.ACCOUNTS_CHANGED = (usernames_before != usernames)
self.save_config()
self.scan_config.RESTART = True
resp = make_response(render_template(
'config.html',
locale=config.get('LOCALE', ''),
locales_available=config.get('LOCALES_AVAILABLE', []),
gmaps_key=config.get('GOOGLEMAPS_KEY', None),
accounts=config.get('ACCOUNTS', []),
password=config.get('CONFIG_PASSWORD', None),
alert=True))
if pw_changed:
resp.set_cookie('auth', config['AUTH_KEY'])
return resp
def save_config(self):
if not self.is_authenticated():
return redirect(url_for('login'))
if (config['CONFIG_PATH'] is not None and
os.path.isfile(config['CONFIG_PATH'])):
config_path = config['CONFIG_PATH']
else:
config_path = os.path.join(config['ROOT_PATH'], 'config.json')
with open(config_path, 'w') as f:
data = {'GOOGLEMAPS_KEY': config['GOOGLEMAPS_KEY'],
'LOCALE': config['LOCALE'],
'CONFIG_PASSWORD': config['CONFIG_PASSWORD'],
'SCAN_LOCATIONS': self.scan_config.SCAN_LOCATIONS.values(),
'ACCOUNTS': config['ACCOUNTS']}
f.write(json.dumps(data))
def map_data(self):
d = {}
if not ScanMetrics.LAST_SUCCESSFUL_REQUEST:
time_since_last_req = "na"
elif ScanMetrics.LAST_SUCCESSFUL_REQUEST == -1:
time_since_last_req = "sleep"
else:
time_since_last_req = time.time() - ScanMetrics.LAST_SUCCESSFUL_REQUEST
d['server_status'] = {'num-threads': ScanMetrics.NUM_THREADS,
'num-accounts': ScanMetrics.NUM_ACCOUNTS,
'last-successful-request': time_since_last_req,
'complete-scan-time': ScanMetrics.COMPLETE_SCAN_TIME,
'current-scan-percent': ScanMetrics.CURRENT_SCAN_PERCENT}
d['scan_locations'] = self.scan_config.SCAN_LOCATIONS
if request.args.get('pokemon', 'true') == 'true':
d['pokemons'] = Pokemon.get_active()
if request.args.get('pokestops', 'false') == 'true':
d['pokestops'] = Pokestop.get_all()
# TODO: Lured pokestops
if request.args.get('gyms', 'true') == 'true':
d['gyms'] = Gym.get_all()
return jsonify(d)
def cover(self):
return jsonify({'cover': self.scan_config.COVER,
'scan_locations': self.scan_config.SCAN_LOCATIONS.values()})
def add_location(self):
if not self.is_authenticated():
return redirect(url_for('login'))
lat = request.values.get('lat', type=float)
lng = request.values.get('lng', type=float)
radius = request.values.get('radius', type=int)
if not (lat and lng and radius):
abort(400)
self.scan_config.add_scan_location(lat, lng, radius)
self.save_config()
return ('', 204)
def delete_location(self):
if not self.is_authenticated():
return redirect(url_for('login'))
lat = request.values.get('lat', type=float)
lng = request.values.get('lng', type=float)
if not (lat and lng):
abort(400)
self.scan_config.delete_scan_location(lat, lng)
self.save_config()
return ('', 204)
def stats(self):
stats = Pokemon.get_stats()
count = sum(p['count'] for p in stats)
return render_template('stats.html', pokemons=stats, total=count)
def locale(self):
return jsonify(get_locale())
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
millis = int(
calendar.timegm(obj.timetuple()) * 1000 +
obj.microsecond / 1000
)
return millis
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
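# Note on CustomJSONEncoder (added for illustration): naive datetimes are
# serialised as UTC epoch milliseconds, so a hypothetical value such as
#     CustomJSONEncoder().encode({"seen": datetime(2016, 7, 20, 12, 0, 0)})
# produces '{"seen": 1469016000000}', which keeps timestamps JSON-friendly
# for the map front end without any timezone ambiguity.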
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from os import path
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
class FileCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(FileCacheTest, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.cache_prefix = path.join(self.tmp_dir, "cache")
def tearDown(self):
if self.tmp_dir:
shutil.rmtree(self.tmp_dir, ignore_errors=True)
super(FileCacheTest, self).tearDown()
@combinations.generate(test_base.default_test_combinations())
def testCacheDatasetPassthrough(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
def dataset_fn(count=5, filename=None):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if filename:
return repeat_dataset.cache(filename)
else:
return repeat_dataset
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(dataset_fn()))
get_next = self.getNext(dataset_fn())
# First run without caching to collect the "ground truth".
elements = []
for _ in range(20):
elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the cached dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
cached_elements = []
for _ in range(20):
cached_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(elements, cached_elements)
# Re-initialize with an empty upstream (to throw errors.OutOfRangeError
# if we didn't use the cache).
get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
replayed_elements = []
for _ in range(20):
replayed_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(cached_elements, replayed_elements)
# Re-initialize with an empty upstream and a missing cache file (should
# throw errors.OutOfRangeError immediately).
get_next = self.getNext(
dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testConcurrentWriters(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
self.evaluate(get_next1()) # this should succeed
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(get_next2())
self.evaluate(get_next1()) # this should continue to succeed
@combinations.generate(test_base.default_test_combinations())
def testConcurrentReaders(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
elements = []
for _ in range(4):
elements.append(self.evaluate(get_next1()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
# Re-initialize
get_next1 = self.getNext(cache_dataset1, requires_initialization=True)
get_next2 = self.getNext(cache_dataset2, requires_initialization=True)
# Reading concurrently should succeed.
elements_itr1 = []
elements_itr2 = []
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
# Intentionally reversing the order
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
self.assertAllEqual(elements, elements_itr1)
self.assertAllEqual(elements, elements_itr2)
@combinations.generate(test_base.default_test_combinations())
def testReadingPastEndOfSequence(self):
dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
dataset = dataset.map(lambda a: a).batch(4).repeat(2)
expected_output = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] * 2
self.assertDatasetProduces(dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testCacheZipped(self):
def make_dataset(i):
cache_path = self.cache_prefix + "_" + str(i)
return dataset_ops.Dataset.range(100).shuffle(100).cache(cache_path)
datasets = [make_dataset(i) for i in range(3)]
dataset = dataset_ops.Dataset.zip(tuple(datasets))
first_order = self.getDatasetOutput(dataset)
second_order = self.getDatasetOutput(dataset)
self.assertEqual(first_order, second_order)
@combinations.generate(test_base.default_test_combinations())
def testCleaningUpCacheFiles(self):
def do_test(i):
dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
get_next = self.getNext(dataset)
for _ in range(i):
try:
self.evaluate(get_next())
except errors.OutOfRangeError:
break
if not context.executing_eagerly():
self.skipTest(
"Test requires eager mode for iterators to be deconstructed")
for i in [0, 3, 10, 12, 15]:
do_test(i)
class MemoryCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testCacheDatasetPassthrough(self):
with ops.device("cpu:0"):
repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
dataset = dataset_ops.Dataset.range(3).flat_map(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))
cached_dataset = dataset.cache().repeat(2)
uncached_dataset = dataset.repeat(2)
self.evaluate(repeat_count.initializer)
# Needs to be initializable to capture the variable.
cached_next = self.getNext(cached_dataset, requires_initialization=True)
uncached_next = self.getNext(
uncached_dataset, requires_initialization=True)
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
self.assertEqual(self.evaluate(uncached_next()), i)
self.evaluate(repeat_count.assign(0))
# The uncached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(uncached_next())
# The cached iterator replays from cache.
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
# The cached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(cached_next())
@combinations.generate(test_base.default_test_combinations())
def testEmptyCacheReading(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
cache_dataset = repeat_dataset.cache()
self.assertDatasetProduces(cache_dataset, expected_output=[])
@combinations.generate(test_base.default_test_combinations())
def testConcurrentReaders(self):
dataset_fn = lambda: dataset_ops.Dataset.range(5).cache()
d1 = dataset_fn().map(lambda x: x + 1)
d2 = dataset_fn().map(lambda x: x + 6)
get_next1 = self.getNext(d1)
self.assertEqual(1, self.evaluate(get_next1()))
self.assertEqual(2, self.evaluate(get_next1()))
self.assertEqual(3, self.evaluate(get_next1()))
get_next2 = self.getNext(d2)
self.assertEqual(6, self.evaluate(get_next2()))
self.assertEqual(7, self.evaluate(get_next2()))
self.assertEqual(4, self.evaluate(get_next1())) # interleave execution
self.assertEqual([8, 5],
[self.evaluate(get_next2()),
self.evaluate(get_next1())])
self.assertEqual(9, self.evaluate(get_next2()))
self.assertEqual(10, self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
@combinations.generate(test_base.default_test_combinations())
def testCacheTakeRepeat(self):
dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)
expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testCacheRepeatEpochs(self):
counter = variables.Variable(0)
self.evaluate(counter.initializer)
def increment_fn(x):
counter.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache().repeat(2)
get_next = self.getNext(dataset, requires_initialization=True)
# first epoch
for i in range(10):
self.assertEqual(i, self.evaluate(counter))
self.assertEqual(i, self.evaluate(get_next()))
# second epoch
for i in range(10):
self.assertEqual(10, self.evaluate(counter))
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testCacheIterationEpochs(self):
counter = variables.Variable(0)
self.evaluate(counter.initializer)
def increment_fn(x):
counter.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache()
# first epoch
i = 0
for elem in dataset:
self.assertEqual(i, self.evaluate(elem))
i += 1
self.assertEqual(i, self.evaluate(counter))
# second epoch
i = 0
for elem in dataset:
self.assertEqual(10, self.evaluate(counter))
self.assertEqual(i, self.evaluate(elem))
i += 1
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testCacheV2ResourceCapture(self):
def make_dataset():
ids = dataset_ops.Dataset.range(10)
ids = ids.cache()
def interleave_fn(dataset, _):
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.interleave(functools.partial(interleave_fn, ids))
return dataset
results = []
for elem in make_dataset():
results.append(elem.numpy())
self.assertAllEqual(results, range(10))
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testCacheV2ConcurrentIterators(self):
dataset = dataset_ops.Dataset.range(10).cache()
it1 = iter(dataset)
it2 = iter(dataset)
for i in range(10):
self.assertEqual(next(it1), i)
self.assertEqual(next(it2), i)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testCacheKnownCardinality(self):
    # Check that a dataset which produces a random permutation of range(10)
    # ends up being cached when we read all of its elements but do not reach
    # EOF.
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.shuffle(10, reshuffle_each_iteration=True).cache()
it = iter(dataset)
results = []
for _ in range(10):
results.append(next(it))
it = iter(dataset)
for i in range(10):
self.assertEqual(next(it), results[i])
@combinations.generate(test_base.eager_only_combinations())
def testCheckpointFinishedCache(self):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds = ds.cache()
iterator = iter(ds)
for i in range(num_elements):
self.assertEqual(next(iterator).numpy(), i)
ckpt = trackable_utils.Checkpoint(iterator=iterator)
manager = checkpoint_management.CheckpointManager(
ckpt, self.get_temp_dir(), max_to_keep=1)
manager.save()
manager.restore_or_initialize()
with self.assertRaises(StopIteration):
next(iterator)
@combinations.generate(test_base.eager_only_combinations())
def testCheckpointLargeCache(self):
# Tensor of size 100M
dataset = dataset_ops.Dataset.from_tensors(
array_ops.ones((25, 1000, 1000), dtype=dtypes.float32))
# Repeat 25 times to exceed the 2G proto limit
dataset = dataset.repeat(25)
dataset = dataset.cache()
# Iterate to fill the cache.
iterator = iter(dataset)
for _ in range(23):
next(iterator)
ckpt = trackable_utils.Checkpoint(iterator=iterator)
manager = checkpoint_management.CheckpointManager(
ckpt, self.get_temp_dir(), max_to_keep=1)
manager.save()
class CacheCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def setUp(self):
super(CacheCheckpointTest, self).setUp()
self.range_size = 10
self.num_repeats = 3
self.num_outputs = self.range_size * self.num_repeats
self.cache_file_prefix = "test"
def make_dataset_fn(self, is_memory):
if is_memory:
filename = ""
else:
filename = os.path.join(self.get_temp_dir(), self.cache_file_prefix)
def ds_fn():
return dataset_ops.Dataset.range(self.range_size).cache(filename).repeat(
self.num_repeats)
return ds_fn
def expected_outputs(self):
return list(range(self.range_size)) * self.num_repeats
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointBeforeOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 5 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointBeforeOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 8 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 8, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(8))
outputs = outputs[:5]
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointAfterOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 15 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointAfterOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 18 entries from iterator but save checkpoint after producing 15.
outputs = self.gen_outputs(
ds_fn, [15], 18, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(8)))
outputs = list(range(10)) + list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointBeforeOneEpochButRunCompleteEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 13 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 13, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(3)))
# Since we ran for more than one epoch, the cache was completely written.
# The ckpt was saved when the iterator was in cache-write mode. Test that
# the iterator falls back to read mode after restoring if the cache has
# been completely written.
outputs = list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointUnusedWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Checkpoint before get_next is called even once.
outputs = self.gen_outputs(ds_fn, [], 0, verify_exhausted=False)
self.assertSequenceEqual(outputs, [])
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, ckpt_saved=True, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointUnusedMidwayWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint, then produce no elements and checkpoint.
outputs.extend(
self.gen_outputs(ds_fn, [], 0, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce rest of the elements.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testUnusedCheckpointError(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and save ckpt.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
if is_memory:
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, self.expected_outputs())
else:
# Since the complete cache has not been written, a new iterator which does
# not restore the checkpoint will throw an error since there is a partial
# cache shard.
with self.assertRaises(errors.AlreadyExistsError):
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testIgnoreCheckpointIfCacheWritten(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 15 elements and save ckpt. This will write the complete cache.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Build the iterator again but do not restore from ckpt. Since the cache
# has already been written we should be able to use it.
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
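# Public-API sketch of the behaviour exercised above (illustrative only,
# assuming a TF 2.x eager environment): the first pass over a cached dataset
# materialises its elements, later passes replay them without re-running the
# upstream pipeline, and `.cache(filename)` persists them on disk instead.
#     import tensorflow as tf
#     ds = tf.data.Dataset.range(5).map(lambda x: x * 2).cache()
#     print(list(ds.as_numpy_iterator()))  # fills the cache: [0, 2, 4, 6, 8]
#     print(list(ds.as_numpy_iterator()))  # replays from the in-memory cache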
if __name__ == "__main__":
test.main()
|
|
import json
import logging as logmodule
import os
import re
import sys
import tempfile
import uuid
from django.core.files import File as DjFile
from django.core.management import call_command
from django.core.management.base import BaseCommand
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import licenses
from contentcuration.api import write_file_to_storage
from contentcuration.models import AssessmentItem
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import ContentTag
from contentcuration.models import File
from contentcuration.models import FormatPreset
from contentcuration.models import Invitation
from contentcuration.models import License
from contentcuration.models import User
from contentcuration.utils.files import duplicate_file
from contentcuration.utils.minio_utils import ensure_storage_bucket_public
from contentcuration.views.nodes import duplicate_node_bulk
logmodule.basicConfig()
logging = logmodule.getLogger(__name__)
DESCRIPTION = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
LICENSE = licenses.SPECIAL_PERMISSIONS
LICENSE_DESCRIPTION = "Sample text for content with special permissions"
TAGS = ["Tag 1", "Tag 2", "Tag 3"]
SORT_ORDER = 0
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--email', dest="email", default="a@a.com")
parser.add_argument('--password', dest="password", default="a")
def handle(self, *args, **options):
# Validate email
email = options["email"]
password = options["password"]
if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
print "{} is not a valid email".format(email)
sys.exit()
# create the minio bucket
ensure_storage_bucket_public()
# create the cache table
call_command("createcachetable")
# Run migrations
call_command('migrate')
# Run loadconstants
call_command('loadconstants')
# Set up user as admin
admin = create_user(email, password, "Admin", "User", admin=True)
# Create other users
user1 = create_user("user@a.com", "a", "User", "A")
user2 = create_user("user@b.com", "b", "User", "B")
user3 = create_user("user@c.com", "c", "User", "C")
# Create channels
channel1 = create_channel("Published Channel", DESCRIPTION, editors=[admin], bookmarkers=[user1, user2], public=True)
channel2 = create_channel("Ricecooker Channel", DESCRIPTION, editors=[admin, user1], bookmarkers=[user2], viewers=[user3])
channel3 = create_channel("Empty Channel", editors=[user3], viewers=[user2])
channel4 = create_channel("Imported Channel", editors=[admin])
# Invite admin to channel 3
invitation, _new = Invitation.objects.get_or_create(
invited=admin,
sender=user3,
channel=channel3,
email=admin.email,
)
invitation.share_mode = "edit"
invitation.save()
# Create pool of tags
tags = []
for t in TAGS:
            tag, _new = ContentTag.objects.get_or_create(tag_name=t, channel=channel1)
            tags.append(tag)
# Generate file objects
document_file = create_file("Sample Document", format_presets.DOCUMENT, file_formats.PDF, user=admin)
video_file = create_file("Sample Video", format_presets.VIDEO_HIGH_RES, file_formats.MP4, user=admin)
subtitle_file = create_file("Sample Subtitle", format_presets.VIDEO_SUBTITLE, file_formats.VTT, user=admin)
audio_file = create_file("Sample Audio", format_presets.AUDIO, file_formats.MP3, user=admin)
html5_file = create_file("Sample HTML", format_presets.HTML5_ZIP, file_formats.HTML5, user=admin)
# Populate channel 1 with content and publish
generate_tree(channel1.main_tree, document_file, video_file, subtitle_file, audio_file, html5_file, user=admin, tags=tags)
call_command('exportchannel', channel1.pk)
# Populate channel 2 with staged content
channel2.ricecooker_version = "0.0.0"
channel2.save()
generate_tree(channel2.staging_tree, document_file, video_file, subtitle_file, audio_file, html5_file, user=admin, tags=tags)
# Import content from channel 1 into channel 4
duplicate_node_bulk(channel1.main_tree.children.first(), parent=channel4.main_tree)
print "\n\n\nSETUP DONE: Log in as admin to view data (email: {}, password: {})\n\n\n".format(email, password)
def generate_tree(root, document, video, subtitle, audio, html5, user=None, tags=None):
topic1 = create_topic("Topic 1", root, description=DESCRIPTION)
topic2 = create_topic("Topic 2", root)
create_topic("Topic 3", topic2, description=DESCRIPTION)
create_topic("Topic 4", topic2, description=DESCRIPTION)
# Add files to topic 1
license_id = License.objects.get(license_name=LICENSE).pk
videonode = create_contentnode("Sample Video", topic1, video, content_kinds.VIDEO, license_id, user=user, tags=tags)
duplicate_file(subtitle, node=videonode)
create_contentnode("Sample Document", topic1, document, content_kinds.DOCUMENT, license_id, user=user, tags=tags)
create_contentnode("Sample Audio", topic1, audio, content_kinds.AUDIO, license_id, user=user, tags=tags)
create_contentnode("Sample HTML", topic1, html5, content_kinds.HTML5, license_id, user=user, tags=tags)
create_exercise("Sample Exercise", topic1, license_id, user=user)
def create_user(email, password, first_name, last_name, admin=False):
user, new = User.objects.get_or_create(email=email)
if new:
user.set_password(password)
user.first_name = first_name
user.last_name = last_name
print "User created (email: {}, password: {}, admin: {})".format(email, password, admin)
user.is_staff = admin
user.is_admin = admin
user.is_active = True
user.save()
return user
def create_channel(name, description="", editors=None, language="en", bookmarkers=None, viewers=None, public=False):
domain = uuid.uuid5(uuid.NAMESPACE_DNS, name)
node_id = uuid.uuid5(domain, name)
channel, _new = Channel.objects.get_or_create(pk=node_id.hex)
channel.name = name
channel.description = description
channel.language_id = language
channel.public = public
channel.deleted = False
editors = editors or []
bookmarkers = bookmarkers or []
viewers = viewers or []
for e in editors:
channel.editors.add(e)
for b in bookmarkers:
channel.bookmarked_by.add(b)
for v in viewers:
channel.viewers.add(v)
channel.save()
channel.main_tree.get_descendants().delete()
channel.staging_tree and channel.staging_tree.get_descendants().delete()
return channel
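# Note: the channel id above is derived deterministically with uuid.uuid5 from
# the channel name, so re-running this command reuses the same Channel rows via
# get_or_create. A minimal sketch of the derivation used above:
#
#   import uuid
#   domain = uuid.uuid5(uuid.NAMESPACE_DNS, "Published Channel")
#   node_id = uuid.uuid5(domain, "Published Channel")
#   # node_id.hex is identical on every run for the same name.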
def add_tags(node, tags):
tags = tags or []
for t in tags:
node.tags.add(t)
node.save()
def get_sort_order():
global SORT_ORDER
SORT_ORDER += 1
return SORT_ORDER
def create_topic(title, parent, description=""):
topic = ContentNode.objects.create(
title=title,
description=description,
parent=parent,
kind_id=content_kinds.TOPIC,
sort_order=get_sort_order(),
)
topic.save()
return topic
def create_exercise(title, parent, license_id, description="", user=None):
mastery_model = {
"mastery_model": exercises.M_OF_N,
"randomize": False,
"m": 3,
"n": 5,
}
exercise = ContentNode.objects.create(
title=title,
description=description,
parent=parent,
kind_id=content_kinds.EXERCISE,
author="{} {}".format(user.first_name, user.last_name),
copyright_holder="{} {}".format(user.first_name, user.last_name),
license_id=license_id,
license_description=LICENSE_DESCRIPTION,
extra_fields=json.dumps(mastery_model),
sort_order=get_sort_order(),
)
exercise.save()
create_question(exercise, "Question 1", exercises.SINGLE_SELECTION)
create_question(exercise, "Question 2", exercises.MULTIPLE_SELECTION)
create_question(exercise, "Question 3", exercises.INPUT_QUESTION)
return exercise
def create_question(node, question, question_type):
answers = [
{"answer": "1", "correct": False, "order": 1},
{"answer": "2", "correct": True, "order": 2},
{"answer": "3", "correct": False, "order": 3},
{"answer": "4", "correct": False, "order": 4},
]
hints = [
{"hint": "Hint 1", "order": 1},
{"hint": "Hint 2", "order": 2},
{"hint": "Hint 3", "order": 3},
]
ai = AssessmentItem.objects.create(
contentnode=node,
type=question_type,
question=question,
hints=json.dumps(hints),
answers=json.dumps(answers),
order=node.assessment_items.count(),
)
ai.save()
def create_contentnode(title, parent, file, kind_id, license_id, description="", user=None, tags=None):
node = ContentNode.objects.create(
title=title,
description=description,
parent=parent,
kind_id=kind_id,
author="{} {}".format(user.first_name, user.last_name),
copyright_holder="{} {}".format(user.first_name, user.last_name),
license_id=license_id,
license_description=LICENSE_DESCRIPTION,
sort_order=get_sort_order(),
)
node.save()
duplicate_file(file, node=node)
add_tags(node, tags)
return node
def create_file(display_name, preset_id, ext, user=None):
with tempfile.NamedTemporaryFile(suffix=".{}".format(ext), mode='w+t', delete=False) as f:
f.write(":)")
f.flush()
size = f.tell()
filename = write_file_to_storage(f, name=f.name)
checksum, _ext = os.path.splitext(filename)
f.seek(0)
file_object = File(
file_size=size,
file_on_disk=DjFile(f),
checksum=checksum,
file_format_id=ext,
original_filename=display_name,
preset_id=preset_id,
uploaded_by=user,
language_id="mul" if FormatPreset.objects.filter(id=preset_id, multi_language=True).exists() else None,
)
file_object.save()
f.close()
os.unlink(f.name)
return file_object
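# Usage sketch: this module is a Django management command, so it is normally
# run through manage.py. The command name comes from the file name under
# management/commands/, which is not shown here; assuming it is "setup":
#
#   python manage.py setup --email admin@example.com --password a
#
# or, programmatically:
#
#   from django.core.management import call_command
#   call_command("setup", email="admin@example.com", password="a")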
|
|
# -*- coding: utf-8 -*-
import unittest
import sys
sys.path[0:0] = [""]
import os
import pymongo
from nose.plugins.skip import SkipTest
from datetime import datetime
from mongoengine import *
from mongoengine.connection import get_db, get_connection
__all__ = ("IndexesTest", )
class IndexesTest(unittest.TestCase):
def setUp(self):
self.connection = connect(db='mongoenginetest')
self.db = get_db()
class Person(Document):
name = StringField()
age = IntField()
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
def tearDown(self):
for collection in self.db.collection_names():
if 'system.' in collection:
continue
self.db.drop_collection(collection)
def test_indexes_document(self):
"""Ensure that indexes are used when meta[indexes] is specified for
Documents
"""
self._index_test(Document)
def test_indexes_dynamic_document(self):
"""Ensure that indexes are used when meta[indexes] is specified for
Dynamic Documents
"""
self._index_test(DynamicDocument)
def _index_test(self, InheritFrom):
class BlogPost(InheritFrom):
date = DateTimeField(db_field='addDate', default=datetime.now)
category = StringField()
tags = ListField(StringField())
meta = {
'indexes': [
'-date',
'tags',
('category', '-date')
]
}
expected_specs = [{'fields': [('addDate', -1)]},
{'fields': [('tags', 1)]},
{'fields': [('category', 1), ('addDate', -1)]}]
self.assertEqual(expected_specs, BlogPost._meta['index_specs'])
BlogPost.ensure_indexes()
info = BlogPost.objects._collection.index_information()
# _id, '-date', 'tags', ('cat', 'date')
self.assertEqual(len(info), 4)
info = [value['key'] for key, value in info.iteritems()]
for expected in expected_specs:
self.assertTrue(expected['fields'] in info)
def _index_test_inheritance(self, InheritFrom):
class BlogPost(InheritFrom):
date = DateTimeField(db_field='addDate', default=datetime.now)
category = StringField()
tags = ListField(StringField())
meta = {
'indexes': [
'-date',
'tags',
('category', '-date')
],
'allow_inheritance': True
}
expected_specs = [{'fields': [('_cls', 1), ('addDate', -1)]},
{'fields': [('_cls', 1), ('tags', 1)]},
{'fields': [('_cls', 1), ('category', 1),
('addDate', -1)]}]
self.assertEqual(expected_specs, BlogPost._meta['index_specs'])
BlogPost.ensure_indexes()
info = BlogPost.objects._collection.index_information()
# _id, '-date', 'tags', ('cat', 'date')
# NB: there is no index on _cls by itself, since
# the indices on -date and tags will both contain
# _cls as first element in the key
self.assertEqual(len(info), 4)
info = [value['key'] for key, value in info.iteritems()]
for expected in expected_specs:
self.assertTrue(expected['fields'] in info)
class ExtendedBlogPost(BlogPost):
title = StringField()
meta = {'indexes': ['title']}
expected_specs.append({'fields': [('_cls', 1), ('title', 1)]})
self.assertEqual(expected_specs, ExtendedBlogPost._meta['index_specs'])
BlogPost.drop_collection()
ExtendedBlogPost.ensure_indexes()
info = ExtendedBlogPost.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
for expected in expected_specs:
self.assertTrue(expected['fields'] in info)
def test_indexes_document_inheritance(self):
"""Ensure that indexes are used when meta[indexes] is specified for
Documents
"""
self._index_test_inheritance(Document)
def test_indexes_dynamic_document_inheritance(self):
"""Ensure that indexes are used when meta[indexes] is specified for
Dynamic Documents
"""
self._index_test_inheritance(DynamicDocument)
def test_inherited_index(self):
"""Ensure index specs are inhertited correctly"""
class A(Document):
title = StringField()
meta = {
'indexes': [
{
'fields': ('title',),
},
],
'allow_inheritance': True,
}
class B(A):
description = StringField()
self.assertEqual(A._meta['index_specs'], B._meta['index_specs'])
self.assertEqual([{'fields': [('_cls', 1), ('title', 1)]}],
A._meta['index_specs'])
def test_index_no_cls(self):
"""Ensure index specs are inhertited correctly"""
class A(Document):
title = StringField()
meta = {
'indexes': [
{'fields': ('title',), 'cls': False},
],
'allow_inheritance': True,
'index_cls': False
}
self.assertEqual([('title', 1)], A._meta['index_specs'][0]['fields'])
A._get_collection().drop_indexes()
A.ensure_indexes()
info = A._get_collection().index_information()
self.assertEqual(len(info.keys()), 2)
class B(A):
c = StringField()
d = StringField()
meta = {
'indexes': [{'fields': ['c']}, {'fields': ['d'], 'cls': True}],
'allow_inheritance': True
}
self.assertEqual([('c', 1)], B._meta['index_specs'][1]['fields'])
self.assertEqual([('_cls', 1), ('d', 1)], B._meta['index_specs'][2]['fields'])
def test_build_index_spec_is_not_destructive(self):
class MyDoc(Document):
keywords = StringField()
meta = {
'indexes': ['keywords'],
'allow_inheritance': False
}
self.assertEqual(MyDoc._meta['index_specs'],
[{'fields': [('keywords', 1)]}])
# Force index creation
MyDoc.ensure_indexes()
self.assertEqual(MyDoc._meta['index_specs'],
[{'fields': [('keywords', 1)]}])
def test_embedded_document_index_meta(self):
"""Ensure that embedded document indexes are created explicitly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank = EmbeddedDocumentField(Rank, required=False)
meta = {
'indexes': [
'rank.title',
],
'allow_inheritance': False
}
self.assertEqual([{'fields': [('rank.title', 1)]}],
Person._meta['index_specs'])
Person.drop_collection()
# Indexes are lazy so use list() to perform query
list(Person.objects)
info = Person.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('rank.title', 1)] in info)
def test_explicit_geo2d_index(self):
"""Ensure that geo2d indexes work when created via meta[indexes]
"""
class Place(Document):
location = DictField()
meta = {
'allow_inheritance': True,
'indexes': [
'*location.point',
]
}
self.assertEqual([{'fields': [('location.point', '2d')]}],
Place._meta['index_specs'])
Place.ensure_indexes()
info = Place._get_collection().index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('location.point', '2d')] in info)
def test_explicit_geo2d_index_embedded(self):
"""Ensure that geo2d indexes work when created via meta[indexes]
"""
class EmbeddedLocation(EmbeddedDocument):
location = DictField()
class Place(Document):
current = DictField(field=EmbeddedDocumentField('EmbeddedLocation'))
meta = {
'allow_inheritance': True,
'indexes': [
'*current.location.point',
]
}
self.assertEqual([{'fields': [('current.location.point', '2d')]}],
Place._meta['index_specs'])
Place.ensure_indexes()
info = Place._get_collection().index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('current.location.point', '2d')] in info)
def test_dictionary_indexes(self):
"""Ensure that indexes are used when meta[indexes] contains
dictionaries instead of lists.
"""
class BlogPost(Document):
date = DateTimeField(db_field='addDate', default=datetime.now)
category = StringField()
tags = ListField(StringField())
meta = {
'indexes': [
{'fields': ['-date'], 'unique': True, 'sparse': True},
],
}
self.assertEqual([{'fields': [('addDate', -1)], 'unique': True,
'sparse': True}],
BlogPost._meta['index_specs'])
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
# _id, '-date'
self.assertEqual(len(info), 2)
# Indexes are lazy so use list() to perform query
list(BlogPost.objects)
info = BlogPost.objects._collection.index_information()
info = [(value['key'],
value.get('unique', False),
value.get('sparse', False))
for key, value in info.iteritems()]
self.assertTrue(([('addDate', -1)], True, True) in info)
BlogPost.drop_collection()
def test_abstract_index_inheritance(self):
class UserBase(Document):
user_guid = StringField(required=True)
meta = {
'abstract': True,
'indexes': ['user_guid'],
'allow_inheritance': True
}
class Person(UserBase):
name = StringField()
meta = {
'indexes': ['name'],
}
Person.drop_collection()
Person(name="test", user_guid='123').save()
self.assertEqual(1, Person.objects.count())
info = Person.objects._collection.index_information()
self.assertEqual(sorted(info.keys()),
['_cls_1_name_1', '_cls_1_user_guid_1', '_id_'])
def test_disable_index_creation(self):
"""Tests setting auto_create_index to False on the connection will
disable any index generation.
"""
class User(Document):
meta = {
'allow_inheritance': True,
'indexes': ['user_guid'],
'auto_create_index': False
}
user_guid = StringField(required=True)
class MongoUser(User):
pass
User.drop_collection()
User(user_guid='123').save()
MongoUser(user_guid='123').save()
self.assertEqual(2, User.objects.count())
info = User.objects._collection.index_information()
self.assertEqual(info.keys(), ['_id_'])
User.ensure_indexes()
info = User.objects._collection.index_information()
self.assertEqual(sorted(info.keys()), ['_cls_1_user_guid_1', '_id_'])
User.drop_collection()
def test_embedded_document_index(self):
"""Tests settings an index on an embedded document
"""
class Date(EmbeddedDocument):
year = IntField(db_field='yr')
class BlogPost(Document):
title = StringField()
date = EmbeddedDocumentField(Date)
meta = {
'indexes': [
'-date.year'
],
}
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
self.assertEqual(sorted(info.keys()), ['_id_', 'date.yr_-1'])
BlogPost.drop_collection()
def test_list_embedded_document_index(self):
"""Ensure list embedded documents can be indexed
"""
class Tag(EmbeddedDocument):
name = StringField(db_field='tag')
class BlogPost(Document):
title = StringField()
tags = ListField(EmbeddedDocumentField(Tag))
meta = {
'indexes': [
'tags.name'
]
}
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
# we don't use _cls with list fields by default
self.assertEqual(sorted(info.keys()), ['_id_', 'tags.tag_1'])
post1 = BlogPost(title="Embedded Indexes tests in place",
tags=[Tag(name="about"), Tag(name="time")])
post1.save()
BlogPost.drop_collection()
def test_recursive_embedded_objects_dont_break_indexes(self):
class RecursiveObject(EmbeddedDocument):
obj = EmbeddedDocumentField('self')
class RecursiveDocument(Document):
recursive_obj = EmbeddedDocumentField(RecursiveObject)
meta = {'allow_inheritance': True}
RecursiveDocument.ensure_indexes()
info = RecursiveDocument._get_collection().index_information()
self.assertEqual(sorted(info.keys()), ['_cls_1', '_id_'])
def test_covered_index(self):
"""Ensure that covered indexes can be used
"""
class Test(Document):
a = IntField()
meta = {
'indexes': ['a'],
'allow_inheritance': False
}
Test.drop_collection()
obj = Test(a=1)
obj.save()
# Need to be explicit about covered indexes as MongoDB doesn't know if
# the documents returned might have more keys than those requested here.
query_plan = Test.objects(id=obj.id).exclude('a').explain()
self.assertFalse(query_plan['indexOnly'])
query_plan = Test.objects(id=obj.id).only('id').explain()
self.assertTrue(query_plan['indexOnly'])
query_plan = Test.objects(a=1).only('a').exclude('id').explain()
self.assertTrue(query_plan['indexOnly'])
def test_index_on_id(self):
class BlogPost(Document):
meta = {
'indexes': [
['categories', 'id']
]
}
title = StringField(required=True)
description = StringField(required=True)
categories = ListField()
BlogPost.drop_collection()
indexes = BlogPost.objects._collection.index_information()
self.assertEqual(indexes['categories_1__id_1']['key'],
[('categories', 1), ('_id', 1)])
def test_hint(self):
class BlogPost(Document):
tags = ListField(StringField())
meta = {
'indexes': [
'tags',
],
}
BlogPost.drop_collection()
for i in xrange(0, 10):
tags = [("tag %i" % n) for n in xrange(0, i % 2)]
BlogPost(tags=tags).save()
self.assertEqual(BlogPost.objects.count(), 10)
self.assertEqual(BlogPost.objects.hint().count(), 10)
self.assertEqual(BlogPost.objects.hint([('tags', 1)]).count(), 10)
self.assertEqual(BlogPost.objects.hint([('ZZ', 1)]).count(), 10)
if pymongo.version >= '2.8':
self.assertEqual(BlogPost.objects.hint('tags').count(), 10)
else:
def invalid_index():
BlogPost.objects.hint('tags')
self.assertRaises(TypeError, invalid_index)
def invalid_index_2():
return BlogPost.objects.hint(('tags', 1))
self.assertRaises(Exception, invalid_index_2)
def test_unique(self):
"""Ensure that uniqueness constraints are applied to fields.
"""
class BlogPost(Document):
title = StringField()
slug = StringField(unique=True)
BlogPost.drop_collection()
post1 = BlogPost(title='test1', slug='test')
post1.save()
# Two posts with the same slug is not allowed
post2 = BlogPost(title='test2', slug='test')
self.assertRaises(NotUniqueError, post2.save)
# Ensure backwards compatibility for errors
self.assertRaises(OperationError, post2.save)
def test_unique_with(self):
"""Ensure that unique_with constraints are applied to fields.
"""
class Date(EmbeddedDocument):
year = IntField(db_field='yr')
class BlogPost(Document):
title = StringField()
date = EmbeddedDocumentField(Date)
slug = StringField(unique_with='date.year')
BlogPost.drop_collection()
post1 = BlogPost(title='test1', date=Date(year=2009), slug='test')
post1.save()
# year is different so won't raise exception
post2 = BlogPost(title='test2', date=Date(year=2010), slug='test')
post2.save()
# Now there will be two docs with the same slug and the same day: fail
post3 = BlogPost(title='test3', date=Date(year=2010), slug='test')
self.assertRaises(OperationError, post3.save)
BlogPost.drop_collection()
def test_unique_embedded_document(self):
"""Ensure that uniqueness constraints are applied to fields on embedded documents.
"""
class SubDocument(EmbeddedDocument):
year = IntField(db_field='yr')
slug = StringField(unique=True)
class BlogPost(Document):
title = StringField()
sub = EmbeddedDocumentField(SubDocument)
BlogPost.drop_collection()
post1 = BlogPost(title='test1',
sub=SubDocument(year=2009, slug="test"))
post1.save()
# sub.slug is different so won't raise exception
post2 = BlogPost(title='test2',
sub=SubDocument(year=2010, slug='another-slug'))
post2.save()
# Now there will be two docs with the same sub.slug
post3 = BlogPost(title='test3',
sub=SubDocument(year=2010, slug='test'))
self.assertRaises(NotUniqueError, post3.save)
BlogPost.drop_collection()
def test_unique_embedded_document_in_list(self):
"""
Ensure that the uniqueness constraints are applied to fields in
embedded documents, even when the embedded documents are in a
list field.
"""
class SubDocument(EmbeddedDocument):
year = IntField(db_field='yr')
slug = StringField(unique=True)
class BlogPost(Document):
title = StringField()
subs = ListField(EmbeddedDocumentField(SubDocument))
BlogPost.drop_collection()
post1 = BlogPost(
title='test1', subs=[
SubDocument(year=2009, slug='conflict'),
SubDocument(year=2009, slug='conflict')
]
)
post1.save()
post2 = BlogPost(
title='test2', subs=[SubDocument(year=2014, slug='conflict')]
)
self.assertRaises(NotUniqueError, post2.save)
BlogPost.drop_collection()
def test_unique_with_embedded_document_and_embedded_unique(self):
"""Ensure that uniqueness constraints are applied to fields on
embedded documents. And work with unique_with as well.
"""
class SubDocument(EmbeddedDocument):
year = IntField(db_field='yr')
slug = StringField(unique=True)
class BlogPost(Document):
title = StringField(unique_with='sub.year')
sub = EmbeddedDocumentField(SubDocument)
BlogPost.drop_collection()
post1 = BlogPost(title='test1',
sub=SubDocument(year=2009, slug="test"))
post1.save()
# sub.slug is different so won't raise exception
post2 = BlogPost(title='test2',
sub=SubDocument(year=2010, slug='another-slug'))
post2.save()
# Now there will be two docs with the same sub.slug
post3 = BlogPost(title='test3',
sub=SubDocument(year=2010, slug='test'))
self.assertRaises(NotUniqueError, post3.save)
# Now there will be two docs with the same title and year
post3 = BlogPost(title='test1',
sub=SubDocument(year=2009, slug='test-1'))
self.assertRaises(NotUniqueError, post3.save)
BlogPost.drop_collection()
def test_ttl_indexes(self):
class Log(Document):
created = DateTimeField(default=datetime.now)
meta = {
'indexes': [
{'fields': ['created'], 'expireAfterSeconds': 3600}
]
}
Log.drop_collection()
if pymongo.version_tuple[0] < 2 and pymongo.version_tuple[1] < 3:
raise SkipTest('pymongo needs to be 2.3 or higher for this test')
connection = get_connection()
version_array = connection.server_info()['versionArray']
if version_array[0] < 2 and version_array[1] < 2:
raise SkipTest('MongoDB needs to be 2.2 or higher for this test')
# Indexes are lazy so use list() to perform query
list(Log.objects)
info = Log.objects._collection.index_information()
self.assertEqual(3600,
info['created_1']['expireAfterSeconds'])
def test_unique_and_indexes(self):
"""Ensure that 'unique' constraints aren't overridden by
meta.indexes.
"""
class Customer(Document):
cust_id = IntField(unique=True, required=True)
meta = {
'indexes': ['cust_id'],
'allow_inheritance': False,
}
Customer.drop_collection()
cust = Customer(cust_id=1)
cust.save()
cust_dupe = Customer(cust_id=1)
try:
cust_dupe.save()
raise AssertionError("We saved a dupe!")
except NotUniqueError:
pass
Customer.drop_collection()
def test_unique_and_primary(self):
"""If you set a field as primary, then unexpected behaviour can occur.
You won't create a duplicate but you will update an existing document.
"""
class User(Document):
name = StringField(primary_key=True, unique=True)
password = StringField()
User.drop_collection()
user = User(name='huangz', password='secret')
user.save()
user = User(name='huangz', password='secret2')
user.save()
self.assertEqual(User.objects.count(), 1)
self.assertEqual(User.objects.get().password, 'secret2')
User.drop_collection()
def test_index_with_pk(self):
"""Ensure you can use `pk` as part of a query"""
class Comment(EmbeddedDocument):
comment_id = IntField(required=True)
try:
class BlogPost(Document):
comments = EmbeddedDocumentField(Comment)
meta = {'indexes': [
{'fields': ['pk', 'comments.comment_id'],
'unique': True}]}
except UnboundLocalError:
self.fail('Unbound local error at index + pk definition')
info = BlogPost.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
index_item = [('_id', 1), ('comments.comment_id', 1)]
self.assertTrue(index_item in info)
def test_compound_key_embedded(self):
class CompoundKey(EmbeddedDocument):
name = StringField(required=True)
term = StringField(required=True)
class Report(Document):
key = EmbeddedDocumentField(CompoundKey, primary_key=True)
text = StringField()
Report.drop_collection()
my_key = CompoundKey(name="n", term="ok")
report = Report(text="OK", key=my_key).save()
self.assertEqual({'text': 'OK', '_id': {'term': 'ok', 'name': 'n'}},
report.to_mongo())
self.assertEqual(report, Report.objects.get(pk=my_key))
def test_compound_key_dictfield(self):
class Report(Document):
key = DictField(primary_key=True)
text = StringField()
Report.drop_collection()
my_key = {"name": "n", "term": "ok"}
report = Report(text="OK", key=my_key).save()
self.assertEqual({'text': 'OK', '_id': {'term': 'ok', 'name': 'n'}},
report.to_mongo())
self.assertEqual(report, Report.objects.get(pk=my_key))
def test_string_indexes(self):
class MyDoc(Document):
provider_ids = DictField()
meta = {
"indexes": ["provider_ids.foo", "provider_ids.bar"],
}
info = MyDoc.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('provider_ids.foo', 1)] in info)
self.assertTrue([('provider_ids.bar', 1)] in info)
def test_text_indexes(self):
class Book(Document):
title = DictField()
meta = {
"indexes": ["$title"],
}
indexes = Book.objects._collection.index_information()
self.assertTrue("title_text" in indexes)
key = indexes["title_text"]["key"]
self.assertTrue(('_fts', 'text') in key)
def test_indexes_after_database_drop(self):
"""
Test to ensure that indexes are re-created on a collection even
after the database has been dropped.
Issue #812
"""
class BlogPost(Document):
title = StringField()
slug = StringField(unique=True)
BlogPost.drop_collection()
# Create Post #1
post1 = BlogPost(title='test1', slug='test')
post1.save()
# Drop the Database
self.connection.drop_database(BlogPost._get_db().name)
# Re-create Post #1
post1 = BlogPost(title='test1', slug='test')
post1.save()
# Create Post #2
post2 = BlogPost(title='test2', slug='test')
self.assertRaises(NotUniqueError, post2.save)
if __name__ == '__main__':
unittest.main()
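# Running these tests (a sketch): setUp() connects to the 'mongoenginetest'
# database, so a MongoDB server must be reachable on the default host/port:
#
#   python -m unittest path.to.this_module    # the module path is a placeholder
#
# or simply execute this file directly, which calls unittest.main() above.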
|
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Cici,Li<cici.x.li@intel.com>
# Lin, Wanming <wanming.lin@intel.com>
import os
import sys
import commands
import shutil
import glob
import fnmatch
import re
import json
import stat
reload(sys)
sys.setdefaultencoding("utf-8")
script_path = os.path.realpath(__file__)
const_path = os.path.dirname(script_path)
tool_path = const_path + "/../tools/"
plugin_tool = const_path + "/../tools/cordova-plugin-crosswalk-webview/"
testapp_path = "/tmp/cordova-sampleapp/"
def setUp():
global ARCH, MODE, CORDOVA_VERSION, device, CROSSWALK_VERSION, CROSSWALK_BRANCH, PACK_TYPE
device = os.environ.get('DEVICE_ID')
if not device:
print (" get env error\n")
sys.exit(1)
f_arch = open(const_path + "/../arch.txt", 'r')
arch_tmp = f_arch.read()
if arch_tmp.strip("\n\t") == "arm":
ARCH = "arm"
elif arch_tmp.strip("\n\t") == "x86":
ARCH = "x86"
else:
print (
" get arch error, the content of arch.txt should be 'arm' or 'x86'\n")
sys.exit(1)
f_arch.close()
f_mode = open(const_path + "/../mode.txt", 'r')
mode_tmp = f_mode.read()
if mode_tmp.strip("\n\t") == "shared":
MODE = "shared"
elif mode_tmp.strip("\n\t") == "embedded":
MODE = "embedded"
else:
print (
" get mode error, the content of mode.txt should be 'shared' or 'embedded'\n")
sys.exit(1)
f_mode.close()
f_version = open(const_path + "/../cordova-version", 'r')
if f_version.read().strip("\n\t") != "3.6":
CORDOVA_VERSION = "4.x"
else:
CORDOVA_VERSION = "3.6"
f_version.close()
if CORDOVA_VERSION == "4.x":
f_pack_type = open(const_path + "/../pack-type", 'r')
pack_type_tmp = f_pack_type.read()
if pack_type_tmp.strip("\n\t") == "local":
PACK_TYPE = "local"
elif pack_type_tmp.strip("\n\t") == "npm":
PACK_TYPE = "npm"
else:
print (
" get pack type error, the content of pack-type should be 'local' or 'npm'\n")
sys.exit(1)
f_pack_type.close()
with open(const_path + "/../VERSION", "rt") as pkg_version_file:
pkg_version_raw = pkg_version_file.read()
pkg_version_file.close()
pkg_version_json = json.loads(pkg_version_raw)
CROSSWALK_VERSION = pkg_version_json["main-version"]
CROSSWALK_BRANCH = pkg_version_json["crosswalk-branch"]
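# Configuration expected by setUp() above (inferred from the parsing logic;
# the values below are illustrative examples only):
#
#   ../arch.txt         -> "arm" or "x86"
#   ../mode.txt         -> "shared" or "embedded"
#   ../cordova-version  -> "3.6", or anything else (treated as Cordova "4.x")
#   ../pack-type        -> "local" or "npm" (only read when CORDOVA_VERSION is 4.x)
#   ../VERSION          -> JSON such as
#                          {"main-version": "1.2.3.4", "crosswalk-branch": "beta"}
#
# The environment variable DEVICE_ID must also be set to the target adb device.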
def create(appname, pkgname, mode, sourcecodepath, replace_index_list, self):
os.chdir(tool_path)
if os.path.exists(os.path.join(tool_path, appname)):
print "Existing %s project, try to clean up..." % appname
do_remove(glob.glob(os.path.join(tool_path, appname)))
print "Create project %s ----------------> START" % appname
if CORDOVA_VERSION == "4.x":
cmd = "cordova create %s %s %s" % (appname, pkgname, appname)
else:
if mode == "shared":
cmd = "cordova/bin/create %s %s %s --xwalk-shared-library" % (
appname, pkgname, appname)
else:
cmd = "cordova/bin/create %s %s %s" % (appname, pkgname, appname)
createstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, createstatus[0])
print "\nGenerate project %s ----------------> OK\n" % appname
result = commands.getstatusoutput("ls")
self.assertIn(appname, result[1])
project_root = os.path.join(tool_path, appname)
if CORDOVA_VERSION == "4.x":
os.chdir(project_root)
if not replace_key(os.path.join(project_root, 'config.xml'),
'<widget android-activityName="%s"' % appname, '<widget'):
print "replace key '<widget' failed."
return False
if not replace_key(os.path.join(project_root, 'config.xml'),
' <allow-navigation href="*" />\n</widget>', '</widget>'):
print "replace key '</widget>' failed."
return False
print "Add android platforms to this project --------------> START"
cordova_platform_cmd = "cordova platform add android"
platformstatus = commands.getstatusoutput(cordova_platform_cmd)
self.assertEquals(0, platformstatus[0])
print "Install Crosswalk WebView Plugin --------------> START"
version_cmd = ""
if CROSSWALK_BRANCH == "beta":
if MODE == "shared":
version_cmd = "--variable XWALK_VERSION=\"org.xwalk:xwalk_shared_library_beta:%s\"" % CROSSWALK_VERSION
else:
version_cmd = "--variable XWALK_VERSION=\"org.xwalk:xwalk_core_library_beta:%s\"" % CROSSWALK_VERSION
else:
version_cmd = "--variable XWALK_VERSION=\"%s\"" % CROSSWALK_VERSION
plugin_crosswalk_source = plugin_tool
if PACK_TYPE == "npm":
plugin_crosswalk_source = "cordova-plugin-crosswalk-webview"
plugin_install_cmd = "cordova plugin add %s %s --variable XWALK_MODE=\"%s\"" \
% (plugin_crosswalk_source, version_cmd, mode)
pluginstatus = commands.getstatusoutput(plugin_install_cmd)
self.assertEquals(0, pluginstatus[0])
if replace_index_list is not None and len(replace_index_list) >= 2:
index_file_path = os.path.join(project_root, "www", "index.html")
key = replace_index_list[0]
content = replace_index_list[1]
if not replace_key(index_file_path, content, key):
print "replace key: " + key + " failed."
return False
if sourcecodepath is not None:
do_remove(glob.glob(os.path.join(project_root, "www")))
do_copy(sourcecodepath, os.path.join(tool_path, appname, "www"))
else:
if replace_index_list is not None and len(replace_index_list) >= 2:
index_file_path = os.path.join(
project_root,
"assets",
"www",
"index.html")
key = replace_index_list[0]
content = replace_index_list[1]
if not replace_key(index_file_path, content, key):
print "replace key: " + key + " failed."
return False
if sourcecodepath is not None:
do_remove(glob.glob(os.path.join(project_root, "assets", "www")))
do_copy(
sourcecodepath,
os.path.join(
tool_path,
appname,
"assets",
"www"))
def buildGoogleApp(appname, sourcecodepath, self):
os.chdir(tool_path)
if os.path.exists(os.path.join(tool_path, appname)):
print "Existing %s project, try to clean up..." % appname
do_remove(glob.glob(os.path.join(tool_path, appname)))
print "Build project %s ----------------> START" % appname
if sourcecodepath is None:
print "sourcecodepath can't be none"
return False
if checkContains(appname, "CIRC"):
cordova_app = os.path.join(tool_path, "circ")
create_cmd = "cca create " + appname + " --link-to circ/package"
elif checkContains(appname, "EH"):
cordova_app = os.path.join(tool_path, "workshop-cca-eh")
create_cmd = "cca create " + appname + " --link-to workshop-cca-eh/workshop/step4"
if os.path.exists(cordova_app):
do_remove(glob.glob(cordova_app))
if not do_copy(sourcecodepath, cordova_app):
return False
print create_cmd
buildstatus = commands.getstatusoutput(create_cmd)
self.assertEquals(0, buildstatus[0])
os.chdir(os.path.join(tool_path, appname))
print "Add android platforms to this project --------------> START"
add_android_cmd = "cca platform add android"
addstatus = commands.getstatusoutput(add_android_cmd)
self.assertEquals(0, addstatus[0])
print "uninstall webview default plugin from this project --------------> START"
plugin_uninstall_webview = "cordova plugin remove cordova-plugin-crosswalk-webview"
uninstallStatus = commands.getstatusoutput(plugin_uninstall_webview)
self.assertEquals(0, uninstallStatus[0])
print "Install Crosswalk WebView Plugin --------------> START"
version_cmd = ""
if CROSSWALK_BRANCH == "beta":
if MODE == "shared":
version_cmd = "--variable XWALK_VERSION=\"org.xwalk:xwalk_shared_library_beta:%s\"" % CROSSWALK_VERSION
else:
version_cmd = "--variable XWALK_VERSION=\"org.xwalk:xwalk_core_library_beta:%s\"" % CROSSWALK_VERSION
else:
version_cmd = "--variable XWALK_VERSION=\"%s\"" % CROSSWALK_VERSION
plugin_crosswalk_source = plugin_tool
if PACK_TYPE == "npm":
plugin_crosswalk_source = "cordova-plugin-crosswalk-webview"
plugin_install_cmd = "cordova plugin add %s %s --variable XWALK_MODE=\"%s\"" \
% (plugin_crosswalk_source, version_cmd, MODE)
pluginstatus = commands.getstatusoutput(plugin_install_cmd)
self.assertEquals(0, pluginstatus[0])
build_cmd = "cca build android"
buildstatus = commands.getstatusoutput(build_cmd)
self.assertEquals(0, buildstatus[0])
os.chdir(
os.path.join(
tool_path,
appname,
"platforms",
"android",
"build",
"outputs",
"apk"))
result = commands.getstatusoutput("ls")
self.assertIn(".apk", result[1])
print result[1]
if "android" in result[1]:
self.assertIn("android", result[1])
else:
self.assertIn(appname, result[1])
def build(appname, isDebug, self):
os.chdir(os.path.join(tool_path, appname))
print "Build project %s ----------------> START" % appname
if CORDOVA_VERSION == "4.x":
cmd = "cordova build android"
if isDebug == True:
print "build debug app"
cmd = "cordova build android --debug"
else:
cmd = "./cordova/build"
if isDebug == True:
print "build debug app"
cmd = "./cordova/build --debug"
print cmd
buildstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, buildstatus[0])
print "\nBuild project %s ----------------> OK\n" % appname
if CORDOVA_VERSION == "4.x":
os.chdir(
os.path.join(
tool_path,
appname,
"platforms",
"android",
"build",
"outputs",
"apk"))
else:
os.chdir(os.path.join(tool_path, appname, "bin"))
result = commands.getstatusoutput("ls")
self.assertIn(".apk", result[1])
print result[1]
if "android" in result[1]:
self.assertIn("android", result[1])
else:
self.assertIn(appname, result[1])
def run(appname, self):
os.chdir(os.path.join(tool_path, appname))
print "Run project %s ----------------> START" % appname
if CORDOVA_VERSION == "4.x":
cmd = "cordova run android"
else:
cmd = "./cordova/run"
print cmd
runstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, runstatus[0])
self.assertIn("LAUNCH SUCCESS", runstatus[1])
print "\nRun project %s ----------------> OK\n" % appname
def app_install(appname, pkgname, self):
print "Install APK ----------------> START"
os.chdir(testapp_path)
apk_file = commands.getstatusoutput("ls | grep %s" % appname)[1]
if apk_file == "":
print "Error: No app: %s found in directory: %s" % (appname, testapp_path)
cmd_inst = "adb -s " + device + " install -r " + apk_file
inststatus = commands.getstatusoutput(cmd_inst)
self.assertEquals(0, inststatus[0])
print "Install APK ----------------> OK"
self.assertTrue(check_app_installed(pkgname, self))
def checkContains(origin_str=None, key_str=None):
if origin_str.upper().find(key_str.upper()) >= 0:
return True
return False
def check_app_installed(pkgname, self):
print "Check if app is installed ----------------> START"
cmd_find = "adb -s " + device + \
" shell pm list packages |grep %s" % pkgname
pmstatus = commands.getstatusoutput(cmd_find)
if pmstatus[0] == 0:
print "App is installed."
return True
else:
print "App is uninstalled."
return False
def app_launch(appname, pkgname, self):
print "Launch APK ----------------> START"
cmd = "adb -s " + device + " shell am start -n %s/.%s" % (pkgname, appname)
launchstatus = commands.getstatusoutput(cmd)
self.assertNotIn("error", launchstatus[1].lower())
print "Launch APK ----------------> OK"
# Find whether the app have launched
def check_app_launched(pkgname, self):
cmd_acti = "adb -s " + device + " shell ps | grep %s" % pkgname
launched = commands.getstatusoutput(cmd_acti)
if launched[0] != 0:
print "App haven't launched."
return False
else:
print "App is have launched."
return True
def app_stop(pkgname, self):
print "Stop APK ----------------> START"
cmd = "adb -s " + device + " shell am force-stop %s" % pkgname
stopstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, stopstatus[0])
print "Stop APK ----------------> OK"
def app_uninstall(pkgname, self):
print "Uninstall APK ----------------> START"
cmd_uninst = "adb -s " + device + " uninstall %s" % (pkgname)
unistatus = commands.getstatusoutput(cmd_uninst)
self.assertEquals(0, unistatus[0])
print "Uninstall APK ----------------> OK"
def replace_key(file_path, content, key):
print "Replace value ----------------> START"
f = open(file_path, "r")
f_content = f.read()
f.close()
pos = f_content.find(key)
if pos != -1:
f_content = f_content.replace(key, content)
f = open(file_path, "w")
f.write(f_content)
f.close()
else:
print "Fail to replace: %s with: %s in file: %s" % (content, key, file_path)
return False
print "Replace value ----------------> OK"
return True
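# Illustrative use of replace_key(): it rewrites every occurrence of `key`
# with `content` in-place and returns False if `key` is absent, e.g. the
# config.xml tweak performed in create() above ("MyApp" is a placeholder):
#
#   replace_key(os.path.join(project_root, "config.xml"),
#               '<widget android-activityName="MyApp"', '<widget')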
def do_remove(target_file_list=None):
for i_file in target_file_list:
print "Removing %s" % i_file
try:
if os.path.isdir(i_file):
shutil.rmtree(i_file)
else:
os.remove(i_file)
except Exception as e:
print "Fail to remove file %s: %s" % (i_file, e)
return False
return True
def do_copy(src_item=None, dest_item=None):
print "Copying %s to %s" % (src_item, dest_item)
try:
if os.path.isdir(src_item):
overwriteCopy(src_item, dest_item, symlinks=True)
else:
if not os.path.exists(os.path.dirname(dest_item)):
print "Create non-existent dir: %s" % os.path.dirname(dest_item)
os.makedirs(os.path.dirname(dest_item))
shutil.copy2(src_item, dest_item)
except Exception as e:
print "Fail to copy file %s: %s" % (src_item, e)
return False
return True
def overwriteCopy(src, dest, symlinks=False, ignore=None):
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copystat(src, dest)
sub_list = os.listdir(src)
if ignore:
excl = ignore(src, sub_list)
sub_list = [x for x in sub_list if x not in excl]
for i_sub in sub_list:
s_path = os.path.join(src, i_sub)
d_path = os.path.join(dest, i_sub)
if symlinks and os.path.islink(s_path):
if os.path.lexists(d_path):
os.remove(d_path)
os.symlink(os.readlink(s_path), d_path)
try:
s_path_s = os.lstat(s_path)
s_path_mode = stat.S_IMODE(s_path_s.st_mode)
os.lchmod(d_path, s_path_mode)
except Exception:
pass
elif os.path.isdir(s_path):
overwriteCopy(s_path, d_path, symlinks, ignore)
else:
shutil.copy2(s_path, d_path)
|
|
#!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import experiments.nmt
from experiments.nmt import\
RNNEncoderDecoder,\
prototype_state,\
prototype_search_with_coverage_state,\
parse_input
from experiments.nmt.numpy_compat import argpartition
logger = logging.getLogger(__name__)
class Timer(object):
def __init__(self):
self.total = 0
def start(self):
self.start_time = time.time()
def finish(self):
self.total += time.time() - self.start_time
class BeamSearch(object):
def __init__(self, enc_dec):
self.enc_dec = enc_dec
state = self.enc_dec.state
self.eos_id = state['null_sym_target']
self.unk_id = state['unk_sym_target']
def compile(self):
self.comp_repr = self.enc_dec.create_representation_computer()
# added by Zhaopeng Tu, 2015-12-17, for fertility
if self.enc_dec.state['maintain_coverage'] and self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['use_fertility_model']:
self.comp_fert = self.enc_dec.create_fertility_computer()
self.comp_init_states = self.enc_dec.create_initializers()
self.comp_next_probs = self.enc_dec.create_next_probs_computer()
self.comp_next_states = self.enc_dec.create_next_states_computer()
def search(self, seq, n_samples, ignore_unk=False, minlen=1):
c = self.comp_repr(seq)[0]
states = map(lambda x : x[None, :], self.comp_init_states(c))
dim = states[0].shape[1]
# added by Zhaopeng Tu, 2015-11-02
if self.enc_dec.state['maintain_coverage']:
coverage_dim = self.enc_dec.state['coverage_dim']
if self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['coverage_accumulated_operation'] == 'subtractive':
coverages = numpy.ones((c.shape[0], 1, coverage_dim), dtype='float32')
else:
coverages = numpy.zeros((c.shape[0], 1, coverage_dim), dtype='float32')
fin_coverages = []
else:
coverages = None
if self.enc_dec.state['maintain_coverage'] and self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['use_fertility_model']:
fertility = self.comp_fert(c)
else:
fertility = None
num_levels = len(states)
fin_trans = []
fin_costs = []
fin_aligns = []
trans = [[]]
aligns = [[]]
costs = [0.0]
for k in range(3 * len(seq)):
if n_samples == 0:
break
# Compute probabilities of the next words for
# all the elements of the beam.
beam_size = len(trans)
last_words = (numpy.array(map(lambda t : t[-1], trans))
if k > 0
else numpy.zeros(beam_size, dtype="int64"))
results = self.comp_next_probs(c, k, last_words, *states, coverage_before=coverages, fertility=fertility)
log_probs = numpy.log(results[0])
# alignment shape: (source_len, beam_size)
alignment = results[1]
# Adjust log probs according to search restrictions
if ignore_unk:
log_probs[:,self.unk_id] = -numpy.inf
# TODO: report me in the paper!!!
if k < minlen:
log_probs[:,self.eos_id] = -numpy.inf
# Find the best options by calling argpartition on the flattened array
next_costs = numpy.array(costs)[:, None] - log_probs
flat_next_costs = next_costs.flatten()
best_costs_indices = argpartition(
flat_next_costs.flatten(),
n_samples)[:n_samples]
# Decipher flattened indices
voc_size = log_probs.shape[1]
trans_indices = best_costs_indices / voc_size
word_indices = best_costs_indices % voc_size
costs = flat_next_costs[best_costs_indices]
# Form a beam for the next iteration
new_trans = [[]] * n_samples
new_aligns = [[]] * n_samples
new_costs = numpy.zeros(n_samples)
new_states = [numpy.zeros((n_samples, dim), dtype="float32") for level
in range(num_levels)]
inputs = numpy.zeros(n_samples, dtype="int64")
if self.enc_dec.state['maintain_coverage']:
new_coverages = numpy.zeros((c.shape[0], n_samples, coverage_dim), dtype='float32')
else:
new_coverages = None
for i, (orig_idx, next_word, next_cost) in enumerate(
zip(trans_indices, word_indices, costs)):
new_trans[i] = trans[orig_idx] + [next_word]
# alignment shape: (source_len, beam_size)
new_aligns[i] = aligns[orig_idx] + [alignment[:,orig_idx]]
new_costs[i] = next_cost
for level in range(num_levels):
new_states[level][i] = states[level][orig_idx]
inputs[i] = next_word
if self.enc_dec.state['maintain_coverage']:
new_coverages[:,i,:] = coverages[:,orig_idx,:]
new_states = self.comp_next_states(c, k, inputs, *new_states, coverage_before=new_coverages, fertility=fertility)
if self.enc_dec.state['maintain_coverage']:
new_coverages = new_states[-1]
new_states = new_states[:-1]
# Filter the sequences that end with end-of-sequence character
trans = []
aligns = []
costs = []
indices = []
for i in range(n_samples):
if new_trans[i][-1] != self.enc_dec.state['null_sym_target']:
trans.append(new_trans[i])
aligns.append(new_aligns[i])
costs.append(new_costs[i])
indices.append(i)
else:
n_samples -= 1
fin_trans.append(new_trans[i])
fin_aligns.append(new_aligns[i])
fin_costs.append(new_costs[i])
if self.enc_dec.state['maintain_coverage']:
fin_coverages.append(new_coverages[:,i,0])
states = map(lambda x : x[indices], new_states)
if self.enc_dec.state['maintain_coverage']:
coverages = numpy.zeros((c.shape[0], n_samples, coverage_dim), dtype='float32')
for i in xrange(n_samples):
coverages[:,i,:] = new_coverages[:, indices[i], :]
# Dirty tricks to obtain any translation
if not len(fin_trans):
if ignore_unk:
logger.warning("Did not manage without UNK")
return self.search(seq, n_samples, False, minlen)
elif n_samples < 100:
logger.warning("Still no translations: try beam size {}".format(n_samples * 2))
return self.search(seq, n_samples * 2, False, minlen)
else:
fin_trans = trans
fin_aligns = aligns
fin_costs = costs
if self.enc_dec.state['maintain_coverage']:
fin_coverages = coverages[:,:,0].transpose().tolist()
logger.error("Translation failed")
fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
fin_aligns = numpy.array(fin_aligns)[numpy.argsort(fin_costs)]
if self.enc_dec.state['maintain_coverage']:
fin_coverages = numpy.array(fin_coverages)[numpy.argsort(fin_costs)]
fin_costs = numpy.array(sorted(fin_costs))
if self.enc_dec.state['maintain_coverage']:
if self.enc_dec.state['use_linguistic_coverage'] and self.enc_dec.state['use_fertility_model']:
return fin_trans, fin_aligns, fin_costs, fin_coverages, fertility
else:
return fin_trans, fin_aligns, fin_costs, fin_coverages
else:
return fin_trans, fin_aligns, fin_costs
def indices_to_words(i2w, seq):
sen = []
for k in xrange(len(seq)):
if i2w[seq[k]] == '<eol>':
break
sen.append(i2w[seq[k]])
return sen
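# indices_to_words() truncates at the first '<eol>' token, e.g. (illustrative):
#   indices_to_words({0: 'hello', 1: 'world', 2: '<eol>'}, [0, 1, 2, 1])
#   -> ['hello', 'world']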
def sample(lm_model, seq, n_samples,
sampler=None, beam_search=None,
ignore_unk=False, normalize=False,
alpha=1, verbose=False):
if beam_search:
sentences = []
if lm_model.maintain_coverage:
if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:
trans, aligns, costs, coverages, fertility = beam_search.search(seq, n_samples,
ignore_unk=ignore_unk, minlen=len(seq) / 2)
else:
trans, aligns, costs, coverages = beam_search.search(seq, n_samples,
ignore_unk=ignore_unk, minlen=len(seq) / 2)
else:
trans, aligns, costs = beam_search.search(seq, n_samples,
ignore_unk=ignore_unk, minlen=len(seq) / 2)
if normalize:
counts = [len(s) for s in trans]
costs = [co / cn for co, cn in zip(costs, counts)]
for i in range(len(trans)):
sen = indices_to_words(lm_model.word_indxs, trans[i])
sentences.append(" ".join(sen))
for i in range(len(costs)):
if verbose:
print "{}: {}".format(costs[i], sentences[i])
if lm_model.maintain_coverage:
if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:
return sentences, aligns, costs, coverages, fertility, trans
else:
return sentences, aligns, costs, coverages, trans
else:
return sentences, aligns, costs, trans
elif sampler:
sentences = []
all_probs = []
costs = []
values, cond_probs = sampler(n_samples, 3 * (len(seq) - 1), alpha, seq)
for sidx in xrange(n_samples):
sen = []
for k in xrange(values.shape[0]):
if lm_model.word_indxs[values[k, sidx]] == '<eol>':
break
sen.append(lm_model.word_indxs[values[k, sidx]])
sentences.append(" ".join(sen))
probs = cond_probs[:, sidx]
probs = numpy.array(cond_probs[:len(sen) + 1, sidx])
all_probs.append(numpy.exp(-probs))
costs.append(-numpy.sum(probs))
if normalize:
counts = [len(s.strip().split(" ")) for s in sentences]
costs = [co / cn for co, cn in zip(costs, counts)]
sprobs = numpy.argsort(costs)
if verbose:
for pidx in sprobs:
print "{}: {} {} {}".format(pidx, -costs[pidx], all_probs[pidx], sentences[pidx])
print
return sentences, costs, None
else:
raise Exception("I don't know what to do")
def parse_args():
parser = argparse.ArgumentParser(
"Sample (of find with beam-serch) translations from a translation model")
parser.add_argument("--state",
required=True, help="State to use")
parser.add_argument("--beam-search",
action="store_true", help="Beam size, turns on beam-search")
parser.add_argument("--beam-size",
type=int, help="Beam size")
parser.add_argument("--ignore-unk",
default=False, action="store_true",
help="Ignore unknown words")
parser.add_argument("--source",
help="File of source sentences")
parser.add_argument("--trans",
help="File to save translations in")
parser.add_argument("--normalize",
action="store_true", default=False,
help="Normalize log-prob with the word count")
parser.add_argument("--verbose",
action="store_true", default=False,
help="Be verbose")
parser.add_argument("model_path",
help="Path to the model")
parser.add_argument("changes",
nargs="?", default="",
help="Changes to state")
return parser.parse_args()
def main():
args = parse_args()
state = prototype_search_with_coverage_state()
with open(args.state) as src:
state.update(cPickle.load(src))
state.update(eval("dict({})".format(args.changes)))
logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
rng = numpy.random.RandomState(state['seed'])
enc_dec = RNNEncoderDecoder(state, rng, skip_init=True, compute_alignment=True)
enc_dec.build()
lm_model = enc_dec.create_lm_model()
lm_model.load(args.model_path)
indx_word = cPickle.load(open(state['word_indx'],'rb'))
sampler = None
beam_search = None
if args.beam_search:
beam_search = BeamSearch(enc_dec)
beam_search.compile()
else:
sampler = enc_dec.create_sampler(many_samples=True)
idict_src = cPickle.load(open(state['indx_word'],'r'))
if args.source and args.trans:
# Actually only beam search is currently supported here
assert beam_search
assert args.beam_size
fsrc = open(args.source, 'r')
ftrans = open(args.trans, 'w')
start_time = time.time()
n_samples = args.beam_size
total_cost = 0.0
logging.debug("Beam size: {}".format(n_samples))
for i, line in enumerate(fsrc):
seqin = line.strip()
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
if lm_model.maintain_coverage:
if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:
trans, aligns, costs, coverages, fertility, _ = sample(lm_model, seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)
else:
trans, aligns, costs, coverages, _ = sample(lm_model, seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)
else:
trans, aligns, costs, _ = sample(lm_model, seq, n_samples, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk, normalize=args.normalize)
if args.verbose:
print "Parsed Input:", parsed_in
if len(trans) == 0:
trans = ['Failed']
costs = [0.0]
best = numpy.argmin(costs)
print >>ftrans, trans[best]
if args.verbose:
print "Translation:", trans[best]
print "Aligns:"
# aligns shape: (target_len, source_len)
# we reverse it to the shape (source_len, target_len) to show the matrix
print numpy.array(aligns[best]).transpose().tolist()
if lm_model.maintain_coverage:
# since we filtered <eos> from trans[best], the index is offset by 1
coverage = coverages[best]
print "Coverage:",
words = parsed_in.split()
for k in xrange(len(words)):
print '%s/%.2f'%(words[k], coverage[k]),
print ''
if lm_model.use_linguistic_coverage and lm_model.use_fertility_model:
print 'Fertility: ',
for k in xrange(len(words)):
print '%s/%.2f'%(words[k], fertility[k]),
print ''
print
total_cost += costs[best]
if (i + 1) % 100 == 0:
ftrans.flush()
logger.debug("Current speed is {} per sentence".
format((time.time() - start_time) / (i + 1)))
print "Total cost of the translations: {}".format(total_cost)
fsrc.close()
ftrans.close()
else:
while True:
try:
seqin = raw_input('Input Sequence: ')
n_samples = int(raw_input('How many samples? '))
alpha = None
if not args.beam_search:
alpha = float(raw_input('Inverse Temperature? '))
seq,parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
print "Parsed Input:", parsed_in
except Exception:
print "Exception while parsing your input:"
traceback.print_exc()
continue
sample(lm_model, seq, n_samples, sampler=sampler,
beam_search=beam_search,
ignore_unk=args.ignore_unk, normalize=args.normalize,
alpha=alpha, verbose=True)
if __name__ == "__main__":
main()
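# Example invocation (file names are placeholders, not taken from this script):
#
#   python sample.py --state search_state.pkl --beam-search --beam-size 10 \
#       --source source.txt --trans out.txt model.npz
#
# Without --source/--trans the script falls back to the interactive prompt
# handled in main() above.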
|
|
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import subprocess
import textwrap
from cinder.volume import configuration
from cinder.compute import nova
OrderedDict = collections.OrderedDict
BASEDIR = os.path.split(os.path.realpath(__file__))[0] + "/../../"
if __name__ == "__main__":
os.chdir(BASEDIR)
opt_file = open("cinder/opts.py", 'w')
opt_dict = OrderedDict()
dir_trees_list = []
REGISTER_OPTS_STR = "CONF.register_opts("
REGISTER_OPT_STR = "CONF.register_opt("
license_str = textwrap.dedent(
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.\n
""")
opt_file.write(license_str)
edit_header = textwrap.dedent(
"""
###################################################################
# WARNING!
#
# Do not edit this file directly. This file should be generated by
# running the command "tox -e genopts" any time a config option
# has been added, changed, or removed.
###################################################################\n
""")
opt_file.write(edit_header)
opt_file.write("import itertools\n\n")
opt_file.write("from keystoneauth1 import loading\n\n")
# NOTE(geguileo): We need to register all OVOs before importing any other
# cinder files, otherwise any decorator that uses cinder.objects.YYY will
# fail with exception AttributeError: 'module' object has no attribute
# 'YYY' when running tox -egenconfig
opt_file.write(
"from cinder import objects # noqa\nobjects.register_all()\n")
targetdir = 'cinder'
common_string = ('find ' + targetdir + ' -type f -name "*.py" ! '
'-path "*/tests/*" -exec grep -l "%s" {} '
'+ | sed -e "s|^' + BASEDIR +
'|/|g" | sort -u')
cmd_opts = common_string % REGISTER_OPTS_STR
output_opts = subprocess.check_output( # nosec : command is hardcoded
'{}'.format(cmd_opts), shell=True,
universal_newlines=True)
dir_trees_list = output_opts.split()
cmd_opt = common_string % REGISTER_OPT_STR
output_opt = subprocess.check_output( # nosec : command is hardcoded
'{}'.format(cmd_opt), shell=True,
universal_newlines=True)
temp_list = output_opt.split()
for item in temp_list:
dir_trees_list.append(item)
dir_trees_list.sort()
flag = False
def _check_import(aline):
if len(aline) > 79:
new_lines = aline.partition(' as ')
return new_lines
else:
return [aline]
for atree in dir_trees_list:
if atree in ["tools/config/generate_cinder_opts.py",
"cinder/tests/hacking/checks.py",
"cinder/volume/configuration.py",
"cinder/test.py",
"cinder/cmd/status.py"]:
continue
dirs_list = atree.split('/')
import_module = "from "
init_import_module = ""
import_name = ""
for dir in dirs_list:
if dir.find(".py") == -1:
import_module += dir + "."
init_import_module += dir + "."
import_name += dir + "_"
else:
if dir[:-3] != "__init__":
import_name += dir[:-3].replace("_", "")
import_module = (import_module[:-1] + " import " +
dir[:-3] + " as " + import_name)
lines = _check_import(import_module)
if len(lines) > 1:
opt_file.write(lines[0] + lines[1] + "\\\n")
opt_file.write(" " + lines[2] + "\n")
else:
opt_file.write(lines[0] + "\n")
else:
import_name = import_name[:-1].replace('/', '.')
init_import = atree[:-12].replace('/', '.')
opt_file.write("import " + init_import + "\n")
flag = True
if flag is False:
opt_dict[import_name] = atree
else:
opt_dict[init_import_module.strip(".")] = atree
flag = False
registered_opts_dict = OrderedDict([('DEFAULT', [])])
def _write_item(opts):
list_name = opts[-3:]
if list_name.lower() == "opt":
line_to_write = " [" + opts.strip("\n") + "],\n"
opt_line = _check_line_length(line_to_write)
if len(opt_line) > 1:
opt_file.write(opt_line[0] + opt_line[1] + "\n")
opt_file.write(" " + opt_line[2])
else:
opt_file.write(opt_line[0])
else:
line_to_write = " " + opts.strip("\n") + ",\n"
opt_line = _check_line_length(line_to_write)
if len(opt_line) > 1:
opt_file.write(opt_line[0] + opt_line[1] + "\n")
opt_file.write(" " + opt_line[2])
else:
opt_file.write(opt_line[0])
if opts.endswith('service_user_opts'):
su_dnt = " " * 16
su_plg = su_dnt + "loading.get_auth_plugin_conf_options"
opt_file.write(
su_plg + "('v3password'),\n"
+ su_dnt + "loading.get_session_conf_options(),\n")
def _retrieve_name(aline):
if REGISTER_OPT_STR in aline:
str_to_replace = REGISTER_OPT_STR
else:
str_to_replace = REGISTER_OPTS_STR
return aline.replace(str_to_replace, "")
def _check_line_length(aline):
if len(aline) > 79:
temp = aline.split(".")
lines_to_write = []
for section in temp:
lines_to_write.append(section)
lines_to_write.append('.')
return lines_to_write
else:
return [aline]
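# For illustration (comment only; the module/option names are made up): an
# over-long generated line such as
#     "            [cinder_volume_drivers_foo.foo_opts],\n"
# comes back from _check_line_length() as the pieces split on '.', each
# followed by a '.' element, so _write_item() can re-join them across two
# physical lines and stay under the 79-character limit.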
for key in opt_dict:
fd = os.open(opt_dict[key], os.O_RDONLY)
afile = os.fdopen(fd, "r")
for aline in afile:
exists = aline.find("CONF.register_opt")
if exists != -1:
# TODO(kjnelson) FIX THIS LATER. These are instances where
# CONF.register_opts is happening without actually registering
# real lists of opts
exists = aline.find('base_san_opts')
if (exists != -1) or (key == 'cinder_volume_configuration'):
continue
group_exists = aline.find(', group=')
formatted_opt = _retrieve_name(aline[: group_exists])
formatted_opt = formatted_opt.replace(')', '').strip()
if group_exists != -1:
group_name = aline[group_exists:-1].replace(
', group=\"\'', '').replace(
', group=', '').strip(
"\'\")").upper()
# NOTE(dulek): Hack to resolve constants manually.
if (group_name.endswith('SHARED_CONF_GROUP')
or group_name.lower() == 'backend_defaults'):
group_name = configuration.SHARED_CONF_GROUP
if (group_name == 'NOVA_GROUP'):
group_name = nova.NOVA_GROUP
if group_name in registered_opts_dict:
line = key + "." + formatted_opt
registered_opts_dict[group_name].append(line)
else:
line = key + "." + formatted_opt
registered_opts_dict[group_name] = [line]
else:
line = key + "." + formatted_opt
registered_opts_dict['DEFAULT'].append(line)
setup_str = ("\n\n"
"def list_opts():\n"
" return [\n")
opt_file.write(setup_str)
registered_opts_dict = OrderedDict(sorted(registered_opts_dict.items(),
key = lambda x: x[0]))
for key in registered_opts_dict:
# NOTE(jsbryant): We need to have 'DEFAULT' in uppercase but any
# other section using uppercase causes a Sphinx warning.
if (key == 'DEFAULT'):
section_start_str = (" ('" + key + "',\n"
" itertools.chain(\n")
else:
section_start_str = (" ('" + key.lower() + "',\n"
" itertools.chain(\n")
opt_file.write(section_start_str)
for item in registered_opts_dict[key]:
_write_item(item)
section_end_str = " )),\n"
opt_file.write(section_end_str)
closing_str = (" ]\n")
opt_file.write(closing_str)
opt_file.close()
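# For reference, the generated cinder/opts.py produced by the writes above has
# roughly this shape (a sketch only; the module and option names below are
# illustrative, not an exact transcript of the generated file):
#
#     import itertools
#
#     from cinder import objects  # noqa
#     objects.register_all()
#
#     from cinder.volume import driver as cinder_volume_driver
#
#     def list_opts():
#         return [
#             ('DEFAULT',
#                 itertools.chain(
#                     [cinder_volume_driver.some_opt],
#                 )),
#         ]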
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.scatterpolargl.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolargl.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
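# Minimal usage sketch (illustrative, not part of the generated module): the
# properties validated above are normally supplied through the parent trace,
# e.g. via plotly.graph_objects:
#
#     import plotly.graph_objects as go
#
#     trace = go.Scatterpolargl(
#         r=[1, 2, 3],
#         theta=[0, 45, 90],
#         hoverlabel=dict(bgcolor="white", align="left", namelength=-1),
#     )
#     trace.hoverlabel  # -> a scatterpolargl.Hoverlabel instance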
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
###### THIS IS A MODIFIED VERSION OF TORNADO'S HTTPSERVER FROM TORNADO 2.2 #######
#
# It has been modified to support a callback after headers finish, and
# another callback on close.
#
# HTTPRequest.__repr__ has also been modified to not show body (POST can
# contain sensitive data) or sensitive headers, since HTTPRequest is repr'ed
# when tornado logs errors.
#
# These changes will most likely need to be ported to a new version if you
# ever want to upgrade tornado.
##################################################################################
#
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded HTTP server.
Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
This module also defines the `HTTPRequest` class which is exposed via
`tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, with_statement
import Cookie
import logging
import socket
import time
import urlparse
from tornado.escape import utf8, native_str, parse_qs_bytes
from tornado import httputil
from tornado import iostream
from tornado.netutil import TCPServer
from tornado import stack_context
from tornado.util import b, bytes_type
from zygote.util import sanitize_headers
try:
import ssl # Python 2.6+
except ImportError:
ssl = None
class HTTPServer(TCPServer):
r"""A non-blocking, single-threaded HTTP server.
A server is defined by a request callback that takes an HTTPRequest
instance as an argument and writes a valid HTTP response with
`HTTPRequest.write`. `HTTPRequest.finish` finishes the request (but does
not necessarily close the connection in the case of HTTP/1.1 keep-alive
requests). A simple example server that echoes back the URI you
requested::
import httpserver
import ioloop
def handle_request(request):
message = "You requested %s\n" % request.uri
request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
len(message), message))
request.finish()
http_server = httpserver.HTTPServer(handle_request)
http_server.listen(8888)
ioloop.IOLoop.instance().start()
`HTTPServer` is a very basic connection handler. Beyond parsing the
HTTP request body and headers, the only HTTP semantics implemented
in `HTTPServer` is HTTP/1.1 keep-alive connections. We do not, however,
implement chunked encoding, so the request callback must provide a
``Content-Length`` header or implement chunked encoding for HTTP/1.1
requests for the server to run correctly for HTTP/1.1 clients. If
the request handler is unable to do this, you can provide the
``no_keep_alive`` argument to the `HTTPServer` constructor, which will
ensure the connection is closed on every request no matter what HTTP
version the client is using.
If ``xheaders`` is ``True``, we support the ``X-Real-Ip`` and ``X-Scheme``
headers, which override the remote IP and HTTP scheme for all requests.
These headers are useful when running Tornado behind a reverse proxy or
load balancer.
`HTTPServer` can serve SSL traffic with Python 2.6+ and OpenSSL.
To make this server serve SSL traffic, send the ssl_options dictionary
argument with the arguments required for the `ssl.wrap_socket` method,
including "certfile" and "keyfile"::
HTTPServer(application, ssl_options={
"certfile": os.path.join(data_dir, "mydomain.crt"),
"keyfile": os.path.join(data_dir, "mydomain.key"),
})
`HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.netutil.TCPServer`):
1. `~tornado.netutil.TCPServer.listen`: simple single-process::
server = HTTPServer(app)
server.listen(8888)
IOLoop.instance().start()
In many cases, `tornado.web.Application.listen` can be used to avoid
the need to explicitly create the `HTTPServer`.
2. `~tornado.netutil.TCPServer.bind`/`~tornado.netutil.TCPServer.start`:
simple multi-process::
server = HTTPServer(app)
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.instance().start()
When using this interface, an `IOLoop` must *not* be passed
to the `HTTPServer` constructor. `start` will always start
the server on the default singleton `IOLoop`.
3. `~tornado.netutil.TCPServer.add_sockets`: advanced multi-process::
sockets = tornado.netutil.bind_sockets(8888)
tornado.process.fork_processes(0)
server = HTTPServer(app)
server.add_sockets(sockets)
IOLoop.instance().start()
The `add_sockets` interface is more complicated, but it can be
used with `tornado.process.fork_processes` to give you more
flexibility in when the fork happens. `add_sockets` can
also be used in single-process servers if you want to create
your listening sockets in some way other than
`tornado.netutil.bind_sockets`.
"""
def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
xheaders=False, ssl_options=None, headers_callback = None,
close_callback = None, **kwargs):
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self._headers_callback = headers_callback
self._close_callback = close_callback
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
**kwargs)
def handle_stream(self, stream, address):
HTTPConnection(stream, address, self.request_callback,
self.no_keep_alive, self.xheaders,
self._headers_callback, self._close_callback)
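# Usage sketch for the zygote-specific hooks added in this fork (the handler
# names below are hypothetical). The headers callback receives the request
# line, the remote IP and the parsed headers (see HTTPConnection._on_headers);
# the close callback receives the disconnect flag (see
# HTTPConnection._finish_request).
#
#     def on_headers(start_line, remote_ip, headers):
#         logging.info("started %s from %s", start_line, remote_ip)
#
#     def on_close(disconnect):
#         logging.info("finished, disconnect=%s", disconnect)
#
#     server = HTTPServer(handle_request,
#                         headers_callback=on_headers,
#                         close_callback=on_close)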
class _BadRequestException(Exception):
"""Exception class for malformed HTTP requests."""
pass
class HTTPConnection(object):
"""Handles a connection to an HTTP client, executing HTTP requests.
We parse HTTP headers and bodies, and execute the request callback
until the HTTP connection is closed.
"""
def __init__(self, stream, address, request_callback, no_keep_alive=False,
xheaders=False, headers_callback=None, close_callback=None):
self.stream = stream
if self.stream.socket.family not in (socket.AF_INET, socket.AF_INET6):
# Unix (or other) socket; fake the remote address
address = ('0.0.0.0', 0)
self.address = address
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self._request = None
self._request_finished = False
# Save stack context here, outside of any request. This keeps
# contexts from one request from leaking into the next.
self._header_callback = stack_context.wrap(self._on_headers)
if headers_callback:
self.on_headers = stack_context.wrap(headers_callback)
else:
self.on_headers = lambda *args: None
if close_callback:
self.on_finish = stack_context.wrap(close_callback)
else:
self.on_finish = lambda *args: None
self.stream.read_until(b("\r\n\r\n"), self._header_callback)
self._write_callback = None
def write(self, chunk, callback=None):
"""Writes a chunk of output to the stream."""
assert self._request, "Request closed"
if not self.stream.closed():
self._write_callback = stack_context.wrap(callback)
self.stream.write(chunk, self._on_write_complete)
def finish(self):
"""Finishes the request."""
assert self._request, "Request closed"
self._request_finished = True
if not self.stream.writing():
self._finish_request()
def _on_write_complete(self):
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
callback()
# _on_write_complete is enqueued on the IOLoop whenever the
# IOStream's write buffer becomes empty, but it's possible for
# another callback that runs on the IOLoop before it to
# simultaneously write more data and finish the request. If
# there is still data in the IOStream, a future
# _on_write_complete will be responsible for calling
# _finish_request.
if self._request_finished and not self.stream.writing():
self._finish_request()
def _finish_request(self):
if self.no_keep_alive:
disconnect = True
else:
connection_header = self._request.headers.get("Connection")
if connection_header is not None:
connection_header = connection_header.lower()
if self._request.supports_http_1_1():
disconnect = connection_header == "close"
elif ("Content-Length" in self._request.headers
or self._request.method in ("HEAD", "GET")):
disconnect = connection_header != "keep-alive"
else:
disconnect = True
self._request = None
self._request_finished = False
self.on_finish(disconnect)
if disconnect:
self.stream.close()
return
self.stream.read_until(b("\r\n\r\n"), self._header_callback)
def _on_headers(self, data):
try:
data = native_str(data.decode('latin1'))
eol = data.find("\r\n")
start_line = data[:eol]
try:
method, uri, version = start_line.split(" ")
except ValueError:
raise _BadRequestException("Malformed HTTP request line")
if not version.startswith("HTTP/"):
raise _BadRequestException("Malformed HTTP version in HTTP Request-Line")
headers = httputil.HTTPHeaders.parse(data[eol:])
self._request = HTTPRequest(
connection=self, method=method, uri=uri, version=version,
headers=headers, remote_ip=self.address[0])
content_length = headers.get("Content-Length")
if content_length:
content_length = int(content_length)
if content_length > self.stream.max_buffer_size:
raise _BadRequestException("Content-Length too long")
if headers.get("Expect") == "100-continue":
self.stream.write(b("HTTP/1.1 100 (Continue)\r\n\r\n"))
self.stream.read_bytes(content_length, self._on_request_body)
return
self.on_headers(start_line, self.address[0], headers)
self.request_callback(self._request)
except _BadRequestException, e:
logging.info("Malformed HTTP request from %s: %s",
self.address[0], e)
self.stream.close()
return
def _on_request_body(self, data):
self._request.body = data
content_type = self._request.headers.get("Content-Type", "")
if self._request.method in ("POST", "PUT"):
if content_type.startswith("application/x-www-form-urlencoded"):
arguments = parse_qs_bytes(native_str(self._request.body))
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values:
self._request.arguments.setdefault(name, []).extend(
values)
elif content_type.startswith("multipart/form-data"):
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
httputil.parse_multipart_form_data(
utf8(v), data,
self._request.arguments,
self._request.files)
break
else:
logging.warning("Invalid multipart/form-data")
self.request_callback(self._request)
class HTTPRequest(object):
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`HTTPHeader` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If `HTTPServer.xheaders` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` header
.. attribute:: protocol
The protocol used, either "http" or "https". If `HTTPServer.xheaders`
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
maps argument names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of :class:`HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
"""
def __init__(self, method, uri, version="HTTP/1.0", headers=None,
body=None, remote_ip=None, protocol=None, host=None,
files=None, connection=None):
self.method = method
self.uri = uri
self.version = version
self.headers = headers or httputil.HTTPHeaders()
self.body = body or ""
if connection and connection.xheaders:
# Squid uses X-Forwarded-For, others use X-Real-Ip
self.remote_ip = self.headers.get(
"X-Real-Ip", self.headers.get("X-Forwarded-For", remote_ip))
if not self._valid_ip(self.remote_ip):
self.remote_ip = remote_ip
# AWS uses X-Forwarded-Proto
self.protocol = self.headers.get(
"X-Scheme", self.headers.get("X-Forwarded-Proto", protocol))
if self.protocol not in ("http", "https"):
self.protocol = "http"
else:
self.remote_ip = remote_ip
if protocol:
self.protocol = protocol
elif connection and isinstance(connection.stream,
iostream.SSLIOStream):
self.protocol = "https"
else:
self.protocol = "http"
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.files = files or {}
self.connection = connection
self._start_time = time.time()
self._finish_time = None
scheme, netloc, path, query, fragment = urlparse.urlsplit(native_str(uri))
self.path = path
self.query = query
arguments = parse_qs_bytes(query)
self.arguments = {}
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values:
self.arguments[name] = values
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
self._cookies.load(
native_str(self.headers["Cookie"]))
except Exception:
self._cookies = {}
return self._cookies
def write(self, chunk, callback=None):
"""Writes the given chunk to the response stream."""
assert isinstance(chunk, bytes_type)
self.connection.write(chunk, callback=callback)
def finish(self):
"""Finishes this HTTP request on the open connection."""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer must have been constructed
with cert_reqs set in ssl_options, e.g.::
server = HTTPServer(app,
ssl_options=dict(
certfile="foo.crt",
keyfile="foo.key",
cert_reqs=ssl.CERT_REQUIRED,
ca_certs="cacert.crt"))
The return value is a dictionary, see SSLSocket.getpeercert() in
the standard library for more details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert()
except ssl.SSLError:
return None
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, sanitize_headers(self.headers))
def _valid_ip(self, ip):
try:
res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
socket.SOCK_STREAM,
0, socket.AI_NUMERICHOST)
return bool(res)
except socket.gaierror, e:
if e.args[0] == socket.EAI_NONAME:
return False
raise
return True
|
|
"""
Koalas 0.0.1
abstract pandas utilities
"""
import os
import sys
import pandas
import numpy as np
import bokeh
from bokeh import charts, io
modself = sys.modules[__name__]
class AbstractFrame(object):
"""
Abstract frame wrapper
Passes ALL to self.frame
Can be initialized with pandas.DataFrame, pandas.Series, koalas.AbstractFrame or just lists of lists
Adds columns as attributes so that frame.column works as well as frame['column'] (get, set & del)
Does some series magic so that frame[pandas.Series] casts the result to self.__class__
"""
def __init__(self, frame=None, **options):
if type(frame) in (pandas.DataFrame, pandas.core.series.Series) or frame: # check that frame is not None (without calling bool() on a DataFrame/Series)
if frame.__class__ == pandas.core.series.Series: frame = pandas.DataFrame(frame)
elif frame.__class__ == pandas.DataFrame: pass
elif isinstance(frame, AbstractFrame):
opts = frame._options
opts.update(options)
options = opts
frame = frame.frame
else: frame = pandas.DataFrame(frame)
else: frame = pandas.DataFrame()
self.frame = frame
self._options = options
self._callback('init')
def __getitem__(self, key):
if key.__class__ == pandas.core.series.Series: return self.cast(self.frame[key])
return self.frame[key]
def __setitem__(self, key, val):
self.frame[key] = val
def __delitem__(self, key):
del self.frame[key]
def __getattr__(self, key):
if key in self.frame.columns: return self.frame[key]
if key == 'peek': return self.frame.iloc[0:5]
return self.frame.__getattribute__(key)
def __setattr__(self, key, val):
if key == 'frame' or key[0] == '_': object.__setattr__(self, key, val)
elif key in self.frame.columns: self.frame[key] = val
else:
if key in self.frame.__dict__: setattr(self.frame, key, val) # @TODO still doesn't work for af.columns=
else: self.frame.__setitem__(key, val)
def __delattr__(self, key):
self.frame.__delitem__(key)
def __dir__(self):
return self.__dict__.keys()+list(self.frame.columns)+dir(self.frame)
def _callback(self, cbname, **kwargs):
meth = getattr(self, 'on%s%s'%(cbname[0].upper(), cbname[1:]), None)
if callable(meth): meth(**kwargs)
def load(self, **kwargs):
self._callback('load', **kwargs)
return self
def cast(self, frame, **options):
"Cast as same class as self, with options inherited"
opts = {}
opts.update(self._options)
opts.update(options)
return self.__class__(frame, **opts)
def asaf(self, type=None):
if not type: type = AbstractFrame
if not isinstance(self, type):
return type(self.frame)
return self
def capply(self, *args, **kwargs):
return self.cast(self.frame.apply(*args, **kwargs))
# monkeypatch
def asaf(self, type=AbstractFrame):
return type(self)
pandas.DataFrame.asaf = asaf
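# Usage sketch (illustrative): attribute access and boolean-Series indexing
# both go through AbstractFrame as described in the class docstring above.
#
#     af = AbstractFrame(pandas.DataFrame({'a': [1, 2, 3], 'b': [4, 0, 6]}))
#     af.a.tolist()           # -> [1, 2, 3], column access via attribute
#     af[af.b > 0].frame      # boolean indexing, cast back to AbstractFrame
#     pandas.DataFrame({'a': [1]}).asaf()   # monkeypatched DataFrame.asaf()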
def firstDayOfWeek(date):
"Assuming monday"
return (date - pandas.Timedelta(days=date.weekday())).date()
class UtilMixin():
# utilities
def takeIndexes(self, index):
"Filter frame by given index"
return self.loc[index].asaf(self.__class__) # much faster (test it :D)
#return self[self.frame.apply(lambda row: row.name in index, axis=1)]
def groupSizeSort(self, field, ascending=False):
"Group by given field, size() and sort (ascending=False by default)"
grouped = self.frame.groupby(field).size()
grouped.sort(ascending=ascending, inplace=True)
return grouped
def occurencePerColumn(self, val):
return self.transpose().apply(lambda row: float(len(row[row==val]))/len(row[row.notnull()]), axis=1)
def valuesPerColumn(self):
return self.apply(lambda col: col.value_counts(), axis=0)
def booleanize(self):
"Replace all values greater than 0 with 1"
def rowToBitArray(row):
return map(lambda d: d if pandas.isnull(d) else (1 if d > 0 else 0), row)
return self.cast(self.frame.apply(rowToBitArray, axis=1))
class NumberMixin():
"Utils for frames that have series of numbers (e.g. counts or users' launches/week)"
#def percentiles(self, field, percentiles=10):
# pass
def bins(self, field, bins=30):
"Apply bin categories to a numeric series, returns (label, appliedSeries)"
fld = self.frame[field]
mn, mx = fld.min(), fld.max()
binsize = (float(mx-mn)/bins)
binedges = map(lambda i: mn+(i*binsize), range(bins+1))
labels = map(lambda x: x+(binsize/2), binedges[:-1])
def getbin(val):
i = int(np.ceil((val-mn)/binsize))-1
return labels[i if i > -1 else 0]
return labels, fld.apply(getbin)
def binCounts(self, field, bins=30):
labels, applied = self.bins(field, bins)
applied = NumberFrame(applied)
counts = pandas.DataFrame(labels)
counts.set_index(0, inplace=True)
counts['num'] = applied.groupby(0).size()
counts['num'].fillna(0, inplace=True)
return counts
class NumberFrame(AbstractFrame, NumberMixin, UtilMixin):
pass
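# Usage sketch (illustrative): histogram-style counts for a numeric column,
# using the bin centres computed by NumberMixin.bins() as the index.
#
#     nf = NumberFrame([[1], [2], [2], [9], [40]])
#     counts = nf.binCounts(0, bins=10)   # index = bin centres, column 'num'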
class EventsMixin():
"Utils for frames that are series of events (e.g. logs)"
def onLoad(self, datetime_field='datetime'):
self._options = {'datetime_field': datetime_field}
self.frame[datetime_field] = pandas.to_datetime(self.frame[datetime_field])
#self.frame['date'] = self.frame[datetime_field].apply(lambda d: d.date())
def periodCount(self, groupby, period='daily'):
"""
Count events by groupby field in daily/weekly/monthly resolution
Returns a frame with groupby field as indexes and a column for each period
"""
frame = self.frame.copy()
if period == 'hourly':
def hour(d):
return pandas.Timestamp(d.strftime('%Y-%m-%d %H'))
frame['hour'] = frame[self._options['datetime_field']].apply(hour)
counts = frame.groupby([groupby, 'hour']).size().unstack()
counts.fillna(0, inplace=True)
mn = frame['hour'].min()
mx = frame['hour'].max()
print mn, mx
for d in pandas.date_range(start=mn, end=mx, freq='H'):
d = d + pandas.Timedelta('1 hour')
if d not in counts.columns:
counts[d] = 0
return TimeFrame(counts).load()
elif period == 'daily':
frame['date'] = frame[self._options['datetime_field']].apply(lambda d: d.date())
counts = frame.groupby([groupby, 'date']).size().unstack()
counts.fillna(0, inplace=True)
# blow up (fill dates that have no events with zeroes)
d = frame['date'].min()
mx = frame['date'].max()
while d < mx:
d = d + pandas.Timedelta('1 day')
if d not in counts.columns:
counts[d] = 0
return TimeFrame(counts).load()
elif period == 'weekly':
frame['week'] = frame[self._options['datetime_field']].apply(firstDayOfWeek)
counts = frame.groupby([groupby, 'week']).size().unstack()
counts.fillna(0, inplace=True)
# blow up (fill weeks that have no events with zeroes)
mn = frame['week'].min()
mx = frame['week'].max()
for d in pandas.date_range(start=mn, end=mx, freq='W-MON'):
d = d.date()
#d = d.strftime('%Y-%W')
if d not in counts.columns:
counts[d] = 0
return TimeFrame(counts).load()
else: raise Exception("EventsMixin.periodCount period not implemented: %s"%period)
class EventsFrame(AbstractFrame, EventsMixin, UtilMixin):
pass
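# Usage sketch (illustrative): per-user daily event counts from a log frame
# with a 'user' column and a 'datetime' column (both column names assumed).
#
#     events = EventsFrame(log_frame).load(datetime_field='datetime')
#     daily = events.periodCount('user', period='daily')   # -> TimeFrame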
class TimeMixin():
"Mixin for frames that have dates as columns (e.g. counts of events for each day for users)"
def onLoad(self):
# sort columns
self.frame = self.frame.reindex_axis(sorted(self.frame.columns), axis=1)
def min(self):
return self.frame.columns.min()
def max(self):
return self.frame.columns.max()
def maskInvalid(self, min_date=None, max_date=None):
min_series = isinstance(min_date, pandas.Series)
max_series = isinstance(max_date, pandas.Series)
if not min_series and not min_date: min_date = self.min()
if not max_series and not max_date: max_date = self.max()
def doMask(row):
return row[min_date[row.name] if min_series else min_date:max_date[row.name] if max_series else max_date]
return self.cast(self.frame.apply(doMask, axis=1))
def toOffsets(self, nan=np.nan):
"Replace columns as days from beginning, beginning taken for each device the next day/week of last NaN"
def deltize_row(row):
notnull = row[row.notnull()] if pandas.isnull(nan) else row[row != nan]
notnull.index = range(0,len(notnull))
return notnull
return self.capply(deltize_row, axis=1)
def classify(self, date, *config):
"""
Classify indexes by evaluating a (bool)function with sum of counts in a certain period before given date
e.g. classify(
('NaN', '30 days', lambda events: np.isnull(events)),
('heavy', '3 days', lambda events: events > 20),
('regular', '7 days', lambda events: events > 10),
('dead', '30 days', lambda events: events == 0),
('idle',None,None)
)
"""
print '.',
sys.stdout.flush()
# sum events for every period given in config
periods = NumberFrame(pandas.DataFrame(index=self.frame.index))
for period in set(map(lambda line: line[1] if len(line) > 1 else None, config)):
if not period: continue # Nones
start = date - pandas.Timedelta(period) + pandas.Timedelta('1 day')
periods[period] = self.frame.loc[self.frame.index, start:date].sum(axis=1, skipna=False)
def cls(row):
for cls, period, func in config:
if not period: return cls
if func(periods.loc[row.name, period]): return cls
return periods.apply(cls, axis=1)
@classmethod
def newDaily(self, start_date, days, index=None):
frame = TimeFrame(pandas.DataFrame(index=index))
for date in pandas.date_range(start_date, periods=days):
frame[date.date()] = np.nan
return frame
@classmethod
def newWeekly(self, start_date, weeks, index=None):
frame = TimeFrame(pandas.DataFrame(index=index))
for date in pandas.date_range(start_date, periods=weeks, freq='7D'): # step one week at a time
frame[date.strftime('%Y-%W')] = np.nan
return frame
class TimeFrame(AbstractFrame, TimeMixin, NumberMixin, UtilMixin):
pass
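# Usage sketch (illustrative; daily_counts and first_seen are hypothetical
# inputs): mask a per-user daily-counts frame to each user's valid window,
# then re-index the remaining columns as day offsets.
#
#     tf = TimeFrame(daily_counts)                   # columns are dates
#     active = tf.maskInvalid(min_date=first_seen)   # scalar or per-row Series
#     offsets = active.toOffsets()                   # columns become 0, 1, 2, ...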
class BokehMixin():
def _chart(self, chartcls, **kwargs):
opts = dict(width=1000, height=500, legend='bottom_left')
show = kwargs.pop('show', True)
opts.update(self.kwargs())
opts.update(kwargs)
p = chartcls(self.frame, **opts)
if show: charts.show(p)
else: charts.save(p)
return p
def line(self, **kwargs):
return self._chart(charts.Line, **kwargs)
def bar(self, **kwargs):
return self._chart(charts.Bar, **kwargs)
def time(self, **kwargs):
return self._chart(charts.TimeSeries, **kwargs)
def area(self, **kwargs):
return self._chart(charts.Area, **kwargs)
def kwargs(self):
return {}
class BokehFrame(AbstractFrame, BokehMixin, NumberMixin, UtilMixin):
pass
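# Usage sketch (illustrative; 'counts' is a hypothetical counts frame): render
# a frame with bokeh.charts through the thin wrappers above.
#
#     io.output_file('daily_counts.html')
#     BokehFrame(counts).line(title='daily events')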
class BokehGroups():
def __init__(self, keyFunc, basename, **kwargs):
self.files = dict()
self.basename = basename
self.keyFunc = keyFunc
self.kw = dict()
self.kw.update(dict(mode='cdn'))
self.kw.update(kwargs)
def setOutput(self, comb):
gr = self.keyFunc(comb)
if gr not in self.files: self.files[gr] = []
fn = '%s_%s_%s.html'%(self.basename, gr, len(self.files[gr]))
self.files[gr].append(fn)
io.output_file(fn, **self.kw)
return fn
def glue(self, clean=True):
for group, files in self.files.items():
fn = '%s_%s.html'%(self.basename, group)
f = open(fn, 'w')
for src in files:
s = open(src, 'r')
f.write(s.read())
s.close()
os.unlink(src)
f.close()
class ArgCombGraph(BokehFrame):
def onLoad(self, name=None, comb=None):
self._options['name'] = name
self._options['comb'] = comb
def kwargs(self):
return dict(title=self._options['name'])
# utilities @TODO tests, move to another file, something..
class cacher(object):
"Cache decorator"
def __init__(self, name, load_columns=None, asaf=AbstractFrame, method=True, rootdir='cache/'):
self.name = name
self.load_columns = load_columns
self.asaf = asaf
self.method = method
self.rootdir = rootdir
def _filename(self, args, kwargs):
if self.method: args = args[1:]
parts = map(self._cleanArg, [self.name]+list(args)+map(lambda k: kwargs[k], sorted(kwargs.keys())))
return self.rootdir + '_'.join(parts) + '.csv'
def _cleanArg(self, arg):
if type(arg) in (list, tuple): return '-'.join(map(str, arg))
return str(arg)
def __call__(self, func):
def wrapped(*args, **kwargs):
filename = self._filename(args, kwargs)
if os.path.exists(filename):
print 'Loading from %s'%filename
frame = pandas.io.parsers.read_csv(filename, index_col=0).asaf(self.asaf)
if self.load_columns:
frame.frame.columns = frame.frame.apply(lambda col: self.load_columns(col.name), axis=0)
return frame
else:
print 'Generating %s'%filename
frame = func(*args, **kwargs)
frame.to_csv(filename)
return frame
return wrapped
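# Usage sketch (illustrative): cache an expensive frame-building function to
# cache/<name>_<args>.csv. build_counts is a hypothetical example; whatever it
# returns must provide a to_csv() method.
#
#     @cacher('daily_counts', asaf=TimeFrame, method=False)
#     def build_counts(start_date, days):
#         ...
#
#     counts = build_counts('2015-01-01', 30)   # generated once, then loaded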
def pairs(k, vs): return map(lambda v: (k,v), vs)
import itertools
class ArgComb():
def __init__(self, name, **kwargs):
self.name = name
self.kw = dict()
self.kwd = dict()
self.kwpersist = kwargs
self.dimensions = []
self.conditionals = []
def conditional(self, func):
self.conditionals.append(func)
def add(self, name, *values, **kwargs):
self._frames = []
self.dimensions.append(name)
self.kw[name] = values
display = kwargs.pop('display', None)
if type(display) == list:
if len(display) != len(values): raise Exception('Display length mismatch')
self.kwd[name] = display
def _name(self, comb):
def dimension_display(d):
dd = d.split('_')
dd = ''.join(map(lambda s: s[0], dd[:-1]))+dd[-1]
dd = dd[0:15]
if comb[d] == None: return None
vd = str(comb[d])[0:10]
if d in self.kwd:
vd = self.kwd[d][self.kw[d].index(comb[d])]
self.kw[d].index(comb[d])
return "%s=%s"%(dd, vd)
return self.name + '; ' + ', '.join(filter(bool, map(dimension_display, self.dimensions)))
def __iter__(self):
def append(dct):
d = dict()
d.update(self.kwpersist)
d.update(dict(dct))
for cond in self.conditionals:
d.update(cond(d))
return d
for c in map(append, itertools.product(*map(lambda d: pairs(d, self.kw[d]), self.dimensions))):
yield self._name(c), c
def apply(self, func):
l = len(list(self))
i = 0
for name, c in self:
i += 1
print "### (%s/%s) %s"%(i, l, name)
yield name, c, func(**c)
print
def charts(self, frames, group_func, basename, cls, chartname='line', appendframe=True, **kwargs):
fr = []
bg = BokehGroups(group_func, basename)
for name, c, frame in (self.apply(frames) if callable(frames) else frames):
fr.append((name, c, frame))
fn = bg.setOutput(c)
graph = cls(frame).load(name=name, comb=c)
getattr(graph, chartname)(**kwargs)
if appendframe:
f = open(fn, 'r')
content = f.read()
f.close()
f = open(fn, 'w')
table = '<style>table.frame td, table.frame th{border: 0px solid black;} table.frame{border: 1px solid black; font-family: sans; float: right;} @media print { table.frame {float: none;} .plotdiv { page-break-after: always; } }</style>'\
+ frame.to_html().replace('<table', '<table class="frame"')
content = content.replace('<div class="plotdiv"', '%s<div class="plotdiv"'%table)
f.write(content)
f.close()
bg.glue()
return fr
def __str__(self):
return "ArgComb(%s): %s"%(len(list(self)), ', '.join(map(lambda d: "%s(%s)"%(d, len(self.kw[d])), self.dimensions)))
def __repr__(self):
return str(self)
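# Usage sketch (illustrative): sweep two dimensions and apply a function to
# every combination; compute_retention is a hypothetical callable that takes
# the combined keyword arguments.
#
#     ac = ArgComb('retention', period='weekly')
#     ac.add('min_events', 1, 5, 10)
#     ac.add('window_days', 7, 30, display=['1w', '1m'])
#     for name, comb, result in ac.apply(compute_retention):
#         print name, result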
|
|
import socket
import time
from OpenSSL import SSL
from netlib import tcp, http, socks
from netlib.certutils import SSLCert
from netlib.http import authentication
from libpathod import pathoc, pathod
from libmproxy.proxy.config import HostMatcher
from libmproxy.protocol import KILL, Error
from libmproxy.protocol.http import CONTENT_MISSING
import tutils
import tservers
"""
Note that the choice of response code in these tests matters more than you
might think. libcurl treats a 304 response code differently from, say, a
200 response code - it will correctly terminate a 304 response with no
content-length header, whereas it will block forever waiting for content
for a 200 response.
"""
class CommonMixin:
def test_large(self):
assert len(self.pathod("200:b@50k").content) == 1024 * 50
@staticmethod
def wait_until_not_live(flow):
"""
Race condition: We don't want to replay the flow while it is still live.
"""
s = time.time()
while flow.live:
time.sleep(0.001)
if time.time() - s > 5:
raise RuntimeError("Flow is live for too long.")
def test_replay(self):
assert self.pathod("304").status_code == 304
if isinstance(self, tservers.HTTPUpstreamProxTest) and self.ssl:
assert len(self.master.state.view) == 2
else:
assert len(self.master.state.view) == 1
l = self.master.state.view[-1]
assert l.response.code == 304
l.request.path = "/p/305"
self.wait_until_not_live(l)
rt = self.master.replay_request(l, block=True)
assert l.response.code == 305
# Disconnect error
l.request.path = "/p/305:d0"
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxTest):
assert l.response.code == 502
else:
assert l.error
# Port error
l.request.port = 1
# In upstream mode, we get a 502 response from the upstream proxy server.
# In upstream mode with ssl, the replay will fail as we cannot establish
# SSL with the upstream proxy.
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxTest) and not self.ssl:
assert l.response.code == 502
else:
assert l.error
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
# In Upstream mode with SSL, we may already have a previous CONNECT
# request.
l = self.master.state.view[-1]
assert l.client_conn.address
assert "host" in l.request.headers
assert l.response.code == 304
def test_invalid_http(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write("invalid\r\n\r\n")
t.wfile.flush()
line = t.rfile.readline()
assert ("Bad Request" in line) or ("Bad Gateway" in line)
def test_sni(self):
if not self.ssl:
return
f = self.pathod("304", sni="testserver.com")
assert f.status_code == 304
log = self.server.last_log()
assert log["request"]["sni"] == "testserver.com"
class TcpMixin:
def _ignore_on(self):
assert not hasattr(self, "_ignore_backup")
self._ignore_backup = self.config.check_ignore
self.config.check_ignore = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_ignore.patterns)
def _ignore_off(self):
assert hasattr(self, "_ignore_backup")
self.config.check_ignore = self._ignore_backup
del self._ignore_backup
def test_ignore(self):
spec = '304:h"Alternate-Protocol"="mitmproxy-will-remove-this"'
n = self.pathod(spec)
self._ignore_on()
i = self.pathod(spec)
i2 = self.pathod(spec)
self._ignore_off()
assert i.status_code == i2.status_code == n.status_code == 304
assert "Alternate-Protocol" in i.headers
assert "Alternate-Protocol" in i2.headers
assert "Alternate-Protocol" not in n.headers
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert
assert i_cert != n_cert
# Test Non-HTTP traffic
spec = "200:i0,@100:d0" # this results in just 100 random bytes
# mitmproxy responds with bad gateway
assert self.pathod(spec).status_code == 502
self._ignore_on()
tutils.raises(
"invalid server response",
self.pathod,
spec) # pathoc tries to parse answer as HTTP
self._ignore_off()
def _tcpproxy_on(self):
assert not hasattr(self, "_tcpproxy_backup")
self._tcpproxy_backup = self.config.check_tcp
self.config.check_tcp = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_tcp.patterns)
def _tcpproxy_off(self):
assert hasattr(self, "_tcpproxy_backup")
self.config.check_ignore = self._tcpproxy_backup
del self._tcpproxy_backup
def test_tcp(self):
spec = '304:h"Alternate-Protocol"="mitmproxy-will-remove-this"'
n = self.pathod(spec)
self._tcpproxy_on()
i = self.pathod(spec)
i2 = self.pathod(spec)
self._tcpproxy_off()
assert i.status_code == i2.status_code == n.status_code == 304
assert "Alternate-Protocol" in i.headers
assert "Alternate-Protocol" in i2.headers
assert "Alternate-Protocol" not in n.headers
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert == n_cert
# Make sure that TCP messages are in the event log.
assert any("mitmproxy-will-remove-this" in m for m in self.master.log)
class AppMixin:
def test_app(self):
ret = self.app("/")
assert ret.status_code == 200
assert "mitmproxy" in ret.content
class TestHTTP(tservers.HTTPProxTest, CommonMixin, AppMixin):
def test_app_err(self):
p = self.pathoc()
ret = p.request("get:'http://errapp/'")
assert ret.status_code == 500
assert "ValueError" in ret.content
def test_invalid_connect(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write("CONNECT invalid\n\n")
t.wfile.flush()
assert "Bad Request" in t.rfile.readline()
def test_upstream_ssl_error(self):
p = self.pathoc()
ret = p.request("get:'https://localhost:%s/'" % self.server.port)
assert ret.status_code == 400
def test_connection_close(self):
# Add a body, so we have a content-length header, which combined with
# HTTP1.1 means the connection is kept alive.
response = '%s/p/200:b@1' % self.server.urlbase
# Let's sanity check that the connection does indeed stay open by
# issuing two requests over the same connection
p = self.pathoc()
assert p.request("get:'%s'" % response)
assert p.request("get:'%s'" % response)
# Now check that the connection is closed as the client specifies
p = self.pathoc()
assert p.request("get:'%s':h'Connection'='close'" % response)
# There's a race here, which means we can get any of a number of errors.
# Rather than introduce yet another sleep into the test suite, we just
# relax the Exception specification.
tutils.raises(Exception, p.request, "get:'%s'" % response)
def test_reconnect(self):
req = "get:'%s/p/200:b@1:da'" % self.server.urlbase
p = self.pathoc()
assert p.request(req)
# Server has disconnected. Mitmproxy should detect this, and reconnect.
assert p.request(req)
assert p.request(req)
def test_get_connection_switching(self):
def switched(l):
for i in l:
if "serverdisconnect" in i:
return True
req = "get:'%s/p/200:b@1'"
p = self.pathoc()
assert p.request(req % self.server.urlbase)
assert p.request(req % self.server2.urlbase)
assert switched(self.proxy.log)
def test_get_connection_err(self):
p = self.pathoc()
ret = p.request("get:'http://localhost:0'")
assert ret.status_code == 502
def test_blank_leading_line(self):
p = self.pathoc()
req = "get:'%s/p/201':i0,'\r\n'"
assert p.request(req % self.server.urlbase).status_code == 201
def test_invalid_headers(self):
p = self.pathoc()
req = p.request("get:'http://foo':h':foo'='bar'")
assert req.status_code == 400
def test_empty_chunked_content(self):
"""
https://github.com/mitmproxy/mitmproxy/issues/186
"""
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
spec = '301:h"Transfer-Encoding"="chunked":r:b"0\\r\\n\\r\\n"'
connection.send(
"GET http://localhost:%d/p/%s HTTP/1.1\r\n" %
(self.server.port, spec))
connection.send("\r\n")
resp = connection.recv(50000)
connection.close()
assert "content-length" in resp.lower()
def test_stream(self):
self.master.set_stream_large_bodies(1024 * 2)
self.pathod("200:b@1k")
assert not self.master.state.view[-1].response.stream
assert len(self.master.state.view[-1].response.content) == 1024 * 1
self.pathod("200:b@3k")
assert self.master.state.view[-1].response.stream
assert self.master.state.view[-1].response.content == CONTENT_MISSING
self.master.set_stream_large_bodies(None)
def test_stream_modify(self):
self.master.load_script(
tutils.test_data.path("scripts/stream_modify.py"))
d = self.pathod('200:b"foo"')
assert d.content == "bar"
self.master.unload_scripts()
class TestHTTPAuth(tservers.HTTPProxTest):
authenticator = http.authentication.BasicProxyAuth(
http.authentication.PassManSingleUser(
"test",
"test"),
"realm")
def test_auth(self):
assert self.pathod("202").status_code == 407
p = self.pathoc()
ret = p.request("""
get
'http://localhost:%s/p/202'
h'%s'='%s'
""" % (
self.server.port,
http.authentication.BasicProxyAuth.AUTH_HEADER,
authentication.assemble_http_basic_auth("basic", "test", "test")
))
assert ret.status_code == 202
class TestHTTPConnectSSLError(tservers.HTTPProxTest):
certfile = True
def test_go(self):
self.config.ssl_ports.append(self.proxy.port)
p = self.pathoc_raw()
dst = ("localhost", self.proxy.port)
p.connect(connect_to=dst)
tutils.raises("502 - Bad Gateway", p.http_connect, dst)
class TestHTTPS(tservers.HTTPProxTest, CommonMixin, TcpMixin):
ssl = True
ssloptions = pathod.SSLOptions(request_client_cert=True)
clientcerts = True
def test_clientcert(self):
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
def test_error_post_connect(self):
p = self.pathoc()
assert p.request("get:/:i0,'invalid\r\n\r\n'").status_code == 400
class TestHTTPSCertfile(tservers.HTTPProxTest, CommonMixin):
ssl = True
certfile = True
def test_certfile(self):
assert self.pathod("304")
class TestHTTPSUpstreamServerVerificationWTrustedCert(tservers.HTTPProxTest):
"""
Test upstream server certificate verification with a trusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn="trusted-cert",
certs=[
("trusted-cert", tutils.test_data.path("data/trusted-server.crt"))
])
def test_verification_w_cadir(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_cadir_server = tutils.test_data.path(
"data/trusted-cadir/")
self.pathoc()
def test_verification_w_pemfile(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
self.pathoc()
class TestHTTPSUpstreamServerVerificationWBadCert(tservers.HTTPProxTest):
"""
Test upstream server certificate verification with an untrusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn="untrusted-cert",
certs=[
("untrusted-cert", tutils.test_data.path("data/untrusted-server.crt"))
])
def test_default_verification_w_bad_cert(self):
"""Should use no verification."""
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
self.pathoc()
def test_no_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_NONE
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
self.pathoc()
def test_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
tutils.raises("SSL handshake error", self.pathoc)
class TestHTTPSNoCommonName(tservers.HTTPProxTest):
"""
Test what happens if we get a cert without common name back.
"""
ssl = True
ssloptions = pathod.SSLOptions(
certs=[
("*", tutils.test_data.path("data/no_common_name.pem"))
]
)
def test_http(self):
f = self.pathod("202")
assert f.sslinfo.certchain[0].get_subject().CN == "127.0.0.1"
class TestReverse(tservers.ReverseProxTest, CommonMixin, TcpMixin):
reverse = True
class TestSocks5(tservers.SocksModeTest):
def test_simple(self):
p = self.pathoc()
p.socks_connect(("localhost", self.server.port))
f = p.request("get:/p/200")
assert f.status_code == 200
def test_with_authentication_only(self):
p = self.pathoc()
f = p.request("get:/p/200")
assert f.status_code == 502
assert "SOCKS5 mode failure" in f.content
def test_no_connect(self):
"""
mitmproxy doesn't support UDP or BIND SOCKS CMDs
"""
p = self.pathoc()
socks.ClientGreeting(
socks.VERSION.SOCKS5,
[socks.METHOD.NO_AUTHENTICATION_REQUIRED]
).to_file(p.wfile)
socks.Message(
socks.VERSION.SOCKS5,
socks.CMD.BIND,
socks.ATYP.DOMAINNAME,
("example.com", 8080)
).to_file(p.wfile)
p.wfile.flush()
p.rfile.read(2) # read server greeting
f = p.request("get:/p/200") # the request doesn't matter, error response from handshake will be read anyway.
assert f.status_code == 502
assert "SOCKS5 mode failure" in f.content
class TestSpoof(tservers.SpoofModeTest):
def test_http(self):
alist = (
("localhost", self.server.port),
("127.0.0.1", self.server.port)
)
for a in alist:
self.server.clear_log()
p = self.pathoc()
f = p.request("get:/p/304:h'Host'='%s:%s'" % a)
assert self.server.last_log()
assert f.status_code == 304
l = self.master.state.view[-1]
assert l.server_conn.address
assert l.server_conn.address.host == a[0]
assert l.server_conn.address.port == a[1]
def test_http_without_host(self):
p = self.pathoc()
f = p.request("get:/p/304:r")
assert f.status_code == 400
class TestSSLSpoof(tservers.SSLSpoofModeTest):
def test_https(self):
alist = (
("localhost", self.server.port),
("127.0.0.1", self.server.port)
)
for a in alist:
self.server.clear_log()
self.config.mode.sslport = a[1]
p = self.pathoc(sni=a[0])
f = p.request("get:/p/304")
assert self.server.last_log()
assert f.status_code == 304
l = self.master.state.view[-1]
assert l.server_conn.address
assert l.server_conn.address.host == a[0]
assert l.server_conn.address.port == a[1]
def test_https_without_sni(self):
a = ("localhost", self.server.port)
self.config.mode.sslport = a[1]
p = self.pathoc(sni=None)
f = p.request("get:/p/304")
assert f.status_code == 400
class TestHttps2Http(tservers.ReverseProxTest):
@classmethod
def get_proxy_config(cls):
d = super(TestHttps2Http, cls).get_proxy_config()
d["upstream_server"][0] = True
return d
def pathoc(self, ssl, sni=None):
"""
Returns a connected Pathoc instance.
"""
p = pathoc.Pathoc(
("localhost", self.proxy.port), ssl=ssl, sni=sni, fp=None
)
p.connect()
return p
def test_all(self):
p = self.pathoc(ssl=True)
assert p.request("get:'/p/200'").status_code == 200
def test_sni(self):
p = self.pathoc(ssl=True, sni="example.com")
assert p.request("get:'/p/200'").status_code == 200
assert all("Error in handle_sni" not in msg for msg in self.proxy.log)
def test_http(self):
p = self.pathoc(ssl=False)
assert p.request("get:'/p/200'").status_code == 400
class TestTransparent(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = False
class TestTransparentSSL(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = True
def test_sslerr(self):
p = pathoc.Pathoc(("localhost", self.proxy.port), fp=None)
p.connect()
r = p.request("get:/")
assert r.status_code == 400
class TestProxy(tservers.HTTPProxTest):
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
f = self.master.state.view[0]
assert f.client_conn.address
assert "host" in f.request.headers
assert f.response.code == 304
def test_response_timestamps(self):
        # test that we notice at least a 1 sec delay between timestamps
        # in the response object
f = self.pathod("304:b@1k:p50,1")
assert f.status_code == 304
response = self.master.state.view[0].response
assert 1 <= response.timestamp_end - response.timestamp_start <= 1.2
def test_request_timestamps(self):
# test that we notice a delay between timestamps in request object
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
# call pathod server, wait a second to complete the request
connection.send(
"GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n" %
self.server.port)
time.sleep(1)
connection.send("\r\n")
connection.recv(50000)
connection.close()
request, response = self.master.state.view[
0].request, self.master.state.view[0].response
assert response.code == 304 # sanity test for our low level request
# time.sleep might be a little bit shorter than a second
assert 0.95 < (request.timestamp_end - request.timestamp_start) < 1.2
def test_request_timestamps_not_affected_by_client_time(self):
        # test that we don't include user wait time in the request's timestamps
f = self.pathod("304:b@10k")
assert f.status_code == 304
f = self.pathod("304:b@10k")
assert f.status_code == 304
request = self.master.state.view[0].request
assert request.timestamp_end - request.timestamp_start <= 0.1
request = self.master.state.view[1].request
assert request.timestamp_end - request.timestamp_start <= 0.1
def test_request_tcp_setup_timestamp_presence(self):
        # tests that the server_conn of a TCP connection has a tcp_setup_timestamp
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("localhost", self.proxy.port))
connection.send(
"GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send("\r\n")
connection.recv(5000)
connection.send(
"GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send("\r\n")
connection.recv(5000)
connection.close()
first_flow = self.master.state.view[0]
second_flow = self.master.state.view[1]
assert first_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_ssl_setup is None
assert second_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_tcp_setup == second_flow.server_conn.timestamp_tcp_setup
def test_request_ip(self):
f = self.pathod("200:b@100")
assert f.status_code == 200
f = self.master.state.view[0]
assert f.server_conn.address == ("127.0.0.1", self.server.port)
class TestProxySSL(tservers.HTTPProxTest):
ssl = True
def test_request_ssl_setup_timestamp_presence(self):
# tests that the ssl timestamp is present when ssl is used
f = self.pathod("304:b@10k")
assert f.status_code == 304
first_flow = self.master.state.view[0]
assert first_flow.server_conn.timestamp_ssl_setup
class MasterRedirectRequest(tservers.TestMaster):
redirect_port = None # Set by TestRedirectRequest
def handle_request(self, f):
request = f.request
if request.path == "/p/201":
addr = f.live.c.server_conn.address
assert f.live.change_server(
("127.0.0.1", self.redirect_port), ssl=False)
assert not f.live.change_server(
("127.0.0.1", self.redirect_port), ssl=False)
tutils.raises(
"SSL handshake error",
f.live.change_server,
("127.0.0.1",
self.redirect_port),
ssl=True)
assert f.live.change_server(addr, ssl=False)
request.url = "http://127.0.0.1:%s/p/201" % self.redirect_port
tservers.TestMaster.handle_request(self, f)
def handle_response(self, f):
f.response.content = str(f.client_conn.address.port)
f.response.headers[
"server-conn-id"] = [str(f.server_conn.source_address.port)]
tservers.TestMaster.handle_response(self, f)
class TestRedirectRequest(tservers.HTTPProxTest):
masterclass = MasterRedirectRequest
def test_redirect(self):
self.master.redirect_port = self.server2.port
p = self.pathoc()
self.server.clear_log()
self.server2.clear_log()
r1 = p.request("get:'%s/p/200'" % self.server.urlbase)
assert r1.status_code == 200
assert self.server.last_log()
assert not self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r2 = p.request("get:'%s/p/201'" % self.server.urlbase)
assert r2.status_code == 201
assert not self.server.last_log()
assert self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r3 = p.request("get:'%s/p/202'" % self.server.urlbase)
assert r3.status_code == 202
assert self.server.last_log()
assert not self.server2.last_log()
assert r1.content == r2.content == r3.content
assert r1.headers.get_first(
"server-conn-id") == r3.headers.get_first("server-conn-id")
# Make sure that we actually use the same connection in this test case
class MasterStreamRequest(tservers.TestMaster):
"""
Enables the stream flag on the flow for all requests
"""
def handle_responseheaders(self, f):
f.response.stream = True
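        # Streamed responses are passed through without being buffered, so the
        # stored flow's response.content ends up as CONTENT_MISSING (see
        # test_stream above).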
f.reply()
class TestStreamRequest(tservers.HTTPProxTest):
masterclass = MasterStreamRequest
def test_stream_simple(self):
p = self.pathoc()
# a request with 100k of data but without content-length
self.server.clear_log()
r1 = p.request("get:'%s/p/200:r:b@100k:d102400'" % self.server.urlbase)
assert r1.status_code == 200
assert len(r1.content) > 100000
assert self.server.last_log()
def test_stream_multiple(self):
p = self.pathoc()
# simple request with streaming turned on
self.server.clear_log()
r1 = p.request("get:'%s/p/200'" % self.server.urlbase)
assert r1.status_code == 200
assert self.server.last_log()
# now send back 100k of data, streamed but not chunked
self.server.clear_log()
r1 = p.request("get:'%s/p/200:b@100k'" % self.server.urlbase)
assert r1.status_code == 200
assert self.server.last_log()
def test_stream_chunked(self):
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
fconn = connection.makefile()
spec = '200:h"Transfer-Encoding"="chunked":r:b"4\\r\\nthis\\r\\n7\\r\\nisatest\\r\\n0\\r\\n\\r\\n"'
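        # Chunked framing in the spec above: a 4-byte chunk "this", a 7-byte
        # chunk "isatest", then the terminating zero-length chunk, matching the
        # `chunks` assertion at the end of this test.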
connection.send(
"GET %s/p/%s HTTP/1.1\r\n" %
(self.server.urlbase, spec))
connection.send("\r\n")
protocol = http.http1.HTTP1Protocol(rfile=fconn)
resp = protocol.read_response("GET", None, include_body=False)
assert resp.headers["Transfer-Encoding"][0] == 'chunked'
assert resp.status_code == 200
chunks = list(
content for _, content, _ in protocol.read_http_body_chunked(
resp.headers, None, "GET", 200, False))
assert chunks == ["this", "isatest", ""]
connection.close()
class MasterFakeResponse(tservers.TestMaster):
def handle_request(self, f):
resp = tutils.tresp()
f.reply(resp)
class TestFakeResponse(tservers.HTTPProxTest):
masterclass = MasterFakeResponse
def test_fake(self):
f = self.pathod("200")
assert "header_response" in f.headers.keys()
class TestServerConnect(tservers.HTTPProxTest):
masterclass = MasterFakeResponse
no_upstream_cert = True
ssl = True
def test_unnecessary_serverconnect(self):
"""A replayed/fake response with no_upstream_cert should not connect to an upstream server"""
assert self.pathod("200").status_code == 200
for msg in self.proxy.tmaster.log:
assert "serverconnect" not in msg
class MasterKillRequest(tservers.TestMaster):
def handle_request(self, f):
f.reply(KILL)
class TestKillRequest(tservers.HTTPProxTest):
masterclass = MasterKillRequest
def test_kill(self):
tutils.raises("server disconnect", self.pathod, "200")
# Nothing should have hit the server
assert not self.server.last_log()
class MasterKillResponse(tservers.TestMaster):
def handle_response(self, f):
f.reply(KILL)
class TestKillResponse(tservers.HTTPProxTest):
masterclass = MasterKillResponse
def test_kill(self):
tutils.raises("server disconnect", self.pathod, "200")
# The server should have seen a request
assert self.server.last_log()
class EResolver(tservers.TResolver):
def original_addr(self, sock):
raise RuntimeError("Could not resolve original destination.")
class TestTransparentResolveError(tservers.TransparentProxTest):
resolver = EResolver
def test_resolve_error(self):
assert self.pathod("304").status_code == 502
class MasterIncomplete(tservers.TestMaster):
def handle_request(self, f):
resp = tutils.tresp()
resp.content = CONTENT_MISSING
f.reply(resp)
class TestIncompleteResponse(tservers.HTTPProxTest):
masterclass = MasterIncomplete
def test_incomplete(self):
assert self.pathod("200").status_code == 502
class TestUpstreamProxy(tservers.HTTPUpstreamProxTest, CommonMixin, AppMixin):
ssl = False
def test_order(self):
self.proxy.tmaster.replacehooks.add(
"~q",
"foo",
"bar") # replace in request
self.chain[0].tmaster.replacehooks.add("~q", "bar", "baz")
self.chain[1].tmaster.replacehooks.add("~q", "foo", "oh noes!")
self.chain[0].tmaster.replacehooks.add(
"~s",
"baz",
"ORLY") # replace in response
p = self.pathoc()
req = p.request("get:'%s/p/418:b\"foo\"'" % self.server.urlbase)
assert req.content == "ORLY"
assert req.status_code == 418
class TestUpstreamProxySSL(
tservers.HTTPUpstreamProxTest,
CommonMixin,
TcpMixin):
ssl = True
def _host_pattern_on(self, attr):
"""
Updates config.check_tcp or check_ignore, depending on attr.
"""
assert not hasattr(self, "_ignore_%s_backup" % attr)
backup = []
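        # For every proxy in the chain, prepend a ".+:<server port>" pattern to
        # config.check_<attr> (check_ignore or check_tcp), remembering the old
        # matcher so that _host_pattern_off() can restore it later.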
for proxy in self.chain:
old_matcher = getattr(
proxy.tmaster.server.config,
"check_%s" %
attr)
backup.append(old_matcher)
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
HostMatcher([".+:%s" % self.server.port] + old_matcher.patterns)
)
setattr(self, "_ignore_%s_backup" % attr, backup)
def _host_pattern_off(self, attr):
backup = getattr(self, "_ignore_%s_backup" % attr)
for proxy in reversed(self.chain):
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
backup.pop()
)
assert not backup
delattr(self, "_ignore_%s_backup" % attr)
def _ignore_on(self):
super(TestUpstreamProxySSL, self)._ignore_on()
self._host_pattern_on("ignore")
def _ignore_off(self):
super(TestUpstreamProxySSL, self)._ignore_off()
self._host_pattern_off("ignore")
def _tcpproxy_on(self):
super(TestUpstreamProxySSL, self)._tcpproxy_on()
self._host_pattern_on("tcp")
def _tcpproxy_off(self):
super(TestUpstreamProxySSL, self)._tcpproxy_off()
self._host_pattern_off("tcp")
def test_simple(self):
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == "content"
assert req.status_code == 418
# CONNECT from pathoc to chain[0],
assert self.proxy.tmaster.state.flow_count() == 2
# request from pathoc to chain[0]
# CONNECT from proxy to chain[1],
assert self.chain[0].tmaster.state.flow_count() == 2
# request from proxy to chain[1]
# request from chain[0] (regular proxy doesn't store CONNECTs)
assert self.chain[1].tmaster.state.flow_count() == 1
def test_closing_connect_response(self):
"""
https://github.com/mitmproxy/mitmproxy/issues/313
"""
def handle_request(f):
f.request.httpversion = (1, 0)
del f.request.headers["Content-Length"]
f.reply()
_handle_request = self.chain[0].tmaster.handle_request
self.chain[0].tmaster.handle_request = handle_request
try:
assert self.pathoc().request("get:/p/418").status_code == 418
finally:
self.chain[0].tmaster.handle_request = _handle_request
class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxTest):
ssl = True
def test_reconnect(self):
"""
Tests proper functionality of ConnectionHandler.server_reconnect mock.
If we have a disconnect on a secure connection that's transparently proxified to
an upstream http proxy, we need to send the CONNECT request again.
"""
def kill_requests(master, attr, exclude):
k = [0] # variable scope workaround: put into array
_func = getattr(master, attr)
def handler(f):
k[0] += 1
if not (k[0] in exclude):
f.client_conn.finish()
f.error = Error("terminated")
f.reply(KILL)
return _func(f)
setattr(master, attr, handler)
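        # kill_requests() above wraps a master's handler so that every request
        # whose ordinal is not listed in `exclude` is killed with an error,
        # which simulates upstream disconnects at specific points.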
kill_requests(self.chain[1].tmaster, "handle_request",
exclude=[
# fail first request
2, # allow second request
])
kill_requests(self.chain[0].tmaster, "handle_request",
exclude=[
1, # CONNECT
# fail first request
3, # reCONNECT
4, # request
])
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert self.proxy.tmaster.state.flow_count() == 2 # CONNECT and request
# CONNECT, failing request,
assert self.chain[0].tmaster.state.flow_count() == 4
# reCONNECT, request
# failing request, request
assert self.chain[1].tmaster.state.flow_count() == 2
# (doesn't store (repeated) CONNECTs from chain[0]
# as it is a regular proxy)
assert req.content == "content"
assert req.status_code == 418
assert not self.chain[1].tmaster.state.flows[0].response # killed
assert self.chain[1].tmaster.state.flows[1].response
assert self.proxy.tmaster.state.flows[0].request.form_in == "authority"
assert self.proxy.tmaster.state.flows[1].request.form_in == "relative"
assert self.chain[0].tmaster.state.flows[
0].request.form_in == "authority"
assert self.chain[0].tmaster.state.flows[
1].request.form_in == "relative"
assert self.chain[0].tmaster.state.flows[
2].request.form_in == "authority"
assert self.chain[0].tmaster.state.flows[
3].request.form_in == "relative"
assert self.chain[1].tmaster.state.flows[
0].request.form_in == "relative"
assert self.chain[1].tmaster.state.flows[
1].request.form_in == "relative"
req = p.request("get:'/p/418:b\"content2\"'")
assert req.status_code == 502
assert self.proxy.tmaster.state.flow_count() == 3 # + new request
# + new request, repeated CONNECT from chain[1]
assert self.chain[0].tmaster.state.flow_count() == 6
# (both terminated)
# nothing happened here
assert self.chain[1].tmaster.state.flow_count() == 2
|
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""Tests for the rdp_classifier_2.0.1 application controller"""
from cStringIO import StringIO
from os import getcwd, environ, remove, listdir
from shutil import rmtree
import tempfile
from unittest import TestCase, main
from bfillings.rdp_classifier import (RdpClassifier, RdpTrainer, assign_taxonomy,
train_rdp_classifier,
train_rdp_classifier_and_assign_taxonomy,
parse_rdp_assignment)
class RdpClassifierTests(TestCase):
def setUp(self):
# fetch user's RDP_JAR_PATH
if 'RDP_JAR_PATH' in environ:
self.user_rdp_jar_path = environ['RDP_JAR_PATH']
else:
self.user_rdp_jar_path = 'rdp_classifier-2.2.jar'
self.output_file = tempfile.NamedTemporaryFile()
def test_default_java_vm_parameters(self):
"""RdpClassifier should store default arguments to Java VM."""
a = RdpClassifier()
self.assertTrue('-Xmx' in a.Parameters)
self.assertEqual(a.Parameters['-Xmx'].Value, '1000m')
def test_parameters_list(self):
a = RdpClassifier()
parameters = a.Parameters.keys()
parameters.sort()
self.assertEqual(parameters, ['-Xmx', '-f', '-o', '-t'])
    def test_assign_jvm_parameters(self):
        """RdpClassifier should pass alternate parameters to Java VM."""
app = RdpClassifier()
app.Parameters['-Xmx'].on('75M')
exp = ''.join([
'cd "', getcwd(), '/"; java -Xmx75M -jar "',
self.user_rdp_jar_path, '" -q'])
self.assertEqual(app.BaseCommand, exp)
def test_basecommand_property(self):
"""RdpClassifier BaseCommand property should use overridden method."""
app = RdpClassifier()
self.assertEqual(app.BaseCommand, app._get_base_command())
def test_base_command(self):
"""RdpClassifier should return expected shell command."""
app = RdpClassifier()
exp = ''.join([
'cd "', getcwd(), '/"; java -Xmx1000m -jar "',
self.user_rdp_jar_path, '" -q'])
self.assertEqual(app.BaseCommand, exp)
def test_change_working_dir(self):
"""RdpClassifier should run program in expected working directory."""
test_dir = '/tmp/RdpTest'
app = RdpClassifier(WorkingDir=test_dir)
exp = ''.join([
'cd "', test_dir, '/"; java -Xmx1000m -jar "',
self.user_rdp_jar_path, '" -q'])
self.assertEqual(app.BaseCommand, exp)
rmtree(test_dir)
def test_sample_fasta(self):
"""RdpClassifier should classify its own sample data correctly"""
test_dir = '/tmp/RdpTest'
app = RdpClassifier(WorkingDir=test_dir)
_, output_fp = tempfile.mkstemp(dir=test_dir)
app.Parameters['-o'].on(output_fp)
results = app(StringIO(rdp_sample_fasta))
assignment_toks = results['Assignments'].readline().split('\t')
self.assertEqual(assignment_toks[0], 'X67228')
lineage = [x.strip('"') for x in assignment_toks[2::3]]
self.assertEqual(lineage, [
'Root', 'Bacteria', 'Proteobacteria', 'Alphaproteobacteria',
'Rhizobiales', 'Rhizobiaceae', 'Rhizobium'])
rmtree(test_dir)
class RdpTrainerTests(TestCase):
"""Tests of the trainer for the RdpClassifier app
"""
def setUp(self):
self.reference_file = StringIO(rdp_training_sequences)
self.reference_file.seek(0)
self.taxonomy_file = tempfile.NamedTemporaryFile(
prefix="RdpTaxonomy", suffix=".txt")
self.taxonomy_file.write(rdp_training_taxonomy)
self.taxonomy_file.seek(0)
self.training_dir = tempfile.mkdtemp(prefix='RdpTrainer_output_')
def tearDown(self):
rmtree(self.training_dir)
def test_call(self):
app = RdpTrainer()
app.Parameters['taxonomy_file'].on(self.taxonomy_file.name)
app.Parameters['model_output_dir'].on(self.training_dir)
results = app(self.reference_file)
exp_file_list = [
'bergeyTrainingTree.xml', 'genus_wordConditionalProbList.txt',
'logWordPrior.txt', 'RdpClassifier.properties',
'wordConditionalProbIndexArr.txt',
]
obs_file_list = listdir(self.training_dir)
exp_file_list.sort()
obs_file_list.sort()
self.assertEqual(obs_file_list, exp_file_list)
autogenerated_headers = {
'bergeyTree': 'bergeyTrainingTree',
'probabilityList': 'genus_wordConditionalProbList',
'wordPrior': 'logWordPrior',
'probabilityIndex': 'wordConditionalProbIndexArr',
}
for id, basename in autogenerated_headers.iteritems():
obs_header = results[id].readline()
exp_header = exp_training_header_template % basename
self.assertEqual(exp_header, obs_header)
class RdpWrapperTests(TestCase):
""" Tests of RDP classifier wrapper functions
"""
def setUp(self):
self.num_trials = 10
self.test_input1 = rdp_test_fasta.split('\n')
self.expected_assignments1 = rdp_expected_out
# Files for training
self.reference_file = StringIO(rdp_training_sequences)
self.reference_file.seek(0)
self.taxonomy_file = StringIO(rdp_training_taxonomy)
self.taxonomy_file.seek(0)
self.training_dir = tempfile.mkdtemp(prefix='RdpTrainer_output_')
# Sequences for trained classifier
self.test_trained_input = rdp_trained_fasta.split("\n")
def tearDown(self):
rmtree(self.training_dir)
def test_parse_rdp_assignment(self):
seqid, direction, assignments = parse_rdp_assignment(
"X67228\t\t"
"Root\tnorank\t1.0\t"
"Bacteria\tdomain\t1.0\t"
"\"Proteobacteria\"\tphylum\t1.0\t"
"Alphaproteobacteria\tclass\t0.9\t"
"Rhizobiales\torder\t0.9\t"
"Rhizobiaceae\tfamily\t0.47\t"
"Rhizobium\tgenus\t0.46")
self.assertEqual(seqid, "X67228")
    def test_assign_taxonomy_short_sequence(self):
        """assign_taxonomy should return Unassignable if the sequence is too short
"""
assignments = assign_taxonomy([
'>MySeq 1',
'TTCCGGTTGATCCTGCCGGACCCGACTGCTATCCGGA',
])
self.assertEqual(assignments, {'MySeq 1': ('Unassignable', 1.0)})
def test_assign_taxonomy(self):
"""assign_taxonomy wrapper functions as expected
        This test may fail periodically, but failure should be rare.
"""
unverified_seq_ids = set(self.expected_assignments1.keys())
for i in range(self.num_trials):
obs_assignments = assign_taxonomy(self.test_input1)
for seq_id in list(unverified_seq_ids):
obs_lineage, obs_confidence = obs_assignments[seq_id]
exp_lineage = self.expected_assignments1[seq_id]
if (obs_lineage == exp_lineage):
unverified_seq_ids.remove(seq_id)
if not unverified_seq_ids:
break
messages = []
for seq_id in unverified_seq_ids:
            messages.append("Unable to verify after %s trials" % self.num_trials)
messages.append(" Sequence ID: %s" % seq_id)
messages.append(" Expected: %s" % self.expected_assignments1[seq_id])
messages.append(" Observed: %s" % obs_assignments[seq_id][0])
messages.append(" Confidence: %s" % obs_assignments[seq_id][1])
# make sure all taxonomic results were correct at least once
self.assertFalse(unverified_seq_ids, msg='\n'.join(messages))
def test_assign_taxonomy_alt_confidence(self):
"""assign_taxonomy wrapper functions as expected with alt confidence
"""
obs_assignments = assign_taxonomy(
self.test_input1, min_confidence=0.95)
for seq_id, assignment in obs_assignments.items():
obs_lineage, obs_confidence = assignment
exp_lineage = self.expected_assignments1[seq_id]
message = "Sequence ID: %s, assignment: %s" % (seq_id, assignment)
self.assertTrue(
exp_lineage.startswith(obs_lineage) or \
(obs_lineage == "Unclassified"),
msg=message,
)
self.assertTrue(obs_confidence >= 0.95, msg=message)
def test_assign_taxonomy_file_output(self):
""" assign_taxonomy wrapper writes correct file output when requested
        This function tests for successful completion of assign_taxonomy
when writing to file, that the lines in the file roughly look
correct by verifying how many are written (by zipping with
expected), and that each line starts with the correct seq id.
Actual testing of taxonomy data is performed elsewhere.
"""
_, output_fp = tempfile.mkstemp(prefix='RDPAssignTaxonomyTests',
suffix='.txt')
# convert the expected dict to a list of lines to match
# file output
expected_file_headers = self.expected_assignments1.keys()
expected_file_headers.sort()
        actual_return_value = assign_taxonomy(
            self.test_input1, min_confidence=0.95, output_fp=output_fp)
actual_file_output = list(open(output_fp))
actual_file_output.sort()
# remove the output_fp before running the tests, so if they
# fail the output file is still cleaned-up
remove(output_fp)
# None return value on write to file
        self.assertEqual(actual_return_value, None)
# check that each line starts with the correct seq_id -- not
# checking the taxonomies or confidences here as these are variable and
# tested elsewhere
        for a, e in zip(actual_file_output, expected_file_headers):
self.assertTrue(a.startswith(e))
def test_train_rdp_classifier(self):
results = train_rdp_classifier(
self.reference_file, self.taxonomy_file, self.training_dir)
exp_file_list = [
'bergeyTrainingTree.xml', 'genus_wordConditionalProbList.txt',
'logWordPrior.txt', 'RdpClassifier.properties',
'wordConditionalProbIndexArr.txt',
]
obs_file_list = listdir(self.training_dir)
exp_file_list.sort()
obs_file_list.sort()
self.assertEqual(obs_file_list, exp_file_list)
autogenerated_headers = {
'bergeyTree': 'bergeyTrainingTree',
'probabilityList': 'genus_wordConditionalProbList',
'wordPrior': 'logWordPrior',
'probabilityIndex': 'wordConditionalProbIndexArr',
}
for id, basename in autogenerated_headers.iteritems():
obs_header = results[id].readline()
exp_header = exp_training_header_template % basename
self.assertEqual(exp_header, obs_header)
def test_train_rdp_classifier_and_assign_taxonomy(self):
obs = train_rdp_classifier_and_assign_taxonomy(self.reference_file,
self.taxonomy_file, self.test_trained_input, min_confidence=0.80,
model_output_dir=self.training_dir)
exp = {'X67228': (
'Bacteria;Proteobacteria;Alphaproteobacteria;Rhizobiales;'
'Rhizobiaceae;Rhizobium', 1.0
)}
self.assertEqual(obs, exp)
def test_train_rdp_classifier_and_assign_taxonomy_no_model_output(self):
obs = train_rdp_classifier_and_assign_taxonomy(
self.reference_file, self.taxonomy_file, self.test_trained_input)
exp = {'X67228': (
'Bacteria;Proteobacteria;Alphaproteobacteria;Rhizobiales;'
'Rhizobiaceae;Rhizobium', 1.0
)}
self.assertEqual(obs, exp)
# Sample data copied from rdp_classifier-2.0, which is licensed under
# the GPL 2.0 and Copyright 2008 Michigan State University Board of
# Trustees
rdp_training_sequences = """>X67228 Bacteria;Proteobacteria;Alphaproteobacteria;Rhizobiales;Rhizobiaceae;Rhizobium
aacgaacgctggcggcaggcttaacacatgcaagtcgaacgctccgcaaggagagtggcagacgggtgagtaacgcgtgggaatctacccaaccctgcggaatagctctgggaaactggaattaataccgcatacgccctacgggggaaagatttatcggggatggatgagcccgcgttggattagctagttggtggggtaaaggcctaccaaggcgacgatccatagctggtctgagaggatgatcagccacattgggactgagacacggcccaaa
>X73443 Bacteria;Firmicutes;Clostridia;Clostridiales;Clostridiaceae;Clostridium
nnnnnnngagatttgatcctggctcaggatgaacgctggccggccgtgcttacacatgcagtcgaacgaagcgcttaaactggatttcttcggattgaagtttttgctgactgagtggcggacgggtgagtaacgcgtgggtaacctgcctcatacagggggataacagttagaaatgactgctaataccnnataagcgcacagtgctgcatggcacagtgtaaaaactccggtggtatgagatggacccgcgtctgattagctagttggtggggt
>AB004750 Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales;Enterobacteriaceae;Enterobacter
acgctggcggcaggcctaacacatgcaagtcgaacggtagcagaaagaagcttgcttctttgctgacgagtggcggacgggtgagtaatgtctgggaaactgcccgatggagggggataactactggaaacggtagctaataccgcataacgtcttcggaccaaagagggggaccttcgggcctcttgccatcggatgtgcccagatgggattagctagtaggtggggtaacggctcacctaggcgacgatccctagctggtctgagaggatgaccagccacactggaactgagacacggtccagactcctacgggaggcagcagtggggaatattgca
>xxxxxx Bacteria;Proteobacteria;Gammaproteobacteria;Pseudomonadales;Pseudomonadaceae;Pseudomonas
ttgaacgctggcggcaggcctaacacatgcaagtcgagcggcagcannnncttcgggaggctggcgagcggcggacgggtgagtaacgcatgggaacttacccagtagtgggggatagcccggggaaacccggattaataccgcatacgccctgagggggaaagcgggctccggtcgcgctattggatgggcccatgtcggattagttagttggtggggtaatggcctaccaaggcgacgatccgtagctggtctgagaggatgatcagccacaccgggactgagacacggcccggactcctacgggaggcagcagtggggaatattggacaatgggggcaaccctgatccagccatgccg
>AB004748 Bacteria;Proteobacteria;Gammaproteobacteria;Enterobacteriales;Enterobacteriaceae;Enterobacter
acgctggcggcaggcctaacacatgcaagtcgaacggtagcagaaagaagcttgcttctttgctgacgagtggcggacgggtgagtaatgtctgggaaactgcccgatggagggggataactactggaaacggtagctaataccgcataacgtcttcggaccaaagagggggaccttcgggcctcttgccatcggatgtgcccagatgggattagctagtaggtggggtaacggctcacctaggcgacgatccctagctggtctgagaggatgaccagccacactggaactgagacacggtccagactcctacgggaggcagcagtggggaatattgcacaatgggcgcaagcctgatgcagccatgccgcgtgtatgaagaaggccttcgggttg
>AB000278 Bacteria;Proteobacteria;Gammaproteobacteria;Vibrionales;Vibrionaceae;Photobacterium
caggcctaacacatgcaagtcgaacggtaanagattgatagcttgctatcaatgctgacgancggcggacgggtgagtaatgcctgggaatataccctgatgtgggggataactattggaaacgatagctaataccgcataatctcttcggagcaaagagggggaccttcgggcctctcgcgtcaggattagcccaggtgggattagctagttggtggggtaatggctcaccaaggcgacgatccctagctggtctgagaggatgatcagccacactggaactgagacacggtccagactcctacgggaggcagcagtggggaatattgcacaatgggggaaaccctgatgcagccatgccgcgtgta
>AB000390 Bacteria;Proteobacteria;Gammaproteobacteria;Vibrionales;Vibrionaceae;Vibrio
tggctcagattgaacgctggcggcaggcctaacacatgcaagtcgagcggaaacgantnntntgaaccttcggggnacgatnacggcgtcgagcggcggacgggtgagtaatgcctgggaaattgccctgatgtgggggataactattggaaacgatagctaataccgcataatgtctacggaccaaagagggggaccttcgggcctctcgcttcaggatatgcccaggtgggattagctagttggtgaggtaatggctcaccaaggcgacgatccctagctggtctgagaggatgatcagccacactggaactgag
"""
rdp_training_taxonomy = """\
1*Bacteria*0*0*domain
765*Firmicutes*1*1*phylum
766*Clostridia*765*2*class
767*Clostridiales*766*3*order
768*Clostridiaceae*767*4*family
769*Clostridium*768*5*genus
160*Proteobacteria*1*1*phylum
433*Gammaproteobacteria*160*2*class
586*Vibrionales*433*3*order
587*Vibrionaceae*586*4*family
588*Vibrio*587*5*genus
592*Photobacterium*587*5*genus
552*Pseudomonadales*433*3*order
553*Pseudomonadaceae*552*4*family
554*Pseudomonas*553*5*genus
604*Enterobacteriales*433*3*order
605*Enterobacteriaceae*604*4*family
617*Enterobacter*605*5*genus
161*Alphaproteobacteria*160*2*class
260*Rhizobiales*161*3*order
261*Rhizobiaceae*260*4*family
262*Rhizobium*261*5*genus"""
exp_training_header_template = "<trainsetNo>1</trainsetNo><version>version1</version><modversion>cogent</modversion><file>%s</file>\n"
rdp_trained_fasta = """>X67228
aacgaacgctggcggcaggcttaacacatgcaagtcgaacgctccgcaaggagagtggcagacgggtgagtaacgcgtgggaatctacccaaccctgcggaatagctctgggaaactggaattaataccgcatacgccctacgggggaaagatttatcggggatggatgagcccgcgttggattagctagttggtggggtaaaggcctaccaaggcgacgatccatagctggtctgagaggatgatcagccacattgggactgagacacggcccaaa
"""
rdp_sample_fasta = """>X67228 Bacteria;Proteobacteria;Alphaproteobacteria;Rhizobiales;Rhizobiaceae;Rhizobium
aacgaacgctggcggcaggcttaacacatgcaagtcgaacgctccgcaaggagagtggcagacgggtgagtaacgcgtgggaatctacccaaccctgcggaatagctctgggaaactggaattaataccgcatacgccctacgggggaaagatttatcggggatggatgagcccgcgttggattagctagttggtggggtaaaggcctaccaaggcgacgatccatagctggtctgagaggatgatcagccacattgggactgagacacggcccaaa
"""
rdp_sample_classification = """>X67228 reverse=false
Root; 1.0; Bacteria; 1.0; Proteobacteria; 1.0; Alphaproteobacteria; 1.0; Rhizobiales; 1.0; Rhizobiaceae; 1.0; Rhizobium; 0.95;
"""
rdp_test_fasta = """>AY800210 description field
TTCCGGTTGATCCTGCCGGACCCGACTGCTATCCGGATGCGACTAAGCCATGCTAGTCTAACGGATCTTCGGATCCGTGGCATACCGCTCTGTAACACGTAGATAACCTACCCTGAGGTCGGGGAAACTCCCGGGAAACTGGGCCTAATCCCCGATAGATAATTTGTACTGGAATGTCTTTTTATTGAAACCTCCGAGGCCTCAGGATGGGTCTGCGCCAGATTATGGTCGTAGGTGGGGTAACGGCCCACCTAGCCTTTGATCTGTACCGGACATGAGAGTGTGTGCCGGGAGATGGCCACTGAGACAAGGGGCCAGGCCCTACGGGGCGCAGCAGGCGCGAAAACTTCACAATGCCCGCAAGGGTGATGAGGGTATCCGAGTGCTACCTTAGCCGGTAGCTTTTATTCAGTGTAAATAGCTAGATGAATAAGGGGAGGGCAAGGCTGGTGCCAGCCGCCGCGGTAAAACCAGCTCCCGAGTGGTCGGGATTTTTATTGGGCCTAAAGCGTCCGTAGCCGGGCGTGCAAGTCATTGGTTAAATATCGGGTCTTAAGCCCGAACCTGCTAGTGATACTACACGCCTTGGGACCGGAAGAGGCAAATGGTACGTTGAGGGTAGGGGTGAAATCCTGTAATCCCCAACGGACCACCGGTGGCGAAGCTTGTTCAGTCATGAACAACTCTACACAAGGCGATTTGCTGGGACGGATCCGACGGTGAGGGACGAAACCCAGGGGAGCGAGCGGGATTAGATACCCCGGTAGTCCTGGGCGTAAACGATGCGAACTAGGTGTTGGCGGAGCCACGAGCTCTGTCGGTGCCGAAGCGAAGGCGTTAAGTTCGCCGCCAGGGGAGTACGGCCGCAAGGCTGAAACTTAAAGGAATTGGCGGGGGAGCAC
>EU883771
TGGCGTACGGCTCAGTAACACGTGGATAACTTACCCTTAGGACTGGGATAACTCTGGGAAACTGGGGATAATACTGGATATTAGGCTATGCCTGGAATGGTTTGCCTTTGAAATGTTTTTTTTCGCCTAAGGATAGGTCTGCGGCTGATTAGGTCGTTGGTGGGGTAATGGCCCACCAAGCCGATGATCGGTACGGGTTGTGAGAGCAAGGGCCCGGAGATGGAACCTGAGACAAGGTTCCAGACCCTACGGGGTGCAGCAGGCGCGAAACCTCCGCAATGTACGAAAGTGCGACGGGGGGATCCCAAGTGTTATGCTTTTTTGTATGACTTTTCATTAGTGTAAAAAGCTTTTAGAATAAGAGCTGGGCAAGACCGGTGCCAGCCGCCGCGGTAACACCGGCAGCTCGAGTGGTGACCACTTTTATTGGGCTTAAAGCGTTCGTAGCTTGATTTTTAAGTCTCTTGGGAAATCTCACGGCTTAACTGTGAGGCGTCTAAGAGATACTGGGAATCTAGGGACCGGGAGAGGTAAGAGGTACTTCAGGGGTAGAAGTGAAATTCTGTAATCCTTGAGGGACCACCGATGGCGAAGGCATCTTACCAGAACGGCTTCGACAGTGAGGAACGAAAGCTGGGGGAGCGAACGGGATTAGATACCCCGGTAGTCCCAGCCGTAAACTATGCGCGTTAGGTGTGCCTGTAACTACGAGTTACCGGGGTGCCGAAGTGAAAACGTGAAACGTGCCGCCTGGGAAGTACGGTCGCAAGGCTGAAACTTAAAGGAATTGGCGGGGGAGCACCACAACGGGTGGAGCCTGCGGTTTAATTGGACTCAACGCCGGGCAGCTCACCGGATAGGACAGCGGAATGATAGCCGGGCTGAAGACCTTGCTTGACCAGCTGAGA
>EF503699
AAGAATGGGGATAGCATGCGAGTCACGCCGCAATGTGTGGCATACGGCTCAGTAACACGTAGTCAACATGCCCAGAGGACGTGGACACCTCGGGAAACTGAGGATAAACCGCGATAGGCCACTACTTCTGGAATGAGCCATGACCCAAATCTATATGGCCTTTGGATTGGACTGCGGCCGATCAGGCTGTTGGTGAGGTAATGGCCCACCAAACCTGTAACCGGTACGGGCTTTGAGAGAAGGAGCCCGGAGATGGGCACTGAGACAAGGGCCCAGGCCCTATGGGGCGCAGCAGGCACGAAACCTCTGCAATAGGCGAAAGCTTGACAGGGTTACTCTGAGTGATGCCCGCTAAGGGTATCTTTTGGCACCTCTAAAAATGGTGCAGAATAAGGGGTGGGCAAGTCTGGTGTCAGCCGCCGCGGTAATACCAGCACCCCGAGTTGTCGGGACGATTATTGGGCCTAAAGCATCCGTAGCCTGTTCTGCAAGTCCTCCGTTAAATCCACCCGCTTAACGGATGGGCTGCGGAGGATACTGCAGAGCTAGGAGGCGGGAGAGGCAAACGGTACTCAGTGGGTAGGGGTAAAATCCTTTGATCTACTGAAGACCACCAGTGGTGAAGGCGGTTCGCCAGAACGCGCTCGAACGGTGAGGATGAAAGCTGGGGGAGCAAACCGGAATAGATACCCGAGTAATCCCAACTGTAAACGATGGCAACTCGGGGATGGGTTGGCCTCCAACCAACCCCATGGCCGCAGGGAAGCCGTTTAGCTCTCCCGCCTGGGGAATACGGTCCGCAGAATTGAACCTTAAAGGAATTTGGCGGGGAACCCCCACAAGGGGGAAAACCGTGCGGTTCAATTGGAATCCACCCCCCGGAAACTTTACCCGGGCGCG
>random_seq
AAGCTCCGTCGCGTGAGCTAAAAACCATGCTGACTTATGAGACCTAAAAGCGATGCGCCGACCTGACGATGCTCTGTTCAGTTTCATCACGATCACCGGTAGTCAGGGTACCCTCCAGACCGCGCATAGTGACTATGTTCCCGCACCTGTATATGTAATTCCCATTATACGTCTACGTTATGTAGTAAAGTTGCTCACGCCAGGCACAGTTTGTCTTGATACATAGGGTAGCTTAAGTCCCGTCCATTTCACCGCGATTGTAATAGACGAATCAGCAGTGGTGCAATCAAGTCCCAACAGTTATATTTCAAAAATCTTCCGATAGTCGTGGGCGAAGTTGTCAACCTACCTACCATGGCTATAAGGCCCAGTTTACTTCAGTTGAACGTGACGGTAACCCTACTGAGTGCACGATACCTGCTCAACAACGGCCCAAAACCCGTGCGACACATTGGGCACTACAATAATCTTAGAGGACCATGGATCTGGTGGGTGGACTGAAGCATATCCCAAAAGTGTCGTGAGTCCGTTATGCAATTGACTGAAACAGCCGTACCAGAGTTCGGATGACCTCTGGGTTGCTGCGGTACACACCCGGGTGCGGCTTCTGAAATAGAAAAGACTAAGCATCGGCCGCCTCACACGCCAC
>DQ260310
GATACCCCCGGAAACTGGGGATTATACCGGATATGTGGGGCTGCCTGGAATGGTACCTCATTGAAATGCTCCCGCGCCTAAAGATGGATCTGCCGCAGAATAAGTAGTTTGCGGGGTAAATGGCCACCCAGCCAGTAATCCGTACCGGTTGTGAAAACCAGAACCCCGAGATGGAAACTGAAACAAAGGTTCAAGGCCTACCGGGCACAACAAGCGCCAAAACTCCGCCATGCGAGCCATCGCGACGGGGGAAAACCAAGTACCACTCCTAACGGGGTGGTTTTTCCGAAGTGGAAAAAGCCTCCAGGAATAAGAACCTGGGCCAGAACCGTGGCCAGCCGCCGCCGTTACACCCGCCAGCTCGAGTTGTTGGCCGGTTTTATTGGGGCCTAAAGCCGGTCCGTAGCCCGTTTTGATAAGGTCTCTCTGGTGAAATTCTACAGCTTAACCTGTGGGAATTGCTGGAGGATACTATTCAAGCTTGAAGCCGGGAGAAGCCTGGAAGTACTCCCGGGGGTAAGGGGTGAAATTCTATTATCCCCGGAAGACCAACTGGTGCCGAAGCGGTCCAGCCTGGAACCGAACTTGACCGTGAGTTACGAAAAGCCAAGGGGCGCGGACCGGAATAAAATAACCAGGGTAGTCCTGGCCGTAAACGATGTGAACTTGGTGGTGGGAATGGCTTCGAACTGCCCAATTGCCGAAAGGAAGCTGTAAATTCACCCGCCTTGGAAGTACGGTCGCAAGACTGGAACCTAAAAGGAATTGGCGGGGGGACACCACAACGCGTGGAGCCTGGCGGTTTTATTGGGATTCCACGCAGACATCTCACTCAGGGGCGACAGCAGAAATGATGGGCAGGTTGATGACCTTGCTTGACAAGCTGAAAAGGAGGTGCAT
>EF503697
TAAAATGACTAGCCTGCGAGTCACGCCGTAAGGCGTGGCATACAGGCTCAGTAACACGTAGTCAACATGCCCAAAGGACGTGGATAACCTCGGGAAACTGAGGATAAACCGCGATAGGCCAAGGTTTCTGGAATGAGCTATGGCCGAAATCTATATGGCCTTTGGATTGGACTGCGGCCGATCAGGCTGTTGGTGAGGTAATGGCCCACCAAACCTGTAACCGGTACGGGCTTTGAGAGAAGTAGCCCGGAGATGGGCACTGAGACAAGGGCCCAGGCCCTATGGGGCGCAGCAGGCGCGAAACCTCTGCAATAGGCGAAAGCCTGACAGGGTTACTCTGAGTGATGCCCGCTAAGGGTATCTTTTGGCACCTCTAAAAATGGTGCAGAATAAGGGGTGGGCAAGTCTGGTGTCAGCCGCCGCGGTAATACCAGCACCCCGAGTTGTCGGGACGATTATTGGGCCTAAAGCATCCGTAGCCTGTTCTGCAAGTCCTCCGTTAAATCCACCTGCTCAACGGATGGGCTGCGGAGGATACCGCAGAGCTAGGAGGCGGGAGAGGCAAACGGTACTCAGTGGGTAGGGGTAAAATCCATTGATCTACTGAAGACCACCAGTGGCGAAGGCGGTTTGCCAGAACGCGCTCGACGGTGAGGGATGAAAGCTGGGGGAGCAAACCGGATTAGATACCCGGGGTAGTCCCAGCTGTAAACGGATGCAGACTCGGGTGATGGGGTTGGCTTCCGGCCCAACCCCAATTGCCCCCAGGCGAAGCCCGTTAAGATCTTGCCGCCCTGTCAGATGTCAGGGCCGCCAATACTCGAAACCTTAAAAGGAAATTGGGCGCGGGAAAAGTCACCAAAAGGGGGTTGAAACCCTGCGGGTTATATATTGTAAACC
>short_seq
TAAAATGACTAGCCTGCGAGTCAC
"""
rdp_expected_out = {
'AY800210 description field': 'Archaea;Euryarchaeota',
'EU883771': 'Archaea;Euryarchaeota;Methanomicrobia;Methanomicrobiales;Methanomicrobiaceae;Methanomicrobium',
'EF503699': 'Archaea;Crenarchaeota;Thermoprotei',
'random_seq': 'Bacteria',
'DQ260310': 'Archaea;Euryarchaeota;Methanobacteria;Methanobacteriales;Methanobacteriaceae;Methanosphaera',
'EF503697': 'Archaea;Crenarchaeota;Thermoprotei',
'short_seq': 'Unassignable',
}
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""Debugging flows for the console."""
import getpass
import os
import pdb
import pickle
import tempfile
import time
from grr.client import actions
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import worker
from grr.proto import flows_pb2
class ClientActionArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.ClientActionArgs
def GetActionArgsClass(self):
if self.action:
action_cls = actions.ActionPlugin.classes.get(self.action)
if action_cls is None:
raise ValueError("Client Action '%s' not known." % self.action)
# The required semantic type for this field is in the client action's
# in_rdfvalue.
return action_cls.in_rdfvalue
class ClientAction(flow.GRRFlow):
"""A Simple flow to execute any client action."""
args_type = ClientActionArgs
@flow.StateHandler(next_state="Print")
def Start(self):
if self.args.save_to:
if not os.path.isdir(self.args.save_to):
os.makedirs(self.args.save_to, 0700)
self.CallClient(self.args.action, request=self.args.action_args,
next_state="Print")
@flow.StateHandler()
def Print(self, responses):
"""Dump the responses to a pickle file or allow for breaking."""
if not responses.success:
      self.Log("ClientAction %s failed. Status: %s" %
               (self.args.action, responses.status))
if self.args.break_pdb:
pdb.set_trace()
if self.args.save_to:
self._SaveResponses(responses)
def _SaveResponses(self, responses):
"""Save responses to pickle files."""
if responses:
fd = None
try:
fdint, fname = tempfile.mkstemp(prefix="responses-",
dir=self.args.save_to)
fd = os.fdopen(fdint, "wb")
pickle.dump(responses, fd)
self.Log("Wrote %d responses to %s", len(responses), fname)
finally:
if fd: fd.close()
class ConsoleDebugFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.ConsoleDebugFlowArgs
def GetFlowArgsClass(self):
if self.flow:
flow_cls = flow.GRRFlow.classes.get(self.flow)
if flow_cls is None:
raise ValueError("Flow '%s' not known." % self.flow)
# The required semantic type for this field is in args_type.
return flow_cls.args_type
class ConsoleDebugFlow(flow.GRRFlow):
  """A Simple console flow to execute any flow and receive back responses."""
args_type = ConsoleDebugFlowArgs
@flow.StateHandler(next_state="Print")
def Start(self):
if self.args.save_to:
if not os.path.isdir(self.args.save_to):
os.makedirs(self.args.save_to, 0700)
self.CallFlow(self.args.flow, next_state="Print",
**self.args.args.ToDict())
@flow.StateHandler()
def Print(self, responses):
"""Dump the responses to a pickle file or allow for breaking."""
if not responses.success:
      self.Log("ConsoleDebugFlow %s failed. Status: %s" %
               (self.args.flow, responses.status))
self.Log("Got %d responses", len(responses))
for response in responses:
print response
if self.args.break_pdb:
pdb.set_trace()
if self.args.save_to:
self._SaveResponses(responses)
def _SaveResponses(self, responses):
"""Save responses to pickle files."""
if responses:
fd = None
try:
fdint, fname = tempfile.mkstemp(prefix="responses-",
dir=self.args.save_to)
fd = os.fdopen(fdint, "wb")
pickle.dump(responses, fd)
self.Log("Wrote %d responses to %s", len(responses), fname)
finally:
if fd: fd.close()
def StartFlowAndWorker(client_id, flow_name, **kwargs):
"""Launches the flow and worker and waits for it to finish.
Args:
    client_id: The client common name we issue the request to.
flow_name: The name of the flow to launch.
**kwargs: passthrough to flow.
Returns:
A flow session id.
Note: you need raw access to run this flow as it requires running a worker.
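  Example (hypothetical client id and client action, for illustration only):
    StartFlowAndWorker("C.1234567890abcdef", "ClientAction",
                       action="ListProcesses", break_pdb=True)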
"""
queue = rdfvalue.RDFURN("DEBUG-%s-" % getpass.getuser())
session_id = flow.GRRFlow.StartFlow(client_id=client_id,
flow_name=flow_name, queue=queue,
**kwargs)
# Empty token, only works with raw access.
worker_thrd = worker.GRRWorker(
queue=queue, token=access_control.ACLToken(username="test"),
threadpool_size=1)
while True:
try:
worker_thrd.RunOnce()
except KeyboardInterrupt:
print "exiting"
worker_thrd.thread_pool.Join()
break
time.sleep(2)
with aff4.FACTORY.Open(session_id) as flow_obj:
with flow_obj.GetRunner() as runner:
if not runner.IsRunning():
break
# Terminate the worker threads
worker_thrd.thread_pool.Join()
return session_id
def TestClientActionWithWorker(client_id, client_action, print_request=False,
break_pdb=True, **kwargs):
"""Run a client action on a client and break on return."""
action_cls = actions.ActionPlugin.classes[client_action]
request = action_cls.in_rdfvalue(**kwargs)
if print_request:
print str(request)
StartFlowAndWorker(client_id, flow_name="ClientAction", action=client_action,
break_pdb=break_pdb, action_args=request)
def WakeStuckFlow(session_id):
"""Wake up stuck flows.
A stuck flow is one which is waiting for the client to do something, but the
client requests have been removed from the client queue. This can happen if
the system is too loaded and the client messages have TTLed out. In this case
we reschedule the client requests for this session.
Args:
session_id: The session for the flow to wake.
Returns:
The total number of client messages re-queued.
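  Example (hypothetical session id, for illustration only):
    WakeStuckFlow("aff4:/C.1234567890abcdef/flows/W:ABCDEF")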
"""
session_id = rdfvalue.SessionID(session_id)
woken = 0
checked_pending = False
with queue_manager.QueueManager() as manager:
for request, responses in manager.FetchRequestsAndResponses(session_id):
# We need to check if there are client requests pending.
if not checked_pending:
task = manager.Query(request.client_id,
task_id="task:%s" % request.request.task_id)
if task:
# Client has tasks pending already.
return
checked_pending = True
if not responses or responses[-1].type != rdfvalue.GrrMessage.Type.STATUS:
manager.QueueClientMessage(request.request)
woken += 1
if responses and responses[-1].type == rdfvalue.GrrMessage.Type.STATUS:
manager.QueueNotification(session_id)
return woken
|
|
'''
This file is part of ConfigShell.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os
import six
import sys
from pyparsing import (alphanums, Empty, Group, OneOrMore, Optional,
ParseResults, Regex, Suppress, Word)
from . import console
from . import log
from . import prefs
from .node import ConfigNode, ExecutionError
# A fix for frozen packages
import signal
def handle_sigint(signum, frame):
'''
Raise KeyboardInterrupt when we get a SIGINT.
This is normally done by python, but even after patching
pyinstaller 1.4 to ignore SIGINT in the C wrapper code, we
still have to do the translation ourselves.
'''
raise KeyboardInterrupt
try:
signal.signal(signal.SIGINT, handle_sigint)
except Exception:
# In a thread, this fails
pass
if sys.stdout.isatty():
import readline
tty=True
else:
tty=False
# remember the original setting
oldTerm = os.environ.get('TERM')
os.environ['TERM'] = ''
import readline
    # restore the original TERM setting
    if oldTerm is not None:
os.environ['TERM'] = oldTerm
del oldTerm
# Pyparsing helper to group the location of a token and its value
# http://stackoverflow.com/questions/18706631/pyparsing-get-token-location-in-results-name
locator = Empty().setParseAction(lambda s, l, t: l)
def locatedExpr(expr):
return Group(locator('location') + expr('value'))
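# locatedExpr(expr) yields a Group whose 'location' result holds the character
# offset of the token in the parsed line and whose 'value' result holds the
# matched token itself; the completion code below relies on these named results.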
class ConfigShell(object):
'''
This is a simple CLI command interpreter that can be used both in
interactive or non-interactive modes.
It is based on a tree of ConfigNode objects, which can be navigated.
The ConfigShell object itself provides global navigation commands.
It also handles the parsing of local commands (specific to a certain
ConfigNode) according to the ConfigNode commands definitions.
If the ConfigNode provides hooks for possible parameter values in a given
context, then the ConfigShell will also provide command-line completion
using the TAB key. If no completion hooks are available from the
ConfigNode, the completion function will still be able to display some help
and general syntax advice (as much as the ConfigNode will provide).
    Interactive sessions can be saved/loaded automatically by ConfigShell if a
    writable session directory is supplied. This includes command-line history,
    the current node and global parameters.
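    Typical use (illustrative sketch only; the node class below is hypothetical):
        shell = ConfigShell('~/.myshell')
        MyRootNode(shell=shell)     # a ConfigNode subclass acting as root
        shell.run_interactive()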
'''
default_prefs = {'color_path': 'magenta',
'color_command': 'cyan',
'color_parameter': 'magenta',
'color_keyword': 'cyan',
'logfile': None,
'loglevel_console': 'info',
'loglevel_file': 'debug9',
'color_mode': True,
'prompt_length': 30,
'tree_max_depth': 0,
'tree_status_mode': True,
'tree_round_nodes': True,
'tree_show_root': True
}
_completion_help_topic = ''
_current_parameter = ''
_current_token = ''
_current_completions = []
def __init__(self, preferences_dir=None):
'''
Creates a new ConfigShell.
@param preferences_dir: Directory to load/save preferences from/to
@type preferences_dir: str
'''
self._current_node = None
self._root_node = None
self._exit = False
# Grammar of the command line
command = locatedExpr(Word(alphanums + '_'))('command')
var = Word(alphanums + '?;&*$!#,=_\+/.<>()~@:-%[]')
value = var
keyword = Word(alphanums + '_\-')
kparam = locatedExpr(keyword + Suppress('=') + Optional(value, default=''))('kparams*')
pparam = locatedExpr(var)('pparams*')
parameter = kparam | pparam
parameters = OneOrMore(parameter)
bookmark = Regex('@([A-Za-z0-9:_.]|-)+')
pathstd = Regex('([A-Za-z0-9:_.\[\]]|-)*' + '/' + '([A-Za-z0-9:_.\[\]/]|-)*') \
| '..' | '.'
path = locatedExpr(bookmark | pathstd | '*')('path')
parser = Optional(path) + Optional(command) + Optional(parameters)
self._parser = parser
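        # With this grammar, a line such as "/some/path create name=disk size=1M"
        # parses into path='/some/path', command='create' and two kparams
        # (name=disk, size=1M); bare words after the command become pparams.
        # (Illustrative input only; the actual commands come from the ConfigNode.)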
if tty:
readline.set_completer_delims('\t\n ~!#$^&(){}\|;\'",?')
readline.set_completion_display_matches_hook(
self._display_completions)
self.log = log.Log()
if preferences_dir is not None:
preferences_dir = os.path.expanduser(preferences_dir)
if not os.path.exists(preferences_dir):
os.makedirs(preferences_dir)
self._prefs_file = preferences_dir + '/prefs.bin'
self.prefs = prefs.Prefs(self._prefs_file)
self._cmd_history = preferences_dir + '/history.txt'
self._save_history = True
if not os.path.isfile(self._cmd_history):
try:
open(self._cmd_history, 'w').close()
except:
self.log.warning("Cannot create history file %s, "
% self._cmd_history
+ "command history will not be saved.")
self._save_history = False
if os.path.isfile(self._cmd_history) and tty:
try:
readline.read_history_file(self._cmd_history)
except IOError:
self.log.warning("Cannot read command history file %s."
% self._cmd_history)
if self.prefs['logfile'] is None:
self.prefs['logfile'] = preferences_dir + '/' + 'log.txt'
self.prefs.autosave = True
else:
self.prefs = prefs.Prefs()
self._save_history = False
try:
self.prefs.load()
except IOError:
self.log.warning("Could not load preferences file %s."
% self._prefs_file)
for pref, value in six.iteritems(self.default_prefs):
if pref not in self.prefs:
self.prefs[pref] = value
self.con = console.Console()
# Private methods
def _display_completions(self, substitution, matches, max_length):
'''
Display the completions. Invoked by readline.
@param substitution: string to complete
@param matches: list of possible matches
@param max_length: length of the longest matching item
'''
x_orig = self.con.get_cursor_xy()[0]
width = self.con.get_width()
max_length += 2
def just(text):
'''
Justifies the text to the max match length.
'''
return text.ljust(max_length, " ")
# Sort and colorize the matches
if self._current_parameter:
keywords = []
values = []
for match in matches:
if match.endswith('='):
keywords.append(
self.con.render_text(
just(match), self.prefs['color_keyword']))
elif '=' in match:
_, _, value = match.partition('=')
values.append(
self.con.render_text(
just(value), self.prefs['color_parameter']))
else:
values.append(
self.con.render_text(
just(match), self.prefs['color_parameter']))
matches = values + keywords
else:
paths = []
commands = []
for match in matches:
if '/' in match or match.startswith('@') or '*' in match:
paths.append(
self.con.render_text(
just(match), self.prefs['color_path']))
else:
commands.append(
self.con.render_text(
just(match), self.prefs['color_command']))
matches = paths + commands
# Display the possible completions in columns
self.con.raw_write("\n")
if matches:
if max_length < width:
nr_cols = width // max_length
else:
nr_cols = 1
for i in six.moves.range(0, len(matches), nr_cols):
self.con.raw_write(''.join(matches[i:i+nr_cols]))
self.con.raw_write('\n')
# Display the prompt and the command line
line = "%s%s" % (self._get_prompt(), readline.get_line_buffer())
self.con.raw_write("%s" % line)
# Move the cursor where it should be
y_pos = self.con.get_cursor_xy()[1]
self.con.set_cursor_xy(x_orig, y_pos)
def _complete_token_command(self, text, path, command):
'''
Completes a partial command token, which could also be the beginning
of a path.
@param path: Path of the target ConfigNode.
@type path: str
@param command: The command (if any) found by the parser.
@type command: str
@param text: Current text being typed by the user.
@type text: str
@return: Possible completions for the token.
@rtype: list of str
'''
completions = []
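        # For instance, typing 'cr<TAB>' at a node offers every command that
        # starts with 'cr' as well as any child node whose name starts with
        # 'cr'; matching bookmarks ('@...') are appended last. (Illustrative.)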
target = self._current_node.get_node(path)
commands = target.list_commands()
self.log.debug("Completing command token among %s" % str(commands))
# Start with the possible commands
for command in commands:
if command.startswith(text):
completions.append(command)
if len(completions) == 1:
completions[0] = completions[0] + ' '
# No identified path yet on the command line, this might be it
if not path:
path_completions = [child.name + '/'
for child in self._current_node.children
if child.name.startswith(text)]
if not text:
path_completions.append('/')
if len(self._current_node.children) > 1:
path_completions.append('* ')
if path_completions:
if completions:
self._current_token = \
self.con.render_text(
'path', self.prefs['color_path']) \
+ '|' \
+ self.con.render_text(
'command', self.prefs['color_command'])
else:
self._current_token = \
self.con.render_text(
'path', self.prefs['color_path'])
else:
self._current_token = \
self.con.render_text(
'command', self.prefs['color_command'])
if len(path_completions) == 1 and \
not path_completions[0][-1] in [' ', '*'] and \
not self._current_node.get_node(path_completions[0]).children:
path_completions[0] = path_completions[0] + ' '
completions.extend(path_completions)
else:
self._current_token = \
self.con.render_text(
'command', self.prefs['color_command'])
# Even a bookmark
bookmarks = ['@' + bookmark for bookmark in self.prefs['bookmarks']
if bookmark.startswith("%s" % text.lstrip('@'))]
self.log.debug("Found bookmarks %s." % str(bookmarks))
if bookmarks:
completions.extend(bookmarks)
# We are done
return completions
def _complete_token_path(self, text):
'''
Completes a partial path token.
@param text: Current text being typed by the user.
@type text: str
@return: Possible completions for the token.
@rtype: list of str
'''
completions = []
if text.endswith('.'):
text = text + '/'
(basedir, slash, partial_name) = text.rpartition('/')
self.log.debug("Got basedir=%s, partial_name=%s"
% (basedir, partial_name))
basedir = basedir + slash
target = self._current_node.get_node(basedir)
names = [child.name for child in target.children]
# Iterall path completion
if names and partial_name in ['', '*']:
            # Do not suggest iterall ('*') for a path that has only one
            # child, so that a single TAB completes directly to that
            # child's name.
if len(names) > 1:
completions.append("%s* " % basedir)
for name in names:
num_matches = 0
if name.startswith(partial_name):
num_matches += 1
if num_matches == 1:
completions.append("%s%s/" % (basedir, name))
else:
completions.append("%s%s" % (basedir, name))
# Bookmarks
bookmarks = ['@' + bookmark for bookmark in self.prefs['bookmarks']
if bookmark.startswith("%s" % text.lstrip('@'))]
self.log.debug("Found bookmarks %s." % str(bookmarks))
if bookmarks:
completions.extend(bookmarks)
if len(completions) == 1:
self.log.debug("One completion left.")
if not completions[0].endswith("* "):
if not self._current_node.get_node(completions[0]).children:
completions[0] = completions[0].rstrip('/') + ' '
self._current_token = \
self.con.render_text(
'path', self.prefs['color_path'])
return completions
def _complete_token_pparam(self, text, path, command, pparams, kparams):
'''
        Completes a positional parameter token, which can also be the keyword
        part of a kparam token: until the '=' sign is on the line, the
        parser cannot tell the difference.
@param path: Path of the target ConfigNode.
@type path: str
@param command: The command (if any) found by the parser.
@type command: str
@param pparams: Positional parameters from commandline.
@type pparams: list of str
@param kparams: Keyword parameters from commandline.
@type kparams: dict of str:str
@param text: Current text being typed by the user.
@type text: str
@return: Possible completions for the token.
@rtype: list of str
'''
completions = []
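        # For example, with a command signature of (name, size) and the line
        # "create na<TAB>", both the value completions returned by the node's
        # completion method and the keyword form 'name=' are offered.
        # (Hypothetical parameter names, for illustration only.)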
target = self._current_node.get_node(path)
cmd_params, free_pparams, free_kparams = \
target.get_command_signature(command)
current_parameters = {}
for index in range(len(pparams)):
if index < len(cmd_params):
current_parameters[cmd_params[index]] = pparams[index]
for key, value in six.iteritems(kparams):
current_parameters[key] = value
self._completion_help_topic = command
completion_method = target.get_completion_method(command)
self.log.debug("Command %s accepts parameters %s."
% (command, cmd_params))
# Do we still accept positional params ?
pparam_ok = True
for index in range(len(cmd_params)):
param = cmd_params[index]
if param in kparams:
if index <= len(pparams):
pparam_ok = False
self.log.debug(
"No more possible pparams (because of kparams).")
break
elif (text.strip() == '' and len(pparams) == len(cmd_params)) \
or (len(pparams) > len(cmd_params)):
pparam_ok = False
self.log.debug("No more possible pparams.")
break
else:
if len(cmd_params) == 0:
pparam_ok = False
self.log.debug("No more possible pparams (none exists)")
# If we do, find out which one we are completing
if pparam_ok:
if not text:
pparam_index = len(pparams)
else:
pparam_index = len(pparams) - 1
self._current_parameter = cmd_params[pparam_index]
self.log.debug("Completing pparam %s." % self._current_parameter)
if completion_method:
pparam_completions = completion_method(
current_parameters, text, self._current_parameter)
if pparam_completions is not None:
completions.extend(pparam_completions)
# Add the keywords for parameters not already on the line
if text:
offset = 1
else:
offset = 0
keyword_completions = [param + '=' \
for param in cmd_params[len(pparams)-offset:] \
if param not in kparams \
if param.startswith(text)]
self.log.debug("Possible pparam values are %s."
% str(completions))
self.log.debug("Possible kparam keywords are %s."
% str(keyword_completions))
if keyword_completions:
if self._current_parameter:
self._current_token = \
self.con.render_text(
self._current_parameter, \
self.prefs['color_parameter']) \
+ '|' \
+ self.con.render_text(
'keyword=', self.prefs['color_keyword'])
else:
self._current_token = \
self.con.render_text(
'keyword=', self.prefs['color_keyword'])
else:
if self._current_parameter:
self._current_token = \
self.con.render_text(
self._current_parameter,
self.prefs['color_parameter'])
else:
self._current_token = ''
completions.extend(keyword_completions)
if free_kparams or free_pparams:
self.log.debug("Command has free [kp]params.")
if completion_method:
self.log.debug("Calling completion method for free params.")
free_completions = completion_method(
current_parameters, text, '*')
do_free_pparams = False
do_free_kparams = False
for free_completion in free_completions:
if free_completion.endswith("="):
do_free_kparams = True
else:
do_free_pparams = True
if do_free_pparams:
self._current_token = \
self.con.render_text(
free_pparams, self.prefs['color_parameter']) \
+ '|' + self._current_token
self._current_token = self._current_token.rstrip('|')
if not self._current_parameter:
self._current_parameter = 'free_parameter'
if do_free_kparams:
if not 'keyword=' in self._current_token:
self._current_token = \
self.con.render_text(
'keyword=', self.prefs['color_keyword']) \
+ '|' + self._current_token
self._current_token = self._current_token.rstrip('|')
if not self._current_parameter:
self._current_parameter = 'free_parameter'
completions.extend(free_completions)
self.log.debug("Found completions %s." % str(completions))
return completions
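# A minimal sketch of the keyword_completions expression above, assuming a
# hypothetical command signature ['name', 'size'] where 'name' was already
# given positionally and nothing is being typed yet (offset == 0):
#
#   >>> cmd_params, pparams, kparams, text = ['name', 'size'], ['foo'], {}, ''
#   >>> [p + '=' for p in cmd_params[len(pparams):]
#   ...  if p not in kparams if p.startswith(text)]
#   ['size=']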
def _complete_token_kparam(self, text, path, command, pparams, kparams):
'''
Completes a keyword=value parameter token.
@param path: Path of the target ConfigNode.
@type path: str
@param command: The command (if any) found by the parser.
@type command: str
@param pparams: Positional parameters from commandline.
@type pparams: list of str
@param kparams: Keyword parameters from commandline.
@type kparams: dict of str:str
@param text: Current text being typed by the user.
@type text: str
@return: Possible completions for the token.
@rtype: list of str
'''
self.log.debug("Called for text='%s'" % text)
target = self._current_node.get_node(path)
cmd_params = target.get_command_signature(command)[0]
self.log.debug("Command %s accepts parameters %s."
% (command, cmd_params))
(keyword, sep, current_value) = text.partition('=')
self.log.debug("Completing '%s' for kparam %s"
% (current_value, keyword))
self._current_parameter = keyword
current_parameters = {}
for index in range(len(pparams)):
current_parameters[cmd_params[index]] = pparams[index]
for key, value in six.iteritems(kparams):
current_parameters[key] = value
completion_method = target.get_completion_method(command)
if completion_method:
completions = completion_method(
current_parameters, current_value, keyword)
if completions is None:
completions = []
self._current_token = \
self.con.render_text(
self._current_parameter, self.prefs['color_parameter'])
self.log.debug("Found completions %s." % str(completions))
return ["%s=%s" % (keyword, completion) for completion in completions]
def _complete(self, text, state):
'''
Text completion method, directly called by readline.
Finds out what token the user wants completion for, and calls the
_dispatch_completion() to get the possible completions.
Then implements the state system needed by readline to return those
possible completions to readline.
@param text: The text to complete.
@type text: str
@returns: The next possible completion for text.
@rtype: str
'''
if state == 0:
cmdline = readline.get_line_buffer()
self._current_completions = []
self._completion_help_topic = ''
self._current_parameter = ''
(parse_results, path, command, pparams, kparams) = \
self._parse_cmdline(cmdline)
beg = readline.get_begidx()
end = readline.get_endidx()
current_token = None
if beg == end:
# No text under the cursor, fake it so that the parser
# result_trees gives us a token name on a second parser call
self.log.debug("Faking text entry on commandline.")
parse_results = self._parse_cmdline(cmdline + 'x')[0]
if parse_results.command.value == 'x':
current_token = 'command'
elif 'x' in [x.value for x in parse_results.pparams]:
current_token = 'pparam'
elif 'x' in [x.value for x in parse_results.kparams]:
current_token = 'kparam'
elif path and beg == parse_results.path.location:
current_token = 'path'
elif command and beg == parse_results.command.location:
current_token = 'command'
elif pparams and beg in [p.location for p in parse_results.pparams]:
current_token = 'pparam'
elif kparams and beg in [k.location for k in parse_results.kparams]:
current_token = 'kparam'
self._current_completions = \
self._dispatch_completion(path, command,
pparams, kparams,
text, current_token)
self.log.debug("Returning completions %s to readline."
% str(self._current_completions))
if state < len(self._current_completions):
return self._current_completions[state]
else:
return None
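# readline calls the completer repeatedly with state=0, 1, 2, ... until it
# returns None.  A stand-alone illustration of that contract (the word list
# is made up; this is not the real completer):
#
#   >>> words = ['cd', 'create', 'clearconfig']
#   >>> def complete(text, state):
#   ...     matches = [w for w in words if w.startswith(text)]
#   ...     return matches[state] if state < len(matches) else None
#   >>> [complete('c', s) for s in range(4)]
#   ['cd', 'create', 'clearconfig', None]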
def _dispatch_completion(self, path, command,
pparams, kparams, text, current_token):
'''
This method takes care of dispatching the current completion request
from readline (via the _complete() method) to the relevant token
completion methods. It has to cope with the fact that the command line
is still incomplete, so the parser can only do so much of a job. For
instance, until the '=' sign is on the command line, there is no way to
distinguish a positional parameter from the beginning of a keyword=value
parameter.
@param path: Path of the target ConfigNode.
@type path: str
@param command: The command (if any) found by the parser.
@type command: str
@param pparams: Positional parameters from commandline.
@type pparams: list of str
@param kparams: Keyword parameters from commandline.
@type kparams: dict of str:str
@param text: Current text being typed by the user.
@type text: str
@param current_token: Name of token to complete.
@type current_token: str
@return: Possible completions for the token.
@rtype: list of str
'''
completions = []
self.log.debug("Dispatching completion for %s token. "
% current_token
+ "text='%s', path='%s', command='%s', "
% (text, path, command)
+ "pparams=%s, kparams=%s"
% (str(pparams), str(kparams)))
(path, iterall) = path.partition('*')[:2]
if iterall:
try:
target = self._current_node.get_node(path)
except ValueError:
cpl_path = path
else:
children = target.children
if children:
cpl_path = children[0].path
else:
cpl_path = path
if current_token == 'command':
completions = self._complete_token_command(text, cpl_path, command)
elif current_token == 'path':
completions = self._complete_token_path(text)
elif current_token == 'pparam':
completions = \
self._complete_token_pparam(text, cpl_path, command,
pparams, kparams)
elif current_token == 'kparam':
completions = \
self._complete_token_kparam(text, cpl_path, command,
pparams, kparams)
else:
self.log.debug("Cannot complete unknown token %s."
% current_token)
return completions
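# The iterall split above is a plain str.partition; for example:
#
#   >>> '/disks/*'.partition('*')[:2]
#   ('/disks/', '*')
#
# so completion is then computed against the first child's path (if any),
# while a path without '*' keeps iterall == '' and is used unchanged.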
def _get_prompt(self):
'''
Returns the command prompt string.
'''
prompt_path = self._current_node.path
prompt_length = self.prefs['prompt_length']
if prompt_length and prompt_length < len(prompt_path):
half = (prompt_length - 3) // 2
prompt_path = "%s...%s" \
% (prompt_path[:half], prompt_path[-half:])
if 'prompt_msg' in dir(self._current_node):
return "%s%s> " % (self._current_node.prompt_msg(),
prompt_path)
else:
return "%s> " % prompt_path
def _cli_loop(self):
'''
Starts the configuration shell interactive loop, which:
- Goes to the last current path
- Displays the prompt
- Waits for user input
- Runs user command
'''
while not self._exit:
try:
readline.parse_and_bind("tab: complete")
readline.set_completer(self._complete)
cmdline = six.moves.input(self._get_prompt()).strip()
except EOFError:
self.con.raw_write('exit\n')
cmdline = "exit"
self.run_cmdline(cmdline)
if self._save_history:
try:
readline.write_history_file(self._cmd_history)
except IOError:
self.log.warning(
"Cannot write to command history file %s." \
% self._cmd_history)
self.log.warning(
"Saving command history has been disabled!")
self._save_history = False
def _parse_cmdline(self, line):
'''
Parses the command line entered by the user. This is a wrapper around
the actual pyparsing parser that pre-chews the result trees to
cleanly extract the tokens we care about (parameters, path, command).
@param line: The command line to parse.
@type line: str
@return: (result_trees, path, command, pparams, kparams),
pparams being positional parameters and kparams the keyword=value.
@rtype: (pyparsing.ParseResults, str, str, list, dict)
'''
self.log.debug("Parsing commandline.")
path = ''
command = ''
pparams = []
kparams = {}
parse_results = self._parser.parseString(line)
if isinstance(parse_results.path, ParseResults):
path = parse_results.path.value
if isinstance(parse_results.command, ParseResults):
command = parse_results.command.value
if isinstance(parse_results.pparams, ParseResults):
pparams = [pparam.value for pparam in parse_results.pparams]
if isinstance(parse_results.kparams, ParseResults):
kparams = dict([kparam.value for kparam in parse_results.kparams])
self.log.debug("Parse gave path='%s' command='%s' " % (path, command)
+ "pparams=%s " % str(pparams)
+ "kparams=%s" % str(kparams))
return (parse_results, path, command, pparams, kparams)
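# Rough illustration of the tuple returned above (exact values depend on the
# pyparsing grammar built elsewhere; the command line is hypothetical):
#
#   '/disks create 10G name=vol1'  parses to roughly
#   path='/disks', command='create',
#   pparams=['10G'], kparams={'name': 'vol1'}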
def _execute_command(self, path, command, pparams, kparams):
'''
Calls the target node to execute a command.
Behavior depends on the target node command's result:
- An 'EXIT' string will trigger shell exit.
- None will do nothing.
- A ConfigNode object will trigger a current_node change.
@param path: Path of the target node.
@type path: str
@param command: The command to call.
@type command: str
@param pparams: The positional parameters to use.
@type pparams: list
@param kparams: The keyword=value parameters to use.
@type kparams: dict
'''
if path.endswith('*'):
path = path.rstrip('*')
iterall = True
else:
iterall = False
if not path:
path = '.'
if not command:
if iterall:
command = 'ls'
else:
command = 'cd'
pparams = ['.']
try:
target = self._current_node.get_node(path)
except ValueError as msg:
raise ExecutionError(str(msg))
result = None
if not iterall:
targets = [target]
else:
targets = target.children
for target in targets:
if iterall:
self.con.display("[%s]" % target.path)
result = target.execute_command(command, pparams, kparams)
self.log.debug("Command execution returned %r" % result)
if isinstance(result, ConfigNode):
self._current_node = result
elif result == 'EXIT':
self._exit = True
elif result is not None:
raise ExecutionError("Unexpected result: %r" % result)
# Public methods
def run_cmdline(self, cmdline):
'''
Runs the specified command. Global commands are checked first,
then local commands from the current node.
Command syntax is:
[PATH] COMMAND [POSITIONAL_PARAMETER]+ [PARAMETER=VALUE]+
@param cmdline: The command line to run
@type cmdline: str
'''
if cmdline:
self.log.verbose("Running command line '%s'." % cmdline)
path, command, pparams, kparams = self._parse_cmdline(cmdline)[1:]
self._execute_command(path, command, pparams, kparams)
def run_script(self, script_path, exit_on_error=True):
'''
Runs the script located at script_path.
Script runs always start from the root context.
@param script_path: File path of the script to run
@type script_path: str
@param exit_on_error: If True, stops the run if an error occurs
@type exit_on_error: bool
'''
# A 'with' block both closes the file and avoids masking an IOError
# from open() with a NameError raised by a finally clause.
with open(script_path, 'r') as script_fd:
    self.run_stdin(script_fd, exit_on_error)
def run_stdin(self, file_descriptor=sys.stdin, exit_on_error=True):
'''
Reads commands to be run from a file descriptor, stdin by default.
The run always starts from the root context.
@param file_descriptor: The file descriptor to read commands from
@type file_descriptor: file object
@param exit_on_error: If True, stops the run if an error occurs
@type exit_on_error: bool
'''
self._current_node = self._root_node
for cmdline in file_descriptor:
try:
self.run_cmdline(cmdline.strip())
except Exception as msg:
self.log.error(msg)
if exit_on_error is True:
raise ExecutionError("Aborting run on error.")
self.log.exception("Keep running after an error.")
def run_interactive(self):
'''
Starts interactive CLI mode.
'''
history = self.prefs['path_history']
index = self.prefs['path_history_index']
if history and index:
if index < len(history):
try:
target = self._root_node.get_node(history[index])
except ValueError:
self._current_node = self._root_node
else:
self._current_node = target
while True:
try:
old_completer = readline.get_completer()
self._cli_loop()
break
except KeyboardInterrupt:
self.con.raw_write('\n')
finally:
readline.set_completer(old_completer)
def attach_root_node(self, root_node):
'''
@param root_node: The root ConfigNode object
@type root_node: ConfigNode
'''
self._current_node = root_node
self._root_node = root_node
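# ---------------------------------------------------------------------------
# Minimal usage sketch (constructor arguments elided; MyRootNode is a
# hypothetical ConfigNode subclass, not part of this module):
#
#   shell = ConfigShell(...)
#   shell.attach_root_node(MyRootNode(shell))
#   shell.run_cmdline('ls')      # one-shot command
#   shell.run_interactive()      # readline-driven CLI loop
# ---------------------------------------------------------------------------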
|
|
import unittest
from test.support import check_syntax_error, run_unittest
import warnings
warnings.filterwarnings("ignore", r"import \*", SyntaxWarning, "<test string>")
warnings.filterwarnings("ignore", r"import \*", SyntaxWarning, "<string>")
class ScopeTests(unittest.TestCase):
def testSimpleNesting(self):
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testExtraNesting(self):
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testSimpleAndRebinding(self):
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingGlobalNoFree(self):
def make_adder4(): # XXX add extra level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
self.assertEqual(adder(1), 2)
global_x = 10
self.assertEqual(adder(-2), 8)
def testNestingThroughClass(self):
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingPlusFreeRefToGlobal(self):
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
self.assertEqual(inc(1), 11) # there's only one global
self.assertEqual(plus10(-2), 8)
def testNearestEnclosingScope(self):
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
self.assertEqual(test_func(5), 47)
def testMixedFreevarsAndCellvars(self):
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 39)
def testFreeVarInMethod(self):
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
def testCellIsKwonlyArg(self):
# Issue 1409: Initialisation of a cell value,
# when it comes from a keyword-only parameter
def foo(*, a=17):
def bar():
return a + 5
return bar() + 3
self.assertEqual(foo(a=42), 50)
self.assertEqual(foo(), 25)
def testRecursion(self):
def f(x):
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
raise ValueError("x must be >= 0")
self.assertEqual(f(6), 720)
def testUnoptimizedNamespaces(self):
check_syntax_error(self, """\
def unoptimized_clash1(strip):
def f(s):
from sys import *
return getrefcount(s) # ambiguity: free or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from sys import *
def f(s):
return getrefcount(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from sys import *
def g():
def f(s):
return getrefcount(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """\
def f(x):
def g():
return x
del x # can't del name
""")
check_syntax_error(self, """\
def f():
def g():
from sys import *
return getrefcount # global or local?
""")
def testLambdas(self):
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
self.assertEqual(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 18)
def testUnboundLocal(self):
def errorInOuter():
print(y)
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
try:
errorInOuter()
except UnboundLocalError:
pass
else:
self.fail()
try:
errorInInner()
except NameError:
pass
else:
self.fail()
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
exec("""
global_x = 1
def f():
global_x += 1
try:
f()
except UnboundLocalError:
pass
else:
fail('scope of global_x not correctly determined')
""", {'fail': self.fail})
def testComplexDefinitions(self):
def makeReturner(*lst):
def returner():
return lst
return returner
self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
return kwargs
return returner
self.assertEqual(makeReturner2(a=11)()['a'], 11)
def testScopeOfGlobalStmt(self):
# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
exec("""\
# I
x = 7
def f():
x = 1
def g():
global x
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 7)
self.assertEqual(x, 7)
# II
x = 7
def f():
x = 1
def g():
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 7)
# III
x = 7
def f():
x = 1
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# IV
x = 7
def f():
x = 3
def g():
global x
x = 2
def i():
def h():
return x
return h()
return i()
return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# XXX what about global statements in class blocks?
# do they affect methods?
x = 12
class Global:
global x
x = 13
def set(self, val):
x = val
def get(self):
return x
g = Global()
self.assertEqual(g.get(), 13)
g.set(15)
self.assertEqual(g.get(), 13)
""")
def testLeaks(self):
class Foo:
count = 0
def __init__(self):
Foo.count += 1
def __del__(self):
Foo.count -= 1
def f1():
x = Foo()
def f2():
return x
f2()
for i in range(100):
f1()
self.assertEqual(Foo.count, 0)
def testClassAndGlobal(self):
exec("""\
def test(x):
class Foo:
global x
def __call__(self, y):
return x + y
return Foo()
x = 0
self.assertEqual(test(6)(2), 8)
x = -1
self.assertEqual(test(3)(2), 5)
looked_up_by_load_name = False
class X:
# Implicit globals inside classes are looked up by LOAD_NAME, not
# LOAD_GLOBAL.
locals()['looked_up_by_load_name'] = True
passed = looked_up_by_load_name
self.assert_(X.passed)
""")
def testLocalsFunction(self):
def f(x):
def g(y):
def h(z):
return y + z
w = x + y
y += 3
return locals()
return g
d = f(2)(4)
self.assert_('h' in d)
del d['h']
self.assertEqual(d, {'x': 2, 'y': 7, 'w': 6})
def testLocalsClass(self):
# This test verifies that calling locals() does not pollute
# the local namespace of the class with free variables. Old
# versions of Python had a bug, where a free variable being
# passed through a class namespace would be inserted into
# locals() by locals() or exec or a trace function.
#
# The real bug lies in frame code that copies variables
# between fast locals and the locals dict, e.g. when executing
# a trace function.
def f(x):
class C:
x = 12
def m(self):
return x
locals()
return C
self.assertEqual(f(1).x, 12)
def f(x):
class C:
y = x
def m(self):
return x
z = list(locals())
return C
varnames = f(1).z
self.assert_("x" not in varnames)
self.assert_("y" in varnames)
def testLocalsClass_WithTrace(self):
# Issue23728: after the trace function returns, the locals()
# dictionary is used to update all variables, this used to
# include free variables. But in class statements, free
# variables are not inserted...
import sys
sys.settrace(lambda a,b,c:None)
try:
x = 12
class C:
def f(self):
return x
self.assertEquals(x, 12) # Used to raise UnboundLocalError
finally:
sys.settrace(None)
def testBoundAndFree(self):
# var is bound and free in class
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
self.assertEqual(inst.a, inst.m())
def testInteractionWithTraceFunc(self):
import sys
def tracer(a,b,c):
return tracer
def adaptgetter(name, klass, getter):
kind, des = getter
if kind == 1: # AV happens when stepping from this line to next
if des == "":
des = "_%s__%s" % (klass.__name__, name)
return lambda obj: getattr(obj, des)
class TestClass:
pass
sys.settrace(tracer)
adaptgetter("foo", TestClass, (1, ""))
sys.settrace(None)
self.assertRaises(TypeError, sys.settrace)
def testEvalExecFreeVars(self):
def f(x):
return lambda: x + 1
g = f(3)
self.assertRaises(TypeError, eval, g.__code__)
try:
exec(g.__code__, {})
except TypeError:
pass
else:
self.fail("exec should have failed, because code contained free vars")
def testListCompLocalVars(self):
try:
print(bad)
except NameError:
pass
else:
print("bad should not be defined")
def x():
[bad for s in 'a b' for bad in s.split()]
x()
try:
print(bad)
except NameError:
pass
def testEvalFreeVars(self):
def f(x):
def g():
x
eval("x + 1")
return g
f(4)()
def testFreeingCell(self):
# Test what happens when a finalizer accesses
# the cell where the object was stored.
class Special:
def __del__(self):
nestedcell_get()
def testNonLocalFunction(self):
def f(x):
def inc():
nonlocal x
x += 1
return x
def dec():
nonlocal x
x -= 1
return x
return inc, dec
inc, dec = f(0)
self.assertEqual(inc(), 1)
self.assertEqual(inc(), 2)
self.assertEqual(dec(), 1)
self.assertEqual(dec(), 0)
def testNonLocalMethod(self):
def f(x):
class c:
def inc(self):
nonlocal x
x += 1
return x
def dec(self):
nonlocal x
x -= 1
return x
return c()
c = f(0)
self.assertEqual(c.inc(), 1)
self.assertEqual(c.inc(), 2)
self.assertEqual(c.dec(), 1)
self.assertEqual(c.dec(), 0)
def testNonLocalClass(self):
def f(x):
class c:
nonlocal x
x += 1
def get(self):
return x
return c()
c = f(0)
self.assertEqual(c.get(), 1)
self.assert_("x" not in c.__class__.__dict__)
def testNonLocalGenerator(self):
def f(x):
def g(y):
nonlocal x
for i in range(y):
x += 1
yield x
return g
g = f(0)
self.assertEqual(list(g(5)), [1, 2, 3, 4, 5])
def testNestedNonLocal(self):
def f(x):
def g():
nonlocal x
x -= 2
def h():
nonlocal x
x += 4
return x
return h
return g
g = f(1)
h = g()
self.assertEqual(h(), 3)
def test_main():
run_unittest(ScopeTests)
if __name__ == '__main__':
test_main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
# Copyright (c) 2015 Cisco Systems. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Tests of endpoint module.
"""
from collections import OrderedDict
from contextlib import nested
import logging
from calico.felix.plugins.fiptgenerator import FelixIptablesGenerator
from calico.felix.selectors import parse_selector
from calico.felix.endpoint import EndpointManager, LocalEndpoint
from calico.felix.fetcd import EtcdStatusReporter
from calico.felix.fiptables import IptablesUpdater
from calico.felix.dispatch import DispatchChains
from calico.felix.futils import FailedSystemCall
from calico.felix.profilerules import RulesManager
from calico.felix.fipmanager import FloatingIPManager
import mock
from mock import Mock
from calico.felix.test.base import BaseTestCase, load_config
from calico.felix.test import stub_utils
from calico.felix import endpoint
from calico.felix import futils
from calico.datamodel_v1 import EndpointId, TieredPolicyId
_log = logging.getLogger(__name__)
mock.patch.object = getattr(mock.patch, "object") # Keep PyCharm linter happy.
ENDPOINT_ID = EndpointId("hostname", "b", "c", "d")
ENDPOINT_ID_2 = EndpointId("hostname", "b", "c1", "d1")
class TestEndpointManager(BaseTestCase):
def setUp(self):
super(TestEndpointManager, self).setUp()
self.config = load_config("felix_default.cfg", env_dict={
"FELIX_FELIXHOSTNAME": "hostname"})
self.m_updater = Mock(spec=IptablesUpdater)
self.m_dispatch = Mock(spec=DispatchChains)
self.m_rules_mgr = Mock(spec=RulesManager)
self.m_fip_manager = Mock(spec=FloatingIPManager)
self.m_status_reporter = Mock(spec=EtcdStatusReporter)
self.mgr = EndpointManager(self.config, "IPv4", self.m_updater,
self.m_dispatch, self.m_rules_mgr,
self.m_fip_manager, self.m_status_reporter)
self.mgr.get_and_incref = Mock()
self.mgr.decref = Mock()
def test_create(self):
obj = self.mgr._create(ENDPOINT_ID)
self.assertTrue(isinstance(obj, LocalEndpoint))
def test_on_started(self):
ep = {"name": "tap1234"}
self.mgr.on_endpoint_update(ENDPOINT_ID,
ep,
async=True)
self.step_actor(self.mgr)
m_endpoint = Mock(spec=LocalEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
self.mgr._on_object_started(ENDPOINT_ID, m_endpoint)
self.assertEqual(
m_endpoint.on_endpoint_update.mock_calls,
[mock.call(ep, async=True)]
)
def test_on_datamodel_in_sync(self):
ep = {"name": "tap1234"}
self.mgr.on_endpoint_update(ENDPOINT_ID,
ep,
async=True)
self.step_actor(self.mgr)
self.mgr.on_datamodel_in_sync(async=True)
self.step_actor(self.mgr)
self.assertEqual(
self.m_dispatch.apply_snapshot.mock_calls,
[mock.call(frozenset(["tap1234"]), async=True)]
)
# Second call should have no effect.
self.m_dispatch.apply_snapshot.reset_mock()
self.mgr.on_datamodel_in_sync(async=True)
self.step_actor(self.mgr)
self.assertEqual(self.m_dispatch.apply_snapshot.mock_calls, [])
def test_tiered_policy_ordering_and_updates(self):
"""
Check that the tier_sequence ordering is updated correctly as we
add and remove tiers and policies.
"""
# Make sure we have an endpoint so that we can check that it gets
# put in the dirty set.
self.mgr.on_datamodel_in_sync(async=True)
self.mgr.on_endpoint_update(ENDPOINT_ID,
{"name": "tap12345"},
async=True)
self.step_actor(self.mgr)
# Pretend that the endpoint is alive so that we'll send updates to it.
m_endpoint = Mock(spec=LocalEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
self.mgr._is_starting_or_live = Mock(return_value=True)
# Add a profile into the tier so it'll apply to the endpoint.
pol_id_a = TieredPolicyId("a", "a1")
self.mgr.on_policy_selector_update(pol_id_a, parse_selector("all()"),
10, async=True)
pol_id_b = TieredPolicyId("b", "b1")
self.mgr.on_policy_selector_update(pol_id_b, parse_selector("all()"),
10, async=True)
pol_id_c1 = TieredPolicyId("c1", "c1")
self.mgr.on_policy_selector_update(pol_id_c1, parse_selector("all()"),
10, async=True)
pol_id_c2 = TieredPolicyId("c2", "c2")
self.mgr.on_policy_selector_update(pol_id_c2, parse_selector("all()"),
10, async=True)
pol_id_c3 = TieredPolicyId("c3", "c3")
self.mgr.on_policy_selector_update(pol_id_c3, parse_selector("all()"),
10, async=True)
self.step_actor(self.mgr)
# Since we haven't set the tier ID yet, the policy won't get applied...
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(OrderedDict(), async=True)] * 5)
m_endpoint.on_tiered_policy_update.reset_mock()
# Adding a tier should trigger an update, adding the tier and policy.
self.mgr.on_tier_data_update("a", {"order": 1}, async=True)
self.step_actor(self.mgr)
self.assertEqual(self.mgr.endpoints_with_dirty_policy, set())
tiers = OrderedDict()
tiers["a"] = [pol_id_a]
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)])
m_endpoint.on_tiered_policy_update.reset_mock()
# Idempotent update should get squashed.
self.mgr.on_tier_data_update("a", {"order": 2}, async=True)
self.mgr.on_tier_data_update("a", {"order": 2}, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [])
# Adding another tier should trigger an update.
self.mgr.on_tier_data_update("b", {"order": 3}, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
tiers["a"] = [pol_id_a]
tiers["b"] = [pol_id_b]
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)])
m_endpoint.on_tiered_policy_update.reset_mock()
# Swapping the order should trigger an update.
self.mgr.on_tier_data_update("b", {"order": 1}, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
tiers["b"] = [pol_id_b]
tiers["a"] = [pol_id_a]
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)])
m_endpoint.on_tiered_policy_update.reset_mock()
# Check deletion and that it's idempotent.
self.mgr.on_tier_data_update("b", None, async=True)
self.step_actor(self.mgr)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.step_actor(self.mgr)
self.mgr.on_tier_data_update("b", None, async=True)
self.step_actor(self.mgr)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
tiers["a"] = [pol_id_a]
self.assertEqual(
m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)] * 2 # One for policy, one for tier.
)
m_endpoint.on_tiered_policy_update.reset_mock()
# Check lexicographic tie-breaker.
self.mgr.on_tier_data_update("c1", {"order": 0}, async=True)
self.mgr.on_tier_data_update("c2", {"order": 0}, async=True)
self.mgr.on_tier_data_update("c3", {"order": 0}, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
# All 'c's should sort before 'a' due to explicit ordering but 'c's
# should sort in lexicographic order.
tiers["c1"] = [pol_id_c1]
tiers["c2"] = [pol_id_c2]
tiers["c3"] = [pol_id_c3]
tiers["a"] = [pol_id_a]
actual_call = m_endpoint.on_tiered_policy_update.mock_calls[-1]
expected_call = mock.call(tiers, async=True)
self.assertEqual(actual_call, expected_call,
msg="\nExpected: %s\n Got: %s" %
(expected_call, actual_call))
m_endpoint.on_tiered_policy_update.reset_mock()
def test_label_inheritance(self):
# Make sure we have an endpoint so that we can check that it gets
# put in the dirty set. These have no labels at all, so we test
# that missing labels get translated to an empty dict.
self.mgr.on_endpoint_update(ENDPOINT_ID, {"name": "tap12345",
"profile_ids": ["prof1"]},
async=True)
self.mgr.on_endpoint_update(ENDPOINT_ID_2, {"name": "tap23456",
"profile_ids": ["prof2"]},
async=True)
# And we need a selector to pick out one of the endpoints by the labels
# attached to its parent.
self.mgr.on_policy_selector_update(TieredPolicyId("a", "b"),
parse_selector('a == "b"'),
10,
async=True)
self.step_actor(self.mgr)
with mock.patch.object(self.mgr, "_update_dirty_policy") as m_update:
self.mgr.on_prof_labels_set("prof1", {"a": "b"}, async=True)
self.step_actor(self.mgr)
# Only the first endpoint should end up matching the selector.
self.assertEqual(self.mgr.endpoints_with_dirty_policy,
set([ENDPOINT_ID]))
# And an update should be triggered.
self.assertEqual(m_update.mock_calls, [mock.call()])
def test_endpoint_update_not_our_host(self):
ep = {"name": "tap1234"}
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
self.mgr.on_endpoint_update(EndpointId("notus", "b", "c", "d"),
ep,
async=True)
self.step_actor(self.mgr)
self.assertFalse(m_sol.called)
def test_endpoint_live_obj(self):
ep = {"name": "tap1234"}
# First send in an update to trigger creation.
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.step_actor(self.mgr)
self.assertEqual(self.mgr.get_and_incref.mock_calls,
[mock.call(ENDPOINT_ID)])
m_endpoint = Mock(spec=LocalEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
# Then send a second update to check that it gets passed on to the
# LocalEndpoint.
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = True
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)])
self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
[mock.call(ep, force_reprogram=False,
async=True)])
self.assertTrue(ENDPOINT_ID in self.mgr.local_endpoint_ids)
# Finally, send in a deletion.
m_endpoint.on_endpoint_update.reset_mock()
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = True
self.mgr.on_endpoint_update(ENDPOINT_ID, None, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
[mock.call(None, force_reprogram=False,
async=True)])
self.assertEqual(self.mgr.decref.mock_calls, [mock.call(ENDPOINT_ID)])
self.assertFalse(ENDPOINT_ID in self.mgr.local_endpoint_ids)
def test_on_interface_update_unknown(self):
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
self.mgr.on_interface_update("foo", True, async=True)
self.step_actor(self.mgr)
self.assertFalse(m_sol.called)
def test_on_interface_update_known(self):
ep = {"name": "tap1234"}
m_endpoint = Mock(spec=LocalEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = True
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.mgr.on_interface_update("tap1234", True, async=True)
self.step_actor(self.mgr)
self.assertEqual(
m_endpoint.on_interface_update.mock_calls,
[mock.call(True, async=True)]
)
def test_on_interface_update_known_but_not_live(self):
ep = {"name": "tap1234"}
m_endpoint = Mock(spec=LocalEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = False
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.mgr.on_interface_update("tap1234", True, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_endpoint.on_interface_update.mock_calls, [])
class TestLocalEndpoint(BaseTestCase):
def setUp(self):
super(TestLocalEndpoint, self).setUp()
self.config = load_config("felix_default.cfg", global_dict={
"EndpointReportingEnabled": "False"})
self.m_ipt_gen = Mock(spec=FelixIptablesGenerator)
self.m_ipt_gen.endpoint_updates.return_value = {}, {}
self.m_iptables_updater = Mock(spec=IptablesUpdater)
self.m_dispatch_chains = Mock(spec=DispatchChains)
self.m_rules_mgr = Mock(spec=RulesManager)
self.m_manager = Mock(spec=EndpointManager)
self.m_fip_manager = Mock(spec=FloatingIPManager)
self.m_status_rep = Mock(spec=EtcdStatusReporter)
def get_local_endpoint(self, combined_id, ip_type):
local_endpoint = endpoint.LocalEndpoint(self.config,
combined_id,
ip_type,
self.m_iptables_updater,
self.m_dispatch_chains,
self.m_rules_mgr,
self.m_fip_manager,
self.m_status_rep)
local_endpoint._manager = self.m_manager
return local_endpoint
def test_on_endpoint_update_v4(self):
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
# Call with no data; should be ignored (no configuration to remove).
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
ips = ["1.2.3.4/32"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
self.assertFalse(m_rem_conntrack.called)
# Send through an update with no changes - should be a no-op.
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertFalse(m_conf.called)
self.assertFalse(m_set_routes.called)
self.assertFalse(m_rem_conntrack.called)
# Change the MAC address and try again, leading to reset of ARP
data = data.copy()
data['mac'] = stub_utils.get_mac()
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv4') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
# Change the IP address, causing an iptables and route refresh.
data = data.copy()
data["ipv4_nets"] = ["1.2.3.5"]
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as _m_conf,\
mock.patch('calico.felix.endpoint.LocalEndpoint._update_chains') as _m_up_c,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.5"]),
iface,
data['mac'],
reset_arp=True)
self.assertFalse(local_ep._update_chains.called)
m_rem_conntrack.assert_called_once_with(set(["1.2.3.4"]), 4)
# Change the nat mappings, causing an iptables and route refresh.
data = data.copy()
data['ipv4_nat'] = [
{
'int_ip': '1.2.3.4',
'ext_ip': '5.6.7.8'
}
]
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as _m_conf,\
mock.patch('calico.felix.endpoint.LocalEndpoint._update_chains') as _m_up_c,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.5", "5.6.7.8"]),
iface,
data['mac'],
reset_arp=True)
local_ep._update_chains.assert_called_once_with()
self.assertFalse(m_rem_conntrack.called)
# Send empty data, which deletes the endpoint.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type, set(),
data["name"], None)
# Should clean up conntrack entries for all IPs.
m_rem_conntrack.assert_called_once_with(
set(['1.2.3.5', '5.6.7.8']), 4
)
def test_on_endpoint_update_delete_fail(self):
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
ips = ["1.2.3.4/32"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
self.assertFalse(m_rem_conntrack.called)
# Send empty data, which deletes the endpoint. Raise an exception
# from set_routes to check that it's handled.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.interface_exists', return_value=True),\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
m_set_routes.side_effect = FailedSystemCall("", [], 1, "", "")
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type, set(),
data["name"], None)
# Should clean up conntrack entries for all IPs.
m_rem_conntrack.assert_called_once_with(
set(['1.2.3.4']), 4
)
def test_on_endpoint_update_v6(self):
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV6
local_ep = self.get_local_endpoint(combined_id, ip_type)
# Call with no data; should be ignored (no configuration to remove).
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
nets = ["2001::abcd/128"]
gway = "2020:ab::9876"
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv6_nets': nets,
'ipv6_gateway': gway,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv6') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up, \
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface, gway)
m_set_routes.assert_called_once_with(ip_type,
set(["2001::abcd"]),
iface,
data['mac'],
reset_arp=False)
self.assertFalse(m_rem_conntrack.called)
# Send through an update with no changes but a force update. Should
# force a re-write to iptables.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
local_ep.on_endpoint_update(data, force_reprogram=True,
async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertTrue(m_conf.called)
self.assertTrue(m_set_routes.called)
# Send through an update that changes the MAC - on IPv4 this would reset
# ARP, but this is IPv6 so it won't.
data = data.copy()
data['mac'] = stub_utils.get_mac()
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface, gway)
m_set_routes.assert_called_once_with(ip_type,
set(["2001::abcd"]),
iface,
data['mac'],
reset_arp=False)
# Change the nat mappings, causing an iptables and route refresh.
data = data.copy()
nets.append('2001::abce/128')
data['ipv6_nat'] = [
{
'int_ip': '2001::abcd',
'ext_ip': '2001::abce'
}
]
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv6') as m_conf,\
mock.patch('calico.felix.endpoint.LocalEndpoint._update_chains') as _m_up_c:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(
ip_type,
set(["2001::abcd", "2001::abce"]),
iface,
data['mac'],
reset_arp=False
)
local_ep._update_chains.assert_called_once_with()
# Send empty data, which deletes the endpoint.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(None, async=True)
local_ep.on_unreferenced(async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type, set(),
data["name"], None)
local_ep._finish_msg_batch([], []) # Should be ignored
self.m_manager.on_object_cleanup_complete.assert_called_once_with(
local_ep._id,
local_ep,
async=True,
)
m_rem_conntrack.assert_called_once_with(set(['2001::abcd',
'2001::abce']), 6)
def test_on_interface_update_v4(self):
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
ips = ["1.2.3.4"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# We can only get on_interface_update calls after the first
# on_endpoint_update, so trigger that.
with nested(
mock.patch('calico.felix.devices.set_routes'),
mock.patch('calico.felix.devices.configure_interface_ipv4'),
mock.patch('calico.felix.devices.interface_up'),
) as [m_set_routes, m_conf, m_iface_up]:
m_iface_up.return_value = False
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertFalse(m_conf.called)
self.assertFalse(m_set_routes.called)
self.assertFalse(local_ep._device_in_sync)
# Now pretend to get an interface update - does all the same work.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv4') as m_conf:
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(ips),
iface,
data['mac'],
reset_arp=True)
self.assertTrue(local_ep._device_in_sync)
@mock.patch("calico.felix.endpoint.devices", autospec=True)
def test_tiered_policy_mainline(self, m_devices):
self.config.plugins["iptables_generator"] = self.m_ipt_gen
ep = self.get_local_endpoint(ENDPOINT_ID, futils.IPV4)
mac = stub_utils.get_mac()
ep.on_endpoint_update(
{
'state': "active",
'endpoint': "endpoint_id",
'mac': mac,
'name': "tap1234",
'ipv4_nets': ["10.0.0.1"],
'profile_ids': ["prof1"]
},
async=True)
self.step_actor(ep)
self.assertEqual(
self.m_ipt_gen.endpoint_updates.mock_calls,
[
mock.call(4, 'd', '1234', mac, ['prof1'], {}),
]
)
self.m_ipt_gen.endpoint_updates.reset_mock()
tiers = OrderedDict()
t1_1 = TieredPolicyId("t1", "t1_1")
t1_2 = TieredPolicyId("t1", "t1_2")
tiers["t1"] = [t1_1, t1_2]
t2_1 = TieredPolicyId("t2", "t2_1")
tiers["t2"] = [t2_1]
ep.on_tiered_policy_update(tiers, async=True)
self.step_actor(ep)
self.assertEqual(
self.m_ipt_gen.endpoint_updates.mock_calls,
[
mock.call(4, 'd', '1234', mac, ['prof1'],
OrderedDict([('t1', [TieredPolicyId('t1','t1_1'),
TieredPolicyId('t1','t1_2')]),
('t2', [TieredPolicyId('t2','t2_1')])]))
])
def test_on_interface_update_v6(self):
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV6
local_ep = self.get_local_endpoint(combined_id, ip_type)
ips = ["1234::5678"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv6_nets': ips,
'profile_ids': ["prof1"]
}
# We can only get on_interface_update calls after the first
# on_endpoint_update, so trigger that.
with nested(
mock.patch('calico.felix.devices.set_routes'),
mock.patch('calico.felix.devices.configure_interface_ipv6'),
mock.patch('calico.felix.devices.interface_up'),
) as [m_set_routes, m_conf, m_iface_up]:
m_iface_up.return_value = False
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertFalse(m_conf.called)
self.assertFalse(m_set_routes.called)
self.assertFalse(local_ep._device_in_sync)
# Now pretend to get an interface update - does all the same work.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
m_conf.assert_called_once_with(iface, None)
m_set_routes.assert_called_once_with(ip_type,
set(ips),
iface,
data['mac'],
reset_arp=False)
self.assertTrue(local_ep._device_in_sync)
# Now cover the error cases...
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
with mock.patch('calico.felix.devices.'
'interface_exists') as ifce_exists:
with mock.patch('calico.felix.devices.'
'interface_up') as ifce_up:
# Cycle through all the possibilities for the state.
ifce_exists.side_effect = [True, False, True]
ifce_up.side_effect = [True, False]
m_conf.side_effect = FailedSystemCall("", [], 1, "", "")
local_ep.on_interface_update(False, async=True)
self.step_actor(local_ep)
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
self.assertFalse(local_ep._device_in_sync)
def test_profile_id_update_triggers_iptables(self):
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
ips = ["10.0.0.1"]
iface = "tapabcdef"
mac = stub_utils.get_mac()
data = {'endpoint': "endpoint_id", 'mac': mac,
'name': iface, 'ipv4_nets': ips, 'profile_ids': [],
'state': "active"}
local_ep._pending_endpoint = data.copy()
# First update with endpoint not yet set, should trigger full sync.
with mock.patch("calico.felix.devices.interface_up",
return_value=True):
local_ep._apply_endpoint_update()
self.assertEqual(local_ep.endpoint, data)
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._iptables_in_sync = True
local_ep._device_in_sync = True
# No-op update
local_ep._pending_endpoint = data.copy()
local_ep._apply_endpoint_update()
self.assertTrue(local_ep._iptables_in_sync)
self.assertTrue(local_ep._device_in_sync)
# Set the state.
local_ep._pending_endpoint = data.copy()
local_ep._pending_endpoint["state"] = "inactive"
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._device_in_sync = True
local_ep._iptables_in_sync = True
# Set the state back again...
local_ep._pending_endpoint = data.copy()
local_ep._pending_endpoint["state"] = "active"
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._device_in_sync = True
local_ep._iptables_in_sync = True
# Profiles update. Should update iptables.
data = {'endpoint': "endpoint_id", 'mac': mac,
'name': iface, 'ipv4_nets': ips, 'profile_ids': ["prof2"],
"state": "active"}
local_ep._pending_endpoint = data.copy()
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync) # Check...
local_ep._iptables_in_sync = True # ...then reset
self.assertTrue(local_ep._device_in_sync)
# IP update. Should update routing but not iptables.
data = {'endpoint': "endpoint_id", 'mac': mac,
'name': iface, 'ipv4_nets': ["10.0.0.2"],
'profile_ids': ["prof2"],
"state": "active"}
local_ep._pending_endpoint = data.copy()
local_ep._apply_endpoint_update()
self.assertTrue(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._device_in_sync = True
# Delete, should update everything.
local_ep._pending_endpoint = None
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
def test_maybe_update_status_missing_deps(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_missing_endpoint(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep._device_is_up = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_iptables_failure(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._device_is_up = True
local_ep._iptables_in_sync = False
local_ep._device_in_sync = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'error'}, async=True
)
def test_maybe_update_status_device_failure(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._iptables_in_sync = True
local_ep._device_is_up = True
local_ep._device_in_sync = False
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'error'}, async=True
)
def test_maybe_update_status_iptables_up(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._device_is_up = True
local_ep._iptables_in_sync = True
local_ep._device_in_sync = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'up'}, async=True
)
def test_maybe_update_status_admin_down(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "inactive"}
local_ep._device_is_up = True
local_ep._iptables_in_sync = True
local_ep._device_in_sync = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_oper_down(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._device_is_up = False
local_ep._iptables_in_sync = True
local_ep._device_in_sync = False
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_iptables_unreferenced(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = EndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.get_local_endpoint(combined_id, ip_type)
local_ep.on_unreferenced(async=True)
self.step_actor(local_ep)
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, None, async=True
)
|
|
import logging
import synapse.common as s_common
import synapse.lookup.phonenum as s_l_phone
from synapse.lib.types import DataType
import synapse.lib.module as s_module
logger = logging.getLogger(__name__)
intls = (
('us', '1', '011', 10),
)
def genTelLocCast(iso2, cc, idd, size):
'''
Generate a generic phone canonicalizer for numbers which
may reside within an arbitrary country's local exchange.
'''
clen = len(cc)
ilen = len(idd)
def castTelLocal(valu):
try:
rawp = str(valu).strip()
valu = digits(rawp)
if not valu:
return None
if rawp[0] == '+':
return int(valu)
# since 00 and 011 are so common
# (and generally incompatible with local)
if valu.startswith('00'):
return int(valu[2:])
if valu.startswith('011'):
return int(valu[3:])
if idd not in ('00', '011') and valu.startswith(idd):
return int(valu[ilen:])
if valu.startswith(cc):
return int(valu)
if len(valu) == size:
return int(cc + valu)
return int(valu)
        except Exception:
logger.exception('cast tel:loc:%s' % iso2)
return None
return castTelLocal
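# Example (illustrative only, based on the 'us' entry in `intls` above):
#     cast = genTelLocCast('us', '1', '011', 10)
#     cast('555 123 4567')         # -> 15551234567 (local number gains the country code)
#     cast('011 44 20 7946 0958')  # -> 442079460958 (IDD prefix stripped)
#     cast('+1 555 123 4567')      # -> 15551234567 ('+' treated as fully qualified)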
def digits(text):
return ''.join([c for c in text if c.isdigit()])
class PhoneType(DataType):
def norm(self, valu, oldval=None):
if isinstance(valu, str):
valu = int(digits(valu))
subs = {}
try:
valu = int(valu)
info = s_l_phone.getPhoneInfo(valu)
cc = info.get('cc')
if cc is not None:
subs['cc'] = cc
# TODO prefix based validation?
return valu, subs
        except TypeError:
self._raiseBadValu(valu)
def repr(self, valu):
text = str(valu)
# FIXME implement more geo aware reprs
if text[0] == '1':
area = text[1:4]
pref = text[4:7]
numb = text[7:11]
return '+1 (%s) %s-%s' % (area, pref, numb)
return '+' + text
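# Example (illustrative only): PhoneType.repr renders NANP numbers with
# area-code grouping and falls back to a bare '+' prefix otherwise, e.g.
#     repr(15551234567)  -> '+1 (555) 123-4567'
#     repr(442079460958) -> '+442079460958'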
def imeicsum(text):
'''
Calculate the imei check byte.
'''
digs = []
for i in range(14):
v = int(text[i])
if i % 2:
v *= 2
        digs.extend(int(x) for x in str(v))
chek = 0
valu = sum(digs)
remd = valu % 10
if remd != 0:
chek = 10 - remd
return str(chek)
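# Worked example (illustrative only), using the common reference IMEI
# 490154203237518: imeicsum('49015420323751') doubles every second digit,
# sums the resulting digits (4+9+0+2+5+8+2+0+3+4+3+5+5+2 == 52) and returns
# the Luhn complement '8', which ImeiType.norm appends to a 14-digit value.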
class ImeiType(DataType):
'''
https://en.wikipedia.org/wiki/International_Mobile_Equipment_Identity
'''
def norm(self, valu, oldval=None):
# TODO: support pre 2004 "old" imei format
if isinstance(valu, str):
digs = digits(valu)
if not digs:
self._raiseBadValu(valu, mesg='requires a digit string')
valu = int(digs)
imei = str(valu)
ilen = len(imei)
# we are missing our optional check digit
        # let's add it for consistency...
if ilen == 14:
imei += imeicsum(imei)
return self._norm_imei(imei)
        # if we *have* our check digit, let's check it
elif ilen == 15:
if imeicsum(imei) != imei[-1]:
self._raiseBadValu(valu, mesg='invalid imei checksum byte')
return self._norm_imei(imei)
self._raiseBadValu(valu)
def _norm_imei(self, imei):
valu = int(imei)
tac = int(imei[0:8])
snr = int(imei[8:14])
cd = int(imei[14:15])
return valu, {'tac': tac, 'serial': snr, 'cd': cd}
class ImsiType(DataType):
def norm(self, valu, oldval=None):
if isinstance(valu, str):
digs = digits(valu)
if not digs:
self._raiseBadValu(valu, mesg='requires a digit string')
valu = int(digs)
imsi = str(valu)
ilen = len(imsi)
if ilen > 15:
self._raiseBadValu(valu, mesg='invalid imsi len: %d' % (ilen,))
mcc = int(imsi[0:3])
# TODO full imsi analysis tree
return valu, {'mcc': mcc}
class TelMod(s_module.CoreModule):
def initCoreModule(self):
# TODO
# event handlers which cache and resolve prefixes to tag phone numbers
for iso2, cc, idd, size in intls:
self.core.addTypeCast('tel:loc:%s' % iso2, genTelLocCast(iso2, cc, idd, size))
@staticmethod
def getBaseModels():
modl = {
'types': (
('tel:phone', {'ctor': 'synapse.models.telco.PhoneType'}),
('tel:mob:tac', {'subof': 'int',
'doc': 'A mobile Type Allocation Code'}),
('tel:mob:imei', {'ctor': 'synapse.models.telco.ImeiType',
'doc': 'An International Mobile Equipment Id'}),
('tel:mob:imsi', {'ctor': 'synapse.models.telco.ImsiType',
'doc': 'An International Mobile Subscriber Id'}),
('tel:mob:imid', {
'subof': 'comp',
'fields': 'imei=tel:mob:imei,imsi=tel:mob:imsi',
'doc': 'Fused knowledge of an IMEI/IMSI used together.'}),
('tel:mob:imsiphone', {
'subof': 'comp',
'fields': 'imsi=tel:mob:imsi,phone=tel:phone',
'doc': 'Fused knowledge of an IMSI assigned phone number.'}),
# TODO: mcc, meid
),
'forms': (
('tel:phone', {'ptype': 'tel:phone'}, [
('cc', {'ptype': 'pol:iso2', 'defval': '??'}),
]),
('tel:prefix', {'ptype': 'tel:phone'}, [
('cc', {'ptype': 'pol:iso2', 'defval': '??'}),
('tag', {'ptype': 'syn:tag'}),
]),
('tel:mob:tac', {}, [
('org', {'ptype': 'ou:org',
'doc': 'The org guid for the manufacturer'}),
('manu', {'ptype': 'str:lwr', 'defval': '??',
'doc': 'The TAC manufacturer name'}),
('model', {'ptype': 'str:lwr', 'defval': '??',
'doc': 'The TAC model name'}),
('internal', {'ptype': 'str:lwr', 'defval': '??',
'doc': 'The TAC internal model name'}),
]),
('tel:mob:imei', {}, [
                    ('tac', {'ptype': 'tel:mob:tac', 'doc': 'The Type Allocation Code within the IMEI'}),
('serial', {'ptype': 'int', 'doc': 'The serial number within the IMEI'}),
]),
('tel:mob:imsi', {}, [
('mcc', {'ptype': 'int', 'doc': 'The Mobile Country Code'}),
]),
('tel:mob:imid', {}, [
('imei', {'ptype': 'tel:mob:imei',
'doc': 'The IMEI for the phone hardware.'}),
('imsi', {'ptype': 'tel:mob:imsi',
'doc': 'The IMSI for the phone subscriber.'}),
]),
('tel:mob:imsiphone', {}, (
('imsi', {'ptype': 'tel:mob:imsi',
'doc': 'The IMSI with the assigned phone number.'}),
('phone', {'ptype': 'tel:phone',
'doc': 'The phone number assigned to the IMSI.'}),
)),
),
}
name = 'tel'
return ((name, modl), )
|
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Power100Power(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Power100Power - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'odata_context': 'Odata400Context',
'odata_id': 'Odata400Id',
'odata_type': 'Odata400Type',
'description': 'ResourceDescription',
'id': 'ResourceId',
'name': 'ResourceName',
'oem': 'ResourceOem',
'power_control': 'list[Power100PowerControl]',
'power_controlodata_count': 'Odata400Count',
'power_controlodata_navigation_link': 'Odata400IdRef',
'power_supplies': 'list[Power100PowerSupply]',
'power_suppliesodata_count': 'Odata400Count',
'power_suppliesodata_navigation_link': 'Odata400IdRef',
'redundancy': 'list[RedundancyRedundancy]',
'redundancyodata_count': 'Odata400Count',
'redundancyodata_navigation_link': 'Odata400IdRef',
'voltages': 'list[Power100Voltage]',
'voltagesodata_count': 'Odata400Count',
'voltagesodata_navigation_link': 'Odata400IdRef'
}
self.attribute_map = {
'odata_context': '@odata.context',
'odata_id': '@odata.id',
'odata_type': '@odata.type',
'description': 'Description',
'id': 'Id',
'name': 'Name',
'oem': 'Oem',
'power_control': 'PowerControl',
'power_controlodata_count': 'PowerControl@odata.count',
'power_controlodata_navigation_link': 'PowerControl@odata.navigationLink',
'power_supplies': 'PowerSupplies',
'power_suppliesodata_count': 'PowerSupplies@odata.count',
'power_suppliesodata_navigation_link': 'PowerSupplies@odata.navigationLink',
'redundancy': 'Redundancy',
'redundancyodata_count': 'Redundancy@odata.count',
'redundancyodata_navigation_link': 'Redundancy@odata.navigationLink',
'voltages': 'Voltages',
'voltagesodata_count': 'Voltages@odata.count',
'voltagesodata_navigation_link': 'Voltages@odata.navigationLink'
}
self._odata_context = None
self._odata_id = None
self._odata_type = None
self._description = None
self._id = None
self._name = None
self._oem = None
self._power_control = None
self._power_controlodata_count = None
self._power_controlodata_navigation_link = None
self._power_supplies = None
self._power_suppliesodata_count = None
self._power_suppliesodata_navigation_link = None
self._redundancy = None
self._redundancyodata_count = None
self._redundancyodata_navigation_link = None
self._voltages = None
self._voltagesodata_count = None
self._voltagesodata_navigation_link = None
@property
def odata_context(self):
"""
Gets the odata_context of this Power100Power.
:return: The odata_context of this Power100Power.
:rtype: Odata400Context
"""
return self._odata_context
@odata_context.setter
def odata_context(self, odata_context):
"""
Sets the odata_context of this Power100Power.
:param odata_context: The odata_context of this Power100Power.
:type: Odata400Context
"""
self._odata_context = odata_context
@property
def odata_id(self):
"""
Gets the odata_id of this Power100Power.
:return: The odata_id of this Power100Power.
:rtype: Odata400Id
"""
return self._odata_id
@odata_id.setter
def odata_id(self, odata_id):
"""
Sets the odata_id of this Power100Power.
:param odata_id: The odata_id of this Power100Power.
:type: Odata400Id
"""
self._odata_id = odata_id
@property
def odata_type(self):
"""
Gets the odata_type of this Power100Power.
:return: The odata_type of this Power100Power.
:rtype: Odata400Type
"""
return self._odata_type
@odata_type.setter
def odata_type(self, odata_type):
"""
Sets the odata_type of this Power100Power.
:param odata_type: The odata_type of this Power100Power.
:type: Odata400Type
"""
self._odata_type = odata_type
@property
def description(self):
"""
Gets the description of this Power100Power.
:return: The description of this Power100Power.
:rtype: ResourceDescription
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Power100Power.
:param description: The description of this Power100Power.
:type: ResourceDescription
"""
self._description = description
@property
def id(self):
"""
Gets the id of this Power100Power.
:return: The id of this Power100Power.
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Power100Power.
:param id: The id of this Power100Power.
:type: ResourceId
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Power100Power.
:return: The name of this Power100Power.
:rtype: ResourceName
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Power100Power.
:param name: The name of this Power100Power.
:type: ResourceName
"""
self._name = name
@property
def oem(self):
"""
Gets the oem of this Power100Power.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Power100Power.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Power100Power.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Power100Power.
:type: ResourceOem
"""
self._oem = oem
@property
def power_control(self):
"""
Gets the power_control of this Power100Power.
This is the definition for power control function (power reading/limiting).
:return: The power_control of this Power100Power.
:rtype: list[Power100PowerControl]
"""
return self._power_control
@power_control.setter
def power_control(self, power_control):
"""
Sets the power_control of this Power100Power.
This is the definition for power control function (power reading/limiting).
:param power_control: The power_control of this Power100Power.
:type: list[Power100PowerControl]
"""
self._power_control = power_control
@property
def power_controlodata_count(self):
"""
Gets the power_controlodata_count of this Power100Power.
:return: The power_controlodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._power_controlodata_count
@power_controlodata_count.setter
def power_controlodata_count(self, power_controlodata_count):
"""
Sets the power_controlodata_count of this Power100Power.
:param power_controlodata_count: The power_controlodata_count of this Power100Power.
:type: Odata400Count
"""
self._power_controlodata_count = power_controlodata_count
@property
def power_controlodata_navigation_link(self):
"""
Gets the power_controlodata_navigation_link of this Power100Power.
:return: The power_controlodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._power_controlodata_navigation_link
@power_controlodata_navigation_link.setter
def power_controlodata_navigation_link(self, power_controlodata_navigation_link):
"""
Sets the power_controlodata_navigation_link of this Power100Power.
:param power_controlodata_navigation_link: The power_controlodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._power_controlodata_navigation_link = power_controlodata_navigation_link
@property
def power_supplies(self):
"""
Gets the power_supplies of this Power100Power.
Details of the power supplies associated with this system or device
:return: The power_supplies of this Power100Power.
:rtype: list[Power100PowerSupply]
"""
return self._power_supplies
@power_supplies.setter
def power_supplies(self, power_supplies):
"""
Sets the power_supplies of this Power100Power.
Details of the power supplies associated with this system or device
:param power_supplies: The power_supplies of this Power100Power.
:type: list[Power100PowerSupply]
"""
self._power_supplies = power_supplies
@property
def power_suppliesodata_count(self):
"""
Gets the power_suppliesodata_count of this Power100Power.
:return: The power_suppliesodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._power_suppliesodata_count
@power_suppliesodata_count.setter
def power_suppliesodata_count(self, power_suppliesodata_count):
"""
Sets the power_suppliesodata_count of this Power100Power.
:param power_suppliesodata_count: The power_suppliesodata_count of this Power100Power.
:type: Odata400Count
"""
self._power_suppliesodata_count = power_suppliesodata_count
@property
def power_suppliesodata_navigation_link(self):
"""
Gets the power_suppliesodata_navigation_link of this Power100Power.
:return: The power_suppliesodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._power_suppliesodata_navigation_link
@power_suppliesodata_navigation_link.setter
def power_suppliesodata_navigation_link(self, power_suppliesodata_navigation_link):
"""
Sets the power_suppliesodata_navigation_link of this Power100Power.
:param power_suppliesodata_navigation_link: The power_suppliesodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._power_suppliesodata_navigation_link = power_suppliesodata_navigation_link
@property
def redundancy(self):
"""
Gets the redundancy of this Power100Power.
Redundancy information for the power subsystem of this system or device
:return: The redundancy of this Power100Power.
:rtype: list[RedundancyRedundancy]
"""
return self._redundancy
@redundancy.setter
def redundancy(self, redundancy):
"""
Sets the redundancy of this Power100Power.
Redundancy information for the power subsystem of this system or device
:param redundancy: The redundancy of this Power100Power.
:type: list[RedundancyRedundancy]
"""
self._redundancy = redundancy
@property
def redundancyodata_count(self):
"""
Gets the redundancyodata_count of this Power100Power.
:return: The redundancyodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._redundancyodata_count
@redundancyodata_count.setter
def redundancyodata_count(self, redundancyodata_count):
"""
Sets the redundancyodata_count of this Power100Power.
:param redundancyodata_count: The redundancyodata_count of this Power100Power.
:type: Odata400Count
"""
self._redundancyodata_count = redundancyodata_count
@property
def redundancyodata_navigation_link(self):
"""
Gets the redundancyodata_navigation_link of this Power100Power.
:return: The redundancyodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._redundancyodata_navigation_link
@redundancyodata_navigation_link.setter
def redundancyodata_navigation_link(self, redundancyodata_navigation_link):
"""
Sets the redundancyodata_navigation_link of this Power100Power.
:param redundancyodata_navigation_link: The redundancyodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._redundancyodata_navigation_link = redundancyodata_navigation_link
@property
def voltages(self):
"""
Gets the voltages of this Power100Power.
This is the definition for voltage sensors.
:return: The voltages of this Power100Power.
:rtype: list[Power100Voltage]
"""
return self._voltages
@voltages.setter
def voltages(self, voltages):
"""
Sets the voltages of this Power100Power.
This is the definition for voltage sensors.
:param voltages: The voltages of this Power100Power.
:type: list[Power100Voltage]
"""
self._voltages = voltages
@property
def voltagesodata_count(self):
"""
Gets the voltagesodata_count of this Power100Power.
:return: The voltagesodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._voltagesodata_count
@voltagesodata_count.setter
def voltagesodata_count(self, voltagesodata_count):
"""
Sets the voltagesodata_count of this Power100Power.
:param voltagesodata_count: The voltagesodata_count of this Power100Power.
:type: Odata400Count
"""
self._voltagesodata_count = voltagesodata_count
@property
def voltagesodata_navigation_link(self):
"""
Gets the voltagesodata_navigation_link of this Power100Power.
:return: The voltagesodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._voltagesodata_navigation_link
@voltagesodata_navigation_link.setter
def voltagesodata_navigation_link(self, voltagesodata_navigation_link):
"""
Sets the voltagesodata_navigation_link of this Power100Power.
:param voltagesodata_navigation_link: The voltagesodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._voltagesodata_navigation_link = voltagesodata_navigation_link
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
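# Usage sketch (not part of the generated model; attribute access shown is
# assumed from the properties defined above):
#     power = Power100Power()
#     power.name = 'PowerMetrics'
#     power.to_dict()  # -> dict keyed by python attribute names; unset fields stay None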
|
|
#! /usr/bin/python
#*************************************************************************
#
# REDWOOD CONFIDENTIAL
# Author: Aaron Edsinger
# __________________
#
# [2012] - [+] Redwood Robotics Incorporated
# All Rights Reserved.
#
# All information contained herein is, and remains
# the property of Redwood Robotics Incorporated and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Redwood Robotics Incorporated
# and its suppliers and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Redwood Robotics Incorporated.
#
#import matplotlib
#matplotlib.use('TkAgg')
import time
import m3.gui as m3g
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import m3.actuator_pb2 as mec
import m3.component_factory as m3f
import math
import glob
from m3dev_tuning import M3Tuning
class M3Proc(M3Tuning):
def __init__(self):
M3Tuning.__init__(self)
self.proxy = m3p.M3RtProxy()
self.gui = m3g.M3Gui(stride_ms=125)
self.cnt=0
self.bias=[]
def stop(self):
self.act.set_mode(0)
self.proxy.step()
self.proxy.stop()
def start(self):
self.proxy.start()
self.get_component('m3actuator')
print "starting components"
self.start_components(['act','act_ec','pwr'],None)
print "done starting components"
# for k,v in self.comps.items():
# # accomplishes this: self.act=m3s.M3Actuator(self.comp_name)
# setattr(self, k, v['type'](v['name']) )
# self.comps[k]['comp'] = getattr(self,k)
# self.proxy.subscribe_status(getattr(self,k))
# self.proxy.publish_command(getattr(self,k))
# self.proxy.publish_param(getattr(self,k))
# self.proxy.make_operational(v['name'])
# pwr_rt=m3t.get_actuator_ec_pwr_component_name(self.comps['act_ec']['name'])
# pwr_ec=pwr_rt.replace('m3pwr','m3pwr_ec')
# pr=m3f.create_component(pwr_rt)
# self.proxy.publish_command(pr)
# self.proxy.make_operational(pwr_rt)
# self.proxy.make_operational(pwr_ec)
# pr.set_motor_power_on()
self.proxy.step()
#Create gui
self.mode = [0]
self.current_desired = [0]
self.pwm_desired = [0]
#self.enable_ctrl_dev=[0]
self.save = False
self.save_last = False
self.do_scope = False
self.scope = None
self.status_dict=self.proxy.get_status_dict()
#extract status fields
self.scope_keys=m3t.get_msg_fields(self.act.status,prefix='',exclude=['ethercat','base'])
self.scope_keys.sort()
self.scope_keys = ['None']+self.scope_keys
self.scope_field1 = [0]
self.scope_field2 = [0]
self.f1_last = None
self.f2_last = None
self.zero_motor_theta = False
self.zero_motor_theta_last = False
self.zero_joint_torque = False
self.zero_joint_torque_last = False
self.zero_joint_torque_lc = False
self.zero_joint_torque_lc_last = False
self.zero_joint_theta = False
self.zero_joint_theta_last = False
current_max = 2.5
pwm_max = 200
self.param_dict = self.proxy.get_param_dict()
# self.joint_torque = self.param_dict[self.comps['act']['name']]['calib']['torque']['cb_bias']
# self.joint_theta = self.param_dict[self.comps['act']['name']]['calib']['theta']['cb_bias']
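        # NOTE: the ZeroJointTheta/ZeroJointTorque toggles handled in step()
        # adjust self.joint_theta / self.joint_torque, which are only defined
        # if the calib look-ups above are re-enabled; with them commented out
        # those toggles will raise AttributeError.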
self.gui.add('M3GuiTree', 'Status', (self,'status_dict'),[],[],m3g.M3GuiRead,column=2)
self.gui.add('M3GuiTree', 'Param', (self,'param_dict'),[],[],m3g.M3GuiWrite,column=3)
self.gui.add('M3GuiModes', 'Mode', (self,'mode'),range(1),[['Off','PWM','Current'],1],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','PWM (counts)', (self,'pwm_desired'),range(1),[-pwm_max,pwm_max],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','Current (A)', (self,'current_desired'),range(1),[-current_max,current_max],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'ZeroJointTheta', (self,'zero_joint_theta'), [],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'ZeroJointTorque', (self,'zero_joint_torque'), [],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'ZeroJointTorqueLc',(self,'zero_joint_torque_lc'), [],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiModes', 'Scope1', (self,'scope_field1'),range(1),[self.scope_keys,1],m3g.M3GuiWrite)
self.gui.add('M3GuiModes', 'Scope2', (self,'scope_field2'),range(1),[self.scope_keys,1],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'Scope', (self,'do_scope'),[],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'Save', (self,'save'),[],[['On','Off']],m3g.M3GuiWrite)
self.gui.start(self.step)
def step(self):
# print self.comps['act_ec']['comp'].status.timestamp
if self.do_scope and self.scope is None:
self.scope=m3t.M3Scope2(xwidth=100,yrange=None)
self.proxy.step()
self.cnt=self.cnt+1
self.status_dict=self.proxy.get_status_dict()
if self.zero_joint_theta and not self.zero_joint_theta_last:
self.joint_theta -= self.act.get_joint_theta()
print 'New joint_theta zero',self.joint_theta
if self.zero_joint_torque and not self.zero_joint_torque_last:
self.joint_torque -= self.act.get_joint_torque()
print 'New joint_torque zero',self.joint_torque
#
# if self.zero_joint_torque_lc and not self.zero_joint_torque_lc_last:
# self.joint_torque_lc -= self.act.get_joint_torque_lc()
# print 'New joint_torque_lc zero',self.joint_torque_lc
#
# self.param_dict[self.comp_name]['calibration']['zero_motor_theta'] = self.motor_theta
# self.param_dict[self.comp_name]['calibration']['zero_joint_theta'] = self.joint_theta
# self.param_dict[self.comp_name]['calibration']['zero_joint_torque'] = self.joint_torque
# self.param_dict[self.comp_name]['calibration']['zero_joint_torque_lc'] = self.joint_torque_lc
#
# self.zero_joint_theta_last = self.zero_joint_theta
# self.zero_joint_torque_last = self.zero_joint_torque
# self.zero_joint_torque_lc_last = self.zero_joint_torque_lc
self.proxy.set_param_from_dict(self.param_dict)
if self.do_scope and self.scope is not None:
f1=self.scope_keys[self.scope_field1[0]]
f2=self.scope_keys[self.scope_field2[0]]
x1=x2=None
if f1!='None' and f1!='base':
x1=m3t.get_msg_field_value(self.act.status,f1)
print f1,':',x1
if f2!='None' and f2!='base':
x2=m3t.get_msg_field_value(self.act.status,f2)
print f2,':',x2
            if x1 is None:
                x1 = x2
            if x2 is None:
                x2 = x1
            if x1 is not None and x2 is not None:  # Handle only one value or two
                self.scope.plot(x1, x2)
print'-----------------'
if self.mode[0] == 0: #Off
self.act.set_mode(mec.ACTUATOR_MODE_OFF)
elif self.mode[0] == 1: #Pwm
self.act.set_mode(mec.ACTUATOR_MODE_PWM)
self.act.set_pwm(self.pwm_desired[0])
elif self.mode[0] == 2: #Current
self.act.set_mode(mec.ACTUATOR_MODE_CURRENT)
self.act.set_i_desired(self.current_desired[0]*1000.0)
else:
self.act.set_mode(mec.ACTUATOR_MODE_OFF)
if (self.save and not self.save_last):
self.act.write_config()
self.save_last=self.save
if __name__ == '__main__':
t=M3Proc()
try:
t.start()
    except Exception as e:  # (KeyboardInterrupt, EOFError)
        print "Exception " + str(e)
t.stop()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._pipeline_runs_operations import build_create_request_initial, build_delete_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PipelineRunsOperations:
"""PipelineRunsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> "_models.PipelineRun":
"""Gets the detailed information for a given pipeline run.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param pipeline_run_name: The name of the pipeline run.
:type pipeline_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PipelineRun, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRun
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
pipeline_run_create_parameters: "_models.PipelineRun",
**kwargs: Any
) -> "_models.PipelineRun":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(pipeline_run_create_parameters, 'PipelineRun')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PipelineRun', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
pipeline_run_create_parameters: "_models.PipelineRun",
**kwargs: Any
) -> AsyncLROPoller["_models.PipelineRun"]:
"""Creates a pipeline run for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param pipeline_run_name: The name of the pipeline run.
:type pipeline_run_name: str
:param pipeline_run_create_parameters: The parameters for creating a pipeline run.
:type pipeline_run_create_parameters:
~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRun
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PipelineRun or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRun]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
pipeline_run_create_parameters=pipeline_run_create_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
pipeline_run_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a pipeline run from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param pipeline_run_name: The name of the pipeline run.
:type pipeline_run_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
pipeline_run_name=pipeline_run_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PipelineRunListResult"]:
"""Lists all the pipeline runs for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PipelineRunListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2020_11_01_preview.models.PipelineRunListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRunListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PipelineRunListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns'} # type: ignore
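    # Usage sketch (assumption: this operations group is exposed by the
    # generated management client as `client.pipeline_runs`; the names below
    # are placeholders):
    #     poller = await client.pipeline_runs.begin_create(
    #         resource_group_name="my-rg",
    #         registry_name="myregistry",
    #         pipeline_run_name="myPipelineRun",
    #         pipeline_run_create_parameters=pipeline_run,
    #     )
    #     result = await poller.result()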
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from sahara.tests.integration.configs import config as cfg
from sahara.tests.integration.tests import base as b
from sahara.tests.integration.tests import cinder
from sahara.tests.integration.tests import cluster_configs
from sahara.tests.integration.tests import edp
from sahara.tests.integration.tests import map_reduce
from sahara.tests.integration.tests import scaling
from sahara.tests.integration.tests import swift
from sahara.utils import edp as utils_edp
class VanillaTwoGatingTest(cluster_configs.ClusterConfigTest,
map_reduce.MapReduceTest, swift.SwiftTest,
scaling.ScalingTest, cinder.CinderVolumeTest,
edp.EDPTest):
vanilla_two_config = cfg.ITConfig().vanilla_two_config
SKIP_MAP_REDUCE_TEST = vanilla_two_config.SKIP_MAP_REDUCE_TEST
SKIP_SWIFT_TEST = vanilla_two_config.SKIP_SWIFT_TEST
SKIP_SCALING_TEST = vanilla_two_config.SKIP_SCALING_TEST
SKIP_CINDER_TEST = vanilla_two_config.SKIP_CINDER_TEST
SKIP_EDP_TEST = vanilla_two_config.SKIP_EDP_TEST
def setUp(self):
super(VanillaTwoGatingTest, self).setUp()
self.cluster_id = None
self.cluster_template_id = None
def get_plugin_config(self):
return cfg.ITConfig().vanilla_two_config
ng_params = {
'MapReduce': {
'yarn.app.mapreduce.am.resource.mb': 256,
'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
},
'YARN': {
'yarn.scheduler.minimum-allocation-mb': 256,
'yarn.scheduler.maximum-allocation-mb': 1024,
'yarn.nodemanager.vmem-check-enabled': False
}
}
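    # Modest MapReduce/YARN memory settings, presumably so the gating clusters
    # fit on the small flavors used by the integration environment.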
@b.errormsg("Failure while 'nm-dn' node group template creation: ")
def _create_nm_dn_ng_template(self):
template = {
'name': 'test-node-group-template-vanilla-nm-dn',
'plugin_config': self.plugin_config,
'description': 'test node group template for Vanilla plugin',
'node_processes': ['nodemanager', 'datanode'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'node_configs': self.ng_params
}
self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
self.addCleanup(self.delete_node_group_template, self.ng_tmpl_nm_dn_id)
@b.errormsg("Failure while 'nm' node group template creation: ")
def _create_nm_ng_template(self):
template = {
'name': 'test-node-group-template-vanilla-nm',
'plugin_config': self.plugin_config,
'description': 'test node group template for Vanilla plugin',
'volumes_per_node': self.volumes_per_node,
'volumes_size': self.volumes_size,
'node_processes': ['nodemanager'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'node_configs': self.ng_params
}
self.ng_tmpl_nm_id = self.create_node_group_template(**template)
self.addCleanup(self.delete_node_group_template, self.ng_tmpl_nm_id)
@b.errormsg("Failure while 'dn' node group template creation: ")
def _create_dn_ng_template(self):
template = {
'name': 'test-node-group-template-vanilla-dn',
'plugin_config': self.plugin_config,
'description': 'test node group template for Vanilla plugin',
'volumes_per_node': self.volumes_per_node,
'volumes_size': self.volumes_size,
'node_processes': ['datanode'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'node_configs': self.ng_params
}
self.ng_tmpl_dn_id = self.create_node_group_template(**template)
self.addCleanup(self.delete_node_group_template, self.ng_tmpl_dn_id)
@b.errormsg("Failure while cluster template creation: ")
def _create_cluster_template(self):
template = {
'name': 'test-cluster-template-vanilla',
'plugin_config': self.plugin_config,
'description': 'test cluster template for Vanilla plugin',
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
},
'node_groups': [
{
'name': 'master-node-rm-nn',
'flavor_id': self.flavor_id,
'node_processes': ['namenode', 'resourcemanager',
'hiveserver'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'count': 1,
'node_configs': self.ng_params
},
{
'name': 'master-node-oo-hs',
'flavor_id': self.flavor_id,
'node_processes': ['oozie', 'historyserver',
'secondarynamenode'],
'floating_ip_pool': self.floating_ip_pool,
'auto_security_group': True,
'count': 1,
'node_configs': self.ng_params
},
{
'name': 'worker-node-nm-dn',
'node_group_template_id': self.ng_tmpl_nm_dn_id,
'count': 2
},
{
'name': 'worker-node-dn',
'node_group_template_id': self.ng_tmpl_dn_id,
'count': 1
},
{
'name': 'worker-node-nm',
'node_group_template_id': self.ng_tmpl_nm_id,
'count': 1
}
],
'net_id': self.internal_neutron_net
}
self.cluster_template_id = self.create_cluster_template(**template)
self.addCleanup(self.delete_cluster_template, self.cluster_template_id)
@b.errormsg("Failure while cluster creation: ")
def _create_cluster(self):
cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
self.plugin_config.PLUGIN_NAME)
cluster = {
'name': cluster_name,
'plugin_config': self.plugin_config,
'cluster_template_id': self.cluster_template_id,
'description': 'test cluster',
'cluster_configs': {}
}
        self.cluster_id = self.create_cluster(**cluster)
        self.addCleanup(self.delete_cluster, self.cluster_id)
        self.poll_cluster_state(self.cluster_id)
self.cluster_info = self.get_cluster_info(self.plugin_config)
self.await_active_workers_for_namenode(self.cluster_info['node_info'],
self.plugin_config)
@b.errormsg("Failure while Cinder testing: ")
def _check_cinder(self):
self.cinder_volume_testing(self.cluster_info)
@b.errormsg("Failure while Map Reduce testing: ")
def _check_mapreduce(self):
self.map_reduce_testing(self.cluster_info)
@b.errormsg("Failure during check of Swift availability: ")
def _check_swift(self):
self.check_swift_availability(self.cluster_info)
@b.errormsg("Failure while EDP testing: ")
def _check_edp(self):
self.poll_jobs_status(list(self._run_edp_tests()))
def _run_edp_tests(self):
skipped_edp_job_types = self.plugin_config.SKIP_EDP_JOB_TYPES
if utils_edp.JOB_TYPE_PIG not in skipped_edp_job_types:
yield self._edp_pig_test()
if utils_edp.JOB_TYPE_MAPREDUCE not in skipped_edp_job_types:
yield self._edp_mapreduce_test()
if utils_edp.JOB_TYPE_MAPREDUCE_STREAMING not in skipped_edp_job_types:
yield self._edp_mapreduce_streaming_test()
if utils_edp.JOB_TYPE_JAVA not in skipped_edp_job_types:
yield self._edp_java_test()
if utils_edp.JOB_TYPE_HIVE not in skipped_edp_job_types:
yield self._check_edp_hive()
if utils_edp.JOB_TYPE_SHELL not in skipped_edp_job_types:
yield self._edp_shell_test()
# TODO(esikachev): Until fix bug 1413602
def _run_edp_tests_after_scaling(self):
skipped_edp_job_types = self.plugin_config.SKIP_EDP_JOB_TYPES
if utils_edp.JOB_TYPE_PIG not in skipped_edp_job_types:
yield self._edp_pig_test()
if utils_edp.JOB_TYPE_MAPREDUCE not in skipped_edp_job_types:
yield self._edp_mapreduce_test()
if utils_edp.JOB_TYPE_MAPREDUCE_STREAMING not in skipped_edp_job_types:
yield self._edp_mapreduce_streaming_test()
if utils_edp.JOB_TYPE_JAVA not in skipped_edp_job_types:
yield self._edp_java_test()
if utils_edp.JOB_TYPE_SHELL not in skipped_edp_job_types:
yield self._edp_shell_test()
def _edp_pig_test(self):
pig_job = self.edp_info.read_pig_example_script()
pig_lib = self.edp_info.read_pig_example_jar()
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_PIG,
job_data_list=[{'pig': pig_job}],
lib_data_list=[{'jar': pig_lib}],
swift_binaries=True,
hdfs_local_output=True)
def _edp_mapreduce_test(self):
mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
mapreduce_configs = self.edp_info.mapreduce_example_configs()
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_MAPREDUCE,
job_data_list=[],
lib_data_list=[{'jar': mapreduce_jar}],
configs=mapreduce_configs,
swift_binaries=True,
hdfs_local_output=True)
def _edp_mapreduce_streaming_test(self):
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
job_data_list=[],
lib_data_list=[],
configs=self.edp_info.mapreduce_streaming_configs())
def _edp_java_test(self):
java_jar = self.edp_info.read_java_example_lib(2)
java_configs = self.edp_info.java_example_configs(2)
return self.edp_testing(
utils_edp.JOB_TYPE_JAVA,
job_data_list=[],
lib_data_list=[{'jar': java_jar}],
configs=java_configs)
def _edp_shell_test(self):
shell_script_data = self.edp_info.read_shell_example_script()
shell_file_data = self.edp_info.read_shell_example_text_file()
return self.edp_testing(
job_type=utils_edp.JOB_TYPE_SHELL,
job_data_list=[{'script': shell_script_data}],
lib_data_list=[{'text': shell_file_data}],
configs=self.edp_info.shell_example_configs())
def _check_edp_hive(self):
return self.check_edp_hive()
@b.errormsg("Failure while cluster scaling: ")
def _check_scaling(self):
change_list = [
{
'operation': 'resize',
'info': ['worker-node-nm-dn', 1]
},
{
'operation': 'resize',
'info': ['worker-node-dn', 0]
},
{
'operation': 'resize',
'info': ['worker-node-nm', 0]
},
{
'operation': 'add',
'info': [
'new-worker-node-nm', 1, '%s' % self.ng_tmpl_nm_id
]
},
{
'operation': 'add',
'info': [
'new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id
]
}
]
self.cluster_info = self.cluster_scaling(self.cluster_info,
change_list)
self.await_active_workers_for_namenode(self.cluster_info['node_info'],
self.plugin_config)
@b.errormsg("Failure while Cinder testing after cluster scaling: ")
def _check_cinder_after_scaling(self):
self.cinder_volume_testing(self.cluster_info)
@b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
def _check_mapreduce_after_scaling(self):
self.map_reduce_testing(self.cluster_info)
@b.errormsg(
"Failure during check of Swift availability after cluster scaling: ")
def _check_swift_after_scaling(self):
self.check_swift_availability(self.cluster_info)
@b.errormsg("Failure while EDP testing after cluster scaling: ")
def _check_edp_after_scaling(self):
self.poll_jobs_status(list(self._run_edp_tests_after_scaling()))
@testcase.skipIf(
cfg.ITConfig().vanilla_two_config.SKIP_ALL_TESTS_FOR_PLUGIN,
"All tests for Vanilla plugin were skipped")
@testcase.attr('vanilla2')
def test_vanilla_two_plugin_gating(self):
self._create_nm_dn_ng_template()
self._create_nm_ng_template()
self._create_dn_ng_template()
self._create_cluster_template()
self._create_cluster()
self._test_event_log(self.cluster_id)
self._check_cinder()
self._check_mapreduce()
self._check_swift()
self._check_edp()
if not self.plugin_config.SKIP_SCALING_TEST:
self._check_scaling()
self._test_event_log(self.cluster_id)
self._check_cinder_after_scaling()
self._check_mapreduce_after_scaling()
self._check_swift_after_scaling()
self._check_edp_after_scaling()
def tearDown(self):
super(VanillaTwoGatingTest, self).tearDown()
|
|
"""Prodigy integration for W&B
User can upload Prodigy annotated datasets directly
from the local database to W&B in Tables format.
Example usage:
```python
import wandb
from wandb.integration.prodigy import upload_dataset
run = wandb.init(project='prodigy')
upload_dataset("name_of_dataset")
wandb.finish()
```
"""
import base64
import collections.abc
from copy import deepcopy
import io
import urllib
import pandas as pd
from PIL import Image
import wandb
from wandb import util
from wandb.plots.utils import test_missing
from wandb.sdk.lib import telemetry as wb_telemetry
def named_entity(docs):
""" Creates a named entity visualization.
Taken from https://github.com/wandb/client/blob/master/wandb/plots/named_entity.py
"""
spacy = util.get_module(
"spacy",
required="part_of_speech requires the spacy library, install with `pip install spacy`",
)
util.get_module(
"en_core_web_md",
required="part_of_speech requires `en_core_web_md` library, install with `python -m spacy download en_core_web_md`",
)
# Test for required packages and missing & non-integer values in docs data
if test_missing(docs=docs):
html = spacy.displacy.render(
docs, style="ent", page=True, minify=True, jupyter=False
)
wandb_html = wandb.Html(html)
return wandb_html
def merge(dict1, dict2):
""" Return a new dictionary by merging two dictionaries recursively. """
result = deepcopy(dict1)
for key, value in dict2.items():
        if isinstance(value, collections.abc.Mapping):
result[key] = merge(result.get(key, {}), value)
else:
result[key] = deepcopy(dict2[key])
return result
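# Example (illustrative only):
#     merge({'a': 1, 'b': {'c': 2}}, {'b': {'d': 3}})
#     # -> {'a': 1, 'b': {'c': 2, 'd': 3}}; neither input dict is mutated.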
def get_schema(list_data_dict, struct, array_dict_types):
"""Get a schema of the dataset's structure and data types"""
# Get the structure of the JSON objects in the database
# This is similar to getting a JSON schema but with slightly different format
for _i, item in enumerate(list_data_dict):
# If the list contains dict objects
for k, v in item.items():
# Check if key already exists in template
if k not in struct.keys():
if isinstance(v, list):
if len(v) > 0 and isinstance(v[0], list):
# nested list structure
struct[k] = type(v) # type list
elif len(v) > 0 and not (
isinstance(v[0], list) or isinstance(v[0], dict)
):
# list of singular values
struct[k] = type(v) # type list
else:
# list of dicts
array_dict_types.append(
k
) # keep track of keys that are type list[dict]
struct[k] = {}
struct[k] = get_schema(v, struct[k], array_dict_types)
elif isinstance(v, dict):
struct[k] = {}
struct[k] = get_schema([v], struct[k], array_dict_types)
else:
struct[k] = type(v)
else:
# Get the value of struct[k] which is the current template
# Find new keys and then merge the two templates together
cur_struct = struct[k]
if isinstance(v, list):
if len(v) > 0 and isinstance(v[0], list):
# nested list coordinate structure
                        # only overwrite the template entry when the new value is not None
if v is not None:
struct[k] = type(v) # type list
elif len(v) > 0 and not (
isinstance(v[0], list) or isinstance(v[0], dict)
):
# single list with values
                        # only overwrite the template entry when the new value is not None
if v is not None:
struct[k] = type(v) # type list
else:
array_dict_types.append(
k
) # keep track of keys that are type list[dict]
struct[k] = {}
struct[k] = get_schema(v, struct[k], array_dict_types)
# merge cur_struct and struct[k], remove duplicates
struct[k] = merge(struct[k], cur_struct)
elif isinstance(v, dict):
struct[k] = {}
struct[k] = get_schema([v], struct[k], array_dict_types)
# merge cur_struct and struct[k], remove duplicates
struct[k] = merge(struct[k], cur_struct)
else:
# only update the recorded type when the new value is not None
if v is not None:
struct[k] = type(v)
return struct
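# A small illustration of get_schema() above (hypothetical rows, for clarity only):
#   rows = [{"text": "hi", "spans": [{"start": 0, "end": 2}]},
#           {"text": "yo", "score": 0.5}]
#   list_dict_keys = []
#   get_schema(rows, {}, list_dict_keys)
#   -> {"text": str, "spans": {"start": int, "end": int}, "score": float}
# with "spans" appended to list_dict_keys because it is a list of dicts.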
def standardize(item, structure, array_dict_types):
"""Standardize all rows/entries in dataset to fit the schema.
Looks for missing values and fills them in so all rows have
the same items and structure.
"""
for k, v in structure.items():
if k not in item:
# If the structure/field does not exist
if isinstance(v, dict) and (k not in array_dict_types):
# If key k is of type dict and not of type list[dict]
item[k] = {}
standardize(item[k], v, array_dict_types)
elif isinstance(v, dict) and (k in array_dict_types):
# If key k is of type dict, and is actually of type list[dict],
# just treat as a list and set to None by default
item[k] = None
else:
# Assign the type's default value (e.g. 0, "" or [])
item[k] = v()
else:
# If the structure/field already exists and is a list or dict
if isinstance(item[k], list):
# ignore if item is a nested list structure or list of non-dicts
condition = (
not (len(item[k]) > 0 and isinstance(item[k][0], list))
) and (
not (
len(item[k]) > 0
and not (
isinstance(item[k][0], list) or isinstance(item[k][0], dict)
)
)
)
if condition:
for sub_item in item[k]:
standardize(sub_item, v, array_dict_types)
elif isinstance(item[k], dict):
standardize(item[k], v, array_dict_types)
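# Sketch of standardize() in action (hypothetical row and schema, for clarity
# only): given the schema {"text": str, "score": float, "spans": {...}} with
# "spans" listed in array_dict_types, the row {"text": "hi"} is filled in
# place to {"text": "hi", "score": 0.0, "spans": None} so every row ends up
# with the same keys.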
def create_table(data):
""" Create a W&B Table.
- Create/decode images from URL/Base64
- Uses spacy to translate NER span data to visualizations.
"""
# create table object from columns
table_df = pd.DataFrame(data)
columns = list(table_df.columns)
if ("spans" in table_df.columns) and ("text" in table_df.columns):
columns.append("spans_visual")
if "image" in columns:
columns.append("image_visual")
main_table = wandb.Table(columns=columns)
# Convert to dictionary format to maintain order during processing
matrix = table_df.to_dict(orient="records")
# Import en_core_web_md if exists
en_core_web_md = util.get_module(
"en_core_web_md",
required="part_of_speech requires `en_core_web_md` library, install with `python -m spacy download en_core_web_md`",
)
nlp = en_core_web_md.load(disable=["ner"])
# Go through each individual row
for _i, document in enumerate(matrix):
# Text NER span visualizations
if ("spans_visual" in columns) and ("text" in columns):
# Add visuals for spans
document["spans_visual"] = None
doc = nlp(document["text"])
ents = []
if ("spans" in document) and (document["spans"] is not None):
for span in document["spans"]:
if ("start" in span) and ("end" in span) and ("label" in span):
charspan = doc.char_span(
span["start"], span["end"], span["label"]
)
ents.append(charspan)
doc.ents = ents
document["spans_visual"] = named_entity(docs=doc)
# Convert image link to wandb Image
if "image" in columns:
# Turn into wandb image
document["image_visual"] = None
if ("image" in document) and (document["image"] is not None):
isurl = urllib.parse.urlparse(document["image"]).scheme in (
"http",
"https",
)
isbase64 = ("data:" in document["image"]) and (
";base64" in document["image"]
)
if isurl:
# is url
try:
im = Image.open(urllib.request.urlopen(document["image"]))
document["image_visual"] = wandb.Image(im)
except urllib.error.URLError:
print(
"Warning: Image URL "
+ str(document["image"])
+ " is invalid."
)
document["image_visual"] = None
elif isbase64:
# is base64 uri
imgb64 = document["image"].split("base64,")[1]
try:
msg = base64.b64decode(imgb64)
buf = io.BytesIO(msg)
im = Image.open(buf)
document["image_visual"] = wandb.Image(im)
except base64.binascii.Error:
print(
"Warning: Base64 string "
+ str(document["image"])
+ " is invalid."
)
document["image_visual"] = None
else:
# is data path
document["image_visual"] = wandb.Image(document["image"])
# Create row and append to table
values_list = list(document.values())
main_table.add_data(*values_list)
return main_table
def upload_dataset(dataset_name):
""" Uploads dataset from local database to Weights & Biases.
Args:
dataset_name: The name of the dataset in the Prodigy database.
"""
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError("You must call wandb.init() before upload_dataset()")
with wb_telemetry.context(run=wandb.run) as tel:
tel.feature.prodigy = True
prodigy_db = util.get_module(
"prodigy.components.db",
required="`prodigy` library is required but not installed. Please see https://prodi.gy/docs/install",
)
# Retrieve and upload prodigy dataset
database = prodigy_db.connect()
data = database.get_dataset(dataset_name)
array_dict_types = []
schema = get_schema(data, {}, array_dict_types)
for i, _d in enumerate(data):
standardize(data[i], schema, array_dict_types)
table = create_table(data)
wandb.log({dataset_name: table})
print("Prodigy dataset `" + dataset_name + "` uploaded.")
|
|
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .i18n import _
from .node import (
bin,
hex,
)
from .thirdparty import attr
from . import (
encoding,
error,
metadata,
pycompat,
revlog,
)
from .utils import (
dateutil,
stringutil,
)
from .revlogutils import (
constants as revlog_constants,
flagutil,
)
_defaultextra = {b'branch': b'default'}
def _string_escape(text):
"""
>>> from .pycompat import bytechr as chr
>>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
>>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
>>> s
'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
>>> res = _string_escape(s)
>>> s == _string_unescape(res)
True
"""
# subset of the string_escape codec
text = (
text.replace(b'\\', b'\\\\')
.replace(b'\n', b'\\n')
.replace(b'\r', b'\\r')
)
return text.replace(b'\0', b'\\0')
def _string_unescape(text):
if b'\\0' in text:
# fix up \0 without getting into trouble with \\0
text = text.replace(b'\\\\', b'\\\\\n')
text = text.replace(b'\\0', b'\0')
text = text.replace(b'\n', b'')
return stringutil.unescapestr(text)
def decodeextra(text):
"""
>>> from .pycompat import bytechr as chr
>>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
... ).items())
[('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
>>> sorted(decodeextra(encodeextra({b'foo': b'bar',
... b'baz': chr(92) + chr(0) + b'2'})
... ).items())
[('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
"""
extra = _defaultextra.copy()
for l in text.split(b'\0'):
if l:
k, v = _string_unescape(l).split(b':', 1)
extra[k] = v
return extra
def encodeextra(d):
# keys must be sorted to produce a deterministic changelog entry
items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
return b"\0".join(items)
def stripdesc(desc):
"""strip trailing whitespace and leading and trailing empty lines"""
return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
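# Example for stripdesc() (illustrative input): trailing whitespace and
# leading/trailing blank lines are dropped, inner blank lines are kept:
#   stripdesc(b'\n\nfix: typo  \n\nmore detail\n') -> b'fix: typo\n\nmore detail'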
class appender(object):
"""the changelog index must be updated last on disk, so we use this class
to delay writes to it"""
def __init__(self, vfs, name, mode, buf):
self.data = buf
fp = vfs(name, mode)
self.fp = fp
self.offset = fp.tell()
self.size = vfs.fstat(fp).st_size
self._end = self.size
def end(self):
return self._end
def tell(self):
return self.offset
def flush(self):
pass
@property
def closed(self):
return self.fp.closed
def close(self):
self.fp.close()
def seek(self, offset, whence=0):
'''virtual file offset spans real file and data'''
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.end() + offset
if self.offset < self.size:
self.fp.seek(self.offset)
def read(self, count=-1):
'''only trick here is reads that span real file and data'''
ret = b""
if self.offset < self.size:
s = self.fp.read(count)
ret = s
self.offset += len(s)
if count > 0:
count -= len(s)
if count != 0:
doff = self.offset - self.size
self.data.insert(0, b"".join(self.data))
del self.data[1:]
s = self.data[0][doff : doff + count]
self.offset += len(s)
ret += s
return ret
def write(self, s):
self.data.append(bytes(s))
self.offset += len(s)
self._end += len(s)
def __enter__(self):
self.fp.__enter__()
return self
def __exit__(self, *args):
return self.fp.__exit__(*args)
class _divertopener(object):
def __init__(self, opener, target):
self._opener = opener
self._target = target
def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
if name != self._target:
return self._opener(name, mode, **kwargs)
return self._opener(name + b".a", mode, **kwargs)
def __getattr__(self, attr):
return getattr(self._opener, attr)
def _delayopener(opener, target, buf):
"""build an opener that stores chunks in 'buf' instead of 'target'"""
def _delay(name, mode=b'r', checkambig=False, **kwargs):
if name != target:
return opener(name, mode, **kwargs)
assert not kwargs
return appender(opener, name, mode, buf)
return _delay
@attr.s
class _changelogrevision(object):
# Extensions might modify _defaultextra, so let the constructor below pass
# it in
extra = attr.ib()
manifest = attr.ib()
user = attr.ib(default=b'')
date = attr.ib(default=(0, 0))
files = attr.ib(default=attr.Factory(list))
filesadded = attr.ib(default=None)
filesremoved = attr.ib(default=None)
p1copies = attr.ib(default=None)
p2copies = attr.ib(default=None)
description = attr.ib(default=b'')
branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
class changelogrevision(object):
"""Holds results of a parsed changelog revision.
Changelog revisions consist of multiple pieces of data, including
the manifest node, user, and date. This object exposes a view into
the parsed object.
"""
__slots__ = (
'_offsets',
'_text',
'_sidedata',
'_cpsd',
'_changes',
)
def __new__(cls, cl, text, sidedata, cpsd):
if not text:
return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
self = super(changelogrevision, cls).__new__(cls)
# We could return here and implement the following as an __init__.
# But doing it here is equivalent and saves an extra function call.
# format used:
# nodeid\n : manifest node in ascii
# user\n : user, no \n or \r allowed
# time tz extra\n : date (time is int or float, timezone is int)
# : extra is metadata, encoded and separated by '\0'
# : older versions ignore it
# files\n\n : files modified by the cset, no \n or \r allowed
# (.*) : comment (free text, ideally utf-8)
#
# changelog v0 doesn't use extra
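# Illustrative example of the layout above (a made-up entry, not from a
# real repository):
#   text = b"abc123\nAlice <a@example.org>\n1500000000 0\nfile1\nfile2\n\nmsg"
# nl1/nl2/nl3 below index the first three b'\n' bytes, and doublenl indexes
# the first newline of the b'\n\n' pair separating the file list from the
# description (b"msg").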
nl1 = text.index(b'\n')
nl2 = text.index(b'\n', nl1 + 1)
nl3 = text.index(b'\n', nl2 + 1)
# The list of files may be empty. Which means nl3 is the first of the
# double newline that precedes the description.
if text[nl3 + 1 : nl3 + 2] == b'\n':
doublenl = nl3
else:
doublenl = text.index(b'\n\n', nl3 + 1)
self._offsets = (nl1, nl2, nl3, doublenl)
self._text = text
self._sidedata = sidedata
self._cpsd = cpsd
self._changes = None
return self
@property
def manifest(self):
return bin(self._text[0 : self._offsets[0]])
@property
def user(self):
off = self._offsets
return encoding.tolocal(self._text[off[0] + 1 : off[1]])
@property
def _rawdate(self):
off = self._offsets
dateextra = self._text[off[1] + 1 : off[2]]
return dateextra.split(b' ', 2)[0:2]
@property
def _rawextra(self):
off = self._offsets
dateextra = self._text[off[1] + 1 : off[2]]
fields = dateextra.split(b' ', 2)
if len(fields) != 3:
return None
return fields[2]
@property
def date(self):
raw = self._rawdate
time = float(raw[0])
# Various tools did silly things with the timezone.
try:
timezone = int(raw[1])
except ValueError:
timezone = 0
return time, timezone
@property
def extra(self):
raw = self._rawextra
if raw is None:
return _defaultextra
return decodeextra(raw)
@property
def changes(self):
if self._changes is not None:
return self._changes
if self._cpsd:
changes = metadata.decode_files_sidedata(self._sidedata)
else:
changes = metadata.ChangingFiles(
touched=self.files or (),
added=self.filesadded or (),
removed=self.filesremoved or (),
p1_copies=self.p1copies or {},
p2_copies=self.p2copies or {},
)
self._changes = changes
return changes
@property
def files(self):
if self._cpsd:
return sorted(self.changes.touched)
off = self._offsets
if off[2] == off[3]:
return []
return self._text[off[2] + 1 : off[3]].split(b'\n')
@property
def filesadded(self):
if self._cpsd:
return self.changes.added
else:
rawindices = self.extra.get(b'filesadded')
if rawindices is None:
return None
return metadata.decodefileindices(self.files, rawindices)
@property
def filesremoved(self):
if self._cpsd:
return self.changes.removed
else:
rawindices = self.extra.get(b'filesremoved')
if rawindices is None:
return None
return metadata.decodefileindices(self.files, rawindices)
@property
def p1copies(self):
if self._cpsd:
return self.changes.copied_from_p1
else:
rawcopies = self.extra.get(b'p1copies')
if rawcopies is None:
return None
return metadata.decodecopies(self.files, rawcopies)
@property
def p2copies(self):
if self._cpsd:
return self.changes.copied_from_p2
else:
rawcopies = self.extra.get(b'p2copies')
if rawcopies is None:
return None
return metadata.decodecopies(self.files, rawcopies)
@property
def description(self):
return encoding.tolocal(self._text[self._offsets[3] + 2 :])
@property
def branchinfo(self):
extra = self.extra
return encoding.tolocal(extra.get(b"branch")), b'close' in extra
class changelog(revlog.revlog):
def __init__(self, opener, trypending=False, concurrencychecker=None):
"""Load a changelog revlog using an opener.
If ``trypending`` is true, we attempt to load the index from a
``00changelog.i.a`` file instead of the default ``00changelog.i``.
The ``00changelog.i.a`` file contains index (and possibly inline
revision) data for a transaction that hasn't been finalized yet.
It exists in a separate file to facilitate readers (such as
hook processes) accessing data before a transaction is finalized.
``concurrencychecker`` will be passed to the revlog init function, see
the documentation there.
"""
revlog.revlog.__init__(
self,
opener,
target=(revlog_constants.KIND_CHANGELOG, None),
radix=b'00changelog',
checkambig=True,
mmaplargeindex=True,
persistentnodemap=opener.options.get(b'persistent-nodemap', False),
concurrencychecker=concurrencychecker,
trypending=trypending,
)
if self._initempty and (self._format_version == revlog.REVLOGV1):
# changelogs don't benefit from generaldelta.
self._format_flags &= ~revlog.FLAG_GENERALDELTA
self._generaldelta = False
# Delta chains for changelogs tend to be very small because entries
# tend to be small and don't delta well with each other. So disable delta
# chains.
self._storedeltachains = False
self._realopener = opener
self._delayed = False
self._delaybuf = None
self._divert = False
self._filteredrevs = frozenset()
self._filteredrevs_hashcache = {}
self._copiesstorage = opener.options.get(b'copies-storage')
@property
def filteredrevs(self):
return self._filteredrevs
@filteredrevs.setter
def filteredrevs(self, val):
# Ensure all updates go through this function
assert isinstance(val, frozenset)
self._filteredrevs = val
self._filteredrevs_hashcache = {}
def _write_docket(self, tr):
if not self._delayed:
super(changelog, self)._write_docket(tr)
def delayupdate(self, tr):
"""delay visibility of index updates to other readers"""
if self._docket is None and not self._delayed:
if len(self) == 0:
self._divert = True
if self._realopener.exists(self._indexfile + b'.a'):
self._realopener.unlink(self._indexfile + b'.a')
self.opener = _divertopener(self._realopener, self._indexfile)
else:
self._delaybuf = []
self.opener = _delayopener(
self._realopener, self._indexfile, self._delaybuf
)
self._segmentfile.opener = self.opener
self._segmentfile_sidedata.opener = self.opener
self._delayed = True
tr.addpending(b'cl-%i' % id(self), self._writepending)
tr.addfinalize(b'cl-%i' % id(self), self._finalize)
def _finalize(self, tr):
"""finalize index updates"""
self._delayed = False
self.opener = self._realopener
self._segmentfile.opener = self.opener
self._segmentfile_sidedata.opener = self.opener
# move redirected index data back into place
if self._docket is not None:
self._write_docket(tr)
elif self._divert:
assert not self._delaybuf
tmpname = self._indexfile + b".a"
nfile = self.opener.open(tmpname)
nfile.close()
self.opener.rename(tmpname, self._indexfile, checkambig=True)
elif self._delaybuf:
fp = self.opener(self._indexfile, b'a', checkambig=True)
fp.write(b"".join(self._delaybuf))
fp.close()
self._delaybuf = None
self._divert = False
# split when we're done
self._enforceinlinesize(tr)
def _writepending(self, tr):
"""create a file containing the unfinalized state for
pretxnchangegroup"""
if self._docket:
return self._docket.write(tr, pending=True)
if self._delaybuf:
# make a temporary copy of the index
fp1 = self._realopener(self._indexfile)
pendingfilename = self._indexfile + b".a"
# register as a temp file to ensure cleanup on failure
tr.registertmp(pendingfilename)
# write existing data
fp2 = self._realopener(pendingfilename, b"w")
fp2.write(fp1.read())
# add pending data
fp2.write(b"".join(self._delaybuf))
fp2.close()
# switch modes so finalize can simply rename
self._delaybuf = None
self._divert = True
self.opener = _divertopener(self._realopener, self._indexfile)
self._segmentfile.opener = self.opener
self._segmentfile_sidedata.opener = self.opener
if self._divert:
return True
return False
def _enforceinlinesize(self, tr):
if not self._delayed:
revlog.revlog._enforceinlinesize(self, tr)
def read(self, nodeorrev):
"""Obtain data from a parsed changelog revision.
Returns a 6-tuple of:
- manifest node in binary
- author/user as a localstr
- date as a 2-tuple of (time, timezone)
- list of files
- commit message as a localstr
- dict of extra metadata
Unless you need to access all fields, consider calling
``changelogrevision`` instead, as it is faster for partial object
access.
"""
d = self._revisiondata(nodeorrev)
sidedata = self.sidedata(nodeorrev)
copy_sd = self._copiesstorage == b'changeset-sidedata'
c = changelogrevision(self, d, sidedata, copy_sd)
return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
def changelogrevision(self, nodeorrev):
"""Obtain a ``changelogrevision`` for a node or revision."""
text = self._revisiondata(nodeorrev)
sidedata = self.sidedata(nodeorrev)
return changelogrevision(
self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
)
def readfiles(self, nodeorrev):
"""
short version of read that only returns the files modified by the cset
"""
text = self.revision(nodeorrev)
if not text:
return []
last = text.index(b"\n\n")
l = text[:last].split(b'\n')
return l[3:]
def add(
self,
manifest,
files,
desc,
transaction,
p1,
p2,
user,
date=None,
extra=None,
):
# Convert to UTF-8 encoded bytestrings as the very first
# thing: calling any method on a localstr object will turn it
# into a str object and the cached UTF-8 string is thus lost.
user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
user = user.strip()
# An empty username or a username with a "\n" will make the
# revision text contain two "\n\n" sequences -> corrupt
# repository since read cannot unpack the revision.
if not user:
raise error.StorageError(_(b"empty username"))
if b"\n" in user:
raise error.StorageError(
_(b"username %r contains a newline") % pycompat.bytestr(user)
)
desc = stripdesc(desc)
if date:
parseddate = b"%d %d" % dateutil.parsedate(date)
else:
parseddate = b"%d %d" % dateutil.makedate()
if extra:
branch = extra.get(b"branch")
if branch in (b"default", b""):
del extra[b"branch"]
elif branch in (b".", b"null", b"tip"):
raise error.StorageError(
_(b'the name \'%s\' is reserved') % branch
)
sortedfiles = sorted(files.touched)
flags = 0
sidedata = None
if self._copiesstorage == b'changeset-sidedata':
if files.has_copies_info:
flags |= flagutil.REVIDX_HASCOPIESINFO
sidedata = metadata.encode_files_sidedata(files)
if extra:
extra = encodeextra(extra)
parseddate = b"%s %s" % (parseddate, extra)
l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
text = b"\n".join(l)
rev = self.addrevision(
text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
)
return self.node(rev)
def branchinfo(self, rev):
"""return the branch name and open/close state of a revision
This function exists because creating a changectx object
just to access this is costly."""
return self.changelogrevision(rev).branchinfo
def _nodeduplicatecallback(self, transaction, rev):
# keep track of revisions that got "re-added", e.g. unbundle of a known rev.
#
# We track them in a list to preserve their order from the source bundle
duplicates = transaction.changes.setdefault(b'revduplicates', [])
duplicates.append(rev)
|
|
#!/usr/bin/env python2
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Erwan Velu <erwan.velu@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from SocketServer import BaseRequestHandler, ThreadingTCPServer
import ConfigParser
import socket
import struct
from health_messages import Health_Message as HM
import health_libs as HL
import health_protocol as HP
import logging
import os
import pprint
import sys
import threading
import time
import yaml
import math
import shutil
import getopt
socket_list = {}
lock_socket_list = threading.RLock()
hosts = {}
lock_host = threading.RLock()
hosts_state = {}
results_cpu = {}
results_memory = {}
results_network = {}
results_storage = {}
serv = 0
startup_date = ""
NOTHING_RUN = 0
CPU_RUN = 1 << 0
MEMORY_RUN = 1 << 1
STORAGE_RUN = 1 << 2
NETWORK_RUN = 1 << 3
SCHED_FAIR = "fair"
start_jitter = {}
stop_jitter = {}
running_jitter = False
average = lambda x: sum(x) * 1.0 / len(x)
variance = lambda x: map(lambda y: (y - average(x)) ** 2, x)
stdev = lambda x: math.sqrt(average(variance(x)))
def print_help():
print 'health-server help '
print
print '-h --help : Print this help'
print '-f <file> or --file <file> : Mandatory option to select the benchmark file'
print '-t <title> or --title <title> : Optional option to define a title for this benchmark'
print ' This is useful to describe a temporary context'
def init_jitter():
global start_jitter
global stop_jitter
global running_jitter
start_jitter = {}
stop_jitter = {}
running_jitter = True
def disable_jitter():
global running_jitter
running_jitter = False
def start_time(host):
timestamp = time.time()
global start_jitter
if host not in start_jitter:
start_jitter[host] = [timestamp]
else:
start_jitter[host].append(timestamp)
def stop_time(host):
timestamp = time.time()
global stop_jitter
stop_jitter[host] = timestamp
class SocketHandler(BaseRequestHandler):
global hosts
global lock_host
timeout = 5
disable_nagle_algorithm = False  # keep Nagle's algorithm enabled (TCP_NODELAY is not set)
def handle(self):
lock_socket_list.acquire()
socket_list[self.client_address] = self.request
lock_socket_list.release()
HP.logger.debug('Got connection from %s' % self.client_address[0])
while True:
msg = HP.recv_hm_message(socket_list[self.client_address])
if not msg:
continue
if msg.message != HM.ACK:
# If we do receive a STARTING message, let's record the starting time
# No need to continue processing the packet, we can wait for the next one
if msg.action == HM.STARTING:
start_time(self.client_address)
continue
if msg.message == HM.DISCONNECT:
HP.logger.debug('Disconnecting from %s' %
self.client_address[0])
lock_host.acquire()
del hosts[self.client_address]
del hosts_state[self.client_address]
lock_host.release()
socket_list[self.client_address].close()
lock_socket_list.acquire()
del socket_list[self.client_address]
lock_socket_list.release()
return
else:
lock_host.acquire()
hosts[self.client_address] = msg
hosts_state[self.client_address] = NOTHING_RUN
lock_host.release()
if msg.message == HM.MODULE and msg.action == HM.COMPLETED:
if running_jitter is True:
stop_time(self.client_address)
if msg.module == HM.CPU:
cpu_completed(self.client_address, msg)
elif msg.module == HM.MEMORY:
memory_completed(self.client_address, msg)
elif msg.module == HM.NETWORK:
network_completed(self.client_address, msg)
elif msg.module == HM.STORAGE:
storage_completed(self.client_address, msg)
def createAndStartServer():
global serv
ThreadingTCPServer.allow_reuse_address = True
serv = ThreadingTCPServer(('', 20000), SocketHandler,
bind_and_activate=False)
l_onoff = 1
l_linger = 0
serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', l_onoff, l_linger))
serv.server_bind()
serv.server_activate()
HP.logger.info('Starting server')
serv.serve_forever() # blocking method
def cpu_completed(host, msg):
global hosts_state
global results_cpu
hosts_state[host] &= ~CPU_RUN
results_cpu[host] = msg.hw
def memory_completed(host, msg):
global hosts_state
global results_memory
hosts_state[host] &= ~MEMORY_RUN
results_memory[host] = msg.hw
def network_completed(host, msg):
global hosts_state
global results_network
hosts_state[host] &= ~NETWORK_RUN
results_network[host] = msg.hw
def storage_completed(host, msg):
global hosts_state
global results_storage
hosts_state[host] &= ~STORAGE_RUN
results_storage[host] = msg.hw
def get_host_list(item):
global hosts
global hosts_state
selected_hosts = {}
for host in hosts.keys():
if hosts_state[host] & item == item:
selected_hosts[host] = True
return selected_hosts
def compute_affinity(bench=[]):
affinity = {}
global hosts
def acceptable_host(host_list, host):
if (len(host_list) == 0):
return True
if host in host_list:
return True
return False
for host in hosts.keys():
hw = hosts[host].hw
system_id = HL.get_value(hw, "system", "product", "serial")
if len(bench) > 0:
if acceptable_host(bench['affinity-hosts'], system_id) is False:
continue
if system_id not in affinity.keys():
affinity[system_id] = [host]
else:
# If the system is already known, it means that several
# virtual machines are sharing the same Hypervisor
affinity[system_id].append(host)
return affinity
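# Sketch of the affinity mapping built above (addresses and serials are
# hypothetical): clients reporting the same system serial are grouped under
# the same hypervisor entry.
#   {'HV-SERIAL-1': [('10.0.0.11', 42001), ('10.0.0.12', 42002)],
#    'HV-SERIAL-2': [('10.0.0.21', 42003)]}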
def get_fair_hosts_list(affinity_hosts_list, nb_hosts):
hosts_list = []
while (len(hosts_list) < nb_hosts):
for hypervisor in affinity_hosts_list.keys():
if (len(affinity_hosts_list[hypervisor]) == 0):
return hosts_list
hosts_list.append(affinity_hosts_list[hypervisor].pop())
if (len(hosts_list) == nb_hosts):
break
return hosts_list
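# Round-robin sketch for get_fair_hosts_list() (hypothetical hypervisors and
# VMs): with affinity {'hv1': [vm_a, vm_b], 'hv2': [vm_c, vm_d]} and
# nb_hosts=3, one VM is popped from each hypervisor in turn, giving e.g.
# [vm_b, vm_d, vm_a] (pop() takes from the end of each list).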
def get_fair_hosts_list_per_hv(affinity_hosts_list, nb_hosts):
hosts_list = {}
for hypervisor in affinity_hosts_list.keys():
hosts_list[hypervisor] = []
selected_hosts = 0
while (selected_hosts < nb_hosts):
for hypervisor in affinity_hosts_list.keys():
if (len(affinity_hosts_list[hypervisor]) == 0):
return hosts_list
hosts_list[hypervisor].append(affinity_hosts_list[hypervisor].pop())
selected_hosts = selected_hosts + 1
if (selected_hosts == nb_hosts):
break
return hosts_list
def get_hosts_list_from_affinity(bench, sorted_list=False):
affinity_hosts_list = compute_affinity(bench)
hosts_list = []
if bench['affinity'] == SCHED_FAIR:
if sorted_list is False:
hosts_list = get_fair_hosts_list(affinity_hosts_list, bench['nb-hosts'])
else:
hosts_list = get_fair_hosts_list_per_hv(affinity_hosts_list, bench['nb-hosts'])
else:
HP.logger.error("Unsupported affinity : %s" % bench['affinity'])
return hosts_list
def dump_affinity(bench, bench_type):
HP.logger.debug("Using affinity %s on the following mapping :" % bench['affinity'])
host_affinity = compute_affinity(bench)
final_list = {}
if bench_type == HM.NETWORK:
return bench['hosts-list']
for hypervisor in host_affinity.keys():
for hostname in bench['hosts-list']:
if hostname in host_affinity[hypervisor]:
if hypervisor not in final_list.keys():
final_list[hypervisor] = [hostname]
else:
final_list[hypervisor].append(hostname)
return final_list
def start_cpu_bench(bench):
global hosts_state
nb_hosts = bench['nb-hosts']
msg = HM(HM.MODULE, HM.CPU, HM.START)
msg.cpu_instances = bench['cores']
msg.running_time = bench['runtime']
for host in bench['hosts-list']:
if nb_hosts == 0:
break
if host not in get_host_list(CPU_RUN).keys():
hosts_state[host] |= CPU_RUN
nb_hosts = nb_hosts - 1
lock_socket_list.acquire()
start_time(host)
HP.send_hm_message(socket_list[host], msg)
lock_socket_list.release()
def start_memory_bench(bench):
global hosts_state
nb_hosts = bench['nb-hosts']
msg = HM(HM.MODULE, HM.MEMORY, HM.START)
msg.cpu_instances = bench['cores']
msg.block_size = bench['block-size']
msg.running_time = bench['runtime']
msg.mode = bench['mode']
for host in bench['hosts-list']:
if nb_hosts == 0:
break
if host not in get_host_list(MEMORY_RUN).keys():
hosts_state[host] |= MEMORY_RUN
nb_hosts = nb_hosts - 1
lock_socket_list.acquire()
start_time(host)
HP.send_hm_message(socket_list[host], msg)
lock_socket_list.release()
def start_storage_bench(bench):
global hosts_state
nb_hosts = bench['nb-hosts']
msg = HM(HM.MODULE, HM.STORAGE, HM.START)
msg.block_size = bench['block-size']
msg.access = bench['access']
msg.running_time = bench['runtime']
msg.mode = bench['mode']
msg.device = bench['device']
msg.rampup_time = bench['rampup-time']
for host in bench['hosts-list']:
if nb_hosts == 0:
break
if host not in get_host_list(STORAGE_RUN).keys():
hosts_state[host] |= STORAGE_RUN
nb_hosts = nb_hosts - 1
lock_socket_list.acquire()
start_time(host)
HP.send_hm_message(socket_list[host], msg)
lock_socket_list.release()
def prepare_network_bench(bench, mode):
global hosts_state
nb_hosts = bench['nb-hosts']
msg = HM(HM.MODULE, HM.NETWORK, mode)
msg.network_test = bench['mode']
msg.network_connection = bench['connection']
msg.peer_servers = bench['ip-list'].items()
msg.ports_list = bench['port-list']
for hv in bench['hosts-list']:
for host in bench['hosts-list'][hv]:
if nb_hosts == 0:
break
if host not in get_host_list(NETWORK_RUN).keys():
hosts_state[host] |= NETWORK_RUN
nb_hosts = nb_hosts - 1
lock_socket_list.acquire()
msg.my_peer_name = bench['ip-list'][host]
HP.send_hm_message(socket_list[host], msg)
lock_socket_list.release()
string_mode = ""
if mode == HM.INIT:
string_mode = "Initialisation"
else:
string_mode = "Cleaning"
HP.logger.info("NETWORK: %s in progress" % string_mode)
max_timeout = 45
timeout = 0
while (get_host_list(NETWORK_RUN).keys()):
timeout = timeout + 1
time.sleep(1)
if timeout == max_timeout:
HP.logger.error("NETWORK: Failed to %s the following hosts : " % string_mode + str(get_host_list(NETWORK_RUN).keys()))
return False
return True
def start_network_bench(bench):
global hosts_state
nb_hosts = bench['nb-hosts']
msg = HM(HM.MODULE, HM.NETWORK, HM.START)
msg.block_size = bench['block-size']
msg.running_time = bench['runtime']
msg.network_test = bench['mode']
msg.network_connection = bench['connection']
msg.ports_list = bench['port-list']
bench['arity_groups'] = []
arity_group = []
used_hosts = []
ip_list = {}
while nb_hosts > 0:
for hv in bench['hosts-list']:
for host in bench['hosts-list'][hv]:
if nb_hosts == 0:
break
# We shall not use the same host twice
if host in used_hosts:
continue
used_hosts.append(host)
arity_group.append(host)
ip_list[host] = bench['ip-list'][host]
nb_hosts = nb_hosts - 1
if len(arity_group) == bench['arity']:
bench['arity_groups'].append(arity_group)
msg.peer_servers = ip_list.items()
for peer_server in arity_group:
if peer_server not in get_host_list(NETWORK_RUN).keys():
msg.my_peer_name = bench['ip-list'][peer_server]
hosts_state[peer_server] |= NETWORK_RUN
lock_socket_list.acquire()
start_time(peer_server)
HP.send_hm_message(socket_list[peer_server], msg)
lock_socket_list.release()
arity_group = []
ip_list = {}
# We shall break to switch to another hypervisor
break
if nb_hosts == 0:
return
def disconnect_clients():
global serv
global hosts
msg = HM(HM.DISCONNECT)
HP.logger.info("Asking %d hosts to disconnect" % len(hosts.keys()))
for host in hosts.keys():
lock_socket_list.acquire()
HP.send_hm_message(socket_list[host], msg)
lock_socket_list.release()
while(hosts.keys()):
time.sleep(1)
HP.logger.info("Still %d hosts connected" % len(hosts.keys()))
HP.logger.info("All hosts disconnected")
serv.shutdown()
serv.socket.close()
def save_hw(items, name, hwdir):
'Save hw items for inspection on the server.'
try:
filename = os.path.join(hwdir, name + '.hw')
pprint.pprint(items, stream=open(filename, 'w'))
except Exception, xcpt:
HP.logger.error("exception while saving hw file: %s" % str(xcpt))
def dump_hosts(log_dir):
global hosts
unique_hosts_list = []
for host in hosts.keys():
uuid = HL.get_value(hosts[host].hw, "system", "product", "serial")
if uuid not in unique_hosts_list:
unique_hosts_list.append(uuid)
pprint.pprint(unique_hosts_list, stream=open(log_dir+"/hosts", 'w'))
pprint.pprint(compute_affinity(), stream=open(log_dir+"/affinity", 'w'))
def prepare_metrics(log_dir, bench, bench_type):
dest_dir = log_dir + '/%d/' % bench['nb-hosts']
if bench_type == HM.CPU:
dest_dir = dest_dir + "/cpu-" + bench['name']
elif bench_type == HM.MEMORY:
dest_dir = dest_dir + "/memory-" + bench['name']
elif bench_type == HM.NETWORK:
dest_dir = dest_dir + "/network-" + bench['name']
elif bench_type == HM.STORAGE:
dest_dir = dest_dir + "/storage-" + bench['name']
else:
HL.fatal_error("Unknown benchmark type in prepare_metrics")
try:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
except OSError, e:
HL.fatal_error("Cannot create %s directory (%s)" % (dest_dir, e.errno))
output = {}
output['bench'] = bench
output['affinity'] = dump_affinity(bench, bench_type)
pprint.pprint(output, stream=open(dest_dir+"/metrics", 'w'))
return dest_dir
def compute_metrics(dest_dir, bench, bench_type):
if bench_type == HM.CPU:
results = results_cpu
elif bench_type == HM.MEMORY:
results = results_memory
elif bench_type == HM.NETWORK:
results = results_network
elif bench_type == HM.STORAGE:
results = results_storage
else:
HL.fatal_error("Unknown benchmark type in compute_metrics")
delta_start_jitter = {}
duration = {}
real_start = {}
for host in results.keys():
# Checking jitter settings
if host not in start_jitter:
HP.logger.error("Host %s should have a jitter value !" % host)
else:
if len(start_jitter[host]) < 2:
HP.logger.error("Not enough start jitter information for host %s" % host)
else:
real_start[host] = start_jitter[host][1]
delta_start_jitter[host] = (start_jitter[host][1] - start_jitter[host][0])
duration[host] = (stop_jitter[host] - start_jitter[host][1])
if (float(duration[host]) > float(bench['runtime'] + 1)):
HP.logger.error("Host %s took too much time : %.2f while expecting %d" % (host, duration[host], bench['runtime']))
HP.logger.debug("Dumping result from host %s" % str(host))
filename_and_macs = HL.generate_filename_and_macs(results[host])
save_hw(results[host], filename_and_macs['sysname'], dest_dir)
output = {}
output['bench'] = bench
output['hosts'] = results.keys()
output['affinity'] = dump_affinity(bench, bench_type)
output['start_time'] = real_start
output['start_lag'] = delta_start_jitter
output['duration'] = duration
pprint.pprint(output, stream=open(dest_dir+"/metrics", 'w'))
def get_default_value(job, item, default_value):
return job.get(item, default_value)
def prepare_log_dir(name):
config = ConfigParser.ConfigParser()
config.read('/etc/edeploy.conf')
def config_get(section, name, default):
'Secured config getter.'
try:
return config.get(section, name)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
return default
cfg_dir = os.path.normpath(config_get('SERVER', 'HEALTHDIR', '')) + '/'
dirname = startup_date
dest_dir = cfg_dir + 'dahc/%s/' % name + dirname
try:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
except OSError, e:
HL.fatal_error("Cannot create %s directory (%s)" % (dest_dir, e.errno))
HP.logger.info("Results will be stored in %s" % dest_dir)
return dest_dir
def compute_nb_hosts_series(bench):
nb_hosts_series = []
# Ensure that min_hosts is always part of the series
nb_hosts_series.append(bench['min_hosts'])
# Use the quotient of max_hosts / step-hosts to get the number of iterations
for modulo in xrange(1, divmod(bench['max_hosts'], bench['step-hosts'])[0]+1):
nb_hosts = modulo * bench['step-hosts']
# Don't save hosts that are below min_hosts
if nb_hosts > bench['min_hosts']:
nb_hosts_series.append(nb_hosts)
# Ensure that max_hosts is always part of the series
if bench['max_hosts'] not in nb_hosts_series:
nb_hosts_series.append(bench['max_hosts'])
return nb_hosts_series
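# Worked example for compute_nb_hosts_series() (hypothetical bench values):
# with min_hosts=2, max_hosts=10 and step-hosts=4 the series is built as
# [2] + [4, 8] (multiples of the step above min_hosts) + [10] -> [2, 4, 8, 10]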
def parse_job_config(bench, job, component, log_dir):
bench['component'] = component
bench['step-hosts'] = get_default_value(job, 'step-hosts', 1)
bench['name'] = get_default_value(job, 'name', '')
bench['affinity'] = get_default_value(job, 'affinity', SCHED_FAIR)
bench['runtime'] = get_default_value(job, 'runtime', bench['runtime'])
affinity_list = get_default_value(job, 'affinity-hosts', '')
affinity_hosts = []
if affinity_list:
for manual_host in affinity_list.split(","):
affinity_hosts.append(manual_host.strip())
bench['affinity-hosts'] = affinity_hosts
if len(bench['affinity-hosts']) > 0:
if len(bench['affinity-hosts']) != len(compute_affinity(bench)):
HP.logger.error("ERROR: Available hypervisors is different than affinity-hosts")
HP.logger.error("ERROR: %d hypervisors while we expect %d" % (len(compute_affinity(bench)), len(bench['affinity-hosts'])))
HP.logger.error("ERROR: Please check %s/affinity to see detected hypervisors" % log_dir)
return False
required_hosts = get_default_value(job, 'required-hosts',
bench['required-hosts'])
if "-" in str(required_hosts):
min_hosts = int(str(required_hosts).split("-")[0])
max_hosts = int(str(required_hosts).split("-")[1])
else:
min_hosts = required_hosts
max_hosts = min_hosts
if max_hosts < 1:
max_hosts = min_hosts
HP.logger.error("ERROR: required-hosts shall be greater than"
" 0, defaulting to global required-hosts=%d"
% max_hosts)
return False
if max_hosts > bench['required-hosts']:
HP.logger.error("ERROR: The maximum number of hosts to tests"
" is greater than the amount of available"
" hosts.")
return False
bench['min_hosts'] = min_hosts
bench['max_hosts'] = max_hosts
return True
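# Example of the 'required-hosts' forms parsed above (illustrative values):
#   required-hosts: 8      -> min_hosts = max_hosts = 8
#   required-hosts: 2-8    -> min_hosts = 2, max_hosts = 8 (ramp-up series)
# max_hosts may not exceed the global required-hosts declared in the yaml file.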
def select_vms_from_networks(bench):
port_add = 0
port_list = {}
hosts_selected_ip = {}
for hv in bench['hosts-list']:
for host in bench['hosts-list'][hv]:
ipv4_list = HL.get_multiple_values(hosts[host].hw, "network", "*", "ipv4")
match_network = False
# Let's check if one of the IP of a host match at least one network
# If so, let's save the resulting IP
for ip in ipv4_list:
for network in bench['network-hosts'].split(','):
if HL.is_in_network(ip, network.strip()):
hosts_selected_ip[host] = ip
port_list[host] = HM.port_base + port_add
port_add += 1
match_network = True
# If the host is not part of the network we look at
# Let's remove it from the possible host list
if match_network is False:
bench['hosts-list'][hv].remove(host)
bench['port-list'] = port_list
bench['ip-list'] = hosts_selected_ip
def do_network_job(bench_all, current_job, log_dir, total_runtime):
bench = dict(bench_all)
# In the network bench, step-hosts must be a multiple of the arity (default 2)
bench['step-hosts'] = get_default_value(current_job, 'step-hosts', 2)
bench['arity'] = get_default_value(current_job, 'arity', 2)
if parse_job_config(bench, current_job, HM.NETWORK, log_dir) is True:
# Only consider to watch step-hosts vs arity if we have some rampup
if (int(bench['min_hosts']) != int(bench['max_hosts'])):
if ((int(bench['step-hosts']) % int(bench['arity'])) != 0):
HP.logger.error("NETWORK: step-hosts shall be modulo arity (%d)" % int(bench['arity']))
HP.logger.error("NETWORK: Canceling Test")
return False
if ((int(bench['min_hosts']) % int(bench['arity'])) != 0) or ((int(bench['max_hosts']) % int(bench['arity'])) != 0):
HP.logger.error("NETWORK: min and max-hosts shall be modulo arity %d" % int(bench['arity']))
HP.logger.error("NETWORK: Canceling Test")
return False
nb_loops = 0
hosts_series = compute_nb_hosts_series(bench)
for nb_hosts in hosts_series:
nb_loops = nb_loops + 1
iter_bench = dict(bench)
iter_bench['cores'] = get_default_value(current_job, 'cores', 1)
iter_bench['block-size'] = get_default_value(current_job, 'block-size', "0")
iter_bench['mode'] = get_default_value(current_job, 'mode', HM.BANDWIDTH)
iter_bench['network-hosts'] = get_default_value(current_job, 'network-hosts', "0.0.0.0/0")
iter_bench['connection'] = get_default_value(current_job, 'connection', HM.TCP)
iter_bench['nb-hosts'] = nb_hosts
total_runtime += iter_bench['runtime']
iter_bench['hosts-list'] = get_hosts_list_from_affinity(iter_bench, True)
unsorted_list = get_hosts_list_from_affinity(iter_bench)
if (len(unsorted_list) < iter_bench['nb-hosts']):
HP.logger.error("NETWORK: %d hosts expected while affinity only provides %d hosts available" % (iter_bench['nb-hosts'], len(unsorted_list)))
HP.logger.error("NETWORK: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
continue
select_vms_from_networks(iter_bench)
if (len(iter_bench['ip-list']) < iter_bench['nb-hosts']):
HP.logger.error("NETWORK: %d hosts expected while ip-based filtering only provides %d hosts available" % (iter_bench['nb-hosts'], len(iter_bench['ip-list'])))
HP.logger.error("NETWORK: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
continue
if (nb_hosts % iter_bench['arity'] != 0):
HP.logger.error("NETWORK: It's impossible to get an arity=%d with %d hosts" % (iter_bench['arity'], len(iter_bench['nb-hosts'])))
HP.logger.error("NETWORK: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
continue
metrics_log_dir = prepare_metrics(log_dir, iter_bench, HM.NETWORK)
if prepare_network_bench(iter_bench, HM.INIT) is False:
HP.logger.error("NETWORK: Unable to complete initialisation")
HP.logger.error("NETWORK: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
prepare_network_bench(iter_bench, HM.CLEAN)
continue
if iter_bench['block-size'] != "0":
HP.logger.info("NETWORK: Waiting %s bench @%s %d / %d"
" to finish on %d hosts (step = %d): should take"
" %d seconds" % (iter_bench['mode'], iter_bench['block-size'], nb_loops, len(hosts_series),
iter_bench['nb-hosts'], iter_bench['step-hosts'],
iter_bench['runtime']))
else:
HP.logger.info("NETWORK: Waiting %s bench %d / %d"
" to finish on %d hosts (step = %d): should take"
" %d seconds" % (iter_bench['mode'], nb_loops, len(hosts_series),
iter_bench['nb-hosts'], iter_bench['step-hosts'],
iter_bench['runtime']))
init_jitter()
start_network_bench(iter_bench)
time.sleep(bench['runtime'])
while (get_host_list(NETWORK_RUN).keys()):
time.sleep(1)
disable_jitter()
compute_metrics(metrics_log_dir, iter_bench, HM.NETWORK)
prepare_network_bench(iter_bench, HM.CLEAN)
else:
HP.logger.error("NETWORK: Canceling Test")
def do_storage_job(bench_all, current_job, log_dir, total_runtime):
bench = dict(bench_all)
if parse_job_config(bench, current_job, HM.STORAGE, log_dir) is True:
nb_loops = 0
hosts_series = compute_nb_hosts_series(bench)
for nb_hosts in hosts_series:
nb_loops = nb_loops + 1
iter_bench = dict(bench)
iter_bench['cores'] = get_default_value(current_job, 'cores', 1)
iter_bench['block-size'] = get_default_value(current_job, 'block-size', "4k")
iter_bench['mode'] = get_default_value(current_job, 'mode', HM.RANDOM)
iter_bench['access'] = get_default_value(current_job, 'access', HM.READ)
iter_bench['device'] = get_default_value(current_job, 'device', "vda")
iter_bench['rampup-time'] = get_default_value(current_job, 'rampup-time', "5")
iter_bench['nb-hosts'] = nb_hosts
total_runtime += iter_bench['runtime']
iter_bench['hosts-list'] = get_hosts_list_from_affinity(iter_bench)
if (int(iter_bench['rampup-time']) > int(iter_bench['runtime'])):
HP.logger.error("STORAGE: Rampup time (%s) is bigger than runtime (%s" %
(iter_bench['rampup-time'], iter_bench['runtime']))
HP.logger.error("STORAGE: Canceling Test")
return
if (len(iter_bench['hosts-list']) < iter_bench['nb-hosts']):
HP.logger.error("STORAGE: %d hosts expected while affinity only provides %d hosts available" % (iter_bench['nb-hosts'], len(iter_bench['hosts-list'])))
HP.logger.error("STORAGE: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
continue
HP.logger.info("STORAGE: Waiting %s %s bench %s@%s %d / %d"
" to finish on %d hosts (step = %d): should take"
" %d seconds" % (iter_bench['mode'], iter_bench['access'], iter_bench['device'],
iter_bench['block-size'], nb_loops, len(hosts_series),
iter_bench['nb-hosts'], iter_bench['step-hosts'],
iter_bench['runtime']))
metrics_log_dir = prepare_metrics(log_dir, iter_bench, HM.STORAGE)
init_jitter()
start_storage_bench(iter_bench)
time.sleep(bench['runtime'])
while (get_host_list(STORAGE_RUN).keys()):
time.sleep(1)
disable_jitter()
compute_metrics(metrics_log_dir, iter_bench, HM.STORAGE)
else:
HP.logger.error("STORAGE: Canceling Test")
def do_memory_job(bench_all, current_job, log_dir, total_runtime):
bench = dict(bench_all)
if parse_job_config(bench, current_job, HM.MEMORY, log_dir) is True:
nb_loops = 0
hosts_series = compute_nb_hosts_series(bench)
for nb_hosts in hosts_series:
nb_loops = nb_loops + 1
iter_bench = dict(bench)
iter_bench['cores'] = get_default_value(current_job, 'cores', 1)
iter_bench['block-size'] = get_default_value(current_job, 'block-size', "128M")
iter_bench['mode'] = get_default_value(current_job, 'mode', HM.FORKED)
iter_bench['nb-hosts'] = nb_hosts
total_runtime += iter_bench['runtime']
iter_bench['hosts-list'] = get_hosts_list_from_affinity(iter_bench)
if (len(iter_bench['hosts-list']) < iter_bench['nb-hosts']):
HP.logger.error("MEMORY: %d hosts expected while affinity only provides %d hosts available" % (iter_bench['nb-hosts'], len(iter_bench['hosts-list'])))
HP.logger.error("MEMORY: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
continue
HP.logger.info("MEMORY: Waiting bench @%s %d / %d"
" to finish on %d hosts (step = %d): should take"
" %d seconds" % (iter_bench['block-size'], nb_loops, len(hosts_series),
iter_bench['nb-hosts'], iter_bench['step-hosts'],
iter_bench['runtime']))
metrics_log_dir = prepare_metrics(log_dir, iter_bench, HM.MEMORY)
init_jitter()
start_memory_bench(iter_bench)
time.sleep(bench['runtime'])
while (get_host_list(MEMORY_RUN).keys()):
time.sleep(1)
disable_jitter()
compute_metrics(metrics_log_dir, iter_bench, HM.MEMORY)
else:
HP.logger.error("MEMORY: Canceling Test")
def do_cpu_job(bench_all, current_job, log_dir, total_runtime):
bench = dict(bench_all)
if parse_job_config(bench, current_job, HM.CPU, log_dir) is True:
nb_loops = 0
hosts_series = compute_nb_hosts_series(bench)
for nb_hosts in hosts_series:
nb_loops = nb_loops + 1
iter_bench = dict(bench)
iter_bench['cores'] = get_default_value(current_job, 'cores', 1)
iter_bench['nb-hosts'] = nb_hosts
total_runtime += iter_bench['runtime']
iter_bench['hosts-list'] = get_hosts_list_from_affinity(iter_bench)
if (len(iter_bench['hosts-list']) < iter_bench['nb-hosts']):
HP.logger.error("CPU: %d hosts expected while affinity only provides %d hosts available" % (iter_bench['nb-hosts'], len(iter_bench['hosts-list'])))
HP.logger.error("CPU: Canceling test %d / %d" % ((iter_bench['nb-hosts'], iter_bench['max_hosts'])))
continue
HP.logger.info("CPU: Waiting bench %d / %d"
" to finish on %d hosts (step = %d): should take"
" %d seconds" % (nb_loops, len(hosts_series),
iter_bench['nb-hosts'], iter_bench['step-hosts'],
iter_bench['runtime']))
metrics_log_dir = prepare_metrics(log_dir, iter_bench, HM.CPU)
init_jitter()
start_cpu_bench(iter_bench)
time.sleep(bench['runtime'])
while (get_host_list(CPU_RUN).keys()):
time.sleep(1)
disable_jitter()
compute_metrics(metrics_log_dir, iter_bench, HM.CPU)
else:
HP.logger.error("CPU: Canceling Test")
def non_interactive_mode(filename, title):
global hosts
total_runtime = 0
name = "undefined"
bench_all = {}
bench_all['title'] = title
job = yaml.load(file(filename, 'r'))
if job['name'] is None:
HP.logger.error("Missing name parameter in yaml file")
disconnect_clients()
return
else:
name = job['name']
if job['required-hosts'] is None:
HP.logger.error("Missing required-hosts parameter in yaml file")
disconnect_clients()
return
bench_all['required-hosts'] = int(job['required-hosts'])
if bench_all['required-hosts'] < 1:
HP.logger.error("required-hosts shall be greater than 0")
disconnect_clients()
return
bench_all['runtime'] = get_default_value(job, 'runtime', 10)
bench_all['required-hypervisors'] = get_default_value(job, 'required-hypervisors', 0)
log_dir = prepare_log_dir(name)
# Saving original yaml file
shutil.copy2(filename, log_dir)
if (int(bench_all['required-hypervisors']) > 0):
HP.logger.info("Expecting %d hosts on %d hypervisors to start job %s" %
(bench_all['required-hosts'], int(bench_all['required-hypervisors']),
name))
else:
HP.logger.info("Expecting %d hosts to start job %s" %
(bench_all['required-hosts'], name))
hosts_count = len(hosts.keys())
previous_hosts_count = hosts_count
while (int(hosts_count) < bench_all['required-hosts']):
if (hosts_count != previous_hosts_count):
HP.logger.info("Still %d hosts to connect" % (bench_all['required-hosts'] - int(hosts_count)))
previous_hosts_count = hosts_count
dump_hosts(log_dir)
hosts_count = len(hosts.keys())
time.sleep(1)
dump_hosts(log_dir)
if len(compute_affinity()) < int(bench_all['required-hypervisors']):
HP.logger.error("%d hypervisors expected but only %d found" % (bench_all['required-hypervisors'], len(compute_affinity())))
HP.logger.error("Please adjust 'required-hypervisors' option")
HP.logger.error("Exiting")
disconnect_clients()
return
HP.logger.info("Starting %s" % name)
for next_job in job['jobs']:
HP.logger.info("Starting job %s" % next_job)
global results_network
global results_cpu
global results_memory
global results_storage
results_network = {}
results_cpu = {}
results_memory = {}
results_storage = {}
current_job = job['jobs'][next_job]
current_job['name'] = next_job
if 'component' not in current_job.keys():
HP.logger.error("Missing component in job %s, canceling job" % current_job['name'])
continue
if "cpu" in current_job['component']:
do_cpu_job(bench_all, current_job, log_dir, total_runtime)
if "memory" in current_job['component']:
do_memory_job(bench_all, current_job, log_dir, total_runtime)
if "network" in current_job['component']:
do_network_job(bench_all, current_job, log_dir, total_runtime)
if "storage" in current_job['component']:
do_storage_job(bench_all, current_job, log_dir, total_runtime)
HP.logger.info("End of %s" % name)
HP.logger.info("Results are available here : %s" % log_dir)
disconnect_clients()
if __name__ == '__main__':
HP.start_log('/var/log/health-server.log', logging.INFO)
input_file = ""
title = ""
startup_date = time.strftime("%Y_%m_%d-%Hh%M", time.localtime())
try:
opts, args = getopt.getopt(sys.argv[1:], "hf:t:", ['file', 'title'])
except getopt.GetoptError:
print "Error: One of the options passed to the cmdline was not supported"
print "Please fix your command line or read the help (-h option)"
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit(0)
elif opt in ("-f", "--file"):
input_file = arg
elif opt in ("-t", "--title"):
title = arg
if not input_file:
HP.logger.error("You must provide a yaml file as argument")
sys.exit(1)
if not title:
title = startup_date
HP.logger.info("No title provided, setup a default one to %s" % title)
myThread = threading.Thread(target=createAndStartServer)
myThread.start()
non_interactive = threading.Thread(target=non_interactive_mode,
args=tuple([input_file, title]))
non_interactive.start()
|
|
#!/usr/bin/python3
# Halide tutorial lesson 13: Tuples
# This lesson describes how to write Funcs that evaluate to multiple
# values.
# On linux, you can compile and run it like so:
# g++ lesson_13*.cpp -g -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_13 -std=c++11
# LD_LIBRARY_PATH=../bin ./lesson_13
# On os x:
# g++ lesson_13*.cpp -g -I ../include -L ../bin -lHalide -o lesson_13 -std=c++11
# DYLD_LIBRARY_PATH=../bin ./lesson_13
# If you have the entire Halide source tree, you can also build it by
# running:
# make tutorial_lesson_13_tuples
# in a shell with the current directory at the top of the halide
# source tree.
#include "Halide.h"
#include <stdio.h>
#include <algorithm>
#using namespace Halide
from halide import *
import numpy
import math
min_, max_ = __builtins__.min, __builtins__.max
def main():
# So far Funcs (such as the one below) have evaluated to a single
# scalar value for each point in their domain.
single_valued = Func()
x, y = Var("x"), Var("y")
single_valued[x, y] = x + y
# One way to write a Func that returns a collection of values is
# to add an additional dimension which indexes that
# collection. This is how we typically deal with color. For
# example, the Func below represents a collection of three values
# for every x, y coordinate indexed by c.
color_image = Func()
c = Var("c")
color_image[x, y, c] = select(c == 0, 245, # Red value
c == 1, 42, # Green value
132) # Blue value
# This method is often convenient because it makes it easy to
# operate on this Func in a way that treats each item in the
# collection equally:
brighter = Func()
brighter[x, y, c] = color_image[x, y, c] + 10
# However this method is also inconvenient for three reasons.
#
# 1) Funcs are defined over an infinite domain, so users of this
# Func can for example access color_image(x, y, -17), which is
# not a meaningful value and is probably indicative of a bug.
#
# 2) It requires a select, which can impact performance if not
# bounded and unrolled:
# brighter.bound(c, 0, 3).unroll(c)
#
# 3) With this method, all values in the collection must have the
# same type. While the above two issues are merely inconvenient,
# this one is a hard limitation that makes it impossible to
# express certain things in this way.
# It is also possible to represent a collection of values as a
# collection of Funcs:
func_array = [Func() for i in range(3)]
func_array[0][x, y] = x + y
func_array[1][x, y] = sin(x)
func_array[2][x, y] = cos(y)
# This method avoids the three problems above, but introduces a
# new annoyance. Because these are separate Funcs, it is
# difficult to schedule them so that they are all computed
# together inside a single loop over x, y.
# A third alternative is to define a Func as evaluating to a
# Tuple instead of an Expr. A Tuple is a fixed-size collection of
# Exprs which may have different types. The following function
# evaluates to an integer value (x+y), and a floating point value
# (sin(x*y)).
multi_valued = Func("multi_valued")
multi_valued[x, y] = Tuple(x + y, sin(x * y))
# Realizing a tuple-valued Func returns a collection of
# Buffers. We call this a Realization. It's equivalent to a
# std::vector of Buffer/Image objects:
if True:
r = multi_valued.realize(80, 60)
assert r.size() == 2
im1 = Image(Int(32), r[0])
im2 = Image(Float(32), r[1])
assert type(im1) is Image_int32
assert type(im2) is Image_float32
assert im1(30, 40) == 30 + 40
assert numpy.isclose(im2(30, 40), math.sin(30 * 40))
# All Tuple elements are evaluated together over the same domain
# in the same loop nest, but stored in distinct allocations. The
# equivalent C++ code to the above is:
if True:
multi_valued_0 = numpy.empty((80*60), dtype=numpy.int32)
multi_valued_1 = numpy.empty((80*60), dtype=numpy.float32)
for yy in range(60):
for xx in range(80):
multi_valued_0[xx + 80*yy] = xx + yy
multi_valued_1[xx + 80*yy] = math.sin(xx*yy)
# When compiling ahead-of-time, a Tuple-valued Func evaluates
# into multiple distinct output buffer_t structs. These appear in
# order at the end of the function signature:
# int multi_valued(...input buffers and params..., buffer_t *output_1, buffer_t *output_2)
# You can construct a Tuple by passing multiple Exprs to the
# Tuple constructor as we did above. Perhaps more elegantly, you
# can also take advantage of C++11 initializer lists and just
# enclose your Exprs in braces:
multi_valued_2 = Func("multi_valued_2")
#multi_valued_2(x, y) = {x + y, sin(x*y)}
multi_valued_2[x, y] = Tuple(x + y, sin(x * y))
# Calls to a multi-valued Func cannot be treated as Exprs. The
# following is a syntax error:
# Func consumer
# consumer[x, y] = multi_valued_2[x, y] + 10
# Instead you must index a Tuple with square brackets to retrieve
# the individual Exprs:
integer_part = multi_valued_2[x, y][0]
floating_part = multi_valued_2[x, y][1]
assert type(integer_part) is Expr
assert type(floating_part) is Expr
consumer = Func()
consumer[x, y] = Tuple(integer_part + 10, floating_part + 10.0)
# Tuple reductions.
if True:
# Tuples are particularly useful in reductions, as they allow
# the reduction to maintain complex state as it walks along
# its domain. The simplest example is an argmax.
# First we create an Image to take the argmax over.
input_func = Func()
input_func[x] = sin(x)
input = Image(Float(32), input_func.realize(100))
assert type(input) is Image_float32
# Then we define a Func whose value is a two-element Tuple that
# tracks the maximum value and its index.
arg_max = Func()
# Pure definition.
#arg_max() = Tuple(0, input(0))
# (using [None] is a convention of this python interface)
arg_max[None] = Tuple(0, input(0))
# Update definition.
r = RDom(1, 99)
old_index = arg_max[None][0]
old_max = arg_max[None][1]
new_index = select(old_max > input[r], r, old_index)
new_max = max(input[r], old_max)
arg_max[None] = Tuple(new_index, new_max)
# The equivalent C++ is:
arg_max_0 = 0
arg_max_1 = float(input(0))
for r in range(1, 100):
old_index = arg_max_0
old_max = arg_max_1
new_index = r if (old_max > input(r)) else old_index
new_max = max_(input(r), old_max)
# In a tuple update definition, all loads and computation
# are done before any stores, so that all Tuple elements
# are updated atomically with respect to recursive calls
# to the same Func.
arg_max_0 = new_index
arg_max_1 = new_max
# Let's verify that the Halide and C++ found the same maximum
# value and index.
if True:
r = arg_max.realize()
r0 = Image(Int(32), r[0])
r1 = Image(Float(32), r[1])
assert type(r0) is Image_int32
assert type(r1) is Image_float32
assert arg_max_0 == r0(0)
assert numpy.isclose(arg_max_1, r1(0))
# Halide provides argmax and argmin as built-in reductions
# similar to sum, product, maximum, and minimum. They return
# a Tuple consisting of the point in the reduction domain
# corresponding to that value, and the value itself. In the
# case of ties they return the first value found. We'll use
# one of these in the following section.
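# A hedged sketch (not part of the original tutorial): the manual reduction
# above could likely be written with the built-in argmax instead, assuming
# these Python bindings expose it with the same semantics as the C++ API:
#
#     best = Func("best")
#     r_all = RDom(0, 100)
#     best[None] = argmax(input[r_all])  # Tuple: (index of the max, max value)
#
# Realizing best would then yield an Int(32) index buffer and a Float(32)
# value buffer, analogous to r0 and r1 above.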
# Tuples for user-defined types.
if True:
# Tuples can also be a convenient way to represent compound
# objects such as complex numbers. Defining an object that
# can be converted to and from a Tuple is one way to extend
# Halide's type system with user-defined types.
class Complex:
#Expr real, imag
# Construct from a Tuple
#Complex(Tuple t) : real(t[0]), imag(t[1])
def __init__(self, r, i=None):
if type(r) is Tuple:
t = r
self.real = t[0]
self.imag = t[1]
elif type(r) is float and type(i) is float:
self.real = Expr(r)
self.imag = Expr(i)
elif i is not None:
self.real = r
self.imag = i
else:
tt = Tuple(r)
self.real = tt[0]
self.imag = tt[1]
assert type(self.real) in [Expr, FuncRefExpr]
assert type(self.imag) in [Expr, FuncRefExpr]
return
def as_tuple(self):
"Convert to a Tuple"
return Tuple(self.real, self.imag)
def __add__(self, other):
"Complex addition"
return Tuple(self.real + other.real, self.imag + other.imag)
def __mul__(self, other):
"Complex multiplication"
return Tuple(self.real * other.real - self.imag * other.imag,
self.real * other.imag + self.imag * other.real)
def magnitude(self):
"Complex magnitude"
return (self.real * self.real) + (self.imag * self.imag)
# Other complex operators would go here. The above are
# sufficient for this example.
# Let's use the Complex struct to compute a Mandelbrot set.
mandelbrot = Func()
# The initial complex value corresponding to an x, y coordinate
# in our Func.
initial = Complex(x/15.0 - 2.5, y/6.0 - 2.0)
# Pure definition.
t = Var("t")
mandelbrot[x, y, t] = Complex(0.0, 0.0).as_tuple()
# We'll use an update definition to take 12 steps.
r = RDom(1, 12)
current = Complex(mandelbrot[x, y, r-1])
# The following line uses the complex multiplication and
# addition we defined above.
mandelbrot[x, y, r] = (Complex(current*current) + initial)
# We'll use another tuple reduction to compute the iteration
# number where the value first escapes a circle of radius 4.
# This can be expressed as an argmin of a boolean - we want
# the index of the first time the given boolean expression is
# false (we consider false to be less than true). The argmax
# would return the index of the first time the expression is
# true.
escape_condition = Complex(mandelbrot[x, y, r]).magnitude() < 16.0
first_escape = argmin(escape_condition)
assert type(first_escape) is Tuple
# We only want the index, not the value, but argmin returns
# both, so we'll index the argmin Tuple expression using
# square brackets to get the Expr representing the index.
escape = Func()
escape[x, y] = first_escape[0]
# Realize the pipeline and print the result as ascii art.
result = Image(Int(32), escape.realize(61, 25))
assert type(result) is Image_int32
code = " .:-~*={&%#@"
for yy in range(result.height()):
for xx in range(result.width()):
index = result(xx, yy)
if index < len(code):
print("%c" % code[index], end="")
else:
# Clamp out-of-range escape counts to the last character so rows stay aligned.
print(code[-1], end="")
print("")
print("Success!")
return 0
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.fortran
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Fortran languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, words, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['FortranLexer', 'FortranFixedLexer']
class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
.. versionadded:: 0.10
"""
name = 'Fortran'
aliases = ['fortran']
filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
mimetypes = ['text/x-fortran']
flags = re.IGNORECASE | re.MULTILINE
# Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
# Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
# Logical operators: .NOT., .AND., .OR., .EQV., .NEQV.
# Builtins:
# http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
tokens = {
'root': [
(r'^#.*\n', Comment.Preproc),
(r'!.*\n', Comment),
include('strings'),
include('core'),
(r'[a-z][\w$]*', Name),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(words((
'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
'CODIMENSION', 'COMMON', 'CONCURRENT', 'CONTIGUOUS', 'CONTAINS',
'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS',
'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT',
'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL',
'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE',
'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'(::)', Keyword.Declaration),
(r'[()\[\],:&%;.]', Punctuation),
# Intrinsics
(words((
'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
# Booleans
(r'\.(true|false)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
],
'nums': [
(r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
(r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
(r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
],
}
class FortranFixedLexer(RegexLexer):
"""
Lexer for fixed format Fortran.
.. versionadded:: 2.1
"""
name = 'FortranFixed'
aliases = ['fortranfixed']
filenames = ['*.f', '*.F']
flags = re.IGNORECASE
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
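# Fixed-format layout implemented by the states below: columns 1-5 hold an
# optional statement label (or a C/'*' comment marker in column 1), column 6
# is the continuation indicator (blank or '0' starts a new statement, anything
# else marks a continuation line), and columns 7-72 hold the statement itself,
# which is re-lexed with the free-form FortranLexer via _lex_fortran; text
# beyond column 72 is treated as commentary.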
tokens = {
'root': [
(r'[C*].*\n', Comment),
(r'#.*\n', Comment.Preproc),
(r' {0,4}!.*\n', Comment),
(r'(.{5})', Name.Label, 'cont-char'),
(r'.*\n', using(FortranLexer)),
],
'cont-char': [
(' ', Text, 'code'),
('0', Comment, 'code'),
('.', Generic.Strong, 'code')
],
'code': [
(r'(.{66})(.*)(\n)',
bygroups(_lex_fortran, Comment, Text), 'root'),
(r'(.*)(\n)', bygroups(_lex_fortran, Text), 'root'),
(r'', Text, 'root')]
}
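# Hedged usage sketch (not part of the pygments source): tokenising a small
# free-form program with the lexer defined above. The sample source is
# illustrative only.
if __name__ == '__main__':
    demo_source = (
        "! free-form example\n"
        "program demo\n"
        "  integer :: i\n"
        "  do i = 1, 3\n"
        "    print *, 'iteration', i\n"
        "  end do\n"
        "end program demo\n"
    )
    for token_type, value in FortranLexer().get_tokens(demo_source):
        if value.strip():
            print("%-30s %r" % (token_type, value))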
|
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from datetime import datetime
import dateutil
from dateutil.tz import tzutc
import stix
import stix.bindings.campaign as campaign_binding
from stix.common import (Activity, Confidence, InformationSource, Statement,
StructuredText, VocabString)
from stix.common.related import (GenericRelationshipList, RelatedCampaign,
RelatedIncident, RelatedIndicator, RelatedPackageRefs,
RelatedThreatActor, RelatedTTP)
from stix.data_marking import Marking
import stix.utils
from stix.utils import dates
from stix.common.vocabs import CampaignStatus, IntendedEffect
class AssociatedCampaigns(GenericRelationshipList):
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = campaign_binding
_binding_class = campaign_binding.AssociatedCampaignsType
_binding_var = "Associated_Campaign"
_contained_type = RelatedCampaign
_inner_name = "campaigns"
class Attribution(GenericRelationshipList):
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = campaign_binding
_binding_class = campaign_binding.AttributionType
_binding_var = "Attributed_Threat_Actor"
_contained_type = RelatedThreatActor
_inner_name = "threat_actors"
class AttributionList(stix.EntityList):
# NOT AN ACTUAL STIX CLASS. DO NOT CALL `.to_obj()`
# ON THIS DIRECTLY! THIS IS BEING USED FOR CASTING
# PURPOSES ONLY.
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = None
_binding_class = None
_binding_var = None
_contained_type = Attribution
_inner_name = None
def _fix_value(self, value):
try:
new_value = self._contained_type(None, value)
except:
raise ValueError("Can't put '%s' (%s) into a %s" %
(value, type(value), self.__class__))
return new_value
class RelatedIncidents(GenericRelationshipList):
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = campaign_binding
_binding_class = campaign_binding.RelatedIncidentsType
_binding_var = "Related_Incident"
_contained_type = RelatedIncident
_inner_name = "incidents"
class RelatedIndicators(GenericRelationshipList):
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = campaign_binding
_binding_class = campaign_binding.RelatedIndicatorsType
_binding_var = "Related_Indicator"
_contained_type = RelatedIndicator
_inner_name = "indicators"
class RelatedTTPs(GenericRelationshipList):
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = campaign_binding
_binding_class = campaign_binding.RelatedTTPsType
_binding_var = "Related_TTP"
_contained_type = RelatedTTP
_inner_name = "ttps"
class Names(stix.EntityList):
_namespace = "http://stix.mitre.org/Campaign-1"
_binding = campaign_binding
_binding_class = campaign_binding.NamesType
_binding_var = "Name"
_contained_type = VocabString
_inner_name = "names"
class Campaign(stix.Entity):
_binding = campaign_binding
_binding_class = _binding.CampaignType
_namespace = "http://stix.mitre.org/Campaign-1"
_version = "1.1.1"
def __init__(self, id_=None, idref=None, timestamp=None, title=None, description=None, short_description=None):
self.id_ = id_ or stix.utils.create_id("Campaign")
self.idref = idref
self.version = None # self._version
self.title = title
self.description = description
self.short_description = short_description
self.names = None
self.intended_effects = None
self.status = None
self.related_ttps = RelatedTTPs()
self.related_incidents = RelatedIncidents()
self.related_indicators = RelatedIndicators()
self.attribution = AttributionList()
self.associated_campaigns = AssociatedCampaigns()
self.confidence = None
self.activity = []
self.information_source = None
self.handling = None
self.related_packages = RelatedPackageRefs()
if timestamp:
self.timestamp = timestamp
else:
self.timestamp = datetime.now(tzutc()) if not idref else None
@property
def id_(self):
return self._id
@id_.setter
def id_(self, value):
if not value:
self._id = None
else:
self._id = value
self.idref = None
@property
def version(self):
return self._version
@version.setter
def version(self, value):
if not value:
self._version = None
else:
if value != Campaign._version:
self._version = value
else:
self._version = None
@property
def idref(self):
return self._idref
@idref.setter
def idref(self, value):
if not value:
self._idref = None
else:
self._idref = value
self.id_ = None # unset id_ if idref is present
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, value):
self._timestamp = dates.parse_value(value)
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
if value:
if isinstance(value, StructuredText):
self._description = value
else:
self._description = StructuredText(value=value)
else:
self._description = None
@property
def short_description(self):
return self._short_description
@short_description.setter
def short_description(self, value):
if value:
if isinstance(value, StructuredText):
self._short_description = value
else:
self._short_description = StructuredText(value=value)
else:
self._short_description = None
@property
def intended_effects(self):
return self._intended_effects
@intended_effects.setter
def intended_effects(self, value):
self._intended_effects = []
if not value:
return
elif isinstance(value, list):
for v in value:
self.add_intended_effect(v)
else:
self.add_intended_effect(value)
def add_intended_effect(self, value):
if not value:
return
elif isinstance(value, Statement):
self.intended_effects.append(value)
else:
intended_effect = IntendedEffect(value)
self.intended_effects.append(Statement(value=intended_effect))
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if not value:
self._status = None
elif isinstance(value, VocabString):
self._status = value
else:
self._status = CampaignStatus(value)
@property
def attribution(self):
return self._attribution
@attribution.setter
def attribution(self, value):
self._attribution = AttributionList()
if not value:
return
elif isinstance(value, AttributionList):
self._attribution = value
elif hasattr(value, '__getitem__'):
self._attribution = AttributionList(*value)
else:
self._attribution.append(value) # may raise a ValueError
def to_obj(self, return_obj=None, ns_info=None):
super(Campaign, self).to_obj(return_obj=return_obj, ns_info=ns_info)
if not return_obj:
return_obj = self._binding_class()
return_obj.id = self.id_
return_obj.idref = self.idref
if self.timestamp:
return_obj.timestamp = self.timestamp.isoformat()
return_obj.version = self.version
return_obj.Title = self.title
if self.description:
return_obj.Description = self.description.to_obj(ns_info=ns_info)
if self.short_description:
return_obj.Short_Description = self.short_description.to_obj(ns_info=ns_info)
if self.names:
return_obj.Names = self.names.to_obj(ns_info=ns_info)
if self.intended_effects:
return_obj.Intended_Effect = [x.to_obj(ns_info=ns_info) for x in self.intended_effects]
if self.status:
return_obj.Status = self.status.to_obj(ns_info=ns_info)
if self.related_ttps:
return_obj.Related_TTPs = self.related_ttps.to_obj(ns_info=ns_info)
if self.related_incidents:
return_obj.Related_Incidents = self.related_incidents.to_obj(ns_info=ns_info)
if self.related_indicators:
return_obj.Related_Indicators = self.related_indicators.to_obj(ns_info=ns_info)
if self.attribution:
return_obj.Attribution = [x.to_obj(ns_info=ns_info) for x in self.attribution]
if self.associated_campaigns:
return_obj.Associated_Campaigns = self.associated_campaigns.to_obj(ns_info=ns_info)
if self.confidence:
return_obj.Confidence = self.confidence.to_obj(ns_info=ns_info)
if self.activity:
return_obj.Activity = [x.to_obj(ns_info=ns_info) for x in self.activity]
if self.information_source:
return_obj.Information_Source = self.information_source.to_obj(ns_info=ns_info)
if self.handling:
return_obj.Handling = self.handling.to_obj(ns_info=ns_info)
if self.related_packages:
return_obj.Related_Packages = self.related_packages.to_obj(ns_info=ns_info)
return return_obj
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
return_obj.id_ = obj.id
return_obj.idref = obj.idref
return_obj.timestamp = obj.timestamp
if isinstance(obj, cls._binding_class):
return_obj.version = obj.version or cls._version
return_obj.title = obj.Title
return_obj.description = StructuredText.from_obj(obj.Description)
return_obj.short_description = \
StructuredText.from_obj(obj.Short_Description)
return_obj.names = Names.from_obj(obj.Names)
return_obj.intended_effects = \
[Statement.from_obj(x) for x in obj.Intended_Effect]
return_obj.status = VocabString.from_obj(obj.Status)
return_obj.related_ttps = RelatedTTPs.from_obj(obj.Related_TTPs)
return_obj.related_incidents = \
RelatedIncidents.from_obj(obj.Related_Incidents)
return_obj.related_indicators = \
RelatedIndicators.from_obj(obj.Related_Indicators)
return_obj.attribution = \
[Attribution.from_obj(x) for x in obj.Attribution]
return_obj.associated_campaigns = \
AssociatedCampaigns.from_obj(obj.Associated_Campaigns)
return_obj.confidence = Confidence.from_obj(obj.Confidence)
return_obj.activity = \
[Activity.from_obj(x) for x in obj.Activity]
return_obj.information_source = \
InformationSource.from_obj(obj.Information_Source)
return_obj.handling = Marking.from_obj(obj.Handling)
return_obj.related_packages = \
RelatedPackageRefs.from_obj(obj.Related_Packages)
return return_obj
def to_dict(self):
d = {}
if self.id_:
d['id'] = self.id_
if self.idref:
d['idref'] = self.idref
if self.timestamp:
d['timestamp'] = self.timestamp.isoformat()
if self.version:
d['version'] = self.version or self._version
if self.title:
d['title'] = self.title
if self.description:
d['description'] = self.description.to_dict()
if self.short_description:
d['short_description'] = self.short_description.to_dict()
if self.names:
d['names'] = self.names.to_dict()
if self.intended_effects:
d['intended_effects'] = [x.to_dict() for x in self.intended_effects]
if self.status:
d['status'] = self.status.to_dict()
if self.related_ttps:
d['related_ttps'] = self.related_ttps.to_dict()
if self.related_incidents:
d['related_incidents'] = self.related_incidents.to_dict()
if self.related_indicators:
d['related_indicators'] = self.related_indicators.to_dict()
if self.attribution:
d['attribution'] = [x.to_dict() for x in self.attribution]
if self.associated_campaigns:
d['associated_campaigns'] = self.associated_campaigns.to_dict()
if self.confidence:
d['confidence'] = self.confidence.to_dict()
if self.activity:
d['activity'] = [x.to_dict() for x in self.activity]
if self.information_source:
d['information_source'] = self.information_source.to_dict()
if self.handling:
d['handling'] = self.handling.to_dict()
if self.related_packages:
d['related_packages'] = self.related_packages.to_dict()
return d
@classmethod
def from_dict(cls, dict_repr, return_obj=None):
if not dict_repr:
return None
if not return_obj:
return_obj = cls()
return_obj.id_ = dict_repr.get('id')
return_obj.idref = dict_repr.get('idref')
return_obj.timestamp = dict_repr.get('timestamp')
return_obj.version = dict_repr.get('version', cls._version)
return_obj.title = dict_repr.get('title')
return_obj.description = \
StructuredText.from_dict(dict_repr.get('description'))
return_obj.short_description = \
StructuredText.from_dict(dict_repr.get('short_description'))
return_obj.names = Names.from_dict(dict_repr.get('names'))
return_obj.intended_effects = \
[Statement.from_dict(x) for x in dict_repr.get('intended_effects', [])]
return_obj.status = VocabString.from_dict(dict_repr.get('status'))
return_obj.related_ttps = \
RelatedTTPs.from_dict(dict_repr.get('related_ttps'))
return_obj.related_incidents = \
RelatedIncidents.from_dict(dict_repr.get('related_incidents'))
return_obj.related_indicators = \
RelatedIndicators.from_dict(dict_repr.get('related_indicators'))
return_obj.attribution = \
[Attribution.from_dict(x) for x in
dict_repr.get('attribution', [])]
return_obj.associated_campaigns = \
AssociatedCampaigns.from_dict(dict_repr.get('associated_campaigns'))
return_obj.confidence = \
Confidence.from_dict(dict_repr.get('confidence'))
return_obj.activity = \
[Activity.from_dict(x) for x in dict_repr.get('activity', [])]
return_obj.information_source = \
InformationSource.from_dict(dict_repr.get('information_source'))
return_obj.handling = Marking.from_dict(dict_repr.get('handling'))
return_obj.related_packages = \
RelatedPackageRefs.from_dict(dict_repr.get('related_packages'))
return return_obj
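# Hedged usage sketch (not part of the python-stix source): build a Campaign
# and round-trip it through its dictionary representation. The title and the
# "Fraud" intended-effect value are illustrative only.
if __name__ == '__main__':
    campaign = Campaign(title="Example Campaign",
                        description="Illustrative description")
    campaign.add_intended_effect("Fraud")  # wrapped into Statement(IntendedEffect)
    d = campaign.to_dict()
    restored = Campaign.from_dict(d)
    assert restored.title == "Example Campaign"
    assert len(restored.intended_effects) == 1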
|
|
'''
Created on June 28, 2016
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
import utilities.utilities as utilities
import index
import importlib
# Maximum number of attempts for any one command
MAX_ATTEMPTS = 20
# Time between attempts, in seconds
TIME_BETWEEN_ATTEMPTS_SEC = 30
# Reliability variable name so we prevent typos
RELIABILITY_VARIABLE_NAME = "reliability"
# Total duration of time in which we should cache measurements here locally.
TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS = utilities.ONE_HOUR_MS
# Take a battery reading every 6 hours
BATTERY_MEASUREMENT_PERIODICITY_MS = utilities.ONE_HOUR_MS * 6
# Minimum number of battery readings required to make a decision on the battery life - roughly 2-3 days' worth at the periodicity above
MINIMUM_BATTERY_READINGS = 10
# Total number of battery readings to maintain
MAXIMUM_BATTERY_READINGS = MINIMUM_BATTERY_READINGS * 2
# Space type language-neutral constants
# Internal docs: https://presence.atlassian.net/wiki/spaces/BOTS/pages/656638178/Space+Constants+and+Definitions
SPACE_TYPE = {
"kitchen": 1,
"bedroom": 2,
"bathroom": 3,
"hallway": 4,
"livingroom": 5,
"diningroom": 6,
"familyroom": 7,
"laundryroom": 8,
"office": 9,
"stairs": 10,
"garage": 11,
"basement": 12,
"other": 13
}
# Helper enums
NEWEST_MEASUREMENT = 0
VALUE = 0
TIMESTAMP = 1
class Device:
"""
This is a base class for each of our devices
"""
# Low battery threshold - Override in sub-classes
LOW_BATTERY_THRESHOLD = 10
# Low signal strength threshold - Override in sub-classes
LOW_RSSI_THRESHOLD = -87
# List of Device Types this class is compatible with - Specify in sub-classes
DEVICE_TYPES = []
def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
"""
Constructor
:param botengine: BotEngine environment
:param device_id: Device ID
:param device_type: Device Type
:param device_description: Device description (nickname)
:param precache_measurements: True (default) to download historical measurements to cache them locally, the length of time of which is defined by device.TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS
"""
# Device ID
self.device_id = device_id
# Device type
self.device_type = int(device_type)
# Device description
self.description = device_description.strip()
# This is set by the controller object after init during synchronization with the location
self.location_object = None
# Measurements for each parameter, newest measurements at index 0
# self.measurements["parameterName"] = [ ( newest_value, newest_timestamp ), ( value, timestamp ), ... ]
self.measurements = {}
# Last alert received { "alert_type": { "parameter_one" : "value_one", "timestamp_ms": timestamp_ms_set_locally } }
self.last_alert = None
# Spaces this device is associated with. For example:
# "spaces": [
# {
# "name": "Kitchen",
# "spaceId": 152,
# "spaceType": 1
# },
# {
# "name": "Hallway",
# "spaceId": 154,
# "spaceType": 4
# },
# {
# "name": "Living Room",
# "spaceId": 157,
# "spaceType": 5
# }
# ]
self.spaces = []
# Last parameters that we updated
self.last_updated_params = []
# Battery level
self.battery_level = 100
# List of battery measured battery levels over time
self.battery_levels = []
# Last battery update time in ms
self.last_battery_update_ms = 0
# True if we have a low battery
self.low_battery = False
# True if this device is currently connected
self.is_connected = False
# True if we can control this device
self.can_control = False
# True if we can read from this device
self.can_read = False
# Remote IP address hash. Devices connected to the same external IP address will have the same hash.
self.remote_addr_hash = None
# The proxy ID is the device ID of the gateway this device connects through, if any.
self.proxy_id = None
# The goal (scenario) ID for this device
self.goal_id = None
# Approximate latitude (available on devices that directly connect to the cloud, like gateways)
self.latitude = None
# Approximate longitude (available on devices that directly connect to the cloud, like gateways)
self.longitude = None
# Born on timestamp
self.born_on = None
# True to enforce the default cache size. This can be reconfigured externally, followed by a call to garbage collect when needed to get rid of excess cache.
self.enforce_cache_size = precache_measurements
# Total communications odometer (includes measurements and RSSI updates / check-ins)
self.total_communications_odometer = 0
# Trip communications odometer - see how many communications we received in a shorter period of time, including RSSI check-ins
self.communications_odometer = 0
# Measurement odometer - how many actual new measurements did we receive
self.measurement_odometer = 0
# Timestamp of the last time we received a communication from this device
self.last_communications_timestamp = None
# Every device gets a dictionary of intelligence modules, and can populate these intelligence modules in each device model
self.intelligence_modules = {}
if precache_measurements:
# Download and start this object out with a history of measurements
self.cache_measurements(botengine, botengine.get_timestamp() - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS, botengine.get_timestamp())
self.new_version(botengine)
def new_version(self, botengine):
"""
New version
NOTE: YOU CANNOT CHANGE THE CLASS NAME OF A MICROSERVICE AT THIS TIME.
Microservice changes will be identified through different 'module' names only. If you change the class name, it is currently ignored.
This can be revisited in future architecture changes, noted below.
The correct behavior is to create the object, then initialize() it every time you want to use it in a new bot execution environment
:param botengine: BotEngine environment
"""
# Synchronize device microservices
if str(self.device_type) in index.MICROSERVICES['DEVICE_MICROSERVICES']:
# Synchronize microservices
changed = False
module_names = [x['module'] for x in index.MICROSERVICES['DEVICE_MICROSERVICES'][str(self.device_type)]]
for m in module_names:
changed |= m not in self.intelligence_modules
for m in list(self.intelligence_modules.keys()):
changed |= m not in module_names
if changed:
# Remove microservices that no longer exist
delete = []
for module_name in self.intelligence_modules.keys():
found = False
for intelligence_info in index.MICROSERVICES['DEVICE_MICROSERVICES'][str(self.device_type)]:
if intelligence_info['module'] == module_name:
found = True
break
if not found:
botengine.get_logger().info("\tDeleting device microservice: " + str(module_name))
delete.append(module_name)
for d in delete:
del self.intelligence_modules[d]
# Add more microservices
for intelligence_info in index.MICROSERVICES['DEVICE_MICROSERVICES'][str(self.device_type)]:
if intelligence_info['module'] not in self.intelligence_modules:
try:
intelligence_module = importlib.import_module(intelligence_info['module'])
class_ = getattr(intelligence_module, intelligence_info['class'])
botengine.get_logger().info("\tAdding device microservice: " + str(intelligence_info['module']))
intelligence_object = class_(botengine, self)
self.intelligence_modules[intelligence_info['module']] = intelligence_object
except Exception as e:
import traceback
botengine.get_logger().error("Could not add device microservice: {}: {}; {}".format(str(intelligence_info), str(e), traceback.format_exc()))
if botengine.playback:
import time
time.sleep(10)
elif len(self.intelligence_modules) > 0:
# There are no intelligence modules for this device type, and yet we have some intelligence modules locally. Delete everything.
botengine.get_logger().info("\tDeleting all device microservices")
self.intelligence_modules = {}
# Tell all device microservices we're running a new version
for microservice in list(self.intelligence_modules.values()):
try:
microservice.new_version(botengine)
except Exception as e:
botengine.get_logger().warning("location.py - Error delivering new_version to device microservice (continuing execution): " + str(e))
import traceback
botengine.get_logger().error(traceback.format_exc())
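# Illustrative shape of the registry synchronized above (the device type key
# and module/class names are hypothetical; the real content comes from index.py):
#
#     index.MICROSERVICES = {
#         'DEVICE_MICROSERVICES': {
#             '10014': [
#                 {'module': 'intelligence.entry.entry_microservice',
#                  'class': 'EntryMicroservice'},
#             ],
#         },
#     }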
def initialize(self, botengine):
"""
Initialize this object
:param botengine: BotEngine environment
"""
# Initialize all device microservices
for device_microservice in self.intelligence_modules.values():
device_microservice.initialize(botengine)
def destroy(self, botengine):
"""
Destroy this device
:param botengine: BotEngine environment
"""
return
def get_device_type_name(self):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Super abstract device type name
return _("Device")
def get_icon(self):
"""
Get the name of an icon
:return: the font icon name of this device type
"""
raise NotImplementedError
def get_icon_font(self):
"""
Get the icon font package from which to render an icon
As most of the device icons come from the "People Power Regular" icon font, this is currently the default.
You can override this method in a specific device class.
:return: The name of the icon font package
"""
return utilities.ICON_FONT_PEOPLEPOWER_REGULAR
def is_goal_id(self, target_goal_id):
"""
This is the proper way to check for whether or not this device matches the given target goal ID,
because goal IDs can change by an order of 1000 for each different brand.
:param botengine: BotEngine environment
:return: True if the goal ID matches for this device
"""
if self.goal_id is not None:
return self.goal_id % 1000 == target_goal_id
return False
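# Example of the modulo logic above: brands offset their goal IDs in steps of
# 1000, so goal IDs 10, 1010, and 2010 all satisfy is_goal_id(10).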
#===========================================================================
# Microservice notification distribution methods
#===========================================================================
def device_measurements_updated(self, botengine):
"""
Distribute notifications to all microservices that your measurements have been updated
:param botengine:
:return:
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].device_measurements_updated(botengine, self)
def device_metadata_updated(self, botengine):
"""
Distribute notifications to all microservices that your metadata has been updated
:param botengine:
:return:
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].device_metadata_updated(botengine, self)
def device_alert(self, botengine, alert_type, alert_params):
"""
Distribute notifications to all microservices that an alert has been generated from this device
:param botengine: BotEngine environment
:param alert_type: Type of alert
:param alert_params: Dictionary of alert parameters
"""
# Added May 4, 2021
if not hasattr(self, "last_alert"):
self.last_alert = {}
alert_params['timestamp_ms'] = botengine.get_timestamp()
self.last_alert = {
alert_type : alert_params
}
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].device_alert(botengine, self, alert_type, alert_params)
#===========================================================================
# Measurement synchronization and updates
#===========================================================================
def synchronize(self, botengine):
"""
Synchronize with the server
:param botengine: BotEngine environment
"""
self.cache_measurements(botengine, botengine.get_timestamp() - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS, botengine.get_timestamp())
def cache_measurements(self, botengine, oldest_timestamp_ms, newest_timestamp_ms):
"""
Download and cache historical measurements locally
:param botengine: BotEngine environment
:param oldest_timestamp_ms: Oldest timestamp to download history from
:param newest_timestamp_ms: Newest timestamp to download history to
"""
try:
measurements = botengine.get_measurements(self.device_id, oldest_timestamp_ms=oldest_timestamp_ms, newest_timestamp_ms=newest_timestamp_ms)
except:
# This can happen because this bot may not have read permissions for this device.
# botengine.get_logger().warning("Cannot synchronize measurements for device {}; device ID {}".format(self.description, self.device_id))
return
botengine.get_logger().info("Synchronizing measurements for device: " + str(self.description))
if 'measures' in measurements:
for measure in measurements['measures']:
if 'value' not in measure:
#botengine.get_logger().error("device.py: Measurement has no value: " + str(measure) + ";\n Measurement was: " + str(measure))
continue
value = utilities.normalize_measurement(measure['value'])
param_name = measure['name']
time = measure['time']
# If there's an index number, we just augment the parameter name with the index number to make it a unique parameter name. param_name.index
if 'index' in measure:
if measure['index'] is not None:
if str(measure['index']).lower() != "none":
param_name = "{}.{}".format(param_name, measure['index'])
if param_name in self.measurements:
if self.measurements[param_name][0][0] == value and self.measurements[param_name][0][1] == time:
# Already captured this measurement
continue
self.add_measurement(botengine, param_name, value, time)
def update(self, botengine, measures):
"""
Attempt to parse the inputs to update this object
:param measures: Full or partial measurement block from bot inputs
"""
self.last_updated_params = []
self.communicated(botengine.get_timestamp())
# # Handy debug tool.
# if measures is not None:
# for measure in measures:
# if measure['deviceId'] == self.device_id and 'value' in measure:
# param_name = measure['name']
# if 'index' in measure:
# if measure['index'] is not None:
# param_name = "{}.{}".format(measure['name'], measure['index'])
#
# if param_name in self.measurements:
# if len(self.measurements[param_name]) > 0:
# if 'time' in measure:
# if not measure['updated'] and measure['time'] == self.measurements[param_name][NEWEST_MEASUREMENT][TIMESTAMP]:
# # Nothing to update
# botengine.get_logger().info(utilities.Color.GREEN + "\tSAME: {} @ {} = {}".format(param_name, measure['time'], measure['value']) + utilities.Color.END)
# continue
#
# if measure['updated']:
# botengine.get_logger().info(utilities.Color.GREEN + "\tUPDATED: {} @ {} = {}".format(param_name, measure['time'], measure['value']) + utilities.Color.END)
# else:
# botengine.get_logger().info(utilities.Color.GREEN + "\tTIME DIFF: {} @ {} = {}".format(param_name, measure['time'], measure['value']) + utilities.Color.END)
if measures is not None:
for measure in measures:
if measure['deviceId'] == self.device_id:
param_name = measure['name']
if param_name == 'batteryLevel' and measure['updated']:
if 'value' not in measure:
botengine.get_logger().info("device.py: Updated parameter provided no updated value: {}".format(measure))
continue
# Update the battery_level
self.battery_level = int(measure['value'])
self.last_updated_params.append('batteryLevel')
elif param_name not in self.measurements or measure['updated']:
if 'value' not in measure:
botengine.get_logger().info("device.py: Updated parameter provided no updated value: {}".format(measure))
continue
value = utilities.normalize_measurement(measure['value'])
# If there's an index number, we just augment the parameter name with the index number to make it a unique parameter name. param_name.index
if 'index' in measure:
if measure['index'] is not None:
if str(measure['index']).lower() != "none":
param_name = "{}.{}".format(param_name, measure['index'])
is_param_get_new_value = self.add_measurement(botengine, param_name, value, measure['time'])
if is_param_get_new_value:
self.last_updated_params.append(param_name)
# List of devices (this one and its proxy) that were updated, to later synchronize with the location outside of this object
updated_devices = []
updated_metadata = []
# Update all device intelligence modules
if len(self.last_updated_params) > 0:
updated_devices.append(self)
else:
# Metadata was updated
updated_metadata.append(self)
# Make sure our proxy (gateway) gets pinged - it implicitly updated here and needs to trigger microservices
if self.proxy_id is not None:
if self.proxy_id in self.location_object.devices:
d, m = self.location_object.devices[self.proxy_id].update(botengine, measures)
updated_devices += d
updated_metadata += m
botengine.get_logger().info("Updated '{}' with params: {}".format(self.description, self.last_updated_params))
return (updated_devices, updated_metadata)
def file_uploaded(self, botengine, device_object, file_id, filesize_bytes, content_type, file_extension):
"""
A device file has been uploaded
:param botengine: BotEngine environment
:param device_object: Device object that uploaded the file
:param file_id: File ID to reference this file at the server
:param filesize_bytes: The file size in bytes
:param content_type: The content type, for example 'video/mp4'
:param file_extension: The file extension, for example 'mp4'
"""
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].file_uploaded(botengine, device_object, file_id, filesize_bytes, content_type, file_extension)
def add_measurement(self, botengine, name, value, timestamp):
"""
Update the device's status
:param botengine:
:param name:
:param value:
:param timestamp:
:return:
"""
measurement_updated = False
if name not in self.measurements:
# Create the measurement
self.measurements[name] = []
measurement_updated = True
self.measurement_odometer += 1
self.measurements[name].insert(0, (value, timestamp))
else:
last_value = self.measurements[name][0][0]
if value != last_value:
measurement_updated = True
self.measurement_odometer += 1
self.measurements[name].insert(0, (value, timestamp))
# Auto garbage-collect
if self.enforce_cache_size:
while (len(self.measurements[name]) > 1) and self.measurements[name][-1][1] <= botengine.get_timestamp() - TOTAL_DURATION_TO_CACHE_MEASUREMENTS_MS:
del(self.measurements[name][-1])
return measurement_updated
def communicated(self, timestamp):
"""
Call this function when the device communicates at all.
This lets us evaluate how often the device communicates, how many times per day, communications during test mode, etc.
"""
if self.last_communications_timestamp is not None:
#self.log("\t=> Last communication was " + str((timestamp - self.last_communications_timestamp) / 1000) + " seconds ago")
pass
self.last_communications_timestamp = timestamp
self.total_communications_odometer += 1
self.communications_odometer += 1
def reset_odometers(self):
"""
Reset all our odometers except the total_communications_odometer
For example, if we're entering TEST mode and want to keep track of communications
"""
self.communications_odometer = 0
self.measurement_odometer = 0
def get_measurement_history(self, botengine, param_name):
"""
Get the measurement history for this parameter, newest measurements first
[ ( value, timestamp), (value, timestamp) ]
:param botengine: BotEngine environment
:param param_name: Parameter name
:return: List of measurements history tuples, or None if the measurement doesn't exist
"""
if param_name in self.measurements:
return self.measurements[param_name]
return None
#===========================================================================
# Device health
#===========================================================================
def did_update_rssi(self, botengine=None):
"""
:return: True if we updated the RSSI for this device on this execution
"""
return 'rssi' in self.last_updated_params
def get_rssi(self, botengine=None):
"""
:return: The most recent RSSI value, or None if it doesn't exist
"""
if 'rssi' in self.measurements:
return self.measurements['rssi'][0][0]
def low_signal_strength(self, botengine=None):
"""
:return: True if this device currently has a low signal strength
"""
rssi = self.get_rssi(botengine)
if rssi is not None:
return rssi < self.LOW_RSSI_THRESHOLD
return False
def raw_command(self, name, value):
"""
Send a command for the given local measurement name
"""
pass
def is_command(self, measurement_name):
"""
:param measurement_name: Name of a local measurement name
:return: True if the given parameter name is a command
"""
return False
def get_proxy_object(self, botengine=None):
"""
:return: Gateway / Proxy object this device connects through. None if it doesn't exist
"""
if self.proxy_id is not None:
if self.proxy_id in self.location_object.devices:
return self.location_object.devices[self.proxy_id]
return None
def did_tamper(self, botengine):
"""
Did someone tamper with this device
:param botengine:
:return:
"""
if 'tamper' in self.last_updated_params:
if 'tamper' in self.measurements:
if len(self.measurements['tamper']) > 0:
return self.measurements['tamper'][0][0]
return False
#===========================================================================
# Coordinates
#===========================================================================
def update_coordinates(self, botengine, latitude, longitude):
"""
Update the latitude and longitude
:param botengine: BotEngine environment
:param latitude: Latitude
:param longitude: Longitude
"""
if float(latitude) == self.latitude and float(longitude) == self.longitude:
return
self.latitude = float(latitude)
self.longitude = float(longitude)
# Notify my microservices
for intelligence_id in self.intelligence_modules:
self.intelligence_modules[intelligence_id].coordinates_updated(botengine, latitude, longitude)
# Notify all children microservices
for device_id in self.location_object.devices:
if self.location_object.devices[device_id].proxy_id == self.device_id:
for intelligence_id in self.location_object.devices[device_id].intelligence_modules:
self.location_object.devices[device_id].intelligence_modules[intelligence_id].coordinates_updated(botengine, latitude, longitude)
#===========================================================================
# Spaces
#===========================================================================
def is_in_space(self, botengine, space_description_or_type):
"""
Determine if this device is associated with the given space description.
The description must be a word inside our SPACE_TYPE dictionary.
:param botengine: BotEngine environment
:param space_description_or_type: Space type number or description from our SPACE_TYPE dictionary
:return: True if the device is in the given space
"""
space_type = None
if str(space_description_or_type).lower() in SPACE_TYPE:
space_type = SPACE_TYPE[str(space_description_or_type).lower()]
else:
try:
space_type = int(space_description_or_type)
except:
botengine.get_logger().error("device.is_in_space(): Couldn't identify what space type you're talking about - {}".format(space_description_or_type))
return False
for space in self.spaces:
if space['spaceType'] == space_type:
return True
return False
def is_in_spaces(self, botengine, space_descriptions_or_types_list):
"""
Determine if this device is associated with any of the given spaces in the list.
If the list contains descriptive strings, the strings must be words inside of our SPACE_TYPE dictionary.
:param botengine: BotEngine environment
:param space_descriptions_or_types_list: List of space type numbers, or list of strings from our SPACE_TYPE dictionary
:return: True if the device is in any of the given spaces
"""
space_types = []
for s in space_descriptions_or_types_list:
if str(s).lower() in SPACE_TYPE:
space_types.append(SPACE_TYPE[str(s).lower()])
else:
try:
space_type = int(s)
space_types.append(space_type)
except:
botengine.get_logger().error("device.is_in_spaces(): Couldn't identify what space type you're talking about - {}".format(s))
continue
comparison_types = []
for space in self.spaces:
comparison_types.append(space['spaceType'])
for t in space_types:
if t in comparison_types:
return True
return False
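# Hedged usage sketch: with the space-type handling above, all of these forms
# are accepted -
#
#     device_object.is_in_space(botengine, "kitchen")              # name from SPACE_TYPE
#     device_object.is_in_space(botengine, SPACE_TYPE["kitchen"])  # numeric space type
#     device_object.is_in_spaces(botengine, ["kitchen", "hallway"])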
#===========================================================================
# Data request
#===========================================================================
def request_data(self, botengine, oldest_timestamp_ms=None, newest_timestamp_ms=None, param_name_list=None, reference=None, index=None, ordered=1):
"""
Selecting a large amount of data from the database can take a significant amount of time and impact server
performance. To avoid this long waiting period while executing bots, a bot can submit a request for all the
data it wants from this location asynchronously. The server gathers all the data on its own time, and then
triggers the bot with trigger 2048. Your bot must include trigger 2048 to receive the trigger.
Selected data becomes available as a file in CSV format, compressed by LZ4, and stored for one day.
The bot receives direct access to this file.
You can call this multiple times to extract data out of multiple devices. The request will be queued up and
the complete set of requests will be flushed at the end of this bot execution.
:param botengine:
:param oldest_timestamp_ms:
:param newest_timestamp_ms:
:param param_name_list:
:param reference:
:param index:
:param ordered:
:return:
"""
if oldest_timestamp_ms is None:
oldest_timestamp_ms = botengine.get_timestamp() - utilities.ONE_MONTH_MS * 6
botengine.request_data(type=botengine.DATA_REQUEST_TYPE_PARAMETERS,
device_id=self.device_id,
oldest_timestamp_ms=oldest_timestamp_ms,
newest_timestamp_ms=newest_timestamp_ms,
param_name_list=param_name_list,
reference=reference,
index=index,
ordered=ordered)
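# Hedged usage sketch (parameter name and reference are illustrative): request
# six months of 'power' history; the compressed CSV arrives later via
# data-request trigger 2048 under the given reference.
#
#     device_object.request_data(botengine,
#                                param_name_list=['power'],
#                                reference='power_history')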
#===========================================================================
# CSV methods for machine learning algorithm integrations
#===========================================================================
def get_csv(self, botengine, oldest_timestamp_ms=None, newest_timestamp_ms=None, params=[]):
"""
Get a .csv string of all the data
This is useful when you're using .csv data from a user's account outside of the bot microservices environment to construct machine learning algorithms,
and then want to drag-and-drop those same algorithms into a bot environment and watch it run the same way without having to transform data.
Mimics the type of .csv output you'd obtain with the following CLI commands:
botengine --download_device <device_id>
botengine --download_type <device_type>
:param botengine: BotEngine environment
:param oldest_timestamp_ms: oldest timestamp in milliseconds
:param newest_timestamp_ms: newest timestamp in milliseconds
:param params: List of parameters
:return: .csv string, largely matching the .csv data you would receive from the "botengine --download_device [device_id]" command line interface. Or None if this device doesn't have data.
"""
if len(self.measurements) == 0:
botengine.get_logger().info("{}: get_csv() - This device has no measurements")
return None
if params:
titles = sorted(params)
else:
titles = sorted(self.measurements.keys())
last_measurements = {}
for title in titles:
try:
last_measurements[title] = self.measurements[title][0][0]
except:
pass
# Check to see that all the parameters we're requesting have valid measurements in this device object
# Remember that an index number will modify the name of the parameter to make it unique, and we need to match against the unique name of each parameter
if not set(params).issubset(last_measurements.keys()):
botengine.get_logger().info("{}: get_csv() - Not all of the requested parameters exist for this device")
return None
output = "device_type,device_id,description,timestamp_ms,timestamp_iso,"
for t in titles:
output = "{}{},".format(output, t)
output += "\n"
try:
measurements = botengine.get_measurements(self.device_id, oldest_timestamp_ms=oldest_timestamp_ms, newest_timestamp_ms=newest_timestamp_ms, param_name=params)
except:
# This can happen because this bot may not have read permissions for this device.
# botengine.get_logger().warning("Cannot synchronize measurements for device: " + str(self.description))
return None
processed_readings = {}
if 'measures' in measurements:
for measure in measurements['measures']:
if 'value' not in measure:
continue
value = utilities.normalize_measurement(measure['value'])
param_name = measure['name']
time = int(measure['time'])
# If there's an index number, we just augment the parameter name with the index number to make it a unique parameter name. param_name.index
if 'index' in measure:
if measure['index'] is not None:
if str(measure['index']).lower() != "none":
param_name = "{}.{}".format(param_name, measure['index'])
processed_readings[time] = (param_name, value)
measurements = None
import gc
gc.collect()
botengine.get_logger().info("{}: get_csv() - Processing {} measurements ...".format(self.description, str(len(processed_readings))))
for timestamp_ms in sorted(processed_readings.keys()):
dt = self.location_object.get_local_datetime_from_timestamp(botengine, timestamp_ms)
output += "{},{},{},{},{},".format(self.device_type, self.device_id.replace(",","_"), self.description.replace(",","_"), timestamp_ms, utilities.iso_format(dt))
for t in titles:
if t == processed_readings[timestamp_ms][0]:
output += "{},".format(processed_readings[timestamp_ms][1])
else:
output += "{},".format(last_measurements[t])
output += "\n"
return output
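# Illustrative output shape (hypothetical values) matching the header and rows
# built above:
#
#     device_type,device_id,description,timestamp_ms,timestamp_iso,degC,rssi,
#     10014,ABC123,Kitchen Sensor,1234567890000,2009-02-13T15:31:30-08:00,22.0,-70,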
#===============================================================================
# These functions are outside the Device class above.
#===============================================================================
def send_command_reliably(botengine, device_id, param_name, param_value):
"""
Send a command reliably
:param botengine: BotEngine
:param device_id: Device ID to send the command to
:param param_name: Parameter name
:param param_value: Parameter value
"""
botengine.get_logger().info("{}: Send command reliably".format(device_id))
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is None:
queue = {}
if device_id not in queue:
queue[device_id] = {}
botengine.send_commands(device_id, [botengine.form_command(param_name, param_value)])
botengine.cancel_timers(device_id)
botengine.start_timer(TIME_BETWEEN_ATTEMPTS_SEC, _attempt_reliable_delivery, None, "reliability")
# queue[device_id] = {'param_name': ('param_value', attempts, timestamp)}
if param_name in queue[device_id]:
if queue[device_id][param_name][0] == param_value:
# No need to update the timestamp
return
queue[device_id][param_name] = (param_value, 0, botengine.get_timestamp())
botengine.save_variable(RELIABILITY_VARIABLE_NAME, queue)
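# Hedged usage sketch (parameter name and value are illustrative): reliably
# switch an outlet on, then inspect the pending queue until the device echoes
# the value back.
#
#     send_command_reliably(botengine, device_object.device_id, 'outletStatus', '1')
#     pending = queued_commands_for_device(botengine, device_object.device_id)
#     # pending -> {'outletStatus': ('1', 0, <timestamp_ms>)} while unverified;
#     # the retry timer prunes it once the new value is measured or attempts max out.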
def cancel_reliable_command(botengine, device_id, param_name):
"""
Stop trying to send a command reliably
:param botengine:
:param device_id: Device ID
:param param_name: Parameter name to cancel.
:return:
"""
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is None:
return
if device_id in queue:
if param_name in queue[device_id]:
del(queue[device_id][param_name])
if len(queue[device_id]) == 0:
del(queue[device_id])
botengine.save_variable(RELIABILITY_VARIABLE_NAME, queue)
def queued_commands_for_device(botengine, device_id):
"""
Get the queued commands for the current device in a dictionary of the form: { 'paramName': ('value', attempts, send_timestamp) , ... }
Basically if this response isn't empty, then there are commands in the queue that haven't been verified yet.
:return: Dictionary of commands in the queue, or a blank dictionary {} if there are no commands or the device isn't found
"""
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is not None:
if device_id in queue:
return queue[device_id]
return {}
def _attempt_reliable_delivery(botengine, args):
"""
Attempt reliable delivery of everything in our queue
This is executed by a timer.
"""
botengine.get_logger().info(">reliability")
queue = botengine.load_variable(RELIABILITY_VARIABLE_NAME)
if queue is None:
return
logger = botengine.get_logger()
logger.debug("RELIABILITY: Queue looks like " + str(queue))
import copy
for device_id in copy.copy(queue):
# Prune out all our successfully delivered commands, and commands that have timed out
params_to_remove = []
for param_name in queue[device_id]:
(param_value, attempts, timestamp) = queue[device_id][param_name]
if attempts < MAX_ATTEMPTS:
# Check to see if the last attempt went through
measures = None
try:
measures = botengine.get_measurements(device_id, param_name=param_name, oldest_timestamp_ms=timestamp)
except:
# No longer have access to the device
params_to_remove.append(param_name)
logger.debug("RELIABILITY: measurements since " + str(timestamp) + ": " + str(measures))
if measures is not None:
if 'measures' in measures:
for m in measures['measures']:
if m['name'] == param_name and m['value'] == param_value:
# Command had been delivered reliably
logger.debug("RELIABILITY: COMMAND HAS BEEN DELIVERED RELIABLY")
params_to_remove.append(param_name)
break
else:
# TODO log this error somewhere
logger.debug("RELIABILITY: MAXIMUM ATTEMPTS REACHED FOR DEVICE " + str(device_id) + "; PARAM_NAME=" + str(param_name) + "; PARAM_VALUE=" + str(param_value))
params_to_remove.append(param_name)
for param in params_to_remove:
if param in queue[device_id]:
del(queue[device_id][param])
if len(queue[device_id]) > 0:
botengine.cancel_timers("reliability")
botengine.start_timer(TIME_BETWEEN_ATTEMPTS_SEC, _attempt_reliable_delivery, None, "reliability")
for param_name in queue[device_id]:
# Increment our attempts
(param_value, attempts, timestamp) = queue[device_id][param_name]
attempts += 1
queue[device_id][param_name] = (param_value, attempts, timestamp)
logger.debug("RELIABILITY: Re-sending command to " + device_id + ": " + str(param_name) + " = " + str(param_value))
botengine.send_command(device_id, param_name, param_value)
else:
del(queue[device_id])
logger.debug("RELIABILITY: Cleaned queue looks like " + str(queue))
botengine.save_variable(RELIABILITY_VARIABLE_NAME, queue)
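#===============================================================================
# Illustrative usage sketch (not part of the original module). It only calls the
# helpers defined above; the device ID and parameter values are placeholders.
#===============================================================================
def _example_reliable_command_usage(botengine):
    # Keep re-sending the command until a matching measurement confirms delivery
    # or MAX_ATTEMPTS is reached.
    send_command_reliably(botengine, "device-123", "outletStatus", "1")

    # Inspect which commands for this device have not been verified yet.
    pending = queued_commands_for_device(botengine, "device-123")
    if pending:
        botengine.get_logger().info("Still waiting on: {}".format(list(pending.keys())))

    # Give up on a specific parameter without waiting for MAX_ATTEMPTS.
    cancel_reliable_command(botengine, "device-123", "outletStatus")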
|
|
#!/usr/bin/env python
#############################################################################
# barcodeHasher.py
# 2015 James A. Stapleton
#
# This program sorts short reads into a dictionary on the basis of barcode
# sequences. Output is a JSON-dumped file called unpairedDict.txt or
# pairedDict.txt, depending on whether barcode pairing is used.
#
# Arguments:
# infile: comma-separated list of sequence files.
#
# afterBarcode: one or more (comma-separated) sequences, one of
# which is required to follow the barcode to confirm that
# the read is not spurious
#
# Options:
#
# --SCA2: call when adapters with the SCA2 PCR primer
# sequence are used
# (SCA2: 5'-ACACGACGTGAACGATAGGAATTG-3')
#
# --primer: primer sequence used in the PCR. U for uracil.
# Default is TR. --SCA2 can be used for SCA2.
#
# --pairSeparateFile: call for two-tube barcode pairing
#
# --pairSameFile: call for one-tube barcode pairing
#
# --useFwdUnpaired: call to use forward reads whose reverse pairs
# were dropped by trimmomatic
#
# --partial: print dictionaries every 1/NUMFRACTIONS of the data
#
# --FLASH: merge overlapping forward and reverse reads with FLASH
#
# --quality: add fastq quality line to the dictionary along with the
# sequence line, to allow error-checking by the assembler
# (e.g., SPAdes)
#
# --BARCODE_LENGTH: Length of the barcode. Default 16.
#
# --BARCODE_TRUNCATE: Ignore this many bases at the beginning of
# the barcode. Default 0.
#
# --PAIR_THRESHOLD: Minimum number of times a pair of barcodes needs
# to be seen to count as a pair. Default 1.
#
# --ENDTRIM: Trim this many bases from the ends of the read during
# processing to remove low-confidence bases. Default 2.
#
# --NUMFRACTIONS: Number of fractional dictionaries to print when
# using the --partial flag. Default 10.
#
#############################################################################
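# Example to run (illustrative only; the file names and the after-barcode
# sequence below are placeholders, not values from the original study):
#
#   python barcodeHasher.py trimmed_F.fq,trimmed_R.fq CAGT --FLASH --quality
#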
import argparse
import time
import subprocess
import collections
import json
import itertools
import copy
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def main(infile, afterBarcode, SCA2, primer, pairSeparateFile, pairSameFile,
useFwdUnpaired, partial, FLASH, quality, BARCODE_LENGTH,
BARCODE_TRUNCATE, PAIR_THRESHOLD, ENDTRIM, NUMFRACTIONS):
if pairSeparateFile + pairSameFile > 1:
print "Please choose only one barcode pairing option"
return 1
COMPLEMENT_DICT = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G', 'N': 'N', 'U': 'AX'}
# 'X' as a marker for the positions of uracils in the reverse complement
AFTER_BARCODE_SEQS = []
AFTER_BARCODE_RC_SEQS = []
if afterBarcode:
for ABseq in afterBarcode.split(','):
AFTER_BARCODE_SEQS.append(ABseq)
AFTER_BARCODE_RC_SEQS.append(''.join([COMPLEMENT_DICT[base]
for base in ABseq])[::-1])
print "After barcode:", ABseq
ABcounts = [0] * len(AFTER_BARCODE_SEQS)
else:
ABcounts = [0]
AFTER_BARCODE_SEQS.append('')
AFTER_BARCODE_RC_SEQS.append('')
print "No after-barcode sequence checking"
if SCA2:
LOSTUSEQS = ['AATTCCT', 'ATCGTTC']
elif primer:
primer = primer.upper()
primerRC = ''.join([COMPLEMENT_DICT[base] for base in primer])[::-1]
LOSTUSEQS = primerRC.split("X")[1:] # start after the first U
else:
LOSTUSEQS = ['AGG', 'AATAGTT', 'ATGTGCATT']
LOSTUSEQ_RCS = []
for lostuseq in LOSTUSEQS:
LOSTUSEQ_RCS.append(''.join([COMPLEMENT_DICT[base] for
base in lostuseq])[::-1])
# take trimmed infiles, entered as file1.fq,file2.fq
if pairSeparateFile and useFwdUnpaired:
infile_F, infile_R, infile_unpaired, paired_F = infile.split(',')
elif pairSameFile and useFwdUnpaired:
infile_F, infile_R, infile_unpaired, trimmed = infile.split(',')
elif pairSeparateFile:
infile_F, infile_R, paired_F = infile.split(',')
elif pairSameFile:
infile_F, infile_R, trimmed = infile.split(',')
elif useFwdUnpaired:
infile_F, infile_R, infile_unpaired = infile.split(',')
else:
infile_F, infile_R = infile.split(',')
#run FLASH to combine overlapping read pairs
if FLASH:
subprocess.call(["flash", "-M", "140", "-t", "1", infile_F, infile_R])
    # initialize stuff
master_hash = collections.defaultdict(list)
final_dict = collections.defaultdict(list)
readCount = 0
bad_after = 0
passed = 0
start_time = time.time()
pairing = 0
pair_dict = 0
# Pair barcodes
if (pairSeparateFile + pairSameFile):
pairing = 1
print 'Pairing barcodes'
        # initialize stuff
readCount = 0
pair_dict = collections.defaultdict(lambda: collections.Counter())
start_time = time.time()
if pairSeparateFile:
trimmed = paired_F
with open(trimmed, 'rU') as merged:
for title, seq, qual_F in FastqGeneralIterator(merged):
# print time elapsed every 100000 reads processed
readCount += 1
if readCount % 100000 == 0:
print readCount
print time.time() - start_time, "seconds"
start_time = time.time()
if len(seq) < BARCODE_LENGTH + BARCODE_LENGTH:
continue
# pull out barcode1
barcode1 = seq[BARCODE_TRUNCATE:BARCODE_LENGTH]
seq_F = seq[BARCODE_LENGTH:]
# check for after_barcode_sequences
if AFTER_BARCODE_SEQS:
test = after_barcode_seq(seq_F, AFTER_BARCODE_SEQS)
if test == 1:
continue
else:
seq_F = seq_F[len(test):]
if len(seq) < len(test) + BARCODE_LENGTH:
continue
# check for lostU and lostUrc
seq_F, qual_F = trim_lost_U(seq_F, qual_F, LOSTUSEQS)
for lostuseq_RC in LOSTUSEQ_RCS[::-1]:
lostarray = [lostuseq_RC]
seq_F, qual_F = trim_lost_U(seq_F, qual_F, lostarray)
# check for RC of after_barcode_sequences
if AFTER_BARCODE_SEQS:
test = after_barcode_seq(seq_F, AFTER_BARCODE_RC_SEQS)
if test == 1:
continue
else:
seq_F = seq_F[len(test):]
# pull out barcode2
barcode2rc = seq_F[: BARCODE_LENGTH - BARCODE_TRUNCATE]
if len(barcode2rc) != BARCODE_LENGTH - BARCODE_TRUNCATE:
continue
barcode2 = ''.join([COMPLEMENT_DICT[base] for
base in barcode2rc])[::-1]
seq_F = seq_F[BARCODE_LENGTH:]
pair_dict[barcode1][barcode2] += 1
pair_dict[barcode2][barcode1] += 1
#print pair_dict to file
with open('barcodePairs.txt', 'w') as barcodePairs:
print >> barcodePairs, json.dumps(pair_dict, sort_keys=True,
indent=4, separators=(',', ': '))
print 'Building the dictionary'
readCount = 0
# count the number of input reads to allow partial dictionaries
numReads = 0
if partial:
if FLASH:
with open("out.extendedFrags.fastq", 'rU') as merged:
for line in merged:
numReads += 1
with open("out.notCombined_1.fastq", 'rU') as unmerged_F:
for line in unmerged_F:
numReads += 1
else:
with open(infile_F, 'rU') as unmerged_F:
for line in unmerged_F:
numReads += 1
if useFwdUnpaired:
with open(infile_unpaired, 'rU') as unpaired_F:
for line in unpaired_F:
numReads += 1
numReads = numReads/4
# iterate over merged reads from FLASH
if FLASH:
with open("out.extendedFrags.fastq", 'rU') as merged:
for title, seq, qual in FastqGeneralIterator(merged):
readCount, bad_after, ABcounts, passed, start_time = processReads(readCount, seq, qual, 0, 0, BARCODE_TRUNCATE,
BARCODE_LENGTH, ENDTRIM, bad_after, ABcounts,
AFTER_BARCODE_SEQS, AFTER_BARCODE_RC_SEQS,
LOSTUSEQS, passed, master_hash, pair_dict,
partial, numReads, NUMFRACTIONS, COMPLEMENT_DICT,
LOSTUSEQ_RCS, start_time,
PAIR_THRESHOLD, pairing, quality)
# iterate over unmerged reads from FLASH
with open("out.notCombined_1.fastq", 'rU') as unmerged_F:
with open("out.notCombined_2.fastq", 'rU') as unmerged_R:
f_iter = FastqGeneralIterator(unmerged_F)
r_iter = FastqGeneralIterator(unmerged_R)
for (title, seq, qual), (title_R, seq_R, qual_R) in itertools.izip(f_iter, r_iter):
readCount, bad_after, ABcounts, passed, start_time = processReads(readCount, seq, qual, seq_R, qual_R, BARCODE_TRUNCATE,
BARCODE_LENGTH, ENDTRIM, bad_after, ABcounts,
AFTER_BARCODE_SEQS, AFTER_BARCODE_RC_SEQS,
LOSTUSEQS, passed, master_hash, pair_dict,
partial, numReads, NUMFRACTIONS, COMPLEMENT_DICT,
LOSTUSEQ_RCS, start_time,
PAIR_THRESHOLD, pairing, quality)
else: # if not running FLASH, iterate over infiles
with open(infile_F, 'rU') as unmerged_F:
with open(infile_R, 'rU') as unmerged_R:
f_iter = FastqGeneralIterator(unmerged_F)
r_iter = FastqGeneralIterator(unmerged_R)
for (title, seq, qual), (title_R, seq_R, qual_R) in itertools.izip(f_iter, r_iter):
readCount, bad_after, ABcounts, passed, start_time = processReads(readCount, seq, qual, seq_R, qual_R, BARCODE_TRUNCATE,
BARCODE_LENGTH, ENDTRIM, bad_after, ABcounts,
AFTER_BARCODE_SEQS, AFTER_BARCODE_RC_SEQS,
LOSTUSEQS, passed, master_hash, pair_dict,
partial, numReads, NUMFRACTIONS, COMPLEMENT_DICT,
LOSTUSEQ_RCS, start_time,
PAIR_THRESHOLD, pairing, quality)
# iterate over forward_unpaired
if useFwdUnpaired:
print "Using forward unpaired reads"
with open(infile_unpaired, 'rU') as unpaired_F:
for title, seq, qual in FastqGeneralIterator(unpaired_F):
readCount, bad_after, ABcounts, passed, start_time = processReads(readCount, seq, qual, 0, 0, BARCODE_TRUNCATE,
BARCODE_LENGTH, ENDTRIM, bad_after, ABcounts,
AFTER_BARCODE_SEQS, AFTER_BARCODE_RC_SEQS,
LOSTUSEQS, passed, master_hash, pair_dict,
partial, numReads, NUMFRACTIONS, COMPLEMENT_DICT,
LOSTUSEQ_RCS, start_time,
PAIR_THRESHOLD, pairing, quality)
print str(readCount) + " total read pairs, " + str(passed) + " passed, " \
+ str(bad_after) + " non-compliant barcodes, AB counts: " + str(ABcounts)
# print master_hash to file
if (not pairSeparateFile) and (not pairSameFile):
with open('unpairedDict.txt', 'w') as unpairedDict:
print >> unpairedDict, json.dumps(master_hash, sort_keys=True,
indent=4, separators=(',', ': '))
del master_hash
return 0
else:
# combine master_hash and pair_dict into final_dict
with open('confirmedPairs.txt', 'w') as confirmed:
final_dict = pairBarcodes(master_hash, pair_dict, final_dict,
PAIR_THRESHOLD, confirmed)
# print final_dict to file
with open('pairedDict.txt', 'w') as pairedDict:
print >> pairedDict, json.dumps(final_dict, sort_keys=True, indent=4,
separators=(',', ': '))
del master_hash
del pair_dict
del final_dict
return 0
######## Function definitions ##############
def processReads(readCount, seq, qual_F, seq_R, qual_R, BARCODE_TRUNCATE,
BARCODE_LENGTH, ENDTRIM, bad_after, ABcounts,
AFTER_BARCODE_SEQS, AFTER_BARCODE_RC_SEQS, LOSTUSEQS,
passed, master_hash, pair_dict, partial, numReads,
NUMFRACTIONS, COMPLEMENT_DICT, LOSTUSEQ_RCS, start_time,
PAIR_THRESHOLD, pairing, quality):
# Print time elapsed every 100000 reads processed
readCount += 1
if readCount % 100000 == 0:
print readCount
print time.time() - start_time, "seconds"
start_time = time.time()
# if testing partial fractions of the dataset,
# print dicts every 1/NUMFRACTIONS of the data
if partial:
if not readCount % (numReads/NUMFRACTIONS):
filenum = readCount/(numReads/NUMFRACTIONS)
final_dict = collections.defaultdict(list)
with open('unpairedDict'+str(filenum)+'.txt', 'w') as unpairedDict:
print >> unpairedDict, json.dumps(master_hash, sort_keys=True,
indent=4, separators=(',', ': '))
with open('confirmedPairs'+str(filenum)+'.txt', 'w') as confirmed:
final_dict = pairBarcodes(master_hash, pair_dict, final_dict,
PAIR_THRESHOLD, confirmed)
if pairing:
with open('pairedDict'+str(filenum)+'.txt', 'w') as pairedDict:
print >> pairedDict, json.dumps(final_dict, sort_keys=True,
indent=4, separators=(',', ': '))
del final_dict
final_dict = collections.defaultdict(list)
barcode = seq[BARCODE_TRUNCATE:BARCODE_LENGTH]
seq_F = seq[BARCODE_LENGTH:-ENDTRIM]
qual_F = qual_F[BARCODE_LENGTH:-ENDTRIM]
if seq_R and len(seq_R) > 30:
barcode_RC = ''.join([COMPLEMENT_DICT[base] for base in barcode])[::-1]
# look for RC of barcode at the end of a reverse read of a
# fragment shorter than the read length
if seq_R[-BARCODE_LENGTH : -BARCODE_TRUNCATE] == barcode_RC:
seq_R = seq_R[ENDTRIM : -BARCODE_LENGTH]
qual_R = qual_R[ENDTRIM : -BARCODE_LENGTH]
# look for RC of the after-barcode sequences and lost-U sequences
already_found_afterbarcodeseq = 0
for afterbarcodeseqrc in AFTER_BARCODE_RC_SEQS:
if already_found_afterbarcodeseq:
break
test = 0
test, seq_R, qual_R = reversetrim(seq_R, qual_R, afterbarcodeseqrc)
if test:
for lostuseqrc in LOSTUSEQ_RCS:
if test:
test, seq_R, qual_R = reversetrim(seq_R, qual_R, lostuseqrc)
else:
already_found_afterbarcodeseq = 1
break
else:
seq_R = seq_R[ENDTRIM:-ENDTRIM]
qual_R = qual_R[ENDTRIM:-ENDTRIM]
test, seq_F, qual_F, seq_R, qual_R, bad_after, readCount, ABcounts = filters(seq_F, qual_F, seq_R, qual_R,
bad_after, readCount, ABcounts,
AFTER_BARCODE_SEQS, LOSTUSEQS)
if test == 1:
return readCount, bad_after, ABcounts, passed, start_time
else:
if seq_R and len(seq_R) > 30:
seq_R, qual_R = sequence_checker(seq_R, qual_R, LOSTUSEQS)
master_hash[barcode].append(seq_R)
if quality:
master_hash[barcode].append(qual_R)
else:
master_hash[barcode].append("")
if quality:
master_hash[barcode].append("")
seq_F, qual_F = sequence_checker(seq_F, qual_F, LOSTUSEQS)
master_hash[barcode].append(seq_F)
if quality:
master_hash[barcode].append(qual_F)
passed += 1
return readCount, bad_after, ABcounts, passed, start_time
def reversetrim(seq_R, qual_R, checkseq):
'''trims sequences off front end of a reverse sequence read'''
if seq_R[-len(checkseq):] == checkseq:
seq_R = seq_R[:-len(checkseq)]
qual_R = qual_R[:-len(checkseq)]
return 1, seq_R, qual_R
else:
return 0, seq_R, qual_R
def filters(seq_F, qual_F, seq_R, qual_R, bad_after, readCount, ABcounts,
AFTER_BARCODE_SEQS, LOSTUSEQS):
"Collection of calls to the various filter functions"
# read must have correct defined sequence after the barcode,
# otherwise count and throw out
if AFTER_BARCODE_SEQS:
check = after_barcode_seq(seq_F, AFTER_BARCODE_SEQS)
if check == 1:
bad_after += 1
return 1, seq_F, qual_F, seq_R, qual_R, bad_after, readCount, ABcounts
else:
i = 0
for ABseq in AFTER_BARCODE_SEQS:
if check == ABseq:
ABcounts[i] += 1
seq_F = seq_F[len(ABseq):]
qual_F = qual_F[len(ABseq):]
break
i += 1
# trim at N's
seq_F, qual_F, seq_R, qual_R = Ntest(seq_F, qual_F, seq_R, qual_R)
# look for lostU sequence after barcode, remove it
seq_F, qual_F = trim_lost_U(seq_F, qual_F, LOSTUSEQS)
# if everything is good,
return 0, seq_F, qual_F, seq_R, qual_R, bad_after, readCount, ABcounts
def Ntest(seq_F, qual_F, seq_R, qual_R):
"trim sequences with N's"
Ntest = 0
for i in xrange(0, len(seq_F)):
if seq_F[i] == 'N':
Ntest = 1
break
if Ntest == 1:
seq_F = seq_F[0:i-1]
qual_F = qual_F[0:i-1]
    if seq_R != 0:
        Ntest = 0
        for i in xrange(1, len(seq_R)):
if seq_R[i] == 'N':
Ntest = 1
break
if Ntest == 1:
seq_R = seq_R[0:i-1]
qual_R = qual_R[0:i-1]
return seq_F, qual_F, seq_R, qual_R
def after_barcode_seq(seq_F, AFTER_BARCODE_SEQS):
"throw out sequences without the right sequence after the barcode"
for ABseq in AFTER_BARCODE_SEQS:
if seq_F[:len(ABseq)] == ABseq:
return ABseq
# if none of the allowed sequences are found,
return 1
def sequence_checker(sequence, qual, LOSTUSEQS):
"check the sequence for the LostU sequence or its RC and trim"
# look for LOSTUSEQ or its RC with no mismatches. Trim.
COMPLEMENT_DICT = {'A':'T', 'G':'C', 'T':'A', 'C':'G', 'N':'N'}
testseq = ''
for lostuseq in LOSTUSEQS:
testseq += lostuseq
if len(testseq) > 7:
testseq_RC = ''.join([COMPLEMENT_DICT[base] for base in testseq])[::-1]
for i in xrange(len(sequence)-len(testseq)+1):
if sequence[i:i+len(testseq)] == testseq or sequence[i:i+len(testseq)] == testseq_RC:
sequence = sequence[:i]
qual = qual[:i]
break
return sequence, qual
def trim_lost_U(seq_F, qual_F, LOSTUSEQS):
""" test for lost U at the 3' end of the PCR primer sequence """
keepgoing = 1
for lostuseq in LOSTUSEQS:
if keepgoing:
if len(seq_F) < len(lostuseq):
return seq_F, qual_F
if seq_F[:len(lostuseq)] == lostuseq:
seq_F = seq_F[len(lostuseq):]
qual_F = qual_F[len(lostuseq):]
#if LOSTUSEQ[0] found, also look for LOSTUSEQ[1] etc.
else:
keepgoing = 0
return seq_F, qual_F
def pairBarcodes(master_hash, pair_dict, final_dict, PAIR_THRESHOLD, confirmed):
numPairs = 0
pair_dict_copy = copy.deepcopy(pair_dict)
for barcode in master_hash:
if barcode in pair_dict_copy:
# sort the inner dict by values and take the 1st one = best_match
sorted_pair_candidates = pair_dict_copy[barcode].most_common(1)
if sorted_pair_candidates:
best_match = sorted_pair_candidates[0][0]
if best_match == 'X':
continue
# verify pair match by checking whether barcode
# is best_match's most frequent pair
if (sorted_pair_candidates[0][1] > PAIR_THRESHOLD and
pair_dict_copy[best_match]):
                    # make sure best_match is still present in pair_dict_copy,
                    # i.e., hasn't been emptied out in a prior cycle
cross_check = pair_dict_copy[best_match].most_common(1)
if (barcode == cross_check[0][0] and
cross_check[0][1] > PAIR_THRESHOLD and
master_hash.get(best_match) is not None):
# we have a verified pair!
numPairs += 1
confirmed.write(barcode + ' ' + best_match + '\n')
seq_list1 = master_hash.get(barcode)
seq_list2 = master_hash.get(best_match)
final_dict[barcode] = seq_list1 + seq_list2
# insert dummy as flag so when best_match is tried
# there is no duplication
pair_dict_copy[best_match]['X'] += 9999999
else:
final_dict[barcode] = master_hash[barcode]
else:
final_dict[barcode] = master_hash[barcode]
else:
final_dict[barcode] = master_hash[barcode]
else:
final_dict[barcode] = master_hash[barcode]
print "Found " + str(numPairs) + " barcode pairs"
return final_dict
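# Minimal sketch (placeholder data, not from a real run) of the mutual-best-match
# check performed by pairBarcodes(): two barcodes are only merged when each one is
# the other's most frequent partner and the count exceeds PAIR_THRESHOLD.
def _pairing_example():
    reads = collections.defaultdict(list, {'AAAA': ['read1'], 'TTTT': ['read2']})
    pairs = collections.defaultdict(collections.Counter)
    pairs['AAAA']['TTTT'] = 5
    pairs['TTTT']['AAAA'] = 5
    merged = collections.defaultdict(list)
    with open('confirmedPairs_example.txt', 'w') as confirmed:
        merged = pairBarcodes(reads, pairs, merged, 2, confirmed)
    # Whichever barcode is visited first ends up holding both read lists,
    # e.g. {'AAAA': ['read1', 'read2']}; its partner is flagged with 'X'.
    return merged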
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('infile')
parser.add_argument('afterBarcode', nargs='?', default=False,
help='one or more (comma-separated) sequences, one of which is required to follow the barcode to confirm that the read is not spurious')
parser.add_argument('--SCA2', action='store_true', default=False,
help='call when adapters with the SCA2 PCR primer sequence are used. SCA2: 5-ACACGACGTGAACGATAGGAATTG-3')
parser.add_argument('--primer', action='store',
help='primer sequence used in the PCR. U for uracil. Default is TR. --SCA2 can be used for SCA2.')
parser.add_argument('--pairSeparateFile', action='store_true', default=False,
help='call for two-tube barcode pairing')
parser.add_argument('--pairSameFile', action='store_true', default=False,
help='call for one-tube barcode pairing')
parser.add_argument('--useFwdUnpaired', action='store_true', default=False,
help='call to use forward reads whose reverse pairs were dropped by trimmomatic')
parser.add_argument('--partial', action='store_true', default=False,
help='print dictionaries every 1/NUMFRACTIONS of the data')
parser.add_argument('--FLASH', action='store_true', default=False,
help='merge overlapping forward and reverse reads with FLASH')
parser.add_argument('--quality', action='store_true', default=False,
                        help='add fastq quality line to the dictionary along with the sequence line, to allow error-checking by the assembler (e.g., SPAdes)')
parser.add_argument('--BARCODE_LENGTH', action="store", dest="BARCODE_LENGTH", type=int, default=16,
help='length of the barcode, default 16.')
parser.add_argument('--BARCODE_TRUNCATE', action="store", dest="BARCODE_TRUNCATE", type=int, default=0,
help='ignore this many bases at the beginning of the barcode, default 0.')
parser.add_argument('--PAIR_THRESHOLD', action="store", dest="PAIR_THRESHOLD", type=int, default=1,
help='minimum number of times a pair of barcodes needs to be seen to count as a pair, default 1.')
parser.add_argument('--ENDTRIM', action="store", dest="ENDTRIM", type=int, default=2,
                        help='trim this many bases from the ends of the read during processing to remove low-confidence bases, default 2.')
parser.add_argument('--NUMFRACTIONS', action="store", dest="NUMFRACTIONS", type=int, default=10,
help='number of fractional dictionaries to print when using the --partial flag, default 10.')
args = parser.parse_args()
main(args.infile, args.afterBarcode, args.SCA2, args.primer, args.pairSeparateFile,
args.pairSameFile, args.useFwdUnpaired, args.partial, args.FLASH, args.quality,
args.BARCODE_LENGTH, args.BARCODE_TRUNCATE, args.PAIR_THRESHOLD, args.ENDTRIM,
args.NUMFRACTIONS)
|
|
# -*- coding: utf-8 -*-
"""
Converts the image-caption model for WebDNN so that it runs in a web browser
1. ResNet feature extractor + image feature embedding
2. Sentence generator
Targets WebDNN 1.1.0.
The Chainer converter does not support conversion of the LSTM layer or the input switching
(image feature / word vector), so the IR for model 2 has to be constructed manually.
Model 1:
[image] -> ResNet-50 -> image_vec -> [image_embedded_feature]
Model 2:
[image_embedded_feature] -> ElementwiseMul |
|-> ElementwiseSum -> lstm -> out_word -> [word_probability]
[word id] -> word_vec -> ElementwiseMul |
ElementwiseMul is used as a switch that selects which input is supplied to the LSTM:
when image_embedded_feature is used, the corresponding multiplier is set to 1, otherwise to 0.
The image_vec layer is kept in model 1 to avoid unnecessary computation in model 2.
"""
import argparse
import json
import os
import sys
import pickle
import chainer
import chainer.computational_graph
import numpy as np
from webdnn.backend.interface.generator import generate_descriptor
from webdnn.graph.axis import Axis
from webdnn.graph.graph import Graph
from webdnn.graph.operators.axiswise_bias import AxiswiseBias
from webdnn.graph.operators.elementwise_mul import ElementwiseMul
from webdnn.graph.operators.elementwise_sum import ElementwiseSum
from webdnn.graph.operators.linear import Linear
from webdnn.graph.operators.lstm import LSTM
from webdnn.graph.operators.softmax import Softmax
from webdnn.graph.order import OrderNC, OrderC, OrderCN, OrderNTC, OrderNT
from webdnn.graph.traverse import dump_dot
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.graph.operators.embedding import Embedding
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.util import console
from net import ImageCaption
resnet_link = None
def get_resnet_io_variable(nn_input=None):
global resnet_link
if resnet_link is None:
resnet_link = chainer.links.model.vision.resnet.ResNet50Layers()
out_layer_name = "pool5"
if nn_input is None:
nn_input = chainer.Variable(
np.zeros((1, 3, 224, 224), dtype=np.float32))
nn_output = resnet_link(nn_input, layers=[out_layer_name])[out_layer_name]
return nn_input, nn_output
def generate_graph_model1(caption_net):
resnet_in, resnet_out = get_resnet_io_variable()
image_vec_out = caption_net.image_vec(resnet_out)
converter = ChainerConverter()
graph = converter.convert([resnet_in], [image_vec_out])
return graph
def _convert_lstm_to_webdnn_order(x):
    # NOTE:
    # In WebDNN, W_i, W_h, and b contain the weights for the input gate (v_i), forget gate (v_f),
    # activated value (v_a), and output gate (v_o), packed in the order (v_i, v_f, v_a, v_o).
    # In chainer, however, they are packed in a different order (v_a, v_i, v_f, v_o).
    # Also, webdnn packs these weights as a tensor of shape (C1 or C2, 4, C2), whereas chainer
    # packs them as (C1 or C2, C2, 4).
    # Based on webdnn/test/operators_test/lstm_test.py, this is the inverse conversion of
    # x.reshape(x.shape[0], 4, x.shape[1] // 4).swapaxes(1, 2)[:, :, [2, 0, 1, 3]].reshape(x.shape)
return x.reshape(x.shape[0], x.shape[1] // 4, 4).swapaxes(1, 2)[:, [1, 2, 0, 3], :].reshape(x.shape)
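# Illustrative sanity check (not part of the original script): verifies that the
# reordering above undoes the packing used in webdnn/test/operators_test/lstm_test.py.
def _lstm_order_roundtrip_check(c1=3, c2=8):
    x = np.arange(c1 * 4 * c2, dtype=np.float32).reshape(c1, 4 * c2)
    packed = x.reshape(x.shape[0], 4, x.shape[1] // 4).swapaxes(1, 2)[:, :, [2, 0, 1, 3]].reshape(x.shape)
    assert np.array_equal(_convert_lstm_to_webdnn_order(packed), x)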
def generate_graph_model2(caption_net, hidden_num):
# inputs
var_input_img = Variable([1, 1, hidden_num], OrderNTC)
var_input_word = Variable([1, 1], OrderNT)
var_switch_img = Variable([1, 1, hidden_num], OrderNTC)
var_switch_word = Variable([1, 1, hidden_num], OrderNTC)
var_last_h = Variable([1, hidden_num], OrderNC)
var_last_c = Variable([1, hidden_num], OrderNC)
# prepare for lstm
var_emb_word, = Embedding(None)(var_input_word, ConstantVariable(
caption_net.word_vec.W.data, OrderCN)) # OrderNTC
var_lstm_input = (var_emb_word * var_switch_word) + \
(var_input_img * var_switch_img)
# lstm
lstm_opr = LSTM(None, use_bias=True, return_sequences=False,
activation="tanh", recurrent_activation="sigmoid",
use_initial_h=True, use_initial_c=True)
w_input = _convert_lstm_to_webdnn_order(caption_net.lstm.upward.W.data.T)
w_hidden = _convert_lstm_to_webdnn_order(caption_net.lstm.lateral.W.data.T)
b = _convert_lstm_to_webdnn_order(
caption_net.lstm.upward.b.data[None, :])[0]
var_lstm_h, var_lstm_c = lstm_opr(x=var_lstm_input,
w_input=ConstantVariable(
w_input, OrderCN),
w_hidden=ConstantVariable(
w_hidden, OrderCN),
b=ConstantVariable(b, OrderC),
initial_h=var_last_h, initial_c=var_last_c)
# word probability
var_word_score, = Linear(None)(var_lstm_h, ConstantVariable(
caption_net.out_word.W.data.T, OrderCN))
var_word_score_biased, = AxiswiseBias(None, axis=Axis.C)(var_word_score,
ConstantVariable(caption_net.out_word.b.data, OrderC))
var_word_prob, = Softmax(None, axis=Axis.C)(var_word_score_biased)
return Graph([var_input_img, var_input_word, var_switch_img, var_switch_word, var_last_h, var_last_c],
[var_word_prob, var_lstm_h, var_lstm_c])
def generate_example_io(caption_net, word_ids, image_path):
import PIL.Image
input_img_feat = resnet_link.extract([PIL.Image.open(image_path)], layers=["pool5"])[
"pool5"] # Chainer.Variable(1, 2048)
caption_net.lstm.reset_state()
input_img_embedded = caption_net.image_vec(input_img_feat)
image_lstm_output = caption_net.lstm(input_img_embedded)
bos_raw_vec = chainer.Variable(
np.array([[word_ids["<S>"]]], dtype=np.int32))
bos_word_vec = caption_net.word_vec(bos_raw_vec)
bos_lstm_output = caption_net.lstm(bos_word_vec)
bos_word_score = caption_net.out_word(bos_lstm_output)
bos_word_prob = chainer.functions.softmax(bos_word_score)
return {"input_img_embedded": input_img_embedded.data.flatten().tolist(),
"image_lstm_output": image_lstm_output.data.flatten().tolist(),
"bos_raw_vec": bos_raw_vec.data.flatten().tolist(),
"bos_lstm_output": bos_lstm_output.data.flatten().tolist(),
"bos_word_prob": bos_word_prob.data.flatten().tolist()}
def main():
sys.setrecursionlimit(10000) # workaround for deep copying large graph
parser = argparse.ArgumentParser()
parser.add_argument("--backend", default="webgpu,webassembly")
parser.add_argument("--encoding", default="eightbit")
parser.add_argument('--out', '-o', default='webdnn/image-caption-model',
help='Directory to output the graph descriptor')
parser.add_argument('--sentence', '-s', required=True, type=str,
help='sentence dataset file path')
parser.add_argument('--model', '-m', required=True, type=str,
help='input model file path')
parser.add_argument("--example_image",
help="example image for comparing output")
parser.add_argument("--visualize_ir", action="store_true")
args = parser.parse_args()
os.makedirs(args.out, exist_ok=True)
out_dir_graph1 = os.path.join(args.out, "image-feature")
out_dir_graph2 = os.path.join(args.out, "caption-generation")
hidden_num = 512
with open(args.sentence, 'rb') as f:
sentence_dataset = pickle.load(f)
word_ids = sentence_dataset['word_ids']
word_num = len(word_ids)
id_to_word = [""] * word_num
for k, v in word_ids.items():
id_to_word[v] = k
with open(os.path.join(args.out, "word_data.json"), "w") as f:
json.dump({"id_to_word": id_to_word,
"bos_id": word_ids["<S>"],
"eos_id": word_ids["</S>"],
"word_num": word_num,
"hidden_num": hidden_num}, f)
caption_net = ImageCaption(
word_num=word_num, feature_num=2048, hidden_num=hidden_num)
chainer.serializers.load_hdf5(args.model, caption_net)
graph1 = generate_graph_model1(caption_net)
graph2 = generate_graph_model2(caption_net, hidden_num=hidden_num)
if args.example_image:
example_io = generate_example_io(
caption_net, word_ids, args.example_image)
with open(os.path.join(args.out, "example_io.json"), "w") as f:
json.dump(example_io, f)
if args.visualize_ir:
ir_dot_path = os.path.join(args.out, "ir.dot")
with open(ir_dot_path, "w") as f:
f.write(dump_dot(graph2))
console.stderr(
f"IR graph can be visualized with graphviz command: 'dot {ir_dot_path} -T png -o output.png'")
any_backend_failed = False
last_backend_exception = None
for backend in args.backend.split(","):
try:
graph_exec_data = generate_descriptor(
backend, graph1, constant_encoder_name=args.encoding)
graph_exec_data.save(out_dir_graph1)
graph_exec_data = generate_descriptor(
backend, graph2, constant_encoder_name=args.encoding)
graph_exec_data.save(out_dir_graph2)
except Exception as ex:
any_backend_failed = True
last_backend_exception = ex
console.error(
f"Failed generating descriptor for backend {backend}: {str(ex)}\n")
if any_backend_failed:
raise last_backend_exception
if __name__ == "__main__":
with chainer.using_config('train', False):
with chainer.using_config('enable_backprop', True):
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2020_06_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
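# ----------------------------------------------------------------------------
# Illustrative (hand-written, non-generated) usage sketch. It assumes a
# NetworkManagementClient built elsewhere exposes this operation group as
# `client.security_rules`; the resource names below are placeholders.
def _example_security_rules_usage(client):
    # list() returns a lazily-paged iterator of SecurityRule objects.
    for rule in client.security_rules.list("example-rg", "example-nsg"):
        print(rule.name)

    # Long-running operations return an LROPoller; result() blocks until done.
    poller = client.security_rules.begin_delete("example-rg", "example-nsg", "example-rule")
    poller.result()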
|
|
"""
This file contains the plotting functionalities that are available for Pastas.
Examples
--------
ml.plot.decomposition()
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from pandas import DataFrame, Timestamp, concat
from scipy.stats import probplot
from .decorators import model_tmin_tmax
from .stats import acf
logger = logging.getLogger(__name__)
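# Illustrative sketch (not part of pastas): given a solved Model `ml` that
# exposes this class as `ml.plot`, as in the module docstring above, the
# built-in overview plots are created as follows. Arguments are examples only.
def _example_plots(ml):
    ml.plot.plot(tmin="2000", tmax="2010")  # observations vs. simulation
    ml.plot.results(figsize=(10, 8))        # quick overview with contributions
    ml.plot.decomposition(split=True)       # decomposition per stress
    ml.plot.diagnostics()                   # residual / noise diagnostics
    plt.show()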
class Plotting:
def __init__(self, ml):
self.ml = ml # Store a reference to the model class
def __repr__(self):
msg = "This module contains all the built-in plotting options that " \
"are available."
return msg
@model_tmin_tmax
def plot(self, tmin=None, tmax=None, oseries=True, simulation=True,
ax=None, figsize=None, legend=True, **kwargs):
"""Make a plot of the observed and simulated series.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
oseries: bool, optional
True to plot the observed time series.
simulation: bool, optional
True to plot the simulated time series.
ax: Matplotlib.axes instance, optional
Axes to add the plot to.
figsize: tuple, optional
Tuple with the height and width of the figure in inches.
legend: bool, optional
Boolean to determine to show the legend (True) or not (False).
Returns
-------
ax: matplotlib.axes
matplotlib axes with the simulated and optionally the observed
timeseries.
"""
if ax is None:
_, ax = plt.subplots(figsize=figsize, **kwargs)
ax.set_title("Results of {}".format(self.ml.name))
if oseries:
o = self.ml.observations()
o_nu = self.ml.oseries.series.drop(o.index)
if not o_nu.empty:
# plot parts of the oseries that are not used in grey
o_nu.plot(linestyle='', marker='.', color='0.5', label='',
ax=ax)
o.plot(linestyle='', marker='.', color='k', ax=ax)
if simulation:
sim = self.ml.simulate(tmin=tmin, tmax=tmax)
sim.plot(ax=ax)
plt.xlim(tmin, tmax)
plt.ylabel("Groundwater levels [meter]")
if legend:
plt.legend()
plt.tight_layout()
return ax
@model_tmin_tmax
def results(self, tmin=None, tmax=None, figsize=(10, 8), split=False,
adjust_height=False, **kwargs):
"""Plot different results in one window to get a quick overview.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
figsize: tuple, optional
tuple of size 2 to determine the figure size in inches.
split: bool, optional
            Split the stresses in multiple stresses when possible. Default is
            False.
adjust_height: bool, optional
Adjust the height of the graphs, so that the vertical scale of all
the graphs on the left is equal
Returns
-------
matplotlib.axes
"""
# Number of rows to make the figure with
o = self.ml.observations()
sim = self.ml.simulate(tmin=tmin, tmax=tmax)
res = self.ml.residuals(tmin=tmin, tmax=tmax)
plot_noise = self.ml.settings["noise"] and self.ml.noisemodel
if plot_noise:
noise = self.ml.noise(tmin=tmin, tmax=tmax)
contribs = self.ml.get_contributions(split=split, tmin=tmin, tmax=tmax)
fig = plt.figure(figsize=figsize, **kwargs)
ylims = [(min([sim.min(), o[tmin:tmax].min()]),
max([sim.max(), o[tmin:tmax].max()]))]
if adjust_height:
if plot_noise:
ylims.append((min([res.min(), noise.min()]),
max([res.max(), noise.max()])))
else:
ylims.append((res.min(), res.max()))
for contrib in contribs:
hs = contrib[tmin:tmax]
if hs.empty:
if contrib.empty:
ylims.append((0.0, 0.0))
else:
                        ylims.append((contrib.min(), contrib.max()))
else:
ylims.append((hs.min(), hs.max()))
hrs = get_height_ratios(ylims)
else:
hrs = [2] + [1] * (len(contribs) + 1)
nrows = len(contribs) + 2
gs = fig.add_gridspec(ncols=2, nrows=nrows, width_ratios=[2, 1],
height_ratios=hrs)
# Main frame
ax1 = fig.add_subplot(gs[0, 0])
o_nu = self.ml.oseries.series.drop(o.index)
if not o_nu.empty:
# plot parts of the oseries that are not used in grey
o_nu.plot(ax=ax1, linestyle='', marker='.', color='0.5', label='',
x_compat=True)
o.plot(ax=ax1, linestyle='', marker='.', color='k', x_compat=True)
# add evp to simulation
sim.name = '{} ($R^2$ = {:0.1f}%)'.format(
sim.name, self.ml.stats.evp(tmin=tmin, tmax=tmax))
sim.plot(ax=ax1, x_compat=True)
ax1.legend(loc=(0, 1), ncol=3, frameon=False)
        ax1.set_ylim(ylims[0])
        ax1.grid(True)
# Residuals and noise
ax2 = fig.add_subplot(gs[1, 0], sharex=ax1)
res.plot(ax=ax2, color='k', x_compat=True)
if plot_noise:
noise.plot(ax=ax2, x_compat=True)
ax2.axhline(0.0, color='k', linestyle='--', zorder=0)
ax2.legend(loc=(0, 1), ncol=3, frameon=False)
        if adjust_height:
            ax2.set_ylim(ylims[1])
        ax2.grid(True)
# Stats frame
ax3 = fig.add_subplot(gs[0:2, 1])
ax3.set_title('Model Information', loc='left')
# Add a row for each stressmodel
i = 0
for sm_name in self.ml.stressmodels:
# get the step-response
step = self.ml.get_step_response(sm_name, add_0=True)
if i == 0:
rmax = step.index.max()
else:
rmax = max(rmax, step.index.max())
step_row = i + 2
# plot the contribution
sm = self.ml.stressmodels[sm_name]
nsplit = sm.get_nsplit()
if split and nsplit > 1:
for _ in range(nsplit):
ax = fig.add_subplot(gs[i + 2, 0], sharex=ax1)
contribs[i].plot(ax=ax, x_compat=True)
ax.legend(loc=(0, 1), ncol=3, frameon=False)
if adjust_height:
ax.set_ylim(ylims[2 + i])
ax.grid(True)
i = i + 1
else:
ax = fig.add_subplot(gs[i + 2, 0], sharex=ax1)
contribs[i].plot(ax=ax, x_compat=True)
title = [stress.name for stress in sm.stress]
if len(title) > 3:
title = title[:3] + ["..."]
plt.title("Stresses: %s" % title, loc="right")
ax.legend(loc=(0, 1), ncol=3, frameon=False)
if adjust_height:
ax.set_ylim(ylims[2 + i])
ax.grid(True)
i = i + 1
# plot the step-reponse
axb = fig.add_subplot(gs[step_row, 1])
step.plot(ax=axb)
if adjust_height:
axb.grid(True)
# xlim sets minorticks back after plots:
ax1.minorticks_off()
ax1.set_xlim(tmin, tmax)
axb.set_xlim(0, rmax)
fig.tight_layout(pad=0.0)
# Draw parameters table
parameters = self.ml.parameters.copy()
parameters['name'] = parameters.index
cols = ["name", "optimal", "stderr"]
parameters = parameters.loc[:, cols]
for name, vals in parameters.loc[:, cols].iterrows():
parameters.loc[name, "optimal"] = '{:.2f}'.format(vals.optimal)
stderr_perc = np.abs(np.divide(vals.stderr, vals.optimal) * 100)
parameters.loc[name, "stderr"] = '{:.1f}{}'.format(stderr_perc,
"\u0025")
ax3.axis('off')
# loc='upper center'
ax3.table(bbox=(0., 0., 1.0, 1.0), cellText=parameters.values,
colWidths=[0.5, 0.25, 0.25], colLabels=cols)
return fig.axes
@model_tmin_tmax
def decomposition(self, tmin=None, tmax=None, ytick_base=True, split=True,
figsize=(10, 8), axes=None, name=None,
return_warmup=False, min_ylim_diff=None, **kwargs):
"""Plot the decomposition of a time-series in the different stresses.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
ytick_base: Boolean or float, optional
Make the ytick-base constant if True, set this base to float if
float.
split: bool, optional
Split the stresses in multiple stresses when possible. Default is
True.
axes: matplotlib.Axes instance, optional
Matplotlib Axes instance to plot the figure on to.
figsize: tuple, optional
tuple of size 2 to determine the figure size in inches.
name: str, optional
Name to give the simulated time series in the legend.
return_warmup: bool, optional
Include the warmup period or not.
min_ylim_diff: float, optional
Float with the difference in the ylimits.
**kwargs: dict, optional
Optional arguments, passed on to the plt.subplots method.
Returns
-------
axes: list of matplotlib.axes
"""
o = self.ml.observations()
# determine the simulation
sim = self.ml.simulate(tmin=tmin, tmax=tmax,
return_warmup=return_warmup)
if name is not None:
sim.name = name
# determine the influence of the different stresses
contribs = self.ml.get_contributions(split=split, tmin=tmin, tmax=tmax,
return_warmup=return_warmup)
names = [s.name for s in contribs]
if self.ml.transform:
contrib = self.ml.get_transform_contribution(tmin=tmin, tmax=tmax)
contribs.append(contrib)
names.append(self.ml.transform.name)
# determine ylim for every graph, to scale the height
ylims = [(min([sim.min(), o[tmin:tmax].min()]),
max([sim.max(), o[tmin:tmax].max()]))]
for contrib in contribs:
hs = contrib[tmin:tmax]
if hs.empty:
if contrib.empty:
ylims.append((0.0, 0.0))
else:
ylims.append((contrib.min(), contrib.max()))
else:
ylims.append((hs.min(), hs.max()))
if min_ylim_diff is not None:
for i, ylim in enumerate(ylims):
if np.diff(ylim) < min_ylim_diff:
ylims[i] = (np.mean(ylim) - min_ylim_diff / 2,
np.mean(ylim) + min_ylim_diff / 2)
# determine height ratios
height_ratios = get_height_ratios(ylims)
nrows = len(contribs) + 1
if axes is None:
# open a new figure
gridspec_kw = {'height_ratios': height_ratios}
fig, axes = plt.subplots(nrows, sharex=True, figsize=figsize,
gridspec_kw=gridspec_kw, **kwargs)
axes = np.atleast_1d(axes)
o_label = o.name
set_axes_properties = True
else:
if len(axes) != nrows:
msg = 'Make sure the number of axes equals the number of ' \
'series'
raise Exception(msg)
fig = axes[0].figure
o_label = ''
set_axes_properties = False
# plot simulation and observations in top graph
o_nu = self.ml.oseries.series.drop(o.index)
if not o_nu.empty:
# plot parts of the oseries that are not used in grey
o_nu.plot(linestyle='', marker='.', color='0.5', label='',
markersize=2, ax=axes[0], x_compat=True)
o.plot(linestyle='', marker='.', color='k', label=o_label,
markersize=3, ax=axes[0], x_compat=True)
sim.plot(ax=axes[0], x_compat=True)
if set_axes_properties:
axes[0].set_title('observations vs. simulation')
axes[0].set_ylim(ylims[0])
axes[0].grid(True)
axes[0].legend(ncol=3, frameon=False)
if ytick_base and set_axes_properties:
if isinstance(ytick_base, bool):
# determine the ytick-spacing of the top graph
yticks = axes[0].yaxis.get_ticklocs()
if len(yticks) > 1:
ytick_base = yticks[1] - yticks[0]
else:
ytick_base = None
axes[0].yaxis.set_major_locator(
MultipleLocator(base=ytick_base))
# plot the influence of the stresses
for i, contrib in enumerate(contribs):
ax = axes[i + 1]
contrib.plot(ax=ax, x_compat=True)
if set_axes_properties:
if ytick_base:
# set the ytick-spacing equal to the top graph
locator = MultipleLocator(base=ytick_base)
ax.yaxis.set_major_locator(locator)
ax.set_title(names[i])
ax.set_ylim(ylims[i + 1])
ax.grid(True)
ax.minorticks_off()
if set_axes_properties:
axes[0].set_xlim(tmin, tmax)
fig.tight_layout(pad=0.0)
return axes
@model_tmin_tmax
def diagnostics(self, tmin=None, tmax=None, figsize=(10, 8), **kwargs):
"""Plot a window that helps in diagnosing basic model assumptions.
Parameters
----------
tmin
tmax
figsize: tuple, optional
Tuple with the height and width of the figure in inches.
Returns
-------
axes: list of matplotlib.axes
"""
if self.ml.settings["noise"]:
res = self.ml.noise(tmin=tmin, tmax=tmax)
else:
res = self.ml.residuals(tmin=tmin, tmax=tmax)
fig = plt.figure(figsize=figsize, **kwargs)
shape = (2, 3)
ax = plt.subplot2grid(shape, (0, 0), colspan=2, rowspan=1)
ax.set_title(res.name)
res.plot(ax=ax)
ax1 = plt.subplot2grid(shape, (1, 0), colspan=2, rowspan=1)
ax1.set_ylabel('Autocorrelation')
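# approximate 95% confidence bounds for the autocorrelation of a
# white-noise series: +/- 1.96 / sqrt(N)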
conf = 1.96 / np.sqrt(res.index.size)
r = acf(res)
ax1.axhline(conf, linestyle='--', color="dimgray")
ax1.axhline(-conf, linestyle='--', color="dimgray")
ax1.stem(r.index, r.values, basefmt="gray")
ax1.set_xlim(r.index.min(), r.index.max())
ax1.set_xlabel("Lag (Days)")
ax2 = plt.subplot2grid(shape, (0, 2), colspan=1, rowspan=1)
res.hist(bins=20, ax=ax2)
ax3 = plt.subplot2grid(shape, (1, 2), colspan=1, rowspan=1)
probplot(res, plot=ax3, dist="norm", rvalue=True)
c = ax.get_lines()[0].get_color()
ax3.get_lines()[0].set_color(c)
fig.tight_layout(pad=0.0)
return fig.axes
def block_response(self, stressmodels=None, ax=None, figsize=None,
**kwargs):
"""Plot the block response for a specific stressmodels.
Parameters
----------
stressmodels: list, optional
List with the stressmodels to plot the block response for.
ax: Matplotlib.axes instance, optional
Axes to add the plot to.
figsize: tuple, optional
Tuple with the height and width of the figure in inches.
Returns
-------
matplotlib.axes
matplotlib axes instance.
"""
if ax is None:
_, ax = plt.subplots(figsize=figsize, **kwargs)
if not stressmodels:
stressmodels = self.ml.stressmodels.keys()
legend = []
for name in stressmodels:
if hasattr(self.ml.stressmodels[name], 'rfunc'):
self.ml.get_block_response(name).plot(ax=ax)
legend.append(name)
else:
logger.warning("Stressmodel {} not in stressmodels "
"list.".format(name))
plt.xlim(0)
plt.xlabel("Time [days]")
plt.legend(legend)
return ax
def step_response(self, stressmodels=None, ax=None, figsize=None,
**kwargs):
"""Plot the block response for a specific stressmodels.
Parameters
----------
stressmodels: list, optional
List with the stressmodels to plot the step response for.
Returns
-------
matplotlib.axes
matplotlib axes instance.
"""
if ax is None:
_, ax = plt.subplots(figsize=figsize, **kwargs)
if not stressmodels:
stressmodels = self.ml.stressmodels.keys()
legend = []
for name in stressmodels:
if hasattr(self.ml.stressmodels[name], 'rfunc'):
self.ml.get_step_response(name).plot(ax=ax)
legend.append(name)
else:
logger.warning("Stressmodel {} not in stressmodels "
"list.".format(name))
plt.xlim(0)
plt.xlabel("Time [days]")
plt.legend(legend)
return ax
@model_tmin_tmax
def stresses(self, tmin=None, tmax=None, cols=1, split=True, sharex=True,
figsize=(10, 8), **kwargs):
"""This method creates a graph with all the stresses used in the
model.
Parameters
----------
tmin
tmax
cols: int
number of columns used for plotting.
split: bool, optional
Split the stresses in multiple stresses when possible.
sharex: bool, optional
Share the x-axis between the subplots.
figsize: tuple, optional
Tuple with the height and width of the figure in inches.
Returns
-------
axes: matplotlib.axes
matplotlib axes instance.
"""
stresses = []
for name in self.ml.stressmodels.keys():
nstress = len(self.ml.stressmodels[name].stress)
if split and nstress > 1:
for istress in range(nstress):
stress = self.ml.get_stress(name, istress=istress)
stresses.append(stress)
else:
stress = self.ml.get_stress(name)
if isinstance(stress, list):
stresses.extend(stress)
else:
stresses.append(stress)
rows = len(stresses)
rows = -(-rows // cols)  # round up without an additional import
fig, axes = plt.subplots(rows, cols, sharex=sharex, figsize=figsize,
**kwargs)
if hasattr(axes, "flatten"):
axes = axes.flatten()
else:
axes = [axes]
for ax, stress in zip(axes, stresses):
stress.plot(ax=ax)
ax.legend([stress.name], loc=2)
plt.xlim(tmin, tmax)
fig.tight_layout(pad=0.0)
return axes
@model_tmin_tmax
def contributions_pie(self, tmin=None, tmax=None, ax=None,
figsize=None, split=True, partition='std',
wedgeprops=None, startangle=90,
autopct='%1.1f%%', **kwargs):
"""Make a pie chart of the contributions. This plot is based on the
TNO Groundwatertoolbox.
Parameters
----------
tmin: str or pandas.Timestamp, optional.
tmax: str or pandas.Timestamp, optional.
ax: matplotlib.axes, optional
Axes to plot the pie chart on. A new figure and axes will be
created if not provided.
figsize: tuple, optional
tuple of size 2 to determine the figure size in inches.
split: bool, optional
Split the stresses in multiple stresses when possible.
partition : str
statistic to use to determine contribution of stress, either
'sum' or 'std' (default).
wedgeprops: dict, optional, default None
dict containing pie chart wedge properties, default is None,
which sets edgecolor to white.
startangle: float
at which angle to start drawing wedges
autopct: str
format string to add percentages to pie chart
kwargs: dict, optional
The keyword arguments are passed on to plt.pie.
Returns
-------
ax: matplotlib.axes
"""
if ax is None:
_, ax = plt.subplots(figsize=figsize)
contribs = self.ml.get_contributions(split=split, tmin=tmin, tmax=tmax)
if partition == 'sum':
# the part of each pie is determined by the sum of the contribution
frac = [np.abs(contrib).sum() for contrib in contribs]
elif partition == 'std':
# the part of each pie is determined by the std of the contribution
frac = [contrib.std() for contrib in contribs]
else:
msg = 'Unknown value for partition: {}'.format(partition)
raise (Exception(msg))
# make sure the unexplained part is 100 - evp %
evp = self.ml.stats.evp(tmin=tmin, tmax=tmax) / 100
frac = np.array(frac) / sum(frac) * evp
frac = np.append(frac, 1 - evp)
if 'labels' not in kwargs:
labels = [contrib.name for contrib in contribs]
labels.append("Unexplained")
kwargs['labels'] = labels
if wedgeprops is None:
wedgeprops = {'edgecolor': 'w'}
ax.pie(frac, wedgeprops=wedgeprops, startangle=startangle,
autopct=autopct, **kwargs)
ax.axis('equal')
return ax
@model_tmin_tmax
def stacked_results(self, tmin=None, tmax=None, figsize=(10, 8), **kwargs):
"""Create a results plot, similar to `ml.plots.results()`, in which
the individual contributions of stresses (in stressmodels with multiple
stresses) are stacked.
Note: does not plot the individual contributions of StressModel2
Parameters
----------
tmin : str or pandas.Timestamp, optional
tmax : str or pandas.Timestamp, optional
figsize : tuple, optional
Returns
-------
axes: list of axes objects
"""
# %% Contribution per stress on model results plot
def custom_sort(t):
"""Sort by mean contribution"""
return t[1].mean()
# Create standard results plot
axes = self.ml.plots.results(tmin=tmin, tmax=tmax, figsize=figsize,
**kwargs)
nsm = len(self.ml.stressmodels)
# loop over axes showing stressmodel contributions
for i, sm in zip(range(3, 3 + 2 * nsm, 2),
self.ml.stressmodels.keys()):
# Get the contributions for StressModels with multiple
# stresses
contributions = []
sml = self.ml.stressmodels[sm]
if (len(sml.stress) > 0) and (sml._name != "StressModel2"):
nsplit = sml.get_nsplit()
if nsplit > 1:
for istress in range(len(sml.stress)):
h = self.ml.get_contribution(sm, istress=istress)
name = sml.stress[istress].name
if name is None:
name = sm
contributions.append((name, h))
else:
h = self.ml.get_contribution(sm)
name = sm
contributions.append((name, h))
contributions.sort(key=custom_sort)
# add stacked plot to correct axes
ax = axes[i]
del ax.lines[0] # delete existing line
contrib = [c[1] for c in contributions] # get timeseries
vstack = concat(contrib, axis=1)
names = [c[0] for c in contributions] # get names
ax.stackplot(vstack.index, vstack.values.T, labels=names)
ax.legend(loc="best", ncol=5, fontsize=8)
return axes
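# Usage sketch (not part of the library): with a calibrated pastas Model
# `ml` (hypothetical), the plotting methods above are reached through the
# model's plot accessor, for example:
#
#     ml.plots.results(tmin="2000", tmax="2010")
#     ml.plots.decomposition(split=True)
#     ml.plots.contributions_pie(partition="std")
#     ml.plots.stacked_results(figsize=(10, 8))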
class TrackSolve:
""" Track and visualize optimization progress for pastas models.
Parameters
----------
ml : pastas.Model
pastas Model to set up tracking for
tmin : str or pandas.Timestamp, optional
start time for simulation, by default None which
defaults to first index in ml.oseries.series
tmax : str or pandas.Timestamp, optional
end time for simulation, by default None which
defaults to last index in ml.oseries.series
update_iter : int, optional
update plot every update_iter iterations,
by default 1
Notes
-----
- Requires a matplotlib backend that supports interactive
plotting, e.g. mpl.use("TkAgg").
- Some possible speedups on the matplotlib side:
- mpl.style.use("fast")
- mpl.rcParams['path.simplify_threshold'] = 1.0
- Since only the parameters are passed to the callback function in ml.solve,
everything else passed to ml.solve must be known beforehand(?). This means
that if tmin/tmax are passed to ml.solve() and not to TrackSolve(), the
resulting plot will not correctly represent the statistics of the
optimization.
- TODO: check if more information passed to solve can be picked up
from the model object instead of having to pass to TrackSolve.
- TODO: check if statistics are calculated correctly as compared to
results from ml.solve().
- TODO: check if animation can be sped up somehow.
- TODO: check what the relationship is between no. of iterations
and the LeastSquares nfev and njev values. Model fit is only updated
every few iterations ( = nparams?). Perhaps only update figure when
fit and parameter values actually change?
Examples
--------
Create a TrackSolve object for your model:
>>> track = TrackSolve(ml)
Initialize figure:
>>> fig = track.initialize_figure()
Solve model and pass track.update_figure as callback function:
>>> ml.solve(callback=track.update_figure)
"""
def __init__(self, ml, tmin=None, tmax=None, update_iter=None):
logger.warning("TrackSolve feature under development. If you find any "
"bugs please post an issue on GitHub: "
"https://github.com/pastas/pastas/issues")
self.ml = ml
self.viewlim = 75 # no of iterations on axes by default
if update_iter is None:
self.update_iter = \
len(self.ml.parameters.loc[self.ml.parameters.vary].index)
else:
self.update_iter = update_iter # update plot every update_iter
# get tmin/tmax
if tmin is None:
self.tmin = self.ml.oseries.series.index[0]
else:
self.tmin = Timestamp(tmin)
if tmax is None:
self.tmax = self.ml.oseries.series.index[-1]
else:
self.tmax = Timestamp(tmax)
# parameters
self.parameters = DataFrame(columns=self.ml.parameters.index)
self.parameters.loc[0] = self.ml.parameters.initial.values
# iteration counter
self.itercount = 0
# calculate RMSE residuals
res = self._residuals(self.ml.parameters.initial.values)
r_rmse = np.sqrt(np.sum(res ** 2))
self.rmse_res = np.array([r_rmse])
# calculate RMSE noise
if self.ml.noisemodel is not None:
noise = self._noise(self.ml.parameters.initial.values)
n_rmse = np.sqrt(np.sum(noise ** 2))
self.rmse_noise = np.array([n_rmse])
# get observations
self.obs = self.ml.observations(tmin=self.tmin,
tmax=self.tmax)
# calculate EVP
self.evp = self._calc_evp(res.values, self.obs.values)
def _append_params(self, params):
"""Append parameters to self.parameters DataFrame and
update itercount, rmse values and evp.
Parameters
----------
params : np.array
array containing parameters
"""
# update itercount
self.itercount += 1
# add parameters to DataFrame
self.parameters.loc[self.itercount,
self.ml.parameters.index] = params.copy()
# calculate new RMSE values
r_res = self._residuals(params)
self.rmse_res = np.r_[self.rmse_res, np.sqrt(np.sum(r_res ** 2))]
if self.ml.noisemodel is not None:
n_res = self._noise(params)
self.rmse_noise = np.r_[
self.rmse_noise, np.sqrt(np.sum(n_res ** 2))]
# recalculate EVP
self.evp = self._calc_evp(r_res.values, self.obs.values)
def _update_axes(self):
"""extend xlim if no. of iterations exceeds
current window.
"""
for iax in self.axes[1:]:
iax.set_xlim(right=self.viewlim)
self.fig.canvas.draw()
def _update_settings(self):
self.tmin = self.ml.settings["tmin"]
self.tmax = self.ml.settings["tmax"]
self.freq = self.ml.settings["freq"]
@staticmethod
def _calc_evp(res, obs):
""" calculate evp
"""
if obs.var() == 0.0:
evp = 1.
else:
evp = max(0.0, (1 - (res.var(ddof=0) / obs.var(ddof=0))))
return evp
def _noise(self, params):
"""get noise
Parameters
----------
params : np.array
array containing parameters
Returns
-------
noise: np.array
array containing noise
"""
noise = self.ml.noise(parameters=params, tmin=self.tmin,
tmax=self.tmax)
return noise
def _residuals(self, params):
"""calculate residuals
Parameters
----------
params : np.array
array containing parameters
Returns
-------
res: np.array
array containing residuals
"""
res = self.ml.residuals(parameters=params, tmin=self.tmin,
tmax=self.tmax)
return res
def _simulate(self):
"""simulate model with last entry in self.parameters
Returns
-------
sim: pd.Series
series containing model evaluation
"""
sim = self.ml.simulate(parameters=self.parameters.iloc[-1, :].values,
tmin=self.tmin, tmax=self.tmax,
freq=self.ml.settings["freq"])
return sim
def initialize_figure(self, figsize=(10, 8), dpi=100):
"""Initialize figure for plotting optimization progress.
Parameters
----------
figsize : tuple, optional
figure size, passed to plt.subplots(), by default (10, 8)
dpi : int, optional
dpi of the figure passed to plt.subplots(), by default 100
Returns
-------
fig: matplotlib.pyplot.Figure
handle to the figure
"""
# create plot
self.fig, self.axes = plt.subplots(3, 1, figsize=figsize, dpi=dpi)
self.ax0, self.ax1, self.ax2 = self.axes
# plot oseries
self.obs.plot(marker=".", ls="none", label="observations",
color="k", ms=4, x_compat=True, ax=self.ax0)
# plot simulation
sim = self._simulate()
self.simplot, = self.ax0.plot(sim.index, sim, label="model")
self.ax0.set_ylabel("oseries/model")
self.ax0.set_title(
"Iteration: {0} (EVP: {1:.2%})".format(self.itercount,
self.evp))
self.ax0.legend(loc="lower right")
# plot RMSE (residuals and/or noise)
legend_handles = []
self.r_rmse_plot_line, = self.ax1.plot(
range(self.itercount + 1), self.rmse_res, c="k", ls="solid",
label="Residuals")
self.r_rmse_plot_dot, = self.ax1.plot(
self.itercount, self.rmse_res[-1], c="k", marker="o", ls="none")
legend_handles.append(self.r_rmse_plot_line)
self.ax1.set_xlim(0, self.viewlim)
self.ax1.set_ylim(0, 1.05 * self.rmse_res[-1])
self.ax1.set_ylabel("RMSE")
if self.ml.noisemodel is not None:
self.n_rmse_plot_line, = self.ax1.plot(
range(self.itercount + 1), self.rmse_noise, c="C0", ls="solid",
label="Noise")
self.n_rmse_plot_dot, = self.ax1.plot(
self.itercount, self.rmse_noise[-1], c="C0", marker="o",
ls="none")
legend_handles.append(self.n_rmse_plot_line)
legend_labels = [i.get_label() for i in legend_handles]
self.ax1.legend(legend_handles, legend_labels, loc="upper right")
# plot parameters values on semilogy
plt.sca(self.ax2)
plt.yscale("log")
self.param_plot_handles = []
legend_handles = []
for pname, row in self.ml.parameters.iterrows():
pa, = self.ax2.plot(
range(self.itercount + 1), np.abs(row.initial), marker=".",
ls="none", label=pname)
pb, = self.ax2.plot(range(self.itercount + 1),
np.abs(row.initial), ls="solid",
c=pa.get_color())
self.param_plot_handles.append((pa, pb))
legend_handles.append(pa)
legend_labels = [i.get_label() for i in legend_handles]
self.ax2.legend(legend_handles, legend_labels, loc="lower right",
ncol=3)
self.ax2.set_xlim(0, self.viewlim)
self.ax2.set_ylim(1e-6, 1e5)
self.ax2.set_ylabel("Parameter values")
self.ax2.set_xlabel("Iteration")
# set grid for each plot
for iax in [self.ax0, self.ax1, self.ax2]:
iax.grid(b=True)
self.fig.tight_layout()
return self.fig
def update_figure(self, params):
"""Method to update figure while model is being solved. Pass this
method to ml.solve(), e.g.:
>>> track = TrackSolve(ml)
>>> fig = track.initialize_figure()
>>> ml.solve(callback=track.update_figure)
Parameters
----------
params : np.array
array containing parameters
"""
# update parameters
self._append_params(params)
# update settings from ml.settings
self._update_settings()
# check if figure should be updated
if self.itercount % self.update_iter != 0:
return
# update view limits if needed
if self.itercount >= self.viewlim:
self.viewlim += 50
self._update_axes()
# update simulation
sim = self._simulate()
self.simplot.set_data(sim.index, sim.values)
# update rmse residuals
self.r_rmse_plot_line.set_data(
range(self.itercount + 1), np.array(self.rmse_res))
self.r_rmse_plot_dot.set_data(
np.array([self.itercount]), np.array(self.rmse_res[-1]))
# update rmse noise
self.n_rmse_plot_line.set_data(
range(self.itercount + 1), np.array(self.rmse_noise))
self.n_rmse_plot_dot.set_data(
np.array([self.itercount]), np.array(self.rmse_noise[-1]))
# update parameter plots
for j, (p1, p2) in enumerate(self.param_plot_handles):
p1.set_data(np.array([self.itercount]),
np.abs(self.parameters.iloc[-1, j]))
p2.set_data(range(self.itercount + 1),
self.parameters.iloc[:, j].abs().values)
# update title
self.ax0.set_title(
"Iteration: {0} (EVP: {1:.2%})".format(self.itercount,
self.evp))
self.fig.canvas.draw()
def get_height_ratios(ylims):
height_ratios = []
for ylim in ylims:
hr = ylim[1] - ylim[0]
if np.isnan(hr):
hr = 0.0
height_ratios.append(hr)
return height_ratios
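# Minimal sketch (not part of the module API): how y-limit tuples translate
# into the gridspec height ratios used by the results/decomposition figures.
if __name__ == "__main__":
    example_ylims = [(0.0, 2.0), (-0.5, 0.5), (np.nan, np.nan)]
    # prints [2.0, 1.0, 0.0]; NaN ranges collapse to a zero-height row
    print(get_height_ratios(example_ylims))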
|
|
#!/usr/bin/python
from collections import defaultdict
from NodeGraphQt import QtWidgets, QtCore, QtGui
from NodeGraphQt.constants import (NODE_PROP_QLABEL,
NODE_PROP_QLINEEDIT,
NODE_PROP_QCOMBO,
NODE_PROP_QCHECKBOX,
NODE_PROP_QSPINBOX,
NODE_PROP_COLORPICKER,
NODE_PROP_SLIDER)
from NodeGraphQt.errors import NodePropertyError
class BaseProperty(QtWidgets.QWidget):
value_changed = QtCore.Signal(str, object)
def set_value(self, value):
raise NotImplementedError
def get_value(self):
raise NotImplementedError
class _ColorSolid(QtWidgets.QWidget):
def __init__(self, parent=None, color=None):
super(_ColorSolid, self).__init__(parent)
self.setMinimumSize(15, 15)
self.setMaximumSize(15, 15)
self.color = color or (0, 0, 0)
def paintEvent(self, event):
size = self.geometry()
rect = QtCore.QRect(1, 1, size.width() - 2, size.height() - 2)
painter = QtGui.QPainter(self)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtGui.QColor(*self._color))
painter.drawRoundedRect(rect, 4, 4)
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = color
hex = '#{0:02x}{1:02x}{2:02x}'.format(*self._color)
self.setToolTip('rgb: {}\nhex: {}'.format(self._color[0:3], hex))
self.update()
class PropColorPicker(BaseProperty):
def __init__(self, parent=None):
super(PropColorPicker, self).__init__(parent)
self._solid = _ColorSolid(self)
self._solid.setMaximumHeight(15)
self._label = QtWidgets.QLabel()
self._update_label()
button = QtWidgets.QPushButton('select color')
button.clicked.connect(self._on_select_color)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 8, 0)
layout.setSpacing(4)
layout.addWidget(self._solid, 0, QtCore.Qt.AlignCenter)
layout.addWidget(self._label, 0, QtCore.Qt.AlignCenter)
layout.addWidget(button, 1, QtCore.Qt.AlignLeft)
def _on_select_color(self):
color = QtWidgets.QColorDialog.getColor(QtGui.QColor(*self.get_value()))
if color.isValid():
self.set_value(color.getRgb())
def _update_label(self):
self._label.setStyleSheet(
'QLabel {{color: rgba({}, {}, {}, 255);}}'
.format(*self._solid.color))
self._label.setText(self.hex_color())
self._label.setAlignment(QtCore.Qt.AlignCenter)
self._label.setMinimumWidth(60)
def hex_color(self):
return '#{0:02x}{1:02x}{2:02x}'.format(*self._solid.color)
def get_value(self):
return self._solid.color
def set_value(self, value):
if value != self.get_value():
self._solid.color = value
self._update_label()
self.value_changed.emit(self.toolTip(), value)
class PropSlider(BaseProperty):
def __init__(self, parent=None):
super(PropSlider, self).__init__(parent)
self._block = False
self._slider = QtWidgets.QSlider()
self._spnbox = QtWidgets.QSpinBox()
self._init()
def _init(self):
self._slider.setOrientation(QtCore.Qt.Horizontal)
self._slider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self._slider.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Preferred)
self._spnbox.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
layout = QtWidgets.QHBoxLayout(self)
layout.addWidget(self._spnbox)
layout.addWidget(self._slider)
self._spnbox.valueChanged.connect(self._on_spnbox_changed)
self._slider.valueChanged.connect(self._on_slider_changed)
# store the original press event.
self._slider_press_event = self._slider.mousePressEvent
self._slider.mousePressEvent = self.sliderMousePressEvent
self._slider.mouseReleaseEvent = self.sliderMouseReleaseEvent
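# While the slider is being dragged, the _block flag suppresses the
# intermediate value_changed emissions from the spinbox sync; the final
# value is emitted once on mouse release (see sliderMouseReleaseEvent).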
def sliderMousePressEvent(self, event):
self._block = True
self._slider_press_event(event)
def sliderMouseReleaseEvent(self, event):
self.value_changed.emit(self.toolTip(), self.get_value())
self._block = False
def _on_slider_changed(self, value):
self._spnbox.setValue(value)
def _on_spnbox_changed(self, value):
if value != self._slider.value():
self._slider.setValue(value)
if not self._block:
self.value_changed.emit(self.toolTip(), self.get_value())
def get_value(self):
return self._spnbox.value()
def set_value(self, value):
if value != self.get_value():
self._block = True
self._spnbox.setValue(value)
self.value_changed.emit(self.toolTip(), value)
self._block = False
def set_min(self, value=0):
self._spnbox.setMinimum(value)
self._slider.setMinimum(value)
def set_max(self, value=0):
self._spnbox.setMaximum(value)
self._slider.setMaximum(value)
class PropLabel(QtWidgets.QLabel):
value_changed = QtCore.Signal(str, object)
def get_value(self):
return self.text()
def set_value(self, value):
if value != self.get_value():
self.setText(value)
self.value_changed.emit(self.toolTip(), value)
class PropLineEdit(QtWidgets.QLineEdit):
value_changed = QtCore.Signal(str, object)
def __init__(self, parent=None):
super(PropLineEdit, self).__init__(parent)
self.returnPressed.connect(self._on_return_pressed)
def _on_return_pressed(self):
self.value_changed.emit(self.toolTip(), self.get_value())
def get_value(self):
return self.text()
def set_value(self, value):
if value != self.get_value():
self.setText(value)
self.value_changed.emit(self.toolTip(), value)
class PropComboBox(QtWidgets.QComboBox):
value_changed = QtCore.Signal(str, object)
def __init__(self, parent=None):
super(PropComboBox, self).__init__(parent)
self.currentIndexChanged.connect(self._on_index_changed)
def _on_index_changed(self):
self.value_changed.emit(self.toolTip(), self.get_value())
def items(self):
return [self.itemText(i) for i in range(self.count())]
def set_items(self, items):
self.clear()
self.addItems(items)
def get_value(self):
return self.currentText()
def set_value(self, value):
if value != self.get_value():
idx = self.findText(value, QtCore.Qt.MatchExactly)
self.setCurrentIndex(idx)
if idx >= 0:
self.value_changed.emit(self.toolTip(), value)
class PropCheckBox(QtWidgets.QCheckBox):
value_changed = QtCore.Signal(str, object)
def __init__(self, parent=None):
super(PropCheckBox, self).__init__(parent)
self.clicked.connect(self._on_clicked)
def _on_clicked(self):
self.value_changed.emit(self.toolTip(), self.get_value())
def get_value(self):
return self.isChecked()
def set_value(self, value):
if value != self.get_value():
self.setChecked(value)
self.value_changed.emit(self.toolTip(), value)
class PropSpinBox(QtWidgets.QSpinBox):
value_changed = QtCore.Signal(str, object)
def __init__(self, parent=None):
super(PropSpinBox, self).__init__(parent)
self.setButtonSymbols(self.NoButtons)
self.valueChanged.connect(self._on_value_change)
def _on_value_change(self, value):
self.value_changed.emit(self.toolTip(), value)
def get_value(self):
return self.value()
def set_value(self, value):
if value != self.get_value():
self.setValue(value)
WIDGET_MAP = {
NODE_PROP_QLABEL: PropLabel,
NODE_PROP_QLINEEDIT: PropLineEdit,
NODE_PROP_QCOMBO: PropComboBox,
NODE_PROP_QCHECKBOX: PropCheckBox,
NODE_PROP_QSPINBOX: PropSpinBox,
NODE_PROP_COLORPICKER: PropColorPicker,
NODE_PROP_SLIDER: PropSlider,
}
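# WIDGET_MAP resolves a NODE_PROP_* widget-type constant to its editor class,
# e.g. WIDGET_MAP.get(NODE_PROP_QCOMBO) -> PropComboBox; NodePropWidget uses
# this lookup when populating the property tabs below.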
# main property widgets.
class PropWindow(QtWidgets.QWidget):
def __init__(self, parent=None):
super(PropWindow, self).__init__(parent)
self.__layout = QtWidgets.QGridLayout()
self.__layout.setColumnStretch(1, 1)
self.__layout.setSpacing(6)
layout = QtWidgets.QVBoxLayout(self)
layout.setAlignment(QtCore.Qt.AlignTop)
layout.addLayout(self.__layout)
def __repr__(self):
return '<PropWindow object at {}>'.format(hex(id(self)))
def add_widget(self, name, widget, value, label=None):
"""
Add a property widget to the window.
Args:
name (str): property name to be displayed.
widget (BaseProperty): property widget.
value (object): property value.
label (str): custom label to display.
"""
widget.setToolTip(name)
widget.set_value(value)
if label is None:
label = name
row = self.__layout.rowCount()
if row > 0:
row += 1
self.__layout.addWidget(QtWidgets.QLabel(label), row, 0,
QtCore.Qt.AlignCenter | QtCore.Qt.AlignRight)
self.__layout.addWidget(widget, row, 1)
def get_widget(self, name):
"""
Returns the property widget from the name.
Args:
name (str): property name.
Returns:
QtWidgets.QWidget: property widget.
"""
for row in range(self.__layout.rowCount()):
item = self.__layout.itemAtPosition(row, 1)
if item and name == item.widget().toolTip():
return item.widget()
class NodePropWidget(QtWidgets.QWidget):
"""
Node properties widget for displaying a Node object.
Args:
parent:
node (NodeGraphQt.Node): node.
"""
#: signal (node_id, prop_name, prop_value)
property_changed = QtCore.Signal(str, str, object)
property_closed = QtCore.Signal(str)
def __init__(self, parent=None, node=None):
super(NodePropWidget, self).__init__(parent)
self.__node_id = node.id
self.__tab_windows = {}
self.__tab = QtWidgets.QTabWidget()
close_btn = QtWidgets.QPushButton('X')
close_btn.setToolTip('close property')
close_btn.clicked.connect(self._on_close)
self.name_wgt = PropLineEdit()
self.name_wgt.setToolTip('name')
self.name_wgt.set_value(node.name())
self.name_wgt.value_changed.connect(self._on_property_changed)
self.type_wgt = QtWidgets.QLabel(node.type_)
self.type_wgt.setAlignment(QtCore.Qt.AlignRight)
self.type_wgt.setToolTip('type_')
font = self.type_wgt.font()
font.setPointSize(10)
self.type_wgt.setFont(font)
name_layout = QtWidgets.QHBoxLayout()
name_layout.setContentsMargins(0, 0, 0, 0)
name_layout.addWidget(QtWidgets.QLabel('name'))
name_layout.addWidget(self.name_wgt)
name_layout.addWidget(close_btn)
layout = QtWidgets.QVBoxLayout(self)
layout.setSpacing(4)
layout.addLayout(name_layout)
layout.addWidget(self.__tab)
layout.addWidget(self.type_wgt)
self._read_node(node)
def __repr__(self):
return '<NodePropWidget object at {}>'.format(hex(id(self)))
def _on_close(self):
"""
called by the close button.
"""
self.property_closed.emit(self.__node_id)
def _on_property_changed(self, name, value):
"""
slot function called when a property widget has changed.
Args:
name (str): property name.
value (object): new value.
"""
self.property_changed.emit(self.__node_id, name, value)
def _read_node(self, node):
"""
Populate widget from a node.
Args:
node (NodeGraphQt.Node): node class.
"""
model = node.model
graph_model = node.graph.model
common_props = graph_model.get_node_common_properties(node.type_)
# sort tabs and properties.
tab_mapping = defaultdict(list)
for prop_name, prop_val in model.custom_properties.items():
tab_name = model.get_tab_name(prop_name)
tab_mapping[tab_name].append((prop_name, prop_val))
# add tabs.
for tab in sorted(tab_mapping.keys()):
if tab != 'Node':
self.add_tab(tab)
# populate tab properties.
for tab in sorted(tab_mapping.keys()):
prop_window = self.__tab_windows[tab]
for prop_name, value in tab_mapping[tab]:
wid_type = model.get_widget_type(prop_name)
WidClass = WIDGET_MAP.get(wid_type)
widget = WidClass()
if prop_name in common_props.keys():
if 'items' in common_props[prop_name].keys():
widget.set_items(common_props[prop_name]['items'])
if 'range' in common_props[prop_name].keys():
prop_range = common_props[prop_name]['range']
widget.set_min(prop_range[0])
widget.set_max(prop_range[1])
prop_window.add_widget(prop_name, widget, value)
widget.value_changed.connect(self._on_property_changed)
# add "Node" tab properties.
self.add_tab('Node')
default_props = ['color', 'text_color', 'disabled', 'id']
prop_window = self.__tab_windows['Node']
for prop_name in default_props:
wid_type = model.get_widget_type(prop_name)
WidClass = WIDGET_MAP.get(wid_type)
widget = WidClass()
prop_window.add_widget(prop_name,
widget,
model.get_property(prop_name))
widget.value_changed.connect(self._on_property_changed)
self.type_wgt.setText(model.get_property('type_'))
def node_id(self):
"""
Returns the node id linked to the widget.
Returns:
str: node id
"""
return self.__node_id
def add_widget(self, name, widget, tab='Properties'):
"""
add new node property widget.
Args:
name (str): property name.
widget (BaseProperty): property widget.
tab (str): tab name.
"""
if tab not in self.__tab_windows.keys():
tab = 'Properties'
window = self.__tab_windows[tab]
window.add_widget(name, widget, widget.get_value())
widget.value_changed.connect(self._on_property_changed)
def add_tab(self, name):
"""
add a new tab.
Args:
name (str): tab name.
Returns:
PropWindow: tab child widget.
"""
if name in self.__tab_windows.keys():
raise AssertionError('Tab name {} already taken!'.format(name))
self.__tab_windows[name] = PropWindow(self)
self.__tab.addTab(self.__tab_windows[name], name)
return self.__tab_windows[name]
def get_widget(self, name):
"""
get property widget.
Args:
name (str): property name.
Returns:
QtWidgets.QWidget: property widget.
"""
if name == 'name':
return self.name_wgt
for tab_name, prop_win in self.__tab_windows.items():
widget = prop_win.get_widget(name)
if widget:
return widget
if __name__ == '__main__':
import sys
from NodeGraphQt import Node, NodeGraph
class TestNode(Node):
NODE_NAME = 'test node'
def __init__(self):
super(TestNode, self).__init__()
self.create_property('label_test', 'foo bar',
widget_type=NODE_PROP_QLABEL)
self.create_property('text_edit', 'hello',
widget_type=NODE_PROP_QLINEEDIT)
self.create_property('color_picker', (0, 0, 255),
widget_type=NODE_PROP_COLORPICKER)
self.create_property('integer', 10,
widget_type=NODE_PROP_QSPINBOX)
self.create_property('list', 'foo',
items=['foo', 'bar'],
widget_type=NODE_PROP_QCOMBO)
self.create_property('range', 50,
range=(45, 55),
widget_type=NODE_PROP_SLIDER)
def prop_changed(node_id, prop_name, prop_value):
print('-'*100)
print(node_id, prop_name, prop_value)
def prop_close(node_id):
print('='*100)
print(node_id)
app = QtWidgets.QApplication(sys.argv)
graph = NodeGraph()
graph.register_node(TestNode)
test_node = graph.create_node('nodeGraphQt.nodes.TestNode')
node_prop = NodePropWidget(node=test_node)
node_prop.property_changed.connect(prop_changed)
node_prop.property_closed.connect(prop_close)
node_prop.show()
app.exec_()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from oauthlib.common import Request
from oauthlib.oauth1 import SIGNATURE_RSA, SIGNATURE_PLAINTEXT
from oauthlib.oauth1 import SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_QUERY
from oauthlib.oauth1.rfc5849 import Client, bytes_type
from ...unittest import TestCase
class ClientRealmTests(TestCase):
def test_client_no_realm(self):
client = Client("client-key")
uri, header, body = client.sign("http://example-uri")
self.assertTrue(
header["Authorization"].startswith('OAuth oauth_nonce='))
def test_client_realm_sign_with_default_realm(self):
client = Client("client-key", realm="moo-realm")
self.assertEqual(client.realm, "moo-realm")
uri, header, body = client.sign("http://example-uri")
self.assertTrue(
header["Authorization"].startswith('OAuth realm="moo-realm",'))
def test_client_realm_sign_with_additional_realm(self):
client = Client("client-key", realm="moo-realm")
uri, header, body = client.sign("http://example-uri", realm="baa-realm")
self.assertTrue(
header["Authorization"].startswith('OAuth realm="baa-realm",'))
# make sure sign() does not override the default realm
self.assertEqual(client.realm, "moo-realm")
class ClientConstructorTests(TestCase):
def test_convert_to_unicode_resource_owner(self):
client = Client('client-key',
resource_owner_key=b'owner key')
self.assertFalse(isinstance(client.resource_owner_key, bytes_type))
self.assertEqual(client.resource_owner_key, 'owner key')
def test_give_explicit_timestamp(self):
client = Client('client-key', timestamp='1')
params = dict(client.get_oauth_params(Request('http://example.com')))
self.assertEqual(params['oauth_timestamp'], '1')
def test_give_explicit_nonce(self):
client = Client('client-key', nonce='1')
params = dict(client.get_oauth_params(Request('http://example.com')))
self.assertEqual(params['oauth_nonce'], '1')
def test_decoding(self):
client = Client('client_key', decoding='utf-8')
uri, headers, body = client.sign('http://a.b/path?query',
http_method='POST', body='a=b',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertIsInstance(uri, bytes_type)
self.assertIsInstance(body, bytes_type)
for k, v in headers.items():
self.assertIsInstance(k, bytes_type)
self.assertIsInstance(v, bytes_type)
def test_rsa(self):
client = Client('client_key', signature_method=SIGNATURE_RSA)
self.assertIsNone(client.rsa_key) # don't need an RSA key to instantiate
class SignatureMethodTest(TestCase):
def test_rsa_method(self):
private_key = (
"-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKBgQDk1/bxy"
"S8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG\nAlwXWfzXw"
"SMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVa"
"h\n5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjT"
"MO7IdrwIDAQAB\nAoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9"
"GxocdM1m30WyWRFMEz2nKJ8fR\np3vTD4w8yplTOhcoXdQZl0kRoaD"
"zrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC\nDY6xveQczE7qt7V"
"k7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i\nrf6"
"qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVaw"
"Ft3UMhe\n542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+"
"XO/2xKp/d/ty1OIeovx\nC60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZL"
"eMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT\nSuy30sKjLzqoGw1kR+wv7"
"C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA\nkmaMg2PNr"
"jUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzV"
"S\nJzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lT"
"LVduVgh4v5yLT\nGa6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPv"
"dMlxqXA==\n-----END RSA PRIVATE KEY-----"
)
client = Client('client_key', signature_method=SIGNATURE_RSA,
rsa_key=private_key, timestamp='1234567890', nonce='abc')
u, h, b = client.sign('http://example.com')
correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
'oauth_version="1.0", oauth_signature_method="RSA-SHA1", '
'oauth_consumer_key="client_key", '
'oauth_signature="ktvzkUhtrIawBcq21DRJrAyysTc3E1Zq5GdGu8EzH'
'OtbeaCmOBDLGHAcqlm92mj7xp5E1Z6i2vbExPimYAJL7FzkLnkRE5YEJR4'
'rNtIgAf1OZbYsIUmmBO%2BCLuStuu5Lg3tAluwC7XkkgoXCBaRKT1mUXzP'
'HJILzZ8iFOvS6w5E%3D"')
self.assertEqual(h['Authorization'], correct)
def test_plaintext_method(self):
client = Client('client_key',
signature_method=SIGNATURE_PLAINTEXT,
timestamp='1234567890',
nonce='abc',
client_secret='foo',
resource_owner_secret='bar')
u, h, b = client.sign('http://example.com')
correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
'oauth_version="1.0", oauth_signature_method="PLAINTEXT", '
'oauth_consumer_key="client_key", '
'oauth_signature="foo%26bar"')
self.assertEqual(h['Authorization'], correct)
def test_invalid_method(self):
client = Client('client_key', signature_method='invalid')
self.assertRaises(ValueError, client.sign, 'http://example.com')
def test_rsa_no_key(self):
client = Client('client_key', signature_method=SIGNATURE_RSA)
self.assertRaises(ValueError, client.sign, 'http://example.com')
def test_register_method(self):
Client.register_signature_method('PIZZA',
lambda base_string, client: 'PIZZA')
self.assertTrue('PIZZA' in Client.SIGNATURE_METHODS)
client = Client('client_key', signature_method='PIZZA',
timestamp='1234567890', nonce='abc')
u, h, b = client.sign('http://example.com')
self.assertEqual(h['Authorization'], (
'OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
'oauth_version="1.0", oauth_signature_method="PIZZA", '
'oauth_consumer_key="client_key", '
'oauth_signature="PIZZA"'
))
class SignatureTypeTest(TestCase):
def test_params_in_body(self):
client = Client('client_key', signature_type=SIGNATURE_TYPE_BODY,
timestamp='1378988215', nonce='14205877133089081931378988215')
_, h, b = client.sign('http://i.b/path', http_method='POST', body='a=b',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')
correct = ('a=b&oauth_nonce=14205877133089081931378988215&'
'oauth_timestamp=1378988215&'
'oauth_version=1.0&'
'oauth_signature_method=HMAC-SHA1&'
'oauth_consumer_key=client_key&'
'oauth_signature=2JAQomgbShqoscqKWBiYQZwWq94%3D')
self.assertEqual(b, correct)
def test_params_in_query(self):
client = Client('client_key', signature_type=SIGNATURE_TYPE_QUERY,
timestamp='1378988215', nonce='14205877133089081931378988215')
u, _, _ = client.sign('http://i.b/path', http_method='POST')
correct = ('http://i.b/path?oauth_nonce=14205877133089081931378988215&'
'oauth_timestamp=1378988215&'
'oauth_version=1.0&'
'oauth_signature_method=HMAC-SHA1&'
'oauth_consumer_key=client_key&'
'oauth_signature=08G5Snvw%2BgDAzBF%2BCmT5KqlrPKo%3D')
self.assertEqual(u, correct)
def test_invalid_signature_type(self):
client = Client('client_key', signature_type='invalid')
self.assertRaises(ValueError, client.sign, 'http://i.b/path')
class SigningTest(TestCase):
def test_case_insensitive_headers(self):
client = Client('client_key')
# Uppercase
_, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')
# Lowercase
_, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
headers={'content-type': 'application/x-www-form-urlencoded'})
self.assertEqual(h['content-type'], 'application/x-www-form-urlencoded')
# Capitalized
_, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
headers={'Content-type': 'application/x-www-form-urlencoded'})
self.assertEqual(h['Content-type'], 'application/x-www-form-urlencoded')
# Random
_, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
headers={'conTent-tYpe': 'application/x-www-form-urlencoded'})
self.assertEqual(h['conTent-tYpe'], 'application/x-www-form-urlencoded')
def test_sign_no_body(self):
client = Client('client_key', decoding='utf-8')
self.assertRaises(ValueError, client.sign, 'http://i.b/path',
http_method='POST', body=None,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
def test_sign_body(self):
client = Client('client_key')
_, h, b = client.sign('http://i.b/path', http_method='POST', body='',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')
def test_sign_get_with_body(self):
client = Client('client_key')
for method in ('GET', 'HEAD'):
self.assertRaises(ValueError, client.sign, 'http://a.b/path?query',
http_method=method, body='a=b',
headers={
'Content-Type': 'application/x-www-form-urlencoded'
})
def test_sign_unicode(self):
client = Client('client_key', nonce='abc', timestamp='abc')
_, h, b = client.sign('http://i.b/path', http_method='POST',
body='status=%E5%95%A6%E5%95%A6',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, 'status=%E5%95%A6%E5%95%A6')
self.assertIn('oauth_signature="yrtSqp88m%2Fc5UDaucI8BXK4oEtk%3D"', h['Authorization'])
_, h, b = client.sign('http://i.b/path', http_method='POST',
body='status=%C3%A6%C3%A5%C3%B8',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, 'status=%C3%A6%C3%A5%C3%B8')
self.assertIn('oauth_signature="oG5t3Eg%2FXO5FfQgUUlTtUeeZzvk%3D"', h['Authorization'])
|
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import uuid
import pika
import requests
import time
import urllib
from contextlib import contextmanager
from functools import wraps
from celery import Celery
from multiprocessing import Process
from cloudify.exceptions import NonRecoverableError
from cloudify.utils import setup_logger
from cloudify_rest_client import CloudifyClient
from cloudify_rest_client.executions import Execution
from os import path
from testenv.processes.manager_rest import MANAGER_REST_PORT
PROVIDER_CONTEXT = {
'cloudify': {
'workflows': {
'task_retries': 0,
'task_retry_interval': 0
}
}
}
PROVIDER_NAME = 'integration_tests'
celery = Celery(broker='amqp://',
backend='amqp://')
celery.conf.update(
CELERY_TASK_SERIALIZER="json"
)
logger = setup_logger('testenv.utils')
def task_exists(name, *args):
logger.info('task_exists invoked with : {0}'
.format(args))
if 'non_existent' in name:
logger.info('non_existent operation, raising NonRecoverableError')
raise NonRecoverableError('non_existent operation [{0}]'.format(name))
return True
def deploy_application(dsl_path,
timeout_seconds=30,
blueprint_id=None,
deployment_id=None,
wait_for_execution=True,
inputs=None):
"""
A blocking method which deploys an application from the provided dsl path.
"""
return deploy_and_execute_workflow(dsl_path=dsl_path,
workflow_name='install',
timeout_seconds=timeout_seconds,
blueprint_id=blueprint_id,
deployment_id=deployment_id,
wait_for_execution=wait_for_execution,
inputs=inputs)
def deploy(dsl_path, blueprint_id=None, deployment_id=None, inputs=None):
client = create_rest_client()
if not blueprint_id:
blueprint_id = str(uuid.uuid4())
blueprint = client.blueprints.upload(dsl_path, blueprint_id)
if deployment_id is None:
deployment_id = str(uuid.uuid4())
deployment = client.deployments.create(
blueprint.id,
deployment_id,
inputs=inputs)
wait_for_deployment_creation_to_complete(
deployment_id=deployment_id)
return deployment
def wait_for_deployment_creation_to_complete(
deployment_id, timeout_seconds=30):
do_retries(func=verify_deployment_environment_creation_complete,
timeout_seconds=timeout_seconds,
deployment_id=deployment_id)
def deploy_and_execute_workflow(dsl_path,
workflow_name,
timeout_seconds=240,
blueprint_id=None,
deployment_id=None,
wait_for_execution=True,
parameters=None,
inputs=None):
"""
A blocking method which deploys an application from the provided dsl
path and runs the requested workflow.
"""
deployment = deploy(dsl_path, blueprint_id, deployment_id, inputs)
execution = execute_workflow(workflow_name, deployment.id, parameters,
timeout_seconds, wait_for_execution)
return deployment, execution.id
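# Usage sketch (hypothetical blueprint resource path):
#
#     deployment, execution_id = deploy_and_execute_workflow(
#         get_resource('dsl/basic.yaml'), workflow_name='install')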
def execute_workflow(workflow_name, deployment_id,
parameters=None,
timeout_seconds=240,
wait_for_execution=True):
"""
A blocking method which runs the requested workflow
"""
client = create_rest_client()
execution = client.executions.start(deployment_id, workflow_name,
parameters=parameters or {})
if wait_for_execution:
wait_for_execution_to_end(execution,
timeout_seconds=timeout_seconds)
return execution
def verify_deployment_environment_creation_complete(deployment_id):
# a workaround for waiting for the deployment environment creation to
# complete
client = create_rest_client()
execs = client.executions.list(deployment_id)
if not execs \
or execs[0].status != Execution.TERMINATED \
or execs[0].workflow_id != 'create_deployment_environment':
from testenv import TestEnvironment # avoid cyclic import
logs = TestEnvironment.read_celery_management_logs() or ''
logs = logs[len(logs) - 100000:]
raise RuntimeError(
"Expected a single execution for workflow "
"'create_deployment_environment' with status 'terminated'; "
"Found these executions instead: {0}.\nCelery log:\n{1}".format(
json.dumps(execs, indent=2), logs))
def undeploy_application(deployment_id,
timeout_seconds=240,
delete_deployment=False):
"""
A blocking method which undeploys an application from the provided dsl
path.
"""
client = create_rest_client()
execution = client.executions.start(deployment_id,
'uninstall')
wait_for_execution_to_end(execution, timeout_seconds=timeout_seconds)
if execution.error and execution.error != 'None':
raise RuntimeError(
'Workflow execution failed: {0}'.format(execution.error))
if delete_deployment:
time.sleep(5) # elasticsearch...
client.deployments.delete(deployment_id)
def is_node_started(node_id):
client = create_rest_client()
node_instance = client.node_instances.get(node_id)
return node_instance['state'] == 'started'
def create_rest_client():
return CloudifyClient('localhost', port=MANAGER_REST_PORT)
def get_resource(resource):
"""
Gets the path for the provided resource.
:param resource: resource name relative to /resources.
"""
import resources
resources_path = path.dirname(resources.__file__)
resource_path = path.join(resources_path, resource)
if not path.exists(resource_path):
raise RuntimeError("Resource '{0}' not found in: {1}".format(
resource, resource_path))
return resource_path
def wait_for_execution_to_end(execution, timeout_seconds=240):
client = create_rest_client()
deadline = time.time() + timeout_seconds
while execution.status not in Execution.END_STATES:
time.sleep(0.5)
execution = client.executions.get(execution.id)
if time.time() > deadline:
raise TimeoutException('Execution timed out: \n{0}'
.format(json.dumps(execution, indent=2)))
if execution.status == Execution.FAILED:
raise RuntimeError(
'Workflow execution failed: {0} [{1}]'.format(execution.error,
execution.status))
return execution
def do_retries(func,
timeout_seconds=10,
exception_class=BaseException,
**kwargs):
deadline = time.time() + timeout_seconds
while True:
try:
func(**kwargs)
break
except exception_class:
if time.time() > deadline:
raise
time.sleep(0.5)
def do_retries_boolean(func, timeout_seconds=10, **kwargs):
deadline = time.time() + timeout_seconds
while True:
return_value = func(**kwargs)
if return_value:
break
else:
if time.time() > deadline:
raise RuntimeError(
'function {0} did not return True in {1} seconds'
.format(func.__name__, timeout_seconds)
)
time.sleep(1)
def timeout(seconds=60):
def decorator(func):
def wrapper(*args, **kwargs):
process = Process(None, func, None, args, kwargs)
process.start()
process.join(seconds)
if process.is_alive():
process.terminate()
raise TimeoutException(
'test timeout exceeded [timeout={0}]'.format(seconds))
return wraps(func)(wrapper)
return decorator
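# Usage sketch (hypothetical test method): the decorated function runs in a
# separate process and a TimeoutException is raised if it exceeds the limit:
#
#     @timeout(seconds=10)
#     def test_install_workflow(self):
#         ...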
def send_task(task, queue, args=None):
task_name = task.name.replace('mock_plugins.', '')
return celery.send_task(
name=task_name,
args=args,
queue=queue)
def publish_event(queue,
routing_key,
event,
exchange_name='cloudify-monitoring',
exchange_type='topic'):
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange=exchange_name,
type=exchange_type,
durable=False,
auto_delete=True,
internal=False)
channel.queue_declare(
queue=queue,
auto_delete=True,
durable=False,
exclusive=False)
channel.queue_bind(exchange=exchange_name,
queue=queue,
routing_key=routing_key)
channel.basic_publish(exchange=exchange_name,
routing_key=routing_key,
body=json.dumps(event))
channel.close()
connection.close()
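# Usage sketch (hypothetical queue, routing key and event payload; assumes a
# RabbitMQ broker on localhost):
#
#     publish_event(queue='test-monitoring-queue',
#                   routing_key='events.node',
#                   event={'node_id': 'vm_1', 'state': 'started'})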
def delete_provider_context():
requests.delete('http://localhost:9200'
'/cloudify_storage/provider_context/CONTEXT')
def restore_provider_context():
delete_provider_context()
client = create_rest_client()
client.manager.create_context(PROVIDER_NAME, PROVIDER_CONTEXT)
def wait_for_url(url, timeout=15):
end = time.time() + timeout
while end >= time.time():
try:
status = urllib.urlopen(url).getcode()
if status == 200:
return
except IOError:
time.sleep(1)
raise RuntimeError('Url {0} is not available (waited {1} '
'seconds)'.format(url, timeout))
def timestamp():
now = time.strftime("%c")
return now.replace(' ', '-')
@contextmanager
def update_storage(ctx):
"""
A context manager for updating plugin state.
:param ctx: task invocation context
"""
deployment_id = ctx.deployment.id
plugin_name = ctx.plugin
if plugin_name is None:
# hack for tasks that are executed locally.
# TODO - Aren't these tasks also a part of a plugin?
# TODO - the ctx in this case should include the plugin name
# TODO - as if it was a remote task.
if ctx.task_name.startswith('worker_installer'):
plugin_name = 'agent_installer'
if ctx.task_name.startswith('plugin_installer'):
plugin_name = 'plugin_installer'
storage_file_path = os.path.join(
os.environ['TEST_WORKING_DIR'],
'plugins-storage',
'{0}.json'.format(plugin_name)
)
# create the storage file if it doesn't exist yet
if not os.path.exists(storage_file_path):
with open(storage_file_path, 'w') as f:
json.dump({}, f)
with open(storage_file_path, 'r') as f:
data = json.load(f)
if deployment_id not in data:
data[deployment_id] = {}
yield data.get(deployment_id)
with open(storage_file_path, 'w') as f:
json.dump(data, f, indent=2)
f.write(os.linesep)
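# Usage sketch inside a mock plugin task (hypothetical `ctx` supplied by the
# task invocation):
#
#     with update_storage(ctx) as data:
#         data['invocations'] = data.get('invocations', 0) + 1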
class TimeoutException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
def __str__(self):
return self.message
|
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The module describes which operations can be done with strings in YAQL.
"""
import string as string_module
from yaql.language import specs
from yaql.language import utils
from yaql.language import yaqltypes
@specs.parameter('left', yaqltypes.String())
@specs.parameter('right', yaqltypes.String())
@specs.name('#operator_>')
def gt(left, right):
""":yaql:operator >
Returns true if the left operand is strictly greater than the right,
ordering lexicographically, otherwise false.
:signature: left > right
:arg left: left operand
:argType left: string
:arg right: right operand
:argType right: string
:returnType: boolean
.. code::
yaql> "abc" > "ab"
true
yaql> "abc" > "abb"
true
yaql> "abc" > "abc"
false
"""
return left > right
@specs.parameter('left', yaqltypes.String())
@specs.parameter('right', yaqltypes.String())
@specs.name('#operator_<')
def lt(left, right):
""":yaql:operator <
Returns true if the left operand is strictly less than the right, ordering
lexicographically, otherwise false.
:signature: left < right
:arg left: left operand
:argType left: string
:arg right: right operand
:argType right: string
:returnType: boolean
.. code::
yaql> "ab" < "abc"
true
yaql> "abb" < "abc"
true
yaql> "abc" < "abc"
false
"""
return left < right
@specs.parameter('left', yaqltypes.String())
@specs.parameter('right', yaqltypes.String())
@specs.name('#operator_>=')
def gte(left, right):
""":yaql:operator >=
Returns true if the left operand is greater than or equal to the right,
ordering lexicographically, otherwise false.
:signature: left >= right
:arg left: left operand
:argType left: string
:arg right: right operand
:argType right: string
:returnType: boolean
.. code::
yaql> "abc" >= "ab"
true
yaql> "abc" >= "abc"
true
"""
return left >= right
@specs.parameter('left', yaqltypes.String())
@specs.parameter('right', yaqltypes.String())
@specs.name('#operator_<=')
def lte(left, right):
""":yaql:operator <=
Returns true if the left operand is less than or equal to the right,
ordering lexicographically, otherwise false.
:signature: left <= right
:arg left: left operand
:argType left: string
:arg right: right operand
:argType right: string
:returnType: boolean
.. code::
yaql> "ab" <= "abc"
true
yaql> "abc" <= "abc"
true
"""
return left <= right
@specs.parameter('args', yaqltypes.String())
def concat(*args):
""":yaql:concat
Returns concatenated args.
:signature: concat([args])
:arg [args]: values to be joined
:argType [args]: string
:returnType: string
.. code::
yaql> concat("abc", "de", "f")
"abcdef"
"""
return ''.join(args)
@specs.parameter('string', yaqltypes.String())
@specs.method
def to_upper(string):
""":yaql:toUpper
Returns a string with all case-based characters uppercase.
:signature: string.toUpper()
:receiverArg string: value to uppercase
:argType string: string
:returnType: string
.. code::
yaql> "aB1c".toUpper()
"AB1C"
"""
return string.upper()
@specs.parameter('string', yaqltypes.String())
@specs.extension_method
def len_(string):
""":yaql:len
Returns size of the string.
:signature: string.len()
:receiverArg string: input string
:argType string: string
:returnType: integer
.. code::
yaql> "abc".len()
3
"""
return len(string)
@specs.parameter('string', yaqltypes.String())
@specs.method
def to_lower(string):
""":yaql:toLower
Returns a string with all case-based characters lowercase.
:signature: string.toLower()
:receiverArg string: value to lowercase
:argType string: string
:returnType: string
.. code::
yaql> "AB1c".toLower()
"ab1c"
"""
return string.lower()
@specs.parameter('string', yaqltypes.String())
@specs.parameter('separator', yaqltypes.String(nullable=True))
@specs.parameter('max_splits', int)
@specs.method
def split(string, separator=None, max_splits=-1):
""":yaql:split
Returns a list of tokens in the string, using separator as the
delimiter.
:signature: string.split(separator => null, maxSplits => -1)
:receiverArg string: value to be split
:argType string: string
:arg separator: delimiter for splitting. null by default, which means
splitting with whitespace characters
:argType separator: string
:arg maxSplits: maximum number of splits. -1 by default, which means
all possible splits are done
:argType maxSplits: integer
:returnType: list
.. code::
yaql> "abc de f".split()
["abc", "de", "f"]
yaql> "abc de f".split(maxSplits => 1)
["abc", "de f"]
yaql> "abcde".split("c")
["ab", "de"]
"""
return string.split(separator, max_splits)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('separator', yaqltypes.String(nullable=True))
@specs.parameter('max_splits', int)
@specs.method
def right_split(string, separator=None, max_splits=-1):
""":yaql:rightSplit
Returns a list of tokens in the string, using separator as the
delimiter. If maxSplits is given then at most maxSplits splits are done -
the rightmost ones.
:signature: string.rightSplit(separator => null, maxSplits => -1)
:receiverArg string: value to be split
:argType string: string
:arg separator: delimiter for splitting. null by default, which means
splitting with whitespace characters
:argType separator: string
:arg maxSplits: maximum number of splits to perform, counted from the right.
-1 by default, which means all possible splits are done
:argType maxSplits: integer
:returnType: list
.. code::
yaql> "abc de f".rightSplit()
["abc", "de", "f"]
yaql> "abc de f".rightSplit(maxSplits => 1)
["abc de", "f"]
"""
return string.rsplit(separator, max_splits)
@specs.parameter('sequence', yaqltypes.Iterable())
@specs.parameter('separator', yaqltypes.String())
@specs.inject('str_delegate', yaqltypes.Delegate('str'))
@specs.method
def join(sequence, separator, str_delegate):
""":yaql:join
Returns a string with sequence elements joined by the separator.
:signature: sequence.join(separator)
:receiverArg sequence: chain of values to be joined
:argType sequence: sequence of strings
:arg separator: value to be placed between joined pairs
:argType separator: string
:returnType: string
.. code::
yaql> ["abc", "de", "f"].join("")
"abcdef"
yaql> ["abc", "de", "f"].join("|")
"abc|de|f"
"""
return separator.join(map(str_delegate, sequence))
@specs.parameter('sequence', yaqltypes.Iterable())
@specs.parameter('separator', yaqltypes.String())
@specs.inject('str_delegate', yaqltypes.Delegate('str'))
@specs.method
def join_(separator, sequence, str_delegate):
""":yaql:join
Returns a string with sequence elements joined by the separator.
:signature: separator.join(sequence)
:receiverArg separator: value to be placed between joined pairs
:argType separator: string
:arg sequence: chain of values to be joined
:argType sequence: sequence of strings
:returnType: string
.. code::
yaql> "|".join(["abc", "de", "f"])
"abc|de|f"
"""
return join(sequence, separator, str_delegate)
@specs.parameter('value', nullable=True)
def str_(value):
""":yaql:str
Returns a string representation of the value.
:signature: str(value)
:arg value: value to be evaluated to string
:argType value: any
:returnType: string
.. code::
yaql> str(["abc", "de"])
"(u'abc', u'd')"
yaql> str(123)
"123"
"""
if value is None:
return 'null'
elif value is True:
return 'true'
elif value is False:
return 'false'
else:
return str(value)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('chars', yaqltypes.String(nullable=True))
@specs.method
def trim(string, chars=None):
""":yaql:trim
Returns a string with the leading and trailing chars removed.
:signature: string.trim(chars => null)
:receiverArg string: value to be trimmed
:argType string: string
:arg chars: symbols to be removed from input string. null by default,
which means trim is done with whitespace characters
:argType chars: string
:returnType: string
.. code::
yaql> " abcd ".trim()
"abcd"
yaql> "aababa".trim("a")
"bab"
"""
return string.strip(chars)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('chars', yaqltypes.String(nullable=True))
@specs.method
def trim_left(string, chars=None):
""":yaql:trimLeft
Returns a string with the leading chars removed.
:signature: string.trimLeft(chars => null)
:receiverArg string: value to be trimmed
:argType string: string
:arg chars: symbols to be removed from start of input string. null by
default, which means trim is done with whitespace characters
:argType chars: string
:returnType: string
.. code::
yaql> " abcd ".trimLeft()
"abcd "
yaql> "aababa".trimLeft("a")
"baba"
"""
return string.lstrip(chars)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('chars', yaqltypes.String(nullable=True))
@specs.method
def trim_right(string, chars=None):
""":yaql:trimRight
Returns a string with the trailing chars removed.
:signature: string.trimRight(chars => null)
:receiverArg string: value to be trimmed
:argType string: string
:arg chars: symbols to be removed from end of input string. null by
default, which means trim is done with whitespace characters
:argType chars: string
:returnType: string
.. code::
yaql> " abcd ".trimRight()
" abcd"
yaql> "aababa".trimRight("a")
"aabab"
"""
return string.rstrip(chars)
@specs.parameter('string', yaqltypes.String(nullable=True))
@specs.parameter('chars', yaqltypes.String(nullable=True))
@specs.extension_method
def norm(string, chars=None):
""":yaql:norm
Returns a string with the leading and trailing chars removed.
If the resulting string is empty, returns null.
:signature: string.norm(chars => null)
:receiverArg string: value to be cut with specified chars
:argType string: string
:arg chars: symbols to be removed from the start and the end of input
string. null by default, which means norm is done with whitespace
characters
:argType chars: string
:returnType: string
.. code::
yaql> " abcd ".norm()
"abcd"
yaql> "aaaa".norm("a")
null
"""
if string is None:
return None
value = string.strip(chars)
return None if not value else value
@specs.parameter('string', yaqltypes.String(nullable=True))
@specs.parameter('trim_spaces', bool, alias='trim')
@specs.parameter('chars', yaqltypes.String(nullable=True))
@specs.extension_method
def is_empty(string, trim_spaces=True, chars=None):
""":yaql:isEmpty
Returns true if the string with removed leading and trailing chars is
empty.
:signature: string.isEmpty(trimSpaces => true, chars => null)
:receiverArg string: value to be checked for emptiness after trim
:argType string: string
:arg trimSpaces: true by default, which means the string is trimmed with
chars before the check. false means the input string is checked as is
:argType trimSpaces: boolean
:arg chars: symbols for trimming. null by default, which means trim is
done with whitespace characters
:argType chars: string
:returnType: boolean
.. code::
yaql> "abaab".isEmpty(chars=>"ab")
true
yaql> "aba".isEmpty(chars=>"a")
false
"""
if string is None:
return True
if trim_spaces:
string = string.strip(chars)
return not string
@specs.parameter('string', yaqltypes.String())
@specs.parameter('old', yaqltypes.String())
@specs.parameter('new', yaqltypes.String())
@specs.parameter('count', int)
@specs.method
def replace(string, old, new, count=-1):
""":yaql:replace
Returns a string with the first count occurrences of old replaced with new.
:signature: string.replace(old, new, count => -1)
:receiverArg string: input string
:argType string: string
:arg old: value to be replaced
:argType old: string
:arg new: replacement for old value
:argType new: string
:arg count: maximum number of replacements to make. -1 by default, which
means all occurrences are replaced
:argType count: integer
:returnType: string
.. code::
yaql> "abaab".replace("ab", "cd")
"cdacd"
"""
return string.replace(old, new, count)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('replacements', utils.MappingType)
@specs.parameter('count', int)
@specs.inject('str_func', yaqltypes.Delegate('str'))
@specs.method
@specs.name('replace')
def replace_with_dict(string, str_func, replacements, count=-1):
""":yaql:replace
Returns a string with all occurrences of replacements' keys replaced
with corresponding replacements' values.
If count is specified, only the first count occurrences of every key
are replaced.
:signature: string.replace(replacements, count => -1)
:receiverArg string: input string
:argType string: string
:arg replacements: dict of replacements in format {old => new ...}
:argType replacements: mapping
:arg count: how many of the first occurrences of every key are replaced.
-1 by default, which means all occurrences are replaced
:argType count: integer
:returnType: string
.. code::
yaql> "abc ab abc".replace({abc => xx, ab => yy})
"xx yy xx"
yaql> "abc ab abc".replace({ab => yy, abc => xx})
"yyc yy yyc"
yaql> "abc ab abc".replace({ab => yy, abc => xx}, 1)
"yyc ab xx"
"""
for key, value in replacements.items():
string = string.replace(str_func(key), str_func(value), count)
return string
@specs.parameter('__format_string', yaqltypes.String())
@specs.extension_method
def format_(__format_string, *args, **kwargs):
""":yaql:format
Returns a string formatted with positional and keyword arguments.
:signature: string.format([args], {kwargs})
:receiverArg string: input string for formatting. Can be passed only as
first positional argument if used as a function. Can contain literal
text or replacement fields marked by braces {}. Every replacement field
should contain either the numeric index of a positional argument or the
name of a keyword argument
:argType string: string
:arg [args]: values for replacements for numeric markers
:argType [args]: chain of strings
:arg {kwargs}: values for keyword replacements
:argType {kwargs}: chain of key-value arguments, where values are strings
:returnValue: string
.. code::
yaql> "abc{0}ab{1}abc".format(" ", ",")
"abc ab,abc"
yaql> "abc{foo}ab{bar}abc".format(foo => " ", bar => ",")
"abc ab,abc"
yaql> format("abc{0}ab{foo}abc", ' ', foo => ",")
"abc ab,abc"
"""
return __format_string.format(*args, **kwargs)
@specs.parameter('left', yaqltypes.String())
@specs.parameter('right', int)
@specs.name('#operator_*')
def string_by_int(left, right, engine):
""":yaql:operator *
Returns string repeated count times.
:signature: left * right
:arg left: left operand
:argType left: string
:arg right: right operand, how many times to repeat the input string
:argType right: integer
:returnType: string
.. code::
yaql> "ab" * 2
"abab"
"""
utils.limit_memory_usage(engine, (-right + 1, u''), (right, left))
return left * right
@specs.parameter('left', yaqltypes.String())
@specs.parameter('right', yaqltypes.String())
@specs.name('#operator_in')
def in_(left, right):
""":yaql:operator in
Returns true if there is at least one occurrence of left string in right.
:signature: left in right
:arg left: left operand, which occurrence is checked
:argType left: string
:arg right: right operand
:argType right: string
:returnType: boolean
.. code::
yaql> "ab" in "abc"
true
yaql> "ab" in "acb"
false
"""
return left in right
@specs.parameter('left', int)
@specs.parameter('right', yaqltypes.String())
@specs.name('#operator_*')
def int_by_string(left, right, engine):
""":yaql:operator *
Returns string repeated count times.
:signature: left * right
:arg left: left operand, how many times to repeat the input string
:argType left: integer
:arg right: right operand
:argType right: string
:returnType: string
.. code::
yaql> 2 * "ab"
"abab"
"""
return string_by_int(right, left, engine)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('start', int)
@specs.parameter('length', int)
@specs.method
def substring(string, start, length=-1):
""":yaql:substring
Returns a substring beginning at index start and ending at index start+length.
:signature: string.substring(start, length => -1)
:receiverArg string: input string
:argType string: string
:arg start: index for substring to start with
:argType start: integer
:arg length: length of the substring. -1 by default, which means the
substring extends to the end of the input string
:argType length: integer
:returnType: string
.. code::
yaql> "abcd".substring(1)
"bcd"
yaql> "abcd".substring(1, 2)
"bc"
"""
if length < 0:
length = len(string)
if start < 0:
start += len(string)
return string[start:start + length]
@specs.parameter('string', yaqltypes.String())
@specs.parameter('sub', yaqltypes.String())
@specs.parameter('start', int)
@specs.method
def index_of(string, sub, start=0):
""":yaql:indexOf
Returns the index of the first occurrence of sub in string, starting
the search at start. Returns -1 if there is no occurrence.
:signature: string.indexOf(sub, start => 0)
:receiverArg string: input string
:argType string: string
:arg sub: substring to find in string
:argType sub: string
:arg start: index to start search with, 0 by default
:argType start: integer
:returnType: integer
.. code::
yaql> "cabcdab".indexOf("ab")
1
yaql> "cabcdab".indexOf("ab", 2)
5
yaql> "cabcdab".indexOf("ab", 6)
-1
"""
return string.find(sub, start)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('sub', yaqltypes.String())
@specs.parameter('start', int)
@specs.parameter('length', int)
@specs.method
def index_of_(string, sub, start, length):
""":yaql:indexOf
Returns the index of the first occurrence of sub in string, searching
from start to start+length.
Returns -1 if there is no occurrence.
:signature: string.indexOf(sub, start, length)
:receiverArg string: input string
:argType string: string
:arg sub: substring to find in string
:argType sub: string
:arg start: index to start search with, 0 by default
:argType start: integer
:arg length: length of string to find substring in
:argType length: integer
:returnType: integer
.. code::
yaql> "cabcdab".indexOf("bc", 2, 2)
2
"""
if start < 0:
start += len(string)
if length < 0:
length = len(string) - start
return string.find(sub, start, start + length)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('sub', yaqltypes.String())
@specs.parameter('start', int)
@specs.method
def last_index_of(string, sub, start=0):
""":yaql:lastIndexOf
Returns the index of the last occurrence of sub in string, starting
the search at start. Returns -1 if there is no occurrence.
:signature: string.lastIndexOf(sub, start => 0)
:receiverArg string: input string
:argType string: string
:arg sub: substring to find in string
:argType sub: string
:arg start: index to start search with, 0 by default
:argType start: integer
:returnType: integer
.. code::
yaql> "cabcdab".lastIndexOf("ab")
5
"""
return string.rfind(sub, start)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('sub', yaqltypes.String())
@specs.parameter('start', int)
@specs.parameter('length', int)
@specs.method
def last_index_of_(string, sub, start, length):
""":yaql:lastIndexOf
Returns the index of the last occurrence of sub in string, searching
from start to start+length.
Returns -1 if there is no occurrence.
:signature: string.lastIndexOf(sub, start, length)
:receiverArg string: input string
:argType string: string
:arg sub: substring to find in string
:argType sub: string
:arg start: index to start search with, 0 by default
:argType start: integer
:arg length: length of string to find substring in
:argType length: integer
:returnType: integer
.. code::
yaql> "cabcdbc".lastIndexOf("bc", 2, 5)
5
"""
if start < 0:
start += len(string)
if length < 0:
length = len(string) - start
return string.rfind(sub, start, start + length)
@specs.parameter('string', yaqltypes.String())
@specs.method
def to_char_array(string):
""":yaql:toCharArray
Converts a string to an array of one-character strings.
:signature: string.toCharArray()
:receiverArg string: input string
:argType string: string
:returnType: list
.. code::
yaql> "abc de".toCharArray()
["a", "b", "c", " ", "d", "e"]
"""
return tuple(string)
def characters(
digits=False, hexdigits=False,
ascii_lowercase=False, ascii_uppercase=False,
ascii_letters=False, letters=False,
octdigits=False, punctuation=False, printable=False,
lowercase=False, uppercase=False, whitespace=False):
""":yaql:characters
Returns a list of all distinct characters of the specified types.
:signature: characters(digits => false, hexdigits => false,
asciiLowercase => false, asciiUppercase => false,
asciiLetters => false, letters => false,
octdigits => false, punctuation => false,
printable => false, lowercase => false,
uppercase => false, whitespace => false)
:arg digits: include digits in output list if true, false by default
:argType digits: boolean
:arg hexdigits: include hexadecimal digits in output list if true, false
by default
:argType hexdigits: boolean
:arg asciiLowercase: include ASCII lowercase letters in output list if
true, false by default
:argType asciiLowercase: boolean
:arg asciiUppercase: include ASCII uppercase letters in output list if
true, false by default
:argType asciiUppercase: boolean
:arg asciiLetters: include both ASCII lowercase and uppercase letters
in output list if true, false by default
:argType asciiLetters: boolean
:arg letters: include both lowercase and uppercase letters in output list
if true, false by default
:argType letters: boolean
:arg octdigits: include digits from 0 to 7 in output list if true, false
by default
:argType octdigits: boolean
:arg punctuation: include ASCII characters, which are considered
punctuation, in output list if true, false by default
:argType punctuation: boolean
:arg printable: include digits, letters, punctuation, and whitespace in
output list if true, false by default
:argType printable: boolean
:arg lowercase: include lowercase letters in output list if true, false
by default
:argType lowercase: boolean
:arg uppercase: include uppercase letters in output list if true, false
by default
:argType uppercase: boolean
:arg whitespace: include all characters that are considered whitespace
in output list if true, false by default
:argType whitespace: boolean
:returnType: list
.. code::
yaql> characters(digits => true)
["1", "0", "3", "2", "5", "4", "7", "6", "9", "8"]
"""
string = ''
if digits:
string += string_module.digits
if hexdigits:
string += string_module.hexdigits
if ascii_lowercase:
string += string_module.ascii_lowercase
if ascii_uppercase:
string += string_module.ascii_uppercase
if ascii_letters:
string += string_module.ascii_letters
if letters:
string += string_module.letters
if octdigits:
string += string_module.octdigits
if punctuation:
string += string_module.punctuation
if printable:
string += string_module.printable
if lowercase:
string += string_module.lowercase
if uppercase:
string += string_module.uppercase
if whitespace:
string += string_module.whitespace
return tuple(set(string))
def is_string(arg):
""":yaql:isString
Returns true if arg is a string.
:signature: isString(arg)
:arg arg: input value
:argType arg: any
:returnType: boolean
.. code::
yaql> isString("ab")
true
yaql> isString(1)
false
"""
return isinstance(arg, str)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('prefixes', yaqltypes.String())
@specs.method
def starts_with(string, *prefixes):
""":yaql:startsWith
Returns true if a string starts with any of the given args.
:signature: string.startsWith([args])
:receiverArg string: input string
:argType string: string
:arg [args]: chain of strings to check input string with
:argType [args]: strings
:returnType: boolean
.. code::
yaql> "abcd".startsWith("ab", "xx")
true
yaql> "abcd".startsWith("yy", "xx", "zz")
false
"""
return string.startswith(prefixes)
@specs.parameter('string', yaqltypes.String())
@specs.parameter('suffixes', yaqltypes.String())
@specs.method
def ends_with(string, *suffixes):
""":yaql:endsWith
Returns true if a string ends with any of the given args.
:signature: string.endsWith([args])
:receiverArg string: input string
:argType string: string
:arg [args]: chain of strings to check input string with
:argType [args]: strings
:returnType: boolean
.. code::
yaql> "abcd".endsWith("cd", "xx")
true
yaql> "abcd".endsWith("yy", "xx", "zz")
false
"""
return string.endswith(suffixes)
@specs.parameter('num', yaqltypes.Number(nullable=True))
def hex_(num):
""":yaql:hex
Returns a string with hexadecimal representation of num.
:signature: hex(num)
:arg num: input number to be converted to hexadecimal
:argType num: number
:returnType: string
.. code::
yaql> hex(256)
"0x100"
"""
return hex(num)
def register(context):
context.register_function(gt)
context.register_function(lt)
context.register_function(gte)
context.register_function(lte)
context.register_function(len_)
context.register_function(to_lower)
context.register_function(to_upper)
context.register_function(split)
context.register_function(right_split)
context.register_function(join)
context.register_function(join_)
context.register_function(str_)
context.register_function(concat)
context.register_function(concat, name='#operator_+')
context.register_function(trim)
context.register_function(trim_left)
context.register_function(trim_right)
context.register_function(replace)
context.register_function(replace_with_dict)
context.register_function(format_)
context.register_function(is_empty)
context.register_function(string_by_int)
context.register_function(int_by_string)
context.register_function(substring)
context.register_function(index_of)
context.register_function(index_of_)
context.register_function(last_index_of)
context.register_function(last_index_of_)
context.register_function(to_char_array)
context.register_function(characters)
context.register_function(is_string)
context.register_function(norm)
context.register_function(in_)
context.register_function(starts_with)
context.register_function(ends_with)
context.register_function(hex_)
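# Usage sketch (illustrative, not part of the original module): evaluating
# expressions that rely on the string functions registered above. This assumes
# the standard yaql factory API (yaql.factory.YaqlFactory), which creates an
# engine whose default context already has the standard library, including
# this strings module, registered.
if __name__ == '__main__':
    import yaql

    engine = yaql.factory.YaqlFactory().create()

    # String functions are available directly inside yaql expressions.
    print(engine('"abc de f".split()').evaluate())      # ['abc', 'de', 'f']
    print(engine('concat("ab", "cd")').evaluate())      # 'abcd'
    print(engine('" x ".trim().isEmpty()').evaluate())  # False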
|
|
import warnings
import weakref
import six
from chainer.backends import cuda
from chainer import configuration
# for backward compatibility
from chainer.function_hook import FunctionHook # NOQA
from chainer import function_node
from chainer import variable
def no_backprop_mode():
"""Make a context manager which disables back-propagation.
In this context, Chainer does not make a computational graph. It has the
benefit of reducing memory consumption. However, a
:class:`~chainer.Variable` created in this context does not hold a
reference to the :class:`~chainer.FunctionNode` that created it, so no
gradients are accumulated by :func:`~chainer.Variable.backward`.
In the following example, ``y`` is created in this context, which means
that calling :func:`~chainer.Variable.backward` on ``y`` has no effect on
the gradients of ``x``.
>>> x = chainer.Variable(np.array([1,], np.float32))
>>> with chainer.no_backprop_mode():
... y = x + 1
>>> y.backward()
>>> x.grad is None
True
.. seealso::
See :func:`force_backprop_mode` for details on how to override this
context.
"""
return configuration.using_config('enable_backprop', False)
def force_backprop_mode():
"""Make a context manager which enables back-propagation.
When you want to enable back-propagation in :func:`no_backprop_mode`, call
this method. A :class:`~chainer.Variable` created in this context always
has a computational graph unless overridden by deeper contexts. If you call
this method outside of :func:`no_backprop_mode` context, it changes
nothing.
In the following example, ``y`` has a computational graph and calling
:func:`~chainer.Variable.backward` on ``y`` will compute and accumulate the
gradients of the variables in the graph, in this case only ``x``.
>>> x = chainer.Variable(np.array([1,], np.float32))
>>> with chainer.no_backprop_mode():
... with chainer.force_backprop_mode():
... y = x + 1
>>> y.backward()
>>> x.grad
array([1.], dtype=float32)
.. seealso::
See :func:`no_backprop_mode` for details on disabled back-propagation
mode.
"""
return configuration.using_config('enable_backprop', True)
class FunctionAdapter(function_node.FunctionNode):
"""Adapter class to wrap Function with FunctionNode.
While :class:`~chainer.FunctionNode` provides the interface
of new-style differentiable functions, the old-style
:class:`~chainer.Function` can still be used for the backward
compatibility.
This class provides an adapter between these interfaces; it adds
:class:`~chainer.FunctionNode` interface to any
:class:`~chainer.Function` object by delegation.
.. note::
The ownership of :class:`FunctionAdapter` and :class:`~chainer.Function`
is a bit tricky.
At the initialization, :class:`FunctionAdapter` is owned by the
:class:`~chainer.Function` object.
Once the function is applied to variables, the ownership is reversed;
the adapter becomes the owner of the
:class:`~chainer.Function` object and the :class:`~chainer.Function`
object changes the reference to a weak one.
Args:
function (Function): The function object to wrap.
.. versionadded:: 3.0.0
"""
_function = None
_weak_function = None
def __init__(self, function):
super(FunctionAdapter, self).__init__()
self._weak_function = weakref.ref(function)
function._owned_node = self
@property
def function(self):
"""The :class:`Function` object that this adapter is wrapping."""
func = self._function
if func is not None:
return func
weak_func = self._weak_function
return weak_func and weak_func()
@property
def label(self):
return self._function.label
@property
def _impl_name(self):
return self._function.__class__.__name__
def check_type_forward(self, in_types):
self._function.check_type_forward(in_types)
def forward(self, inputs):
# Retain all inputs by default in old-style functions.
self.retain_inputs(six.moves.range(len(inputs)))
return self._function.forward(inputs)
def backward(self, target_input_indexes, grad_outputs):
in_data = tuple([input.data for input in self.inputs])
grad_out_data = tuple([None if grad is None else grad.data
for grad in grad_outputs])
with cuda.get_device_from_array(*(in_data + grad_out_data)):
gxs = self._function.backward(in_data, grad_out_data)
for x, gx in six.moves.zip(self.inputs, gxs):
variable._check_grad_type(self, x, gx)
ret = []
for i in target_input_indexes:
if gxs[i] is None:
g = None
else:
# Intentionally not passing requires_grad=False so that
# backprop routines can raise an error when a further backprop
# is attempted against this gradient variable.
g = variable.Variable(gxs[i])
g.node._old_style_grad_generator = self._function.label
ret.append(g)
return tuple(ret)
class Function(object):
"""Old-style interface of a differentiable function.
This class provides an interface to implement an old-style differentiable
function (i.e., the function application is recorded to the computational
graph). The subclass of :class:`Function` that implement :meth:`forward`
and :meth:`backward` can be used to run the forward computation and
automatically induce the backpropagation procedure.
There is another way to implement such a function: subclassing
:class:`~chainer.FunctionNode`. There are mainly two
differences between them.
1. The *differentiable backprop* is available for
:class:`~chainer.FunctionNode`,
while it is not for :class:`Function` because the :meth:`backward`
of the latter directly operates on the arrays instead of
:class:`Variable` objects so that it cannot record the history of
the computation.
2. The information passed to :meth:`backward` is different. In
:class:`~chainer.FunctionNode`, the set of inputs for which the
function node has to compute gradients is passed, so that it can omit
unnecessary computations, while :class:`Function` always has to compute
gradients w.r.t. all the input nodes.
The :class:`~chainer.FunctionNode` also accepts the
current gradient values of the input nodes so that the accumulation
work can be merged with the gradient computation if an efficient kernel
is available.
This class uses :class:`~chainer.FunctionAdapter` to convert
the interface to that of :class:`~chainer.FunctionNode` and
adds the :class:`~chainer.FunctionNode` object to the
computational graph.
See :class:`~chainer.FunctionNode` for the details of
building the computational graph in Chainer.
"""
_node = None
_owned_node = None
def __call__(self, *inputs):
"""Applies forward propagation with chaining backward references.
This method creates a new :class:`~chainer.FunctionAdapter`
object and runs the forward propagation using it.
See :class:`~chainer.FunctionNode` for the detailed
behavior of building the computational graph.
Args:
inputs: Tuple of input :class:`Variable`, :class:`numpy.ndarray` or
:class:`cupy.ndarray` objects.
If the input is an :class:`numpy.ndarray` or a
:class:`cupy.ndarray`, it is automatically wrapped with
:class:`Variable`.
Returns:
One :class:`Variable` object or a tuple of multiple
:class:`Variable` objects.
"""
node = self.node
# Swap the ownership
node._function = self
node._weak_function = None
self._node = weakref.ref(node)
self._owned_node = None
ret = node.apply(inputs)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
@property
def inputs(self):
"""The input nodes of the function."""
return self.node.inputs
@property
def outputs(self):
"""Weak references to the output nodes of the function."""
return self.node.outputs
@property
def node(self):
"""The :class:`FunctionAdapter` object that wraps this Function.
If the Function does not have a node object, this property
automatically creates a new one.
"""
noderef = self._node
nd = (noderef and noderef()) or self._owned_node
if nd is not None:
return nd
nd = FunctionAdapter(self)
self._owned_node = nd
return nd
@property
def local_function_hooks(self):
"""Ordered Dictionary of registered function hooks.
See :attr:`FunctionNode.local_function_hooks` for the detail.
"""
return self.node.local_function_hooks
@property
def label(self):
"""Short text that represents the function.
The default implementation returns its type name.
Each function should override it to give more information.
"""
return self.__class__.__name__
@property
def output_data(self):
"""A tuple of the retained output arrays.
It has the same length as the :attr:`outputs`. Elements that are not
retained are set to ``None``.
"""
return self.node.output_data
@property
def rank(self):
"""The topological ordinal of the corresponding function node."""
return self.node.rank
@property
def stack(self):
return self.node.stack
def check_type_forward(self, in_types):
"""Checks types of input data before forward propagation.
Before :meth:`forward` is called, this function is called.
You need to validate types of input data in this function
using :ref:`the type checking utilities <type-check-utils>`.
Args:
in_types (~chainer.utils.type_check.TypeInfoTuple): The type
information of input data for :meth:`forward`.
"""
pass
def forward(self, inputs):
"""Applies forward propagation to input arrays.
It delegates the procedure to :meth:`forward_cpu` or
:meth:`forward_gpu` by default. Which it selects is determined by the
type of input arrays.
Implementations of :class:`Function` must implement either CPU/GPU
methods or this method.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
if any(isinstance(x, cuda.ndarray) for x in inputs):
return self.forward_gpu(inputs)
else:
return self.forward_cpu(inputs)
def forward_cpu(self, inputs):
"""Applies forward propagation to input arrays on CPU.
Args:
inputs: Tuple of :class:`numpy.ndarray` object(s).
Returns:
tuple: Tuple of :class:`numpy.ndarray` object(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError()
def forward_gpu(self, inputs):
"""Applies forward propagation to input arrays on GPU.
Args:
inputs: Tuple of :class:`cupy.ndarray` object(s).
Returns:
tuple: Tuple of :class:`cupy.ndarray` object(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError()
def backward(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays.
It delegates the procedure to :meth:`backward_cpu` or
:meth:`backward_gpu` by default. Which it selects is determined by the
type of input arrays and output gradient arrays. Implementations of
:class:`Function` must implement either CPU/GPU methods or this method,
if the function is intended to be backprop-ed.
Args:
inputs: Tuple of input arrays.
grad_outputs: Tuple of output gradient arrays.
Returns:
tuple: Tuple of input gradient arrays. Some or all of them can be
``None``, if the function is not differentiable on
inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
if any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs):
return self.backward_gpu(inputs, grad_outputs)
else:
return self.backward_cpu(inputs, grad_outputs)
def backward_cpu(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays on CPU.
Args:
inputs: Tuple of input :class:`numpy.ndarray` object(s).
grad_outputs: Tuple of output gradient :class:`numpy.ndarray`
object(s).
Returns:
tuple: Tuple of input gradient :class:`numpy.ndarray` object(s).
Some or all of them can be ``None``, if the function is not
differentiable on corresponding inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
return tuple(None for _ in inputs)
def backward_gpu(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays on GPU.
Args:
inputs: Tuple of input :class:`cupy.ndarray`
object(s).
grad_outputs: Tuple of output gradient
:class:`cupy.ndarray` object(s).
Returns:
tuple: Tuple of input gradient :class:`cupy.ndarray`
object(s). Some or all of them can be ``None``, if the function is
not differentiable on corresponding inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
return tuple(None for _ in inputs)
def unchain(self):
"""Purges in/out nodes and this function itself from the graph.
See :meth:`FunctionNode.unchain() <chainer.FunctionNode.unchain>`
for the detail.
"""
self.node.unchain()
def add_hook(self, hook, name=None):
"""Registers a function hook.
See :meth:`FunctionNode.add_hook` for the detail.
Args:
hook(~chainer.FunctionHook):
Function hook to be registered.
name(str): Name of the function hook.
name must be unique among function hooks
registered to the function. If ``None``,
default name of the function hook is used.
"""
self.node.add_hook(hook, name)
def delete_hook(self, name):
"""Unregisters the specified function hook.
Args:
name(str): the name of the function hook
to be unregistered.
"""
self.node.delete_hook(name)
def retain_inputs(self, indexes):
"""Lets specified input variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function can specify
which inputs are required for backprop.
If this method is not called, the function keeps all input arrays. If
you want to release all input arrays, call this method by passing an
empty sequence. *Note that this behavior is different from that of*
:meth:`FunctionNode.retain_inputs() \
<chainer.FunctionNode.retain_inputs>`.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of input variables that the
function will require for backprop.
"""
self.node.retain_inputs(indexes)
def retain_outputs(self, indexes, retain_after_backward=False):
"""Lets specified output variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function can specify
which outputs are required for backprop. If this method is not called,
no output variables are marked to keep their data arrays at the point
of returning from :meth:`__call__`. The retained arrays are stored to
:attr:`output_data`.
.. note::
It is STRONGLY RECOMMENDED to use this method if the function
requires some or all output arrays in backprop. The function can
also use output arrays just by keeping references to them directly,
but doing so might affect the performance of later function
applications to the output variables.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of output variables that the
function will require for backprop.
retain_after_backward (bool): This option has no effect. It is
left only for the backward compatibility.
"""
if retain_after_backward:
warnings.warn('retain_after_backward option has no effect',
DeprecationWarning)
self.node.retain_outputs(indexes)
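# Illustrative sketch (not part of the original module): a minimal old-style
# Function. The class name `Square` and the helper `square` below are made up
# for demonstration; they show the contract described in the docstrings above:
# forward_cpu/backward_cpu operate on arrays and must return tuples, and
# retain_inputs() may be called during forward to keep what backward needs.
class Square(Function):

    def forward_cpu(self, inputs):
        x, = inputs
        self.retain_inputs((0,))  # keep x; it is needed for the gradient
        return x * x,             # always return a tuple, even for one output

    def backward_cpu(self, inputs, grad_outputs):
        x, = inputs
        gy, = grad_outputs
        return 2 * x * gy,        # gradient w.r.t. x, again as a tuple


def square(x):
    # Applying the Function records a FunctionAdapter node in the graph,
    # so calling backward() on the output propagates gradients back to x.
    return Square()(x)

# Usage (assuming numpy is importable as np):
#
#     x = variable.Variable(np.array([3.0], dtype=np.float32))
#     y = square(x)
#     y.grad = np.ones_like(y.data)
#     y.backward()
#     print(x.grad)   # [6.]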
|
|
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
"""d3d9.h"""
from .winapi import *
from .d3d9types import *
from .d3d9caps import *
D3DSHADER9 = Blob(Const(DWORD), "_shaderSize(pFunction)")
D3DSPD = Flags(DWORD, [
"D3DSPD_IUNKNOWN",
])
D3DADAPTER = FakeEnum(UINT, [
"D3DADAPTER_DEFAULT",
])
D3DENUM = FakeEnum(DWORD, [
"D3DENUM_WHQL_LEVEL",
])
D3DSGR = Flags(DWORD, [
"D3DSGR_NO_CALIBRATION",
"D3DSGR_CALIBRATE",
])
D3DCURSOR = Flags(DWORD, [
"D3DCURSOR_IMMEDIATE_UPDATE",
])
D3DPRESENT = Flags(DWORD, [
"D3DPRESENT_DONOTWAIT",
"D3DPRESENT_LINEAR_CONTENT",
"D3DPRESENT_DONOTFLIP",
"D3DPRESENT_FLIPRESTART",
"D3DPRESENT_VIDEO_RESTRICT_TO_MONITOR",
])
HRESULT = MAKE_HRESULT(ok = "D3D_OK", errors = [
"D3DERR_WRONGTEXTUREFORMAT",
"D3DERR_UNSUPPORTEDCOLOROPERATION",
"D3DERR_UNSUPPORTEDCOLORARG",
"D3DERR_UNSUPPORTEDALPHAOPERATION",
"D3DERR_UNSUPPORTEDALPHAARG",
"D3DERR_TOOMANYOPERATIONS",
"D3DERR_CONFLICTINGTEXTUREFILTER",
"D3DERR_UNSUPPORTEDFACTORVALUE",
"D3DERR_CONFLICTINGRENDERSTATE",
"D3DERR_UNSUPPORTEDTEXTUREFILTER",
"D3DERR_CONFLICTINGTEXTUREPALETTE",
"D3DERR_DRIVERINTERNALERROR",
"D3DERR_NOTFOUND",
"D3DERR_MOREDATA",
"D3DERR_DEVICELOST",
"D3DERR_DEVICENOTRESET",
"D3DERR_NOTAVAILABLE",
"D3DERR_OUTOFVIDEOMEMORY",
"D3DERR_INVALIDDEVICE",
"D3DERR_INVALIDCALL",
"D3DERR_DRIVERINVALIDCALL",
"D3DERR_WASSTILLDRAWING",
"D3DOK_NOAUTOGEN",
"D3DERR_DEVICEREMOVED",
"S_NOT_RESIDENT",
"S_RESIDENT_IN_SHARED_MEMORY",
"S_PRESENT_MODE_CHANGED",
"S_PRESENT_OCCLUDED",
"D3DERR_DEVICEHUNG",
])
# If we ever swizzle shared handles, it will have to be done manually
SHARED_HANDLE = RAW_HANDLE
# System memory textures
# https://msdn.microsoft.com/en-us/library/windows/desktop/bb219800.aspx#Textures
SHARED_HANDLE_SYSMEM = Polymorphic('Pool', [
('D3DPOOL_SYSTEMMEM', Blob(Void, '_getLockSize(Format, false, Width, Height)'))
], SHARED_HANDLE, contextLess=False)
IDirect3D9 = Interface("IDirect3D9", IUnknown)
IDirect3DDevice9 = Interface("IDirect3DDevice9", IUnknown)
IDirect3DStateBlock9 = Interface("IDirect3DStateBlock9", IUnknown)
IDirect3DSwapChain9 = Interface("IDirect3DSwapChain9", IUnknown)
IDirect3DResource9 = Interface("IDirect3DResource9", IUnknown)
IDirect3DVertexDeclaration9 = Interface("IDirect3DVertexDeclaration9", IUnknown)
IDirect3DVertexShader9 = Interface("IDirect3DVertexShader9", IUnknown)
IDirect3DPixelShader9 = Interface("IDirect3DPixelShader9", IUnknown)
IDirect3DBaseTexture9 = Interface("IDirect3DBaseTexture9", IDirect3DResource9)
IDirect3DTexture9 = Interface("IDirect3DTexture9", IDirect3DBaseTexture9)
IDirect3DVolumeTexture9 = Interface("IDirect3DVolumeTexture9", IDirect3DBaseTexture9)
IDirect3DCubeTexture9 = Interface("IDirect3DCubeTexture9", IDirect3DBaseTexture9)
IDirect3DVertexBuffer9 = Interface("IDirect3DVertexBuffer9", IDirect3DResource9)
IDirect3DIndexBuffer9 = Interface("IDirect3DIndexBuffer9", IDirect3DResource9)
IDirect3DSurface9 = Interface("IDirect3DSurface9", IDirect3DResource9)
IDirect3DVolume9 = Interface("IDirect3DVolume9", IUnknown)
IDirect3DQuery9 = Interface("IDirect3DQuery9", IUnknown)
IDirect3D9Ex = Interface("IDirect3D9Ex", IDirect3D9)
IDirect3DDevice9Ex = Interface("IDirect3DDevice9Ex", IDirect3DDevice9)
IDirect3DSwapChain9Ex = Interface("IDirect3DSwapChain9Ex", IDirect3DSwapChain9)
PDIRECT3D9 = ObjPointer(IDirect3D9)
PDIRECT3DDEVICE9 = ObjPointer(IDirect3DDevice9)
PDIRECT3DSTATEBLOCK9 = ObjPointer(IDirect3DStateBlock9)
PDIRECT3DSWAPCHAIN9 = ObjPointer(IDirect3DSwapChain9)
PDIRECT3DRESOURCE9 = ObjPointer(IDirect3DResource9)
PDIRECT3DVERTEXDECLARATION9 = ObjPointer(IDirect3DVertexDeclaration9)
PDIRECT3DVERTEXSHADER9 = ObjPointer(IDirect3DVertexShader9)
PDIRECT3DPIXELSHADER9 = ObjPointer(IDirect3DPixelShader9)
PDIRECT3DBASETEXTURE9 = ObjPointer(IDirect3DBaseTexture9)
PDIRECT3DTEXTURE9 = ObjPointer(IDirect3DTexture9)
PDIRECT3DVOLUMETEXTURE9 = ObjPointer(IDirect3DVolumeTexture9)
PDIRECT3DCUBETEXTURE9 = ObjPointer(IDirect3DCubeTexture9)
PDIRECT3DVERTEXBUFFER9 = ObjPointer(IDirect3DVertexBuffer9)
PDIRECT3DINDEXBUFFER9 = ObjPointer(IDirect3DIndexBuffer9)
PDIRECT3DSURFACE9 = ObjPointer(IDirect3DSurface9)
PDIRECT3DVOLUME9 = ObjPointer(IDirect3DVolume9)
PDIRECT3DQUERY9 = ObjPointer(IDirect3DQuery9)
PDIRECT3D9EX = ObjPointer(IDirect3D9Ex)
PDIRECT3DDEVICE9EX = ObjPointer(IDirect3DDevice9Ex)
PDIRECT3DSWAPCHAIN9EX = ObjPointer(IDirect3DSwapChain9Ex)
IDirect3D9.methods += [
StdMethod(HRESULT, "RegisterSoftwareDevice", [(OpaquePointer(Void), "pInitializeFunction")], sideeffects=False),
StdMethod(UINT, "GetAdapterCount", [], sideeffects=False),
StdMethod(HRESULT, "GetAdapterIdentifier", [(D3DADAPTER, "Adapter"), (D3DENUM, "Flags"), Out(Pointer(D3DADAPTER_IDENTIFIER9), "pIdentifier")], sideeffects=False),
StdMethod(UINT, "GetAdapterModeCount", [(D3DADAPTER, "Adapter"), (D3DFORMAT, "Format")], sideeffects=False),
StdMethod(HRESULT, "EnumAdapterModes", [(D3DADAPTER, "Adapter"), (D3DFORMAT, "Format"), (UINT, "Mode"), Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
StdMethod(HRESULT, "GetAdapterDisplayMode", [(D3DADAPTER, "Adapter"), Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
StdMethod(HRESULT, "CheckDeviceType", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DevType"), (D3DFORMAT, "AdapterFormat"), (D3DFORMAT, "BackBufferFormat"), (BOOL, "bWindowed")], sideeffects=False),
StdMethod(HRESULT, "CheckDeviceFormat", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "AdapterFormat"), (D3DUSAGE, "Usage"), (D3DRESOURCETYPE, "RType"), (D3DFORMAT, "CheckFormat")], sideeffects=False),
StdMethod(HRESULT, "CheckDeviceMultiSampleType", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "SurfaceFormat"), (BOOL, "Windowed"), (D3DMULTISAMPLE_TYPE, "MultiSampleType"), Out(Pointer(DWORD), "pQualityLevels")], sideeffects=False),
StdMethod(HRESULT, "CheckDepthStencilMatch", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "AdapterFormat"), (D3DFORMAT, "RenderTargetFormat"), (D3DFORMAT, "DepthStencilFormat")], sideeffects=False),
StdMethod(HRESULT, "CheckDeviceFormatConversion", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "SourceFormat"), (D3DFORMAT, "TargetFormat")], sideeffects=False),
StdMethod(HRESULT, "GetDeviceCaps", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), Out(Pointer(D3DCAPS9), "pCaps")], sideeffects=False),
StdMethod(HMONITOR, "GetAdapterMonitor", [(D3DADAPTER, "Adapter")], sideeffects=False),
StdMethod(HRESULT, "CreateDevice", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (HWND, "hFocusWindow"), (D3DCREATE, "BehaviorFlags"), InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), Out(Pointer(PDIRECT3DDEVICE9), "ppReturnedDeviceInterface")]),
]
IDirect3DDevice9.methods += [
StdMethod(HRESULT, "TestCooperativeLevel", []),
StdMethod(UINT, "GetAvailableTextureMem", [], sideeffects=False),
StdMethod(HRESULT, "EvictManagedResources", []),
StdMethod(HRESULT, "GetDirect3D", [Out(Pointer(PDIRECT3D9), "ppD3D9")]),
StdMethod(HRESULT, "GetDeviceCaps", [Out(Pointer(D3DCAPS9), "pCaps")], sideeffects=False),
StdMethod(HRESULT, "GetDisplayMode", [(UINT, "iSwapChain"), Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
StdMethod(HRESULT, "GetCreationParameters", [Out(Pointer(D3DDEVICE_CREATION_PARAMETERS), "pParameters")], sideeffects=False),
StdMethod(HRESULT, "SetCursorProperties", [(UINT, "XHotSpot"), (UINT, "YHotSpot"), (PDIRECT3DSURFACE9, "pCursorBitmap")]),
StdMethod(Void, "SetCursorPosition", [(Int, "X"), (Int, "Y"), (D3DCURSOR, "Flags")]),
StdMethod(BOOL, "ShowCursor", [(BOOL, "bShow")]),
StdMethod(HRESULT, "CreateAdditionalSwapChain", [InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), Out(Pointer(PDIRECT3DSWAPCHAIN9), "pSwapChain")]),
StdMethod(HRESULT, "GetSwapChain", [(UINT, "iSwapChain"), Out(Pointer(PDIRECT3DSWAPCHAIN9), "pSwapChain")]),
StdMethod(UINT, "GetNumberOfSwapChains", [], sideeffects=False),
StdMethod(HRESULT, "Reset", [InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters")]),
StdMethod(HRESULT, "Present", [(ConstPointer(RECT), "pSourceRect"), (ConstPointer(RECT), "pDestRect"), (HWND, "hDestWindowOverride"), (ConstPointer(RGNDATA), "pDirtyRegion")]),
StdMethod(HRESULT, "GetBackBuffer", [(UINT, "iSwapChain"), (UINT, "iBackBuffer"), (D3DBACKBUFFER_TYPE, "Type"), Out(Pointer(PDIRECT3DSURFACE9), "ppBackBuffer")]),
StdMethod(HRESULT, "GetRasterStatus", [(UINT, "iSwapChain"), Out(Pointer(D3DRASTER_STATUS), "pRasterStatus")], sideeffects=False),
StdMethod(HRESULT, "SetDialogBoxMode", [(BOOL, "bEnableDialogs")]),
StdMethod(Void, "SetGammaRamp", [(UINT, "iSwapChain"), (D3DSGR, "Flags"), (ConstPointer(D3DGAMMARAMP), "pRamp")]),
StdMethod(Void, "GetGammaRamp", [(UINT, "iSwapChain"), Out(Pointer(D3DGAMMARAMP), "pRamp")], sideeffects=False),
StdMethod(HRESULT, "CreateTexture", [(UINT, "Width"), (UINT, "Height"), (UINT, "Levels"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DTEXTURE9), "ppTexture"), InOut(Pointer(SHARED_HANDLE_SYSMEM), "pSharedHandle")]),
StdMethod(HRESULT, "CreateVolumeTexture", [(UINT, "Width"), (UINT, "Height"), (UINT, "Depth"), (UINT, "Levels"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DVOLUMETEXTURE9), "ppVolumeTexture"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "CreateCubeTexture", [(UINT, "EdgeLength"), (UINT, "Levels"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DCUBETEXTURE9), "ppCubeTexture"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "CreateVertexBuffer", [(UINT, "Length"), (D3DUSAGE, "Usage"), (D3DFVF, "FVF"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DVERTEXBUFFER9), "ppVertexBuffer"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "CreateIndexBuffer", [(UINT, "Length"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DINDEXBUFFER9), "ppIndexBuffer"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "CreateRenderTarget", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Lockable"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "CreateDepthStencilSurface", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Discard"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "UpdateSurface", [(PDIRECT3DSURFACE9, "pSourceSurface"), (ConstPointer(RECT), "pSourceRect"), (PDIRECT3DSURFACE9, "pDestinationSurface"), (ConstPointer(POINT), "pDestPoint")]),
StdMethod(HRESULT, "UpdateTexture", [(PDIRECT3DBASETEXTURE9, "pSourceTexture"), (PDIRECT3DBASETEXTURE9, "pDestinationTexture")]),
StdMethod(HRESULT, "GetRenderTargetData", [(PDIRECT3DSURFACE9, "pRenderTarget"), (PDIRECT3DSURFACE9, "pDestSurface")]),
StdMethod(HRESULT, "GetFrontBufferData", [(UINT, "iSwapChain"), (PDIRECT3DSURFACE9, "pDestSurface")]),
StdMethod(HRESULT, "StretchRect", [(PDIRECT3DSURFACE9, "pSourceSurface"), (ConstPointer(RECT), "pSourceRect"), (PDIRECT3DSURFACE9, "pDestSurface"), (ConstPointer(RECT), "pDestRect"), (D3DTEXTUREFILTERTYPE, "Filter")]),
StdMethod(HRESULT, "ColorFill", [(PDIRECT3DSURFACE9, "pSurface"), (ConstPointer(RECT), "pRect"), (D3DCOLOR, "color")]),
StdMethod(HRESULT, "CreateOffscreenPlainSurface", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "SetRenderTarget", [(DWORD, "RenderTargetIndex"), (PDIRECT3DSURFACE9, "pRenderTarget")]),
StdMethod(HRESULT, "GetRenderTarget", [(DWORD, "RenderTargetIndex"), Out(Pointer(PDIRECT3DSURFACE9), "ppRenderTarget")]),
StdMethod(HRESULT, "SetDepthStencilSurface", [(PDIRECT3DSURFACE9, "pNewZStencil")]),
StdMethod(HRESULT, "GetDepthStencilSurface", [Out(Pointer(PDIRECT3DSURFACE9), "ppZStencilSurface")]),
StdMethod(HRESULT, "BeginScene", []),
StdMethod(HRESULT, "EndScene", []),
StdMethod(HRESULT, "Clear", [(DWORD, "Count"), (Array(Const(D3DRECT), "Count"), "pRects"), (D3DCLEAR, "Flags"), (D3DCOLOR, "Color"), (Float, "Z"), (DWORD, "Stencil")]),
StdMethod(HRESULT, "SetTransform", [(D3DTRANSFORMSTATETYPE, "State"), (ConstPointer(D3DMATRIX), "pMatrix")]),
StdMethod(HRESULT, "GetTransform", [(D3DTRANSFORMSTATETYPE, "State"), Out(Pointer(D3DMATRIX), "pMatrix")], sideeffects=False),
StdMethod(HRESULT, "MultiplyTransform", [(D3DTRANSFORMSTATETYPE, "State"), (ConstPointer(D3DMATRIX), "pMatrix")]),
StdMethod(HRESULT, "SetViewport", [(ConstPointer(D3DVIEWPORT9), "pViewport")]),
StdMethod(HRESULT, "GetViewport", [Out(Pointer(D3DVIEWPORT9), "pViewport")], sideeffects=False),
StdMethod(HRESULT, "SetMaterial", [(ConstPointer(D3DMATERIAL9), "pMaterial")]),
StdMethod(HRESULT, "GetMaterial", [Out(Pointer(D3DMATERIAL9), "pMaterial")], sideeffects=False),
StdMethod(HRESULT, "SetLight", [(DWORD, "Index"), (ConstPointer(D3DLIGHT9), "pLight")]),
StdMethod(HRESULT, "GetLight", [(DWORD, "Index"), Out(Pointer(D3DLIGHT9), "pLight")], sideeffects=False),
StdMethod(HRESULT, "LightEnable", [(DWORD, "Index"), (BOOL, "Enable")]),
StdMethod(HRESULT, "GetLightEnable", [(DWORD, "Index"), Out(Pointer(BOOL), "pEnable")], sideeffects=False),
StdMethod(HRESULT, "SetClipPlane", [(DWORD, "Index"), (Array(Const(Float), 4), "pPlane")]),
StdMethod(HRESULT, "GetClipPlane", [(DWORD, "Index"), Out(Array(Float, 4), "pPlane")], sideeffects=False),
StdMethod(HRESULT, "SetRenderState", [(D3DRENDERSTATETYPE, "State"), (D3DRENDERSTATEVALUE, "Value")]),
StdMethod(HRESULT, "GetRenderState", [(D3DRENDERSTATETYPE, "State"), Out(Pointer(D3DRENDERSTATEVALUE), "pValue")], sideeffects=False),
StdMethod(HRESULT, "CreateStateBlock", [(D3DSTATEBLOCKTYPE, "Type"), Out(Pointer(PDIRECT3DSTATEBLOCK9), "ppSB")]),
StdMethod(HRESULT, "BeginStateBlock", []),
StdMethod(HRESULT, "EndStateBlock", [Out(Pointer(PDIRECT3DSTATEBLOCK9), "ppSB")]),
StdMethod(HRESULT, "SetClipStatus", [(ConstPointer(D3DCLIPSTATUS9), "pClipStatus")]),
StdMethod(HRESULT, "GetClipStatus", [Out(Pointer(D3DCLIPSTATUS9), "pClipStatus")], sideeffects=False),
StdMethod(HRESULT, "GetTexture", [(DWORD, "Stage"), Out(Pointer(PDIRECT3DBASETEXTURE9), "ppTexture")]),
StdMethod(HRESULT, "SetTexture", [(DWORD, "Stage"), (PDIRECT3DBASETEXTURE9, "pTexture")]),
StdMethod(HRESULT, "GetTextureStageState", [(DWORD, "Stage"), (D3DTEXTURESTAGESTATETYPE, "Type"), Out(Pointer(D3DTEXTURESTAGESTATEVALUE), "pValue")], sideeffects=False),
StdMethod(HRESULT, "SetTextureStageState", [(DWORD, "Stage"), (D3DTEXTURESTAGESTATETYPE, "Type"), (D3DTEXTURESTAGESTATEVALUE, "Value")]),
StdMethod(HRESULT, "GetSamplerState", [(DWORD, "Sampler"), (D3DSAMPLERSTATETYPE, "Type"), Out(Pointer(D3DSAMPLERSTATEVALUE), "pValue")], sideeffects=False),
StdMethod(HRESULT, "SetSamplerState", [(DWORD, "Sampler"), (D3DSAMPLERSTATETYPE, "Type"), (D3DSAMPLERSTATEVALUE, "Value")]),
StdMethod(HRESULT, "ValidateDevice", [Out(Pointer(DWORD), "pNumPasses")]),
StdMethod(HRESULT, "SetPaletteEntries", [(UINT, "PaletteNumber"), (Array(Const(PALETTEENTRY), 256), "pEntries")]),
StdMethod(HRESULT, "GetPaletteEntries", [(UINT, "PaletteNumber"), Out(Array(PALETTEENTRY, 256), "pEntries")], sideeffects=False),
StdMethod(HRESULT, "SetCurrentTexturePalette", [(UINT, "PaletteNumber")]),
StdMethod(HRESULT, "GetCurrentTexturePalette", [Out(Pointer(UINT), "PaletteNumber")], sideeffects=False),
StdMethod(HRESULT, "SetScissorRect", [(ConstPointer(RECT), "pRect")]),
StdMethod(HRESULT, "GetScissorRect", [Out(Pointer(RECT), "pRect")]),
StdMethod(HRESULT, "SetSoftwareVertexProcessing", [(BOOL, "bSoftware")]),
StdMethod(BOOL, "GetSoftwareVertexProcessing", [], sideeffects=False),
StdMethod(HRESULT, "SetNPatchMode", [(Float, "nSegments")]),
StdMethod(Float, "GetNPatchMode", [], sideeffects=False),
StdMethod(HRESULT, "DrawPrimitive", [(D3DPRIMITIVETYPE, "PrimitiveType"), (UINT, "StartVertex"), (UINT, "PrimitiveCount")]),
StdMethod(HRESULT, "DrawIndexedPrimitive", [(D3DPRIMITIVETYPE, "PrimitiveType"), (INT, "BaseVertexIndex"), (UINT, "MinVertexIndex"), (UINT, "NumVertices"), (UINT, "startIndex"), (UINT, "primCount")]),
StdMethod(HRESULT, "DrawPrimitiveUP", [(D3DPRIMITIVETYPE, "PrimitiveType"), (UINT, "PrimitiveCount"), (Blob(Const(Void), "_vertexDataSize(PrimitiveType, PrimitiveCount, VertexStreamZeroStride)"), "pVertexStreamZeroData"), (UINT, "VertexStreamZeroStride")]),
StdMethod(HRESULT, "DrawIndexedPrimitiveUP", [(D3DPRIMITIVETYPE, "PrimitiveType"), (UINT, "MinVertexIndex"), (UINT, "NumVertices"), (UINT, "PrimitiveCount"), (Blob(Const(Void), "_indexDataSize(PrimitiveType, PrimitiveCount, IndexDataFormat)"), "pIndexData"), (D3DFORMAT, "IndexDataFormat"), (Blob(Const(Void), "(MinVertexIndex + NumVertices)*VertexStreamZeroStride"), "pVertexStreamZeroData"), (UINT, "VertexStreamZeroStride")]),
StdMethod(HRESULT, "ProcessVertices", [(UINT, "SrcStartIndex"), (UINT, "DestIndex"), (UINT, "VertexCount"), (PDIRECT3DVERTEXBUFFER9, "pDestBuffer"), (PDIRECT3DVERTEXDECLARATION9, "pVertexDecl"), (D3DPV, "Flags")]),
StdMethod(HRESULT, "CreateVertexDeclaration", [(Array(Const(D3DVERTEXELEMENT9), "_declCount(pVertexElements)"), "pVertexElements"), Out(Pointer(PDIRECT3DVERTEXDECLARATION9), "ppDecl")]),
StdMethod(HRESULT, "SetVertexDeclaration", [(PDIRECT3DVERTEXDECLARATION9, "pDecl")]),
StdMethod(HRESULT, "GetVertexDeclaration", [Out(Pointer(PDIRECT3DVERTEXDECLARATION9), "ppDecl")]),
StdMethod(HRESULT, "SetFVF", [(D3DFVF, "FVF")]),
StdMethod(HRESULT, "GetFVF", [Out(Pointer(D3DFVF), "pFVF")], sideeffects=False),
StdMethod(HRESULT, "CreateVertexShader", [(D3DSHADER9, "pFunction"), Out(Pointer(PDIRECT3DVERTEXSHADER9), "ppShader")]),
StdMethod(HRESULT, "SetVertexShader", [(PDIRECT3DVERTEXSHADER9, "pShader")]),
StdMethod(HRESULT, "GetVertexShader", [Out(Pointer(PDIRECT3DVERTEXSHADER9), "ppShader")]),
StdMethod(HRESULT, "SetVertexShaderConstantF", [(UINT, "StartRegister"), (Array(Const(Float), "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")]),
StdMethod(HRESULT, "GetVertexShaderConstantF", [(UINT, "StartRegister"), Out(Array(Float, "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")], sideeffects=False),
StdMethod(HRESULT, "SetVertexShaderConstantI", [(UINT, "StartRegister"), (Array(Const(Int), "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")]),
StdMethod(HRESULT, "GetVertexShaderConstantI", [(UINT, "StartRegister"), Out(Array(Int, "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")], sideeffects=False),
StdMethod(HRESULT, "SetVertexShaderConstantB", [(UINT, "StartRegister"), (Array(Const(BOOL), "BoolCount"), "pConstantData"), (UINT, "BoolCount")]),
StdMethod(HRESULT, "GetVertexShaderConstantB", [(UINT, "StartRegister"), Out(Array(BOOL, "BoolCount"), "pConstantData"), (UINT, "BoolCount")], sideeffects=False),
StdMethod(HRESULT, "SetStreamSource", [(UINT, "StreamNumber"), (PDIRECT3DVERTEXBUFFER9, "pStreamData"), (UINT, "OffsetInBytes"), (UINT, "Stride")]),
StdMethod(HRESULT, "GetStreamSource", [(UINT, "StreamNumber"), Out(Pointer(PDIRECT3DVERTEXBUFFER9), "ppStreamData"), Out(Pointer(UINT), "pOffsetInBytes"), Out(Pointer(UINT), "pStride")]),
StdMethod(HRESULT, "SetStreamSourceFreq", [(UINT, "StreamNumber"), (UINT, "Setting")]),
StdMethod(HRESULT, "GetStreamSourceFreq", [(UINT, "StreamNumber"), Out(Pointer(UINT), "pSetting")], sideeffects=False),
StdMethod(HRESULT, "SetIndices", [(PDIRECT3DINDEXBUFFER9, "pIndexData")]),
StdMethod(HRESULT, "GetIndices", [Out(Pointer(PDIRECT3DINDEXBUFFER9), "ppIndexData")]),
StdMethod(HRESULT, "CreatePixelShader", [(D3DSHADER9, "pFunction"), Out(Pointer(PDIRECT3DPIXELSHADER9), "ppShader")]),
StdMethod(HRESULT, "SetPixelShader", [(PDIRECT3DPIXELSHADER9, "pShader")]),
StdMethod(HRESULT, "GetPixelShader", [Out(Pointer(PDIRECT3DPIXELSHADER9), "ppShader")]),
StdMethod(HRESULT, "SetPixelShaderConstantF", [(UINT, "StartRegister"), (Array(Const(Float), "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")]),
StdMethod(HRESULT, "GetPixelShaderConstantF", [(UINT, "StartRegister"), Out(Array(Float, "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")], sideeffects=False),
StdMethod(HRESULT, "SetPixelShaderConstantI", [(UINT, "StartRegister"), (Array(Const(Int), "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")]),
StdMethod(HRESULT, "GetPixelShaderConstantI", [(UINT, "StartRegister"), Out(Array(Int, "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")], sideeffects=False),
StdMethod(HRESULT, "SetPixelShaderConstantB", [(UINT, "StartRegister"), (Array(Const(BOOL), "BoolCount"), "pConstantData"), (UINT, "BoolCount")]),
StdMethod(HRESULT, "GetPixelShaderConstantB", [(UINT, "StartRegister"), Out(Array(BOOL, "BoolCount"), "pConstantData"), (UINT, "BoolCount")], sideeffects=False),
StdMethod(HRESULT, "DrawRectPatch", [(UINT, "Handle"), (ConstPointer(Float), "pNumSegs"), (ConstPointer(D3DRECTPATCH_INFO), "pRectPatchInfo")]),
StdMethod(HRESULT, "DrawTriPatch", [(UINT, "Handle"), (ConstPointer(Float), "pNumSegs"), (ConstPointer(D3DTRIPATCH_INFO), "pTriPatchInfo")]),
StdMethod(HRESULT, "DeletePatch", [(UINT, "Handle")]),
StdMethod(HRESULT, "CreateQuery", [(D3DQUERYTYPE, "Type"), Out(Pointer(PDIRECT3DQUERY9), "ppQuery")]),
]
IDirect3DStateBlock9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "Capture", []),
StdMethod(HRESULT, "Apply", []),
]
IDirect3DSwapChain9.methods += [
StdMethod(HRESULT, "Present", [(ConstPointer(RECT), "pSourceRect"), (ConstPointer(RECT), "pDestRect"), (HWND, "hDestWindowOverride"), (ConstPointer(RGNDATA), "pDirtyRegion"), (D3DPRESENT, "dwFlags")]),
StdMethod(HRESULT, "GetFrontBufferData", [(PDIRECT3DSURFACE9, "pDestSurface")]),
StdMethod(HRESULT, "GetBackBuffer", [(UINT, "iBackBuffer"), (D3DBACKBUFFER_TYPE, "Type"), Out(Pointer(PDIRECT3DSURFACE9), "ppBackBuffer")]),
StdMethod(HRESULT, "GetRasterStatus", [Out(Pointer(D3DRASTER_STATUS), "pRasterStatus")], sideeffects=False),
StdMethod(HRESULT, "GetDisplayMode", [Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "GetPresentParameters", [Out(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters")], sideeffects=False),
]
IDirect3DResource9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "refguid"), (OpaqueBlob(Const(Void), "SizeOfData"), "pData"), (DWORD, "SizeOfData"), (D3DSPD, "Flags")], sideeffects=False),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "refguid"), Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), InOut(Pointer(DWORD), "pSizeOfData")], sideeffects=False),
StdMethod(HRESULT, "FreePrivateData", [(REFGUID, "refguid")], sideeffects=False),
StdMethod(D3D9_RESOURCE_PRIORITY, "SetPriority", [(D3D9_RESOURCE_PRIORITY, "PriorityNew")]),
StdMethod(D3D9_RESOURCE_PRIORITY, "GetPriority", [], sideeffects=False),
StdMethod(Void, "PreLoad", []),
StdMethod(D3DRESOURCETYPE, "GetType", [], sideeffects=False),
]
IDirect3DVertexDeclaration9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "GetDeclaration", [Out(Array(D3DVERTEXELEMENT9, "*pNumElements"), "pElement"), InOut(Pointer(UINT), "pNumElements")], sideeffects=False),
]
IDirect3DVertexShader9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "GetFunction", [Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), Out(Pointer(UINT), "pSizeOfData")], sideeffects=False),
]
IDirect3DPixelShader9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "GetFunction", [Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), Out(Pointer(UINT), "pSizeOfData")], sideeffects=False),
]
IDirect3DBaseTexture9.methods += [
StdMethod(DWORD, "SetLOD", [(DWORD, "LODNew")]),
StdMethod(DWORD, "GetLOD", [], sideeffects=False),
StdMethod(DWORD, "GetLevelCount", [], sideeffects=False),
StdMethod(HRESULT, "SetAutoGenFilterType", [(D3DTEXTUREFILTERTYPE, "FilterType")]),
StdMethod(D3DTEXTUREFILTERTYPE, "GetAutoGenFilterType", [], sideeffects=False),
StdMethod(Void, "GenerateMipSubLevels", []),
]
IDirect3DTexture9.methods += [
StdMethod(HRESULT, "GetLevelDesc", [(UINT, "Level"), Out(Pointer(D3DSURFACE_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "GetSurfaceLevel", [(UINT, "Level"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurfaceLevel")]),
StdMethod(HRESULT, "LockRect", [(UINT, "Level"), Out(Pointer(D3DLOCKED_RECT), "pLockedRect"), (ConstPointer(RECT), "pRect"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "UnlockRect", [(UINT, "Level")]),
StdMethod(HRESULT, "AddDirtyRect", [(ConstPointer(RECT), "pDirtyRect")]),
]
IDirect3DVolumeTexture9.methods += [
StdMethod(HRESULT, "GetLevelDesc", [(UINT, "Level"), Out(Pointer(D3DVOLUME_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "GetVolumeLevel", [(UINT, "Level"), Out(Pointer(PDIRECT3DVOLUME9), "ppVolumeLevel")]),
StdMethod(HRESULT, "LockBox", [(UINT, "Level"), Out(Pointer(D3DLOCKED_BOX), "pLockedVolume"), (ConstPointer(D3DBOX), "pBox"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "UnlockBox", [(UINT, "Level")]),
StdMethod(HRESULT, "AddDirtyBox", [(ConstPointer(D3DBOX), "pDirtyBox")]),
]
IDirect3DCubeTexture9.methods += [
StdMethod(HRESULT, "GetLevelDesc", [(UINT, "Level"), Out(Pointer(D3DSURFACE_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "GetCubeMapSurface", [(D3DCUBEMAP_FACES, "FaceType"), (UINT, "Level"), Out(Pointer(PDIRECT3DSURFACE9), "ppCubeMapSurface")]),
StdMethod(HRESULT, "LockRect", [(D3DCUBEMAP_FACES, "FaceType"), (UINT, "Level"), Out(Pointer(D3DLOCKED_RECT), "pLockedRect"), (ConstPointer(RECT), "pRect"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "UnlockRect", [(D3DCUBEMAP_FACES, "FaceType"), (UINT, "Level")]),
StdMethod(HRESULT, "AddDirtyRect", [(D3DCUBEMAP_FACES, "FaceType"), (ConstPointer(RECT), "pDirtyRect")]),
]
IDirect3DVertexBuffer9.methods += [
StdMethod(HRESULT, "Lock", [(UINT, "OffsetToLock"), (UINT, "SizeToLock"), Out(Pointer(LinearPointer(Void, "_MappedSize")), "ppbData"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "Unlock", []),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DVERTEXBUFFER_DESC), "pDesc")], sideeffects=False),
]
IDirect3DIndexBuffer9.methods += [
StdMethod(HRESULT, "Lock", [(UINT, "OffsetToLock"), (UINT, "SizeToLock"), Out(Pointer(LinearPointer(Void, "_MappedSize")), "ppbData"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "Unlock", []),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DINDEXBUFFER_DESC), "pDesc")], sideeffects=False),
]
IDirect3DSurface9.methods += [
StdMethod(HRESULT, "GetContainer", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppContainer")]),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DSURFACE_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "LockRect", [Out(Pointer(D3DLOCKED_RECT), "pLockedRect"), (ConstPointer(RECT), "pRect"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "UnlockRect", []),
StdMethod(HRESULT, "GetDC", [Out(Pointer(HDC), "phdc")], sideeffects=False),
StdMethod(HRESULT, "ReleaseDC", [(HDC, "hdc")], sideeffects=False),
]
IDirect3DVolume9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "refguid"), (OpaqueBlob(Const(Void), "SizeOfData"), "pData"), (DWORD, "SizeOfData"), (D3DSPD, "Flags")], sideeffects=False),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "refguid"), Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), InOut(Pointer(DWORD), "pSizeOfData")], sideeffects=False),
StdMethod(HRESULT, "FreePrivateData", [(REFGUID, "refguid")], sideeffects=False),
StdMethod(HRESULT, "GetContainer", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppContainer")]),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DVOLUME_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "LockBox", [Out(Pointer(D3DLOCKED_BOX), "pLockedVolume"), (ConstPointer(D3DBOX), "pBox"), (D3DLOCK, "Flags")]),
StdMethod(HRESULT, "UnlockBox", []),
]
IDirect3DQuery9.methods += [
StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
StdMethod(D3DQUERYTYPE, "GetType", [], sideeffects=False),
StdMethod(DWORD, "GetDataSize", [], sideeffects=False),
StdMethod(HRESULT, "Issue", [(D3DISSUE, "dwIssueFlags")]),
StdMethod(HRESULT, "GetData", [Out(D3DQUERYDATA, "pData"), (DWORD, "dwSize"), (D3DGETDATA, "dwGetDataFlags")], sideeffects=False),
]
IDirect3D9Ex.methods += [
StdMethod(UINT, "GetAdapterModeCountEx", [(D3DADAPTER, "Adapter"), (ConstPointer(D3DDISPLAYMODEFILTER), "pFilter") ], sideeffects=False),
StdMethod(HRESULT, "EnumAdapterModesEx", [(D3DADAPTER, "Adapter"), (ConstPointer(D3DDISPLAYMODEFILTER), "pFilter"), (UINT, "Mode"), Out(Pointer(D3DDISPLAYMODEEX), "pMode")], sideeffects=False),
StdMethod(HRESULT, "GetAdapterDisplayModeEx", [(D3DADAPTER, "Adapter"), Out(Pointer(D3DDISPLAYMODEEX), "pMode"), Out(Pointer(D3DDISPLAYROTATION), "pRotation")], sideeffects=False),
StdMethod(HRESULT, "CreateDeviceEx", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (HWND, "hFocusWindow"), (D3DCREATE, "BehaviorFlags"), InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), InOut(Pointer(D3DDISPLAYMODEEX), "pFullscreenDisplayMode"), Out(Pointer(PDIRECT3DDEVICE9EX), "ppReturnedDeviceInterface")]),
StdMethod(HRESULT, "GetAdapterLUID", [(D3DADAPTER, "Adapter"), Out(Pointer(LUID), "pLUID")], sideeffects=False),
]
IDirect3DDevice9Ex.methods += [
StdMethod(HRESULT, "SetConvolutionMonoKernel", [(UINT, "width"), (UINT, "height"), (Array(Float, "width"), "rows"), (Array(Float, "height"), "columns")]),
StdMethod(HRESULT, "ComposeRects", [(PDIRECT3DSURFACE9, "pSrc"), (PDIRECT3DSURFACE9, "pDst"), (PDIRECT3DVERTEXBUFFER9, "pSrcRectDescs"), (UINT, "NumRects"), (PDIRECT3DVERTEXBUFFER9, "pDstRectDescs"), (D3DCOMPOSERECTSOP, "Operation"), (Int, "Xoffset"), (Int, "Yoffset")]),
StdMethod(HRESULT, "PresentEx", [(ConstPointer(RECT), "pSourceRect"), (ConstPointer(RECT), "pDestRect"), (HWND, "hDestWindowOverride"), (ConstPointer(RGNDATA), "pDirtyRegion"), (D3DPRESENT, "dwFlags")]),
StdMethod(HRESULT, "GetGPUThreadPriority", [Out(Pointer(INT), "pPriority")], sideeffects=False),
StdMethod(HRESULT, "SetGPUThreadPriority", [(INT, "Priority")]),
StdMethod(HRESULT, "WaitForVBlank", [(UINT, "iSwapChain")]),
StdMethod(HRESULT, "CheckResourceResidency", [(Array(PDIRECT3DRESOURCE9, "NumResources"), "pResourceArray"), (UINT32, "NumResources")]),
StdMethod(HRESULT, "SetMaximumFrameLatency", [(UINT, "MaxLatency")]),
StdMethod(HRESULT, "GetMaximumFrameLatency", [Out(Pointer(UINT), "pMaxLatency")], sideeffects=False),
StdMethod(HRESULT, "CheckDeviceState", [(HWND, "hDestinationWindow")], sideeffects=False),
StdMethod(HRESULT, "CreateRenderTargetEx", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Lockable"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle"), (D3DUSAGE, "Usage")]),
StdMethod(HRESULT, "CreateOffscreenPlainSurfaceEx", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle"), (D3DUSAGE, "Usage")]),
StdMethod(HRESULT, "CreateDepthStencilSurfaceEx", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Discard"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle"), (D3DUSAGE, "Usage")]),
StdMethod(HRESULT, "ResetEx", [InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), InOut(Pointer(D3DDISPLAYMODEEX), "pFullscreenDisplayMode")]),
StdMethod(HRESULT, "GetDisplayModeEx", [(UINT, "iSwapChain"), Out(Pointer(D3DDISPLAYMODEEX), "pMode"), Out(Pointer(D3DDISPLAYROTATION), "pRotation")], sideeffects=False),
]
IDirect3DSwapChain9Ex.methods += [
StdMethod(HRESULT, "GetLastPresentCount", [Out(Pointer(UINT), "pLastPresentCount")], sideeffects=False),
StdMethod(HRESULT, "GetPresentStats", [Out(Pointer(D3DPRESENTSTATS), "pPresentationStatistics")], sideeffects=False),
StdMethod(HRESULT, "GetDisplayModeEx", [Out(Pointer(D3DDISPLAYMODEEX), "pMode"), Out(Pointer(D3DDISPLAYROTATION), "pRotation")], sideeffects=False),
]
d3d9 = Module("d3d9")
d3d9.addFunctions([
StdFunction(PDIRECT3D9, "Direct3DCreate9", [(UINT, "SDKVersion")], fail='NULL'),
StdFunction(HRESULT, "Direct3DCreate9Ex", [(UINT, "SDKVersion"), Out(Pointer(PDIRECT3D9EX), "ppD3D")], fail='D3DERR_NOTAVAILABLE'),
])
d3d9.addInterfaces([
IDirect3DSwapChain9Ex,
])
# D3DPERF_* functions can also be used by D3D10 applications, so keep them in a
# separate module to be merged as necessary
# See http://web.archive.org/web/20110510070258/http://msdn.microsoft.com/en-us/library/ee417071%28v=VS.85%29.aspx
d3dperf = Module("d3d9")
d3dperf.addFunctions([
StdFunction(Int, "D3DPERF_BeginEvent", [(D3DCOLOR, "col"), (LPCWSTR, "wszName")], fail='-1', sideeffects=False),
StdFunction(Int, "D3DPERF_EndEvent", [], fail='-1', sideeffects=False),
StdFunction(Void, "D3DPERF_SetMarker", [(D3DCOLOR, "col"), (LPCWSTR, "wszName")], sideeffects=False),
StdFunction(Void, "D3DPERF_SetRegion", [(D3DCOLOR, "col"), (LPCWSTR, "wszName")], sideeffects=False),
StdFunction(BOOL, "D3DPERF_QueryRepeatFrame", [], fail='FALSE', sideeffects=False),
StdFunction(Void, "D3DPERF_SetOptions", [(DWORD, "dwOptions")], sideeffects=False),
StdFunction(DWORD, "D3DPERF_GetStatus", [], fail='0', sideeffects=False),
])
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseBijectorTest(test.TestCase):
"""Tests properties of the Bijector base-class."""
def testIsAbstract(self):
with self.assertRaisesRegex(TypeError,
("Can't instantiate abstract class Bijector "
"with abstract methods __init__")):
bijector.Bijector() # pylint: disable=abstract-class-instantiated
def testDefaults(self):
class _BareBonesBijector(bijector.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
super(_BareBonesBijector, self).__init__(forward_min_event_ndims=0)
bij = _BareBonesBijector()
self.assertEqual([], bij.graph_parents)
self.assertEqual(False, bij.is_constant_jacobian)
self.assertEqual(False, bij.validate_args)
self.assertEqual(None, bij.dtype)
self.assertEqual("bare_bones_bijector", bij.name)
for shape in [[], [1, 2], [1, 2, 3]]:
      forward_event_shape_ = self.evaluate(
          bij.forward_event_shape_tensor(shape))
      inverse_event_shape_ = self.evaluate(
          bij.inverse_event_shape_tensor(shape))
self.assertAllEqual(shape, forward_event_shape_)
self.assertAllEqual(shape, bij.forward_event_shape(shape))
self.assertAllEqual(shape, inverse_event_shape_)
self.assertAllEqual(shape, bij.inverse_event_shape(shape))
with self.assertRaisesRegex(NotImplementedError, "inverse not implemented"):
bij.inverse(0)
with self.assertRaisesRegex(NotImplementedError, "forward not implemented"):
bij.forward(0)
with self.assertRaisesRegex(NotImplementedError,
"inverse_log_det_jacobian not implemented"):
bij.inverse_log_det_jacobian(0, event_ndims=0)
with self.assertRaisesRegex(NotImplementedError,
"forward_log_det_jacobian not implemented"):
bij.forward_log_det_jacobian(0, event_ndims=0)
class IntentionallyMissingError(Exception):
pass
class BrokenBijector(bijector.Bijector):
"""Forward and inverse are not inverses of each other."""
def __init__(
self, forward_missing=False, inverse_missing=False, validate_args=False):
super(BrokenBijector, self).__init__(
validate_args=validate_args, forward_min_event_ndims=0, name="broken")
self._forward_missing = forward_missing
self._inverse_missing = inverse_missing
def _forward(self, x):
if self._forward_missing:
raise IntentionallyMissingError
return 2 * x
def _inverse(self, y):
if self._inverse_missing:
raise IntentionallyMissingError
return y / 2.
def _inverse_log_det_jacobian(self, y): # pylint:disable=unused-argument
if self._inverse_missing:
raise IntentionallyMissingError
return -math_ops.log(2.)
def _forward_log_det_jacobian(self, x): # pylint:disable=unused-argument
if self._forward_missing:
raise IntentionallyMissingError
return math_ops.log(2.)
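# --- Illustrative sketch (not part of the original tests) -------------------
# BrokenBijector above deliberately leaves one direction unimplemented so the
# caching tests further down can prove that results are served from the
# forward/inverse cache. For contrast, a minimal fully specified bijector on
# the same base class would look like this (sketch only):
class _DoublingBijector(bijector.Bijector):
  """y = 2 * x with constant log|det J| = log(2)."""

  def __init__(self, validate_args=False):
    super(_DoublingBijector, self).__init__(
        validate_args=validate_args,
        forward_min_event_ndims=0,
        is_constant_jacobian=True,
        name="doubling")

  def _forward(self, x):
    return 2. * x

  def _inverse(self, y):
    return y / 2.

  def _forward_log_det_jacobian(self, x):  # pylint:disable=unused-argument
    return math_ops.log(2.)

  def _inverse_log_det_jacobian(self, y):  # pylint:disable=unused-argument
    return -math_ops.log(2.)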
class BijectorTestEventNdims(test.TestCase):
def testBijectorNonIntegerEventNdims(self):
bij = BrokenBijector()
with self.assertRaisesRegex(ValueError, "Expected integer"):
bij.forward_log_det_jacobian(1., event_ndims=1.5)
with self.assertRaisesRegex(ValueError, "Expected integer"):
bij.inverse_log_det_jacobian(1., event_ndims=1.5)
def testBijectorArrayEventNdims(self):
bij = BrokenBijector()
with self.assertRaisesRegex(ValueError, "Expected scalar"):
bij.forward_log_det_jacobian(1., event_ndims=(1, 2))
with self.assertRaisesRegex(ValueError, "Expected scalar"):
bij.inverse_log_det_jacobian(1., event_ndims=(1, 2))
@test_util.run_deprecated_v1
def testBijectorDynamicEventNdims(self):
bij = BrokenBijector(validate_args=True)
event_ndims = array_ops.placeholder(dtype=np.int32, shape=None)
with self.cached_session():
with self.assertRaisesOpError("Expected scalar"):
bij.forward_log_det_jacobian(1., event_ndims=event_ndims).eval({
event_ndims: (1, 2)})
with self.assertRaisesOpError("Expected scalar"):
bij.inverse_log_det_jacobian(1., event_ndims=event_ndims).eval({
event_ndims: (1, 2)})
@six.add_metaclass(abc.ABCMeta)
class BijectorCachingTestBase(object):
@abc.abstractproperty
def broken_bijector_cls(self):
# return a BrokenBijector type Bijector, since this will test the caching.
raise IntentionallyMissingError("Not implemented")
def testCachingOfForwardResults(self):
broken_bijector = self.broken_bijector_cls(inverse_missing=True)
x = constant_op.constant(1.1)
# Call forward and forward_log_det_jacobian one-by-one (not together).
y = broken_bijector.forward(x)
_ = broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Now, everything should be cached if the argument is y.
broken_bijector.inverse(y)
broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.inverse_log_det_jacobian(y, event_ndims=1)
def testCachingOfInverseResults(self):
broken_bijector = self.broken_bijector_cls(forward_missing=True)
y = constant_op.constant(1.1)
# Call inverse and inverse_log_det_jacobian one-by-one (not together).
x = broken_bijector.inverse(y)
_ = broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Now, everything should be cached if the argument is x.
broken_bijector.forward(x)
broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.forward_log_det_jacobian(x, event_ndims=1)
class BijectorCachingTest(BijectorCachingTestBase, test.TestCase):
"""Test caching with BrokenBijector."""
@property
def broken_bijector_cls(self):
return BrokenBijector
class ExpOnlyJacobian(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super(ExpOnlyJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=False,
forward_min_event_ndims=forward_min_event_ndims,
name="exp")
def _inverse_log_det_jacobian(self, y):
return -math_ops.log(y)
def _forward_log_det_jacobian(self, x):
return math_ops.log(x)
class ConstantJacobian(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super(ConstantJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=forward_min_event_ndims,
name="c")
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(2., y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(-2., x.dtype)
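# The test class below exercises how the log-det-Jacobian is reduced over
# event dimensions: for event_ndims larger than forward_min_event_ndims the
# pointwise log-det is summed over the trailing event axes (hence the
# np.sum(..., axis=...) expectations), and for a constant-Jacobian bijector
# the scalar log-det is effectively multiplied by the number of reduced
# elements (-2 -> -4 -> -8 for the (1, 2, 2)-shaped input used below).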
class BijectorReduceEventDimsTest(test.TestCase):
"""Test caching with BrokenBijector."""
def testReduceEventNdimsForward(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
np.log(x),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(np.log(x), axis=-1),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(np.log(x), axis=(-1, -2)),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsForwardRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegex(ValueError, "must be larger than"):
bij.forward_log_det_jacobian(x, event_ndims=0)
def testReduceEventNdimsInverse(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
-np.log(x),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(-np.log(x), axis=-1),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(-np.log(x), axis=(-1, -2)),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegex(ValueError, "must be larger than"):
bij.inverse_log_det_jacobian(x, event_ndims=0)
def testReduceEventNdimsForwardConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
-2.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
-4.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-8.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
2.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
4.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
8.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
@test_util.run_deprecated_v1
def testHandlesNonStaticEventNdims(self):
x_ = [[[1., 2.], [3., 4.]]]
x = array_ops.placeholder_with_default(x_, shape=None)
event_ndims = array_ops.placeholder(dtype=np.int32, shape=[])
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
bij.inverse_log_det_jacobian(x, event_ndims=event_ndims)
with self.cached_session() as sess:
ildj = sess.run(bij.inverse_log_det_jacobian(x, event_ndims=event_ndims),
feed_dict={event_ndims: 1})
self.assertAllClose(-np.log(x_), ildj)
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
base class for interfaces to indexing engines for pootle
"""
import translate.lang.data
import os
__revision__ = "$Id: CommonIndexer.py 8507 2008-09-27 09:15:08Z dwaynebailey $"
def is_available():
"""check if this indexing engine interface is usable
this function must exist in every module that contains indexing engine
interfaces
@return: is this interface usable?
@rtype: bool
"""
return False
class CommonDatabase(object):
"""base class for indexing support
any real implementation must override most methods of this class
"""
field_analyzers = {}
"""mapping of field names and analyzers - see 'set_field_analyzers'"""
ANALYZER_EXACT = 0
"""exact matching: the query string must equal the whole term string"""
ANALYZER_PARTIAL = 1<<1
"""partial matching: a document matches, even if the query string only
matches the beginning of the term value."""
ANALYZER_TOKENIZE = 1<<2
"""tokenize terms and queries automatically"""
ANALYZER_DEFAULT = ANALYZER_TOKENIZE | ANALYZER_PARTIAL
"""the default analyzer to be used if nothing is configured"""
QUERY_TYPE = None
"""override this with the query class of the implementation"""
INDEX_DIRECTORY_NAME = None
"""override this with a string to be used as the name of the indexing
directory/file in the filesystem
"""
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""initialize or open an indexing database
Any derived class must override __init__.
Any implementation can rely on the "self.location" attribute to be set
by the __init__ function of the super class.
@raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different indexing engine)
@raise OSError: the database failed to initialize
@param basedir: the parent directory of the database
@type basedir: str
@param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this database. Leave it empty
to use the system default analyzer (self.ANALYZER_DEFAULT).
see self.ANALYZER_TOKENIZE, self.ANALYZER_PARTIAL, ...
@type analyzer: int
@param create_allowed: create the database, if necessary; default: True
@type create_allowed: bool
"""
# just do some checks
if self.QUERY_TYPE is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'QUERY_TYPE' is undefined")
if self.INDEX_DIRECTORY_NAME is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'INDEX_DIRECTORY_NAME' is undefined")
self.location = os.path.join(basedir, self.INDEX_DIRECTORY_NAME)
if (not create_allowed) and (not os.path.exists(self.location)):
raise OSError("Indexer: the database does not exist - and I am" \
+ " not configured to create it.")
if analyzer is None:
self.analyzer = self.ANALYZER_DEFAULT
else:
self.analyzer = analyzer
self.field_analyzers = {}
def flush(self, optimize=False):
"""flush the content of the database - to force changes to be written
to disk
some databases also support index optimization
@param optimize: should the index be optimized if possible?
@type optimize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'flush' is missing")
def make_query(self, args, require_all=True, analyzer=None):
"""create simple queries (strings or field searches) or
combine multiple queries (AND/OR)
        To specify rules for field searches, you may want to take a look at
        'set_field_analyzers'. The 'analyzer' parameter can override the
        previously defined default setting.
@param args: queries or search string or description of field query
examples::
[xapian.Query("foo"), xapian.Query("bar")]
xapian.Query("foo")
"bar"
{"foo": "bar", "foobar": "foo"}
@type args: list of queries | single query | str | dict
@param require_all: boolean operator
(True -> AND (default) / False -> OR)
@type require_all: boolean
@param analyzer: (only applicable for 'dict' or 'str')
Define query options (partial matching, exact matching, tokenizing,
...) as bitwise combinations of CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
@type analyzer: int
@return: the combined query
        @rtype: query type of the specific implementation
"""
# turn a dict into a list if necessary
if isinstance(args, dict):
args = args.items()
# turn 'args' into a list if necessary
if not isinstance(args, list):
args = [args]
# combine all given queries
result = []
for query in args:
# just add precompiled queries
if isinstance(query, self.QUERY_TYPE):
result.append(self._create_query_for_query(query))
# create field/value queries out of a tuple
elif isinstance(query, tuple):
field, value = query
# perform unicode normalization
field = translate.lang.data.normalize(unicode(field))
value = translate.lang.data.normalize(unicode(value))
                # check for the chosen match type
if analyzer is None:
analyzer = self.get_field_analyzers(field)
result.append(self._create_query_for_field(field, value,
analyzer=analyzer))
# parse plaintext queries
elif isinstance(query, basestring):
if analyzer is None:
analyzer = self.analyzer
# perform unicode normalization
query = translate.lang.data.normalize(unicode(query))
result.append(self._create_query_for_string(query,
require_all=require_all, analyzer=analyzer))
else:
# other types of queries are not supported
raise ValueError("Unable to handle query type: %s" \
% str(type(query)))
# return the combined query
return self._create_query_combined(result, require_all)
def _create_query_for_query(self, query):
"""generate a query based on an existing query object
basically this function should just create a copy of the original
@param query: the original query object
@type query: xapian.Query
@return: the resulting query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_query' is missing")
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""generate a query for a plain term of a string query
basically this function parses the string and returns the resulting
query
@param text: the query string
@type text: str
@param require_all: boolean operator
(True -> AND (default) / False -> OR)
@type require_all: bool
@param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
@type analyzer: int
@return: resulting query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_string' is missing")
def _create_query_for_field(self, field, value, analyzer=None):
"""generate a field query
this functions creates a field->value query
@param field: the fieldname to be used
@type field: str
@param value: the wanted value of the field
@type value: str
@param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
@type analyzer: int
@return: resulting query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_field' is missing")
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
@param queries: list of the original queries
@type queries: list of xapian.Query
@param require_all: boolean operator
(True -> AND (default) / False -> OR)
@type require_all: bool
@return: the resulting combined query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_combined' is missing")
def index_document(self, data):
"""add the given data to the database
@param data: the data to be indexed.
A dictionary will be treated as fieldname:value combinations.
If the fieldname is None then the value will be interpreted as a
plain term or as a list of plain terms.
Lists of terms are indexed separately.
Lists of strings are treated as plain terms.
@type data: dict | list of str
"""
doc = self._create_empty_document()
if isinstance(data, dict):
data = data.items()
# add all data
for dataset in data:
if isinstance(dataset, tuple):
# the dataset tuple consists of '(key, value)'
key, value = dataset
if key is None:
if isinstance(value, list):
terms = value[:]
elif isinstance(value, basestring):
terms = [value]
else:
raise ValueError("Invalid data type to be indexed: %s" \
                                % str(type(value)))
for one_term in terms:
self._add_plain_term(doc, self._decode(one_term),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
analyze_settings = self.get_field_analyzers(key)
# handle multiple terms
if not isinstance(value, list):
value = [value]
for one_term in value:
self._add_field_term(doc, key, self._decode(one_term),
(analyze_settings & self.ANALYZER_TOKENIZE > 0))
elif isinstance(dataset, basestring):
self._add_plain_term(doc, self._decode(dataset),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
raise ValueError("Invalid data type to be indexed: %s" \
                        % str(type(dataset)))
self._add_document_to_index(doc)
def _create_empty_document(self):
"""create an empty document to be filled and added to the index later
@return: the new document object
@rtype: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_empty_document' is missing")
def _add_plain_term(self, document, term, tokenize=True):
"""add a term to a document
@param document: the document to be changed
@type document: xapian.Document | PyLucene.Document
@param term: a single term to be added
@type term: str
@param tokenize: should the term be tokenized automatically
@type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_plain_term' is missing")
def _add_field_term(self, document, field, term, tokenize=True):
"""add a field term to a document
@param document: the document to be changed
@type document: xapian.Document | PyLucene.Document
@param field: name of the field
@type field: str
@param term: term to be associated to the field
@type term: str
@param tokenize: should the term be tokenized automatically
@type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_field_term' is missing")
def _add_document_to_index(self, document):
"""add a prepared document to the index database
@param document: the document to be added
@type document: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_document_to_index' is missing")
def begin_transaction(self):
"""begin a transaction
You can group multiple modifications of a database as a transaction.
        This prevents time-consuming intermediate flushes and ensures that a
        changeset is committed either completely or not at all.
No changes will be written to disk until 'commit_transaction'.
'cancel_transaction' can be used to revert an ongoing transaction.
Database types that do not support transactions may silently ignore it.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'begin_transaction' is missing")
def cancel_transaction(self):
"""cancel an ongoing transaction
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'cancel_transaction' is missing")
def commit_transaction(self):
"""submit the currently ongoing transaction and write changes to disk
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'commit_transaction' is missing")
def get_query_result(self, query):
"""return an object containing the results of a query
@param query: a pre-compiled query
@type query: a query object of the real implementation
@return: an object that allows access to the results
@rtype: subclass of CommonEnquire
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'get_query_result' is missing")
def delete_document_by_id(self, docid):
"""delete a specified document
@param docid: the document ID to be deleted
@type docid: int
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'delete_document_by_id' is missing")
def search(self, query, fieldnames):
"""return a list of the contents of specified fields for all matches of
a query
@param query: the query to be issued
@type query: a query object of the real implementation
@param fieldnames: the name(s) of a field of the document content
@type fieldnames: string | list of strings
@return: a list of dicts containing the specified field(s)
@rtype: list of dicts
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'search' is missing")
def delete_doc(self, ident):
"""delete the documents returned by a query
@param ident: [list of] document IDs | dict describing a query | query
@type ident: int | list of tuples | dict | list of dicts |
query (e.g. xapian.Query) | list of queries
"""
# turn a doc-ID into a list of doc-IDs
if isinstance(ident, list):
# it is already a list
ident_list = ident
else:
ident_list = [ident]
if len(ident_list) == 0:
# no matching items
return 0
if isinstance(ident_list[0], int):
# create a list of IDs of all successfully removed documents
success_delete = [match for match in ident_list
if self.delete_document_by_id(match)]
return len(success_delete)
if isinstance(ident_list[0], dict):
# something like: { "msgid": "foobar" }
# assemble all queries
query = self.make_query([self.make_query(query_dict,
require_all=True) for query_dict in ident_list],
require_all=True)
elif isinstance(ident_list[0], object):
# assume a query object (with 'AND')
query = self.make_query(ident_list, require_all=True)
else:
# invalid element type in list (not necessarily caught in the
# lines above)
raise TypeError("description of documents to-be-deleted is not " \
+ "supported: list of %s" % type(ident_list[0]))
# we successfully created a query - now iterate through the result
# no documents deleted so far ...
remove_list = []
# delete all resulting documents step by step
def add_docid_to_list(match):
"""collect every document ID"""
remove_list.append(match["docid"])
self._walk_matches(query, add_docid_to_list)
return self.delete_doc(remove_list)
def _walk_matches(self, query, function, arg_for_function=None):
"""use this function if you want to do something with every single match
of a query
example::
self._walk_matches(query, function_for_match, arg_for_func)
'function_for_match' expects only one argument: the matched object
@param query: a query object of the real implementation
@type query: xapian.Query | PyLucene.Query
@param function: the function to execute with every match
@type function: function
@param arg_for_function: an optional argument for the function
@type arg_for_function: anything
"""
# execute the query
enquire = self.get_query_result(query)
# start with the first element
start = 0
# do the loop at least once
size, avail = (0, 1)
# how many results per 'get_matches'?
steps = 2
while start < avail:
(size, avail, matches) = enquire.get_matches(start, steps)
for match in matches:
if arg_for_function is None:
function(match)
else:
function(match, arg_for_function)
start += size
def set_field_analyzers(self, field_analyzers):
"""set the analyzers for different fields of the database documents
All bitwise combinations of CommonIndexer.ANALYZER_??? are possible.
@param field_analyzers: mapping of field names and analyzers
@type field_analyzers: dict containing field names and analyzers
@raise TypeError: invalid values in 'field_analyzers'
"""
for field, analyzer in field_analyzers.items():
            # check for invalid input types
if not isinstance(field, (str, unicode)):
raise TypeError("field name must be a string")
if not isinstance(analyzer, int):
raise TypeError("the analyzer must be a whole number (int)")
# map the analyzer to the field name
self.field_analyzers[field] = analyzer
def get_field_analyzers(self, fieldnames=None):
"""return the analyzer that was mapped to a specific field
see 'set_field_analyzers' for details
@param fieldnames: the analyzer of this field (or all/multiple fields)
is requested; leave empty (or "None") to request all fields
@type fieldnames: str | list of str | None
@return: the analyzer setting of the field - see
CommonDatabase.ANALYZER_??? or a dict of field names and analyzers
@rtype: int | dict
"""
# all field analyzers are requested
if fieldnames is None:
# return a copy
return dict(self.field_analyzers)
# one field is requested
if isinstance(fieldnames, (str, unicode)):
            if fieldnames in self.field_analyzers:
return self.field_analyzers[fieldnames]
else:
return self.analyzer
# a list of fields is requested
if isinstance(fieldnames, list):
result = {}
for field in fieldnames:
result[field] = self.get_field_analyzers(field)
return result
return self.analyzer
def _decode(self, text):
"""decode the string from utf-8 or charmap
        perform unicode normalization
"""
if isinstance(text, str):
try:
result = unicode(text.decode("UTF-8"))
            except UnicodeDecodeError:
result = unicode(text.decode("charmap"))
elif not isinstance(text, unicode):
result = unicode(text)
else:
result = text
# perform unicode normalization
return translate.lang.data.normalize(result)
class CommonEnquire(object):
"""an enquire object contains the information about the result of a request
"""
def __init__(self, enquire):
"""intialization of a wrapper around enquires of different backends
@param enquire: a previous enquire
@type enquire: xapian.Enquire | pylucene-enquire
"""
self.enquire = enquire
def get_matches(self, start, number):
"""return a specified number of qualified matches of a previous query
@param start: index of the first match to return (starting from zero)
@type start: int
@param number: the number of matching entries to return
@type number: int
@return: a set of matching entries and some statistics
@rtype: tuple of (returned number, available number, matches)
"matches" is a dictionary of::
["rank", "percent", "document", "docid"]
"""
raise NotImplementedError("Incomplete indexing implementation: " \
+ "'get_matches' for the 'Enquire' class is missing")
def get_matches_count(self):
"""return the estimated number of matches
use "CommonIndexer.search" to retrieve the exact number of matches
        @return: the estimated number of matches
@rtype: int
"""
(returned, estimate_count, matches) = self.get_matches(0, 1)
return estimate_count
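# --- Illustrative sketch (not part of the original module) ------------------
# CommonDatabase/CommonEnquire only define the interface; a real backend
# (e.g. a Xapian- or PyLucene-based indexer) subclasses them and implements
# the abstract hooks. Assuming such a concrete subclass is available as
# 'SomeIndexer' (a hypothetical name used only for this sketch), the workflow
# documented in the docstrings above looks roughly like this:
def _example_indexer_usage(SomeIndexer, basedir):
    """index one document and query it back (sketch only)"""
    db = SomeIndexer(basedir, create_allowed=True)
    # partial matching for 'msgid', exact matching for 'origin'
    db.set_field_analyzers({
        "msgid": db.ANALYZER_PARTIAL | db.ANALYZER_TOKENIZE,
        "origin": db.ANALYZER_EXACT,
    })
    db.begin_transaction()
    # a None key marks plain terms; other keys are field:value pairs
    db.index_document({"msgid": "hello world", "origin": "example.po",
            None: ["plain", "terms"]})
    db.commit_transaction()
    # a dict is turned into field queries and combined with AND
    query = db.make_query({"msgid": "hello"}, require_all=True)
    return db.search(query, ["msgid", "origin"])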
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Triangular distribution class."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
class Triangular(distribution.AutoCompositeTensorDistribution):
r"""Triangular distribution with `low`, `high` and `peak` parameters.
#### Mathematical Details
The Triangular distribution is specified by two line segments in the plane,
such that:
* The first line segment starts at `(a, 0)` and ends at `(c, z)`.
* The second line segment starts at `(c, z)` and ends at `(b, 0)`.
```none
y
^
z | o (c,z)
| / \
| / \
| / \
| (a,0) / \ (b,0)
0 +------o---------o-------> x
0 a c b
```
where:
* a <= c <= b, a < b
* `low = a`,
* `high = b`,
* `peak = c`,
* `z = 2 / (b - a)`
The parameters `low`, `high` and `peak` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify a single Triangular distribution.
u1 = tfd.Triangular(low=3., high=4., peak=3.5)
u1.mean()
# ==> 3.5
# Specify two different Triangular distributions.
u2 = tfd.Triangular(low=[1., 2.], high=[3., 4.], peak=[2., 3.])
u2.mean()
# ==> [2., 3.]
# Specify three different Triangular distributions by leveraging broadcasting.
u3 = tfd.Triangular(low=3., high=[5., 6., 7.], peak=3.)
u3.mean()
# ==> [3.6666, 4., 4.3333]
```
"""
def __init__(self,
low=0.,
high=1.,
peak=0.5,
validate_args=False,
allow_nan_stats=True,
name='Triangular'):
"""Initialize a batch of Triangular distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
Default value: `0`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
Default value: `1`.
peak: Floating point tensor, mode of the output interval. Must have
`low <= peak` and `peak <= high`.
Default value: `0.5`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
Default value: `True`.
name: Python `str` name prefixed to Ops created by this class.
Default value: `'Triangular'`.
Raises:
InvalidArgumentError: if `validate_args=True` and one of the following is
True:
* `low >= high`.
* `peak > high`.
* `low > peak`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([low, high, peak], tf.float32)
self._low = tensor_util.convert_nonref_to_tensor(
low, name='low', dtype=dtype)
self._high = tensor_util.convert_nonref_to_tensor(
high, name='high', dtype=dtype)
self._peak = tensor_util.convert_nonref_to_tensor(
peak, name='peak', dtype=dtype)
super(Triangular, self).__init__(
dtype=self._low.dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
low=parameter_properties.ParameterProperties(),
# TODO(b/169874884): Support decoupled parameterization.
high=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED,),
# TODO(b/169874884): Support decoupled parameterization.
peak=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED,))
@property
def low(self):
"""Lower boundary of the interval."""
return self._low
@property
def high(self):
"""Upper boundary of the interval."""
return self._high
@property
def peak(self):
"""Peak of the distribution. Lies in the interval."""
return self._peak
def _pdf_at_peak(self):
"""Pdf evaluated at the peak."""
return (self.peak - self.low) / (self.high - self.low)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
seed = samplers.sanitize_seed(seed, salt='triangular')
shape = ps.concat([[n], self._batch_shape_tensor(
low=low, high=high, peak=peak)], axis=0)
samples = samplers.uniform(shape=shape, dtype=self.dtype, seed=seed)
# We use Inverse CDF sampling here. Because the CDF is a quadratic function,
# we must use sqrts here.
interval_length = high - low
return tf.where(
# Note the CDF on the left side of the peak is
# (x - low) ** 2 / ((high - low) * (peak - low)).
# If we plug in peak for x, we get that the CDF at the peak
# is (peak - low) / (high - low). Because of this we decide
# which part of the piecewise CDF we should use based on the cdf samples
# we drew.
samples < (peak - low) / interval_length,
# Inverse of (x - low) ** 2 / ((high - low) * (peak - low)).
low + tf.sqrt(samples * interval_length * (peak - low)),
# Inverse of 1 - (high - x) ** 2 / ((high - low) * (high - peak))
high - tf.sqrt((1. - samples) * interval_length * (high - peak)))
def _prob(self, x):
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
interval_length = high - low
    # This is the pdf when low <= x <= high. Inside that interval the pdf
    # looks like a triangle, so we have to treat each line segment separately.
result_inside_interval = tf.where(
(x >= low) & (x <= peak),
# Line segment from (low, 0) to (peak, 2 / (high - low)).
2. * (x - low) / (interval_length * (peak - low)),
# Line segment from (peak, 2 / (high - low)) to (high, 0).
2. * (high - x) / (interval_length * (high - peak)))
return tf.where((x < low) | (x > high),
tf.zeros_like(x),
result_inside_interval)
def _cdf(self, x):
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
interval_length = high - low
# Due to the PDF being not smooth at the peak, we have to treat each side
# somewhat differently. The PDF is two line segments, and thus we get
# quadratics here for the CDF.
result_inside_interval = tf.where(
(x >= low) & (x <= peak),
# (x - low) ** 2 / ((high - low) * (peak - low))
tf.math.squared_difference(x, low) / (interval_length * (peak - low)),
# 1 - (high - x) ** 2 / ((high - low) * (high - peak))
1. - tf.math.squared_difference(high, x) / (
interval_length * (high - peak)))
# We now add that the left tail is 0 and the right tail is 1.
result_if_not_big = tf.where(
x < low, tf.zeros_like(x), result_inside_interval)
return tf.where(x >= high, tf.ones_like(x), result_if_not_big)
def _entropy(self):
ans = 0.5 - np.log(2.) + tf.math.log(self.high - self.low)
return tf.broadcast_to(ans, self._batch_shape_tensor())
def _mean(self):
return (self.low + self.high + self.peak) / 3.
def _variance(self):
# ((high - low) ** 2 + (peak - low) ** 2 + (peak - high) ** 2) / 36
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
return (tf.math.squared_difference(high, low) +
tf.math.squared_difference(high, peak) +
tf.math.squared_difference(peak, low)) / 36.
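  # The support of the distribution is the interval (low, high); a Sigmoid
  # bijector constructed with the same bounds maps all of R onto that
  # interval, which is why it serves as the default event-space
  # (unconstraining) bijector below.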
def _default_event_space_bijector(self):
return sigmoid_bijector.Sigmoid(
low=self.low, high=self.high, validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
assertions = []
if (is_init != tensor_util.is_ref(self.low) and
is_init != tensor_util.is_ref(self.high)):
assertions.append(assert_util.assert_less(
low, high, message='triangular not defined when low >= high.'))
if (is_init != tensor_util.is_ref(self.low) and
is_init != tensor_util.is_ref(self.peak)):
assertions.append(
assert_util.assert_less_equal(
low, peak, message='triangular not defined when low > peak.'))
if (is_init != tensor_util.is_ref(self.high) and
is_init != tensor_util.is_ref(self.peak)):
assertions.append(
assert_util.assert_less_equal(
peak, high, message='triangular not defined when peak > high.'))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_greater_equal(
x, self.low, message='Sample must be greater than or equal to `low`.'))
assertions.append(assert_util.assert_less_equal(
x, self.high, message='Sample must be less than or equal to `high`.'))
return assertions
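# --- Illustrative sketch (not part of the original class) -------------------
# `_sample_n` above inverts the piecewise-quadratic CDF by hand. The identity
# it relies on can be checked with plain NumPy by pushing uniform samples
# through the inverse formulas and then through the CDF from `_cdf`:
def _check_triangular_inverse_cdf(low=1., high=5., peak=2., n=1000, seed=0):
  rng = np.random.RandomState(seed)
  u = rng.uniform(size=n)
  interval_length = high - low
  # Inverse CDF, mirroring the two branches used in _sample_n.
  x = np.where(
      u < (peak - low) / interval_length,
      low + np.sqrt(u * interval_length * (peak - low)),
      high - np.sqrt((1. - u) * interval_length * (high - peak)))
  # Piecewise CDF, mirroring _cdf (all samples fall inside [low, high]).
  cdf = np.where(
      x <= peak,
      (x - low) ** 2 / (interval_length * (peak - low)),
      1. - (high - x) ** 2 / (interval_length * (high - peak)))
  np.testing.assert_allclose(cdf, u, rtol=1e-6, atol=1e-12)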
|
|
"""
Oracle DataBase connection module
:maintainer: Vladimir Bormotov <bormotov@gmail.com>
:maturity: new
:depends: cx_Oracle
:platform: all
:configuration: module provides connections for multiple Oracle DB instances.
**OS Environment**
.. code-block:: yaml
ORACLE_HOME: path to oracle product
        PATH: Oracle Client libraries must be on the PATH
**pillar**
.. code-block:: yaml
oracle:
dbs:
<db>:
uri: connection credentials in format:
                user/password@host[:port]/sid[ servicename][ as {sysdba|sysoper}]
                the optional keyword servicename marks the identifier as a service_name rather than a sid
<db>:
uri: .....
"""
import logging
import os
from salt.utils.decorators import depends
log = logging.getLogger(__name__)
try:
import cx_Oracle
MODE = {"sysdba": cx_Oracle.SYSDBA, "sysoper": cx_Oracle.SYSOPER}
HAS_CX_ORACLE = True
except ImportError:
MODE = {"sysdba": 2, "sysoper": 4}
HAS_CX_ORACLE = False
__virtualname__ = "oracle"
def __virtual__():
"""
Load module only if cx_Oracle installed
"""
if HAS_CX_ORACLE:
return __virtualname__
return (
False,
"The oracle execution module not loaded: python oracle library not found.",
)
def _cx_oracle_req():
"""
Fallback function stub
"""
return 'Need "cx_Oracle" and Oracle Client installed for this function exist'
def _unicode_output(cursor, name, default_type, size, precision, scale):
"""
Return strings values as python unicode string
http://www.oracle.com/technetwork/articles/dsl/tuininga-cx-oracle-084866.html
"""
if default_type in (
cx_Oracle.STRING,
cx_Oracle.LONG_STRING,
cx_Oracle.FIXED_CHAR,
cx_Oracle.CLOB,
):
return cursor.var(str, size, cursor.arraysize)
def _connect(uri):
"""
uri = user/password@host[:port]/sid[ as {sysdba|sysoper}]
Return cx_Oracle.Connection instance
"""
    # cx_Oracle.Connection() does not support the 'as sysdba' syntax
uri_l = uri.rsplit(" as ", 1)
if len(uri_l) == 2:
credentials, mode = uri_l
mode = MODE[mode]
else:
credentials = uri_l[0]
mode = 0
serv_name = False
userpass, hostportsid = credentials.split("@")
user, password = userpass.split("/")
hostport, sid = hostportsid.split("/")
if "servicename" in sid:
serv_name = True
sid = sid.split("servicename")[0].strip()
hostport_l = hostport.split(":")
if len(hostport_l) == 2:
host, port = hostport_l
else:
host = hostport_l[0]
port = 1521
log.debug("connect: %s", (user, password, host, port, sid, mode))
# force UTF-8 client encoding
os.environ["NLS_LANG"] = ".AL32UTF8"
if serv_name:
conn = cx_Oracle.connect(
user, password, cx_Oracle.makedsn(host, port, service_name=sid), mode
)
else:
conn = cx_Oracle.connect(
user, password, cx_Oracle.makedsn(host, port, sid), mode
)
conn.outputtypehandler = _unicode_output
return conn
@depends("cx_Oracle", fallback_function=_cx_oracle_req)
def run_query(db, query):
"""
Run SQL query and return result
CLI Example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table"
"""
log.debug("run query on %s: %s", db, query)
conn = _connect(show_dbs(db)[db]["uri"])
return conn.cursor().execute(query).fetchall()
def show_dbs(*dbs):
"""
Show databases configuration from pillar. Filter by `*args`
CLI Example:
.. code-block:: bash
salt '*' oracle.show_dbs
salt '*' oracle.show_dbs my_db
"""
if dbs:
log.debug("get dbs from pillar: %s", dbs)
result = {}
for db in dbs:
result[db] = __salt__["pillar.get"]("oracle:dbs:" + db)
return result
else:
pillar_dbs = __salt__["pillar.get"]("oracle:dbs")
log.debug("get all (%s) dbs from pillar", len(pillar_dbs))
return pillar_dbs
@depends("cx_Oracle", fallback_function=_cx_oracle_req)
def version(*dbs):
"""
Server Version (select banner from v$version)
CLI Example:
.. code-block:: bash
salt '*' oracle.version
salt '*' oracle.version my_db
"""
pillar_dbs = __salt__["pillar.get"]("oracle:dbs")
get_version = lambda x: [
r[0] for r in run_query(x, "select banner from v$version order by banner")
]
result = {}
if dbs:
log.debug("get db versions for: %s", dbs)
for db in dbs:
if db in pillar_dbs:
result[db] = get_version(db)
else:
log.debug("get all (%s) dbs versions", len(dbs))
for db in dbs:
result[db] = get_version(db)
return result
@depends("cx_Oracle", fallback_function=_cx_oracle_req)
def client_version():
"""
Oracle Client Version
CLI Example:
.. code-block:: bash
salt '*' oracle.client_version
"""
return ".".join(str(x) for x in cx_Oracle.clientversion())
def show_pillar(item=None):
"""
Show Pillar segment oracle.* and subitem with notation "item:subitem"
CLI Example:
.. code-block:: bash
salt '*' oracle.show_pillar
salt '*' oracle.show_pillar dbs:my_db
"""
if item:
return __salt__["pillar.get"]("oracle:" + item)
else:
return __salt__["pillar.get"]("oracle")
def show_env():
"""
Show Environment used by Oracle Client
CLI Example:
.. code-block:: bash
salt '*' oracle.show_env
.. note::
on the first _connect() call, ``NLS_LANG`` will be forced to '.AL32UTF8'
"""
envs = ["PATH", "ORACLE_HOME", "TNS_ADMIN", "NLS_LANG"]
result = {}
for env in envs:
if env in os.environ:
result[env] = os.environ[env]
return result
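# Hedged illustration (not part of the original module): the URI forms that
# _connect() above accepts, based on its docstring and parsing logic. Hosts
# and credentials below are placeholders.
#
#   system/manager@db.example.com:1521/XE                -> connect by SID (port defaults to 1521 if omitted)
#   system/manager@db.example.com/ORCLPDB1 servicename   -> connect by service name
#   sys/secret@db.example.com:1521/XE as sysdba          -> privileged connection (MODE['sysdba'])
#
# A matching pillar entry using the service-name form could look like:
#
#   oracle:
#     dbs:
#       my_db:
#         uri: system/manager@db.example.com:1521/ORCLPDB1 servicename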
|
|
################################################################################
# Copyright (C) 2011-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
from bayespy.utils import misc
from .node import Node
from .deterministic import Deterministic
from .gaussian import Gaussian, GaussianMoments
from .gaussian import GaussianGammaISOMoments
class SumMultiply(Deterministic):
r"""
Node for computing general products and sums of Gaussian nodes.
The node is similar to `numpy.einsum`, which is a very general
function for computing dot products, sums, products and other sums
of products of arrays.
For instance, consider the following arrays:
>>> import numpy as np
>>> X = np.random.randn(2, 3, 4)
>>> Y = np.random.randn(3, 5)
>>> Z = np.random.randn(4, 2)
Then, the Einstein summation can be used as:
>>> np.einsum('abc,bd,ca->da', X, Y, Z)
array([[...]])
SumMultiply node can be used similarly for Gaussian nodes. For instance,
consider the following Gaussian nodes:
>>> from bayespy.nodes import GaussianARD
>>> X = GaussianARD(0, 1, shape=(2, 3, 4))
>>> Y = GaussianARD(0, 1, shape=(3, 5))
>>> Z = GaussianARD(0, 1, shape=(4, 2))
Then, similarly to `numpy.einsum`, SumMultiply could be used as:
>>> from bayespy.nodes import SumMultiply
>>> SumMultiply('abc,bd,ca->da', X, Y, Z)
<bayespy.inference.vmp.nodes.dot.SumMultiply object at 0x...>
or
>>> SumMultiply(X, [0,1,2], Y, [1,3], Z, [2,0], [3,0])
<bayespy.inference.vmp.nodes.dot.SumMultiply object at 0x...>
which is similar to the alternative syntax of numpy.einsum.
This node operates similarly to numpy.einsum. However, you must use all the
elements of each node, that is, an operation like np.einsum('ii->i',X) is
not allowed. Thus, each axis of each node must be given a unique id. The id
identifies which axes correspond to each other between the different
nodes. Also, Ellipsis ('...') is not yet supported, both for simplicity and
because it would be problematic with constant inputs (it is unclear how
their ndim would be determined).
Each output axis must appear in the input mappings.
The keys must refer to variable dimension axes only, not plate axes.
The input nodes may be Gaussian-gamma (isotropic) nodes.
The output message is Gaussian-gamma (isotropic) if any of the input nodes
is Gaussian-gamma.
Examples
--------
Sum over the rows:
'ij->j'
Inner product of three vectors:
'i,i,i'
Matrix-vector product:
'ij,j->i'
Matrix-matrix product:
'ik,kj->ij'
Outer product:
'i,j->ij'
Vector-matrix-vector product:
'i,ij,j'
Notes
-----
This operation can be extremely slow if not used wisely. For large and
complex operations, it is sometimes more efficient to split the operation
into multiple nodes. For instance, the example above could probably be
computed faster by
>>> XZ = SumMultiply(X, [0,1,2], Z, [2,0], [0,1])
>>> F = SumMultiply(XZ, [0,1], Y, [1,2], [2,0])
because the third axis ('c') could be summed out already in the first
operation. This same effect applies also to numpy.einsum in general.
"""
def __init__(self, *args, iterator_axis=None, **kwargs):
"""
SumMultiply(Node1, map1, Node2, map2, ..., NodeN, mapN [, map_out])
"""
args = list(args)
if len(args) < 2:
raise ValueError("Not enough inputs")
if iterator_axis is not None:
raise NotImplementedError("Iterator axis not implemented yet")
if iterator_axis is not None and not isinstance(iterator_axis, int):
raise ValueError("Iterator axis must be integer")
# Two different parsing methods, depending on how the arguments are given
if misc.is_string(args[0]):
# This is the format:
# SumMultiply('ik,k,kj->ij', X, Y, Z)
strings = args[0]
nodes = args[1:]
# Remove whitespace
strings = misc.remove_whitespace(strings)
# Split on '->' (should contain only one '->' or none)
strings = strings.split('->')
if len(strings) > 2:
raise ValueError('The string contains too many ->')
strings_in = strings[0]
if len(strings) == 2:
string_out = strings[1]
else:
string_out = ''
# Split former part on ',' (the number of parts should be equal to
# nodes)
strings_in = strings_in.split(',')
if len(strings_in) != len(nodes):
raise ValueError('Number of given input nodes is different '
'from the input keys in the string')
# Split strings into key lists using single character keys
keysets = [list(string_in) for string_in in strings_in]
keys_out = list(string_out)
else:
# This is the format:
# SumMultiply(X, [0,2], Y, [2], Z, [2,1], [0,1])
# If given, the output mapping is the last argument
if len(args) % 2 == 0:
keys_out = []
else:
keys_out = args.pop(-1)
# Node and axis mapping are given in turns
nodes = args[::2]
keysets = args[1::2]
# Find all the keys (store only once each)
full_keyset = []
for keyset in keysets:
full_keyset += keyset
#full_keyset += list(keyset.keys())
full_keyset = list(set(full_keyset))
# Input and output messages are Gaussian unless there is at least one
# Gaussian-gamma message from the parents
self._moments = GaussianMoments(len(keys_out))
self._parent_moments = [GaussianMoments(len(keyset))
for keyset in keysets]
self.gaussian_gamma = False
for n in range(len(nodes)):
try:
# Try to convert into Gaussian moments
nodes[n] = self._ensure_moments(nodes[n],
self._parent_moments[n])
except GaussianMoments.NoConverterError:
# Failure. It is probably a Gaussian-gamma node
self.gaussian_gamma = True
if self.gaussian_gamma:
self._moments = GaussianGammaISOMoments(len(keys_out))
self._parent_moments = [GaussianGammaISOMoments(len(keyset))
for keyset in keysets]
for n in range(len(nodes)):
# Convert into Gaussian-gamma nodes
nodes[n] = self._ensure_moments(nodes[n],
self._parent_moments[n])
#
# Check the validity of each node
#
for n in range(len(nodes)):
# Check that the maps and the size of the variable are consistent
if len(nodes[n].dims[0]) != len(keysets[n]):
raise ValueError("Wrong number of keys (%d) for the node "
"number %d with %d dimensions"
% (len(keysets[n]),
n,
len(nodes[n].dims[0])))
# Check that the keys are unique
if len(set(keysets[n])) != len(keysets[n]):
raise ValueError("Axis keys for node number %d are not unique"
% n)
# Check the validity of output keys: each output key must be included in
# the input keys
if len(keys_out) != len(set(keys_out)):
raise ValueError("Output keys are not unique")
for key in keys_out:
if key not in full_keyset:
raise ValueError("Output key %s does not appear in any input"
% key)
# Check the validity of the nodes with respect to the key mapping.
# Check that the node dimensions map and broadcast properly, that is,
# all the nodes using the same key for axes must have equal size for
# those axes (or size 1).
broadcasted_size = {}
for key in full_keyset:
broadcasted_size[key] = 1
for (node, keyset) in zip(nodes, keysets):
try:
# Find the axis for the key
index = keyset.index(key)
except ValueError:
# OK, this node doesn't use this key for any axis
pass
else:
# Length of the axis for that key
node_size = node.dims[0][index]
if node_size != broadcasted_size[key]:
if broadcasted_size[key] == 1:
# Apply broadcasting
broadcasted_size[key] = node_size
elif node_size != 1:
# Different sizes and neither has size 1
raise ValueError("Axes using key %s do not "
"broadcast properly"
% key)
# Compute the shape of the output
dim0 = [broadcasted_size[key] for key in keys_out]
dim1 = dim0 + dim0
dims = ( tuple(dim0), tuple(dim1) )
if self.gaussian_gamma:
dims = dims + ( (), () )
# Rename the keys to [0,1,...,N-1] where N is the total number of keys
self.N_keys = len(full_keyset)
self.out_keys = [full_keyset.index(key) for key in keys_out]
self.in_keys = [ [full_keyset.index(key) for key in keyset]
for keyset in keysets ]
super().__init__(*nodes,
dims=dims,
**kwargs)
def _compute_moments(self, *u_parents):
# Compute the number of plate axes for each node
plate_counts0 = [(np.ndim(u_parent[0]) - len(keys))
for (keys,u_parent) in zip(self.in_keys, u_parents)]
plate_counts1 = [(np.ndim(u_parent[1]) - 2*len(keys))
for (keys,u_parent) in zip(self.in_keys, u_parents)]
# The number of plate axes for the output
N0 = max(plate_counts0)
N1 = max(plate_counts1)
# The total number of unique keys used (keys are 0,1,...,N_keys-1)
D = self.N_keys
#
# Compute the mean
#
out_all_keys = list(range(D+N0-1, D-1, -1)) + self.out_keys
#nodes_dim_keys = self.nodes_dim_keys
in_all_keys = [list(range(D+plate_count-1, D-1, -1)) + keys
for (plate_count, keys) in zip(plate_counts0,
self.in_keys)]
u0 = [u[0] for u in u_parents]
args = misc.zipper_merge(u0, in_all_keys) + [out_all_keys]
x0 = np.einsum(*args)
#
# Compute the covariance
#
out_all_keys = (list(range(2*D+N1-1, 2*D-1, -1))
+ [D+key for key in self.out_keys]
+ self.out_keys)
in_all_keys = [list(range(2*D+plate_count-1, 2*D-1, -1))
+ [D+key for key in node_keys]
+ node_keys
for (plate_count, node_keys) in zip(plate_counts1,
self.in_keys)]
u1 = [u[1] for u in u_parents]
args = misc.zipper_merge(u1, in_all_keys) + [out_all_keys]
x1 = np.einsum(*args)
if not self.gaussian_gamma:
return [x0, x1]
# Compute Gaussian-gamma specific moments
x2 = 1
x3 = 0
for i in range(len(u_parents)):
x2 = x2 * u_parents[i][2]
x3 = x3 + u_parents[i][3]
return [x0, x1, x2, x3]
def get_parameters(self):
# Compute mean and variance
u = self.get_moments()
u[1] -= u[0]**2
return u
def _message_to_parent(self, index):
"""
Compute the message and mask to a parent node.
"""
# Check index
if index >= len(self.parents):
raise ValueError("Parent index larger than the number of parents")
# Get messages from other parents and children
u_parents = self._message_from_parents(exclude=index)
m = self._message_from_children()
mask = self.mask
# Normally we don't need to care about masks when computing the
# message. However, in this node we want to avoid computing huge message
# arrays so we sum some axes already here. Thus, we need to apply the
# mask.
#
# Actually, we don't need to care about masks because the message from
# children has already been masked.
parent = self.parents[index]
#
# Compute the first message
#
msg = [None, None]
# Compute the two messages
for ind in range(2):
# The total number of keys for the non-plate dimensions
N = (ind+1) * self.N_keys
parent_num_dims = len(parent.dims[ind])
parent_num_plates = len(parent.plates)
parent_plate_keys = list(range(N + parent_num_plates,
N,
-1))
parent_dim_keys = self.in_keys[index]
if ind == 1:
parent_dim_keys = ([key + self.N_keys
for key in self.in_keys[index]]
+ parent_dim_keys)
args = []
# This variable counts the maximum number of plates of the
# arguments, thus it will tell the number of plates in the result
# (if the artificially added plates above were ignored).
result_num_plates = 0
result_plates = ()
# Mask and its keys
mask_num_plates = np.ndim(mask)
mask_plates = np.shape(mask)
mask_plate_keys = list(range(N + mask_num_plates,
N,
-1))
result_num_plates = max(result_num_plates,
mask_num_plates)
result_plates = misc.broadcasted_shape(result_plates,
mask_plates)
# Moments and keys of other parents
for (k, u) in enumerate(u_parents):
if k != index:
num_dims = (ind+1) * len(self.in_keys[k])
num_plates = np.ndim(u[ind]) - num_dims
plates = np.shape(u[ind])[:num_plates]
plate_keys = list(range(N + num_plates,
N,
-1))
dim_keys = self.in_keys[k]
if ind == 1:
dim_keys = ([key + self.N_keys
for key in self.in_keys[k]]
+ dim_keys)
args.append(u[ind])
args.append(plate_keys + dim_keys)
result_num_plates = max(result_num_plates, num_plates)
result_plates = misc.broadcasted_shape(result_plates,
plates)
# Message and keys from children
child_num_dims = (ind+1) * len(self.out_keys)
child_num_plates = np.ndim(m[ind]) - child_num_dims
child_plates = np.shape(m[ind])[:child_num_plates]
child_plate_keys = list(range(N + child_num_plates,
N,
-1))
child_dim_keys = self.out_keys
if ind == 1:
child_dim_keys = ([key + self.N_keys
for key in self.out_keys]
+ child_dim_keys)
args.append(m[ind])
args.append(child_plate_keys + child_dim_keys)
result_num_plates = max(result_num_plates, child_num_plates)
result_plates = misc.broadcasted_shape(result_plates,
child_plates)
# Output keys, that is, the keys of the parent[index]
parent_keys = parent_plate_keys + parent_dim_keys
# Performance trick: Check which axes can be summed because they
# have length 1 or are non-existing in parent[index]. Thus, remove
# keys corresponding to unit length axes in parent[index] so that
# einsum sums over those axes. After computations, these axes must
# be added back in order to get the correct shape for the message.
# Also, remove axes/keys that are in output (parent[index]) but not in
# any inputs (children and other parents).
parent_shape = parent.get_shape(ind)
removed_axes = []
for j in range(len(parent_keys)):
if parent_shape[j] == 1:
# Remove the key (take into account the number of keys that
# have already been removed)
del parent_keys[j-len(removed_axes)]
removed_axes.append(j)
else:
# Remove the key if it doesn't appear in any of the
# messages from children or other parents.
if not np.any([parent_keys[j-len(removed_axes)] in keys
for keys in args[1::2]]):
del parent_keys[j-len(removed_axes)]
removed_axes.append(j)
args.append(parent_keys)
# THE BEEF: Compute the message
msg[ind] = np.einsum(*args)
# Find the correct shape for the message array
message_shape = list(np.shape(msg[ind]))
# First, add back the axes with length 1
for ax in removed_axes:
message_shape.insert(ax, 1)
# Second, remove leading axes for plates that were not present in
# the child nor other parents' messages. This is not really
# necessary, but it is just elegant to remove the leading unit
# length axes that we added artificially at the beginning just
# because we wanted the key mapping to be simple.
if parent_num_plates > result_num_plates:
del message_shape[:(parent_num_plates-result_num_plates)]
# Then, the actual reshaping
msg[ind] = np.reshape(msg[ind], message_shape)
# Broadcasting is not supported for variable dimensions, thus force
# explicit correct shape for variable dimensions
var_dims = parent.dims[ind]
msg[ind] = msg[ind] * np.ones(var_dims)
# Apply plate multiplier: If this node has non-unit plates that are
# unit plates in the parent, those plates are summed. However, if
# the message has unit axis for that plate, it should be first
# broadcasted to the plates of this node and then summed to the
# plates of the parent. In order to avoid this broadcasting and
# summing, it is more efficient to just multiply by the correct
# factor.
r = self.broadcasting_multiplier(self.plates,
result_plates,
parent.plates)
if r != 1:
msg[ind] *= r
if self.gaussian_gamma:
alphas = [u_parents[i][2]
for i in range(len(u_parents)) if i != index]
## logalphas = [u_parents[i][3]
## for i in range(len(u_parents)) if i != index]
m2 = self._compute_message(m[2], *alphas,
ndim=0,
plates_from=self.plates,
plates_to=parent.plates)
m3 = self._compute_message(m[3],#*logalphas,
ndim=0,
plates_from=self.plates,
plates_to=parent.plates)
msg = msg + [m2, m3]
return msg
def Dot(*args, **kwargs):
"""
Node for computing inner product of several Gaussian vectors.
This is a simple wrapper of the much more general SumMultiply. For now, it
is here for backward compatibility.
"""
einsum = 'i' + ',i'*(len(args)-1)
return SumMultiply(einsum, *args, **kwargs)
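# Hedged usage sketch (not part of the original module): a matrix-vector
# product of two Gaussian nodes, mirroring the 'ij,j->i' example from the
# SumMultiply docstring. The GaussianARD shapes are illustrative assumptions.
#
#   >>> from bayespy.nodes import GaussianARD, SumMultiply
#   >>> A = GaussianARD(0, 1, shape=(3, 4))
#   >>> x = GaussianARD(0, 1, shape=(4,))
#   >>> y = SumMultiply('ij,j->i', A, x)           # einsum-style syntax
#   >>> y = SumMultiply(A, [0, 1], x, [1], [0])    # equivalent list-of-keys syntax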
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
import os
import sys
import warnings
from distutils.errors import DistutilsError
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from pkg_resources import normalize_path
from pkg_resources import to_filename
from setuptools import Command
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.test import test
class mypy(Command):
user_options = []
def initialize_options(self):
"""Abstract method that is required to be overwritten"""
def finalize_options(self):
"""Abstract method that is required to be overwritten"""
def get_project_path(self):
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
project_path = normalize_path(ei_cmd.egg_base)
return os.path.join(project_path, to_filename(ei_cmd.egg_name))
def run(self):
import subprocess
args = ['mypy', self.get_project_path()]
result = subprocess.call(args)
if result != 0:
raise DistutilsError("mypy exited with status %d" % result)
def get_version():
global_names = {}
exec( # pylint: disable=exec-used
open(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'apache_beam/version.py')
).read(),
global_names
)
return global_names['__version__']
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = 'dev@beam.apache.org'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
warnings.warn(
"You are using version {0} of pip. " \
"However, version {1} is recommended.".format(
_PIP_VERSION, REQUIRED_PIP_VERSION
)
)
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
_CYTHON_VERSION = get_distribution('cython').version
if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
warnings.warn(
"You are using version {0} of cython. " \
"However, version {1} is recommended.".format(
_CYTHON_VERSION, REQUIRED_CYTHON_VERSION
)
)
except DistributionNotFound:
# do nothing if Cython is not installed
pass
try:
# pylint: disable=wrong-import-position
from Cython.Build import cythonize
except ImportError:
cythonize = lambda *args, **kwargs: []
REQUIRED_PACKAGES = [
# Avro 1.9.2 for python3 was broken. The issue was fixed in version 1.9.2.1
'avro-python3>=1.8.1,!=1.9.2,<1.10.0',
'crcmod>=1.7,<2.0',
# dataclasses backport for python_version<3.7. No version bound because this
# is Python standard since Python 3.7 and each Python version is compatible
# with a specific dataclasses version.
'dataclasses;python_version<"3.7"',
# Dill doesn't have forwards-compatibility guarantees within minor version.
# Pickles created with a new version of dill may not unpickle using older
# version of dill. It is best to use the same version of dill on client and
# server, therefore list of allowed versions is very narrow.
# See: https://github.com/uqfoundation/dill/issues/341.
'dill>=0.3.1.1,<0.3.2',
'fastavro>=0.21.4,<2',
'future>=0.18.2,<1.0.0',
'grpcio>=1.29.0,<2',
'hdfs>=2.1.0,<3.0.0',
'httplib2>=0.8,<0.20.0',
'numpy>=1.14.3,<1.21.0',
'pymongo>=3.8.0,<4.0.0',
'oauth2client>=2.0.1,<5',
'protobuf>=3.12.2,<4',
'pyarrow>=0.15.1,<4.0.0',
'pydot>=1.2.0,<2',
'python-dateutil>=2.8.0,<3',
'pytz>=2018.3',
'requests>=2.24.0,<3.0.0',
'typing-extensions>=3.7.0,<3.8.0',
]
# [BEAM-8181] pyarrow cannot be installed on 32-bit Windows platforms.
if sys.platform == 'win32' and sys.maxsize <= 2**32:
REQUIRED_PACKAGES = [
p for p in REQUIRED_PACKAGES if not p.startswith('pyarrow')
]
REQUIRED_TEST_PACKAGES = [
'freezegun>=0.3.12',
'mock>=1.0.1,<3.0.0',
'nose>=1.3.7',
'nose_xunitmp>=0.4.1',
'pandas>=1.0,<1.3.0',
'parameterized>=0.7.1,<0.8.0',
'pyhamcrest>=1.9,!=1.10.0,<2.0.0',
'pyyaml>=3.12,<6.0.0',
'requests_mock>=1.7,<2.0',
'tenacity>=5.0.2,<6.0',
'pytest>=4.4.0,<5.0',
'pytest-xdist>=1.29.0,<2',
'pytest-timeout>=1.3.3,<2',
'sqlalchemy>=1.3,<2.0',
'psycopg2-binary>=2.8.5,<3.0.0',
'testcontainers>=3.0.3,<4.0.0',
]
GCP_REQUIREMENTS = [
'cachetools>=3.1.0,<5',
'google-apitools>=0.5.31,<0.5.32',
'google-auth>=1.18.0,<2',
'google-cloud-datastore>=1.7.1,<2',
'google-cloud-pubsub>=0.39.0,<2',
# GCP packages required by tests
'google-cloud-bigquery>=1.6.0,<3',
'google-cloud-core>=0.28.1,<2',
'google-cloud-bigtable>=0.31.1,<2',
'google-cloud-spanner>=1.13.0,<2',
'grpcio-gcp>=0.2.2,<1',
# GCP Packages required by ML functionality
'google-cloud-dlp>=0.12.0,<2',
'google-cloud-language>=1.3.0,<2',
'google-cloud-videointelligence>=1.8.0,<2',
'google-cloud-vision>=0.38.0,<2',
# GCP Package required by Google Cloud Profiler.
'google-cloud-profiler>=3.0.4,<4'
]
INTERACTIVE_BEAM = [
'facets-overview>=1.0.0,<2',
'ipython>=5.8.0,<8',
'ipykernel>=5.2.0,<6',
# Skip version 6.1.13 due to
# https://github.com/jupyter/jupyter_client/issues/637
'jupyter-client>=6.1.11,<6.1.13',
'timeloop>=1.0.2,<2',
]
INTERACTIVE_BEAM_TEST = [
# notebook utils
'nbformat>=5.0.5,<6',
'nbconvert>=5.6.1,<6',
# headless chrome based integration tests
'selenium>=3.141.0,<4',
'needle>=0.5.0,<1',
'chromedriver-binary>=88,<89',
# use a fixed major version of PIL for different python versions
'pillow>=7.1.1,<8',
]
AWS_REQUIREMENTS = [
'boto3 >=1.9'
]
AZURE_REQUIREMENTS = [
'azure-storage-blob >=12.3.2',
'azure-core >=1.7.0',
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
try:
# See https://issues.apache.org/jira/browse/BEAM-2366
# pylint: disable=wrong-import-position
import gen_protos
class cmd(original_cmd, object):
def run(self):
gen_protos.generate_proto_files()
super(cmd, self).run()
return cmd
except ImportError:
warnings.warn("Could not import gen_protos, skipping proto generation.")
return original_cmd
python_requires = '>=3.6'
if sys.version_info.major == 3 and sys.version_info.minor >= 9:
warnings.warn(
'This version of Apache Beam has not been sufficiently tested on '
'Python %s.%s. You may encounter bugs or missing features.' % (
sys.version_info.major, sys.version_info.minor))
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
url=PACKAGE_URL,
download_url=PACKAGE_DOWNLOAD_URL,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_EMAIL,
packages=setuptools.find_packages(),
package_data={'apache_beam': [
'*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', '*/*.h', '*/*/*.h',
'testing/data/*.yaml', 'portability/api/*.yaml']},
ext_modules=cythonize([
# Make sure to use language_level=3 cython directive in files below.
'apache_beam/**/*.pyx',
'apache_beam/coders/coder_impl.py',
'apache_beam/metrics/cells.py',
'apache_beam/metrics/execution.py',
'apache_beam/runners/common.py',
'apache_beam/runners/worker/logger.py',
'apache_beam/runners/worker/opcounters.py',
'apache_beam/runners/worker/operations.py',
'apache_beam/transforms/cy_combiners.py',
'apache_beam/transforms/stats.py',
'apache_beam/utils/counters.py',
'apache_beam/utils/windowed_value.py',
]),
install_requires=REQUIRED_PACKAGES,
python_requires=python_requires,
test_suite='nose.collector',
# BEAM-8840: Do NOT use tests_require or setup_requires.
extras_require={
'docs': ['Sphinx>=1.5.2,<2.0'],
'test': REQUIRED_TEST_PACKAGES,
'gcp': GCP_REQUIREMENTS,
'interactive': INTERACTIVE_BEAM,
'interactive_test': INTERACTIVE_BEAM_TEST,
'aws': AWS_REQUIREMENTS,
'azure': AZURE_REQUIREMENTS
},
zip_safe=False,
# PyPI package information.
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# When updating version classifiers, also update version warnings
# above and in apache_beam/__init__.py.
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache License, Version 2.0',
keywords=PACKAGE_KEYWORDS,
entry_points={
'nose.plugins.0.10': [
'beam_test_plugin = test_config:BeamTestPlugin',
]},
cmdclass={
'build_py': generate_protos_first(build_py),
'develop': generate_protos_first(develop),
'egg_info': generate_protos_first(egg_info),
'test': generate_protos_first(test),
'mypy': generate_protos_first(mypy),
},
)
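# Hedged usage note (not part of the original file): with the cmdclass mapping
# above, the wrapped commands run proto generation first, e.g.
#
#   python setup.py build_py     # generates protos, then builds the package
#   python setup.py mypy         # type-checks the built project path
#
# and the optional dependency groups declared in extras_require install as,
# for example,
#
#   pip install apache-beam[gcp,interactive]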
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from case import Mock
from vine.abstract import Thenable
from kombu.exceptions import HttpError
from kombu.five import WhateverIO
from kombu.asynchronous import http
from kombu.asynchronous.aws.connection import (
AsyncHTTPSConnection,
AsyncHTTPResponse,
AsyncConnection,
AsyncAWSQueryConnection,
)
from kombu.asynchronous.aws.ext import boto3
from .case import AWSCase
from t.mocks import PromiseMock
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs # noqa
# Not currently working
VALIDATES_CERT = False
def passthrough(*args, **kwargs):
m = Mock(*args, **kwargs)
def side_effect(ret):
return ret
m.side_effect = side_effect
return m
class test_AsyncHTTPSConnection(AWSCase):
def test_http_client(self):
x = AsyncHTTPSConnection()
assert x.http_client is http.get_client()
client = Mock(name='http_client')
y = AsyncHTTPSConnection(http_client=client)
assert y.http_client is client
def test_args(self):
x = AsyncHTTPSConnection(
strict=True, timeout=33.3,
)
assert x.strict
assert x.timeout == 33.3
def test_request(self):
x = AsyncHTTPSConnection('aws.vandelay.com')
x.request('PUT', '/importer-exporter')
assert x.path == '/importer-exporter'
assert x.method == 'PUT'
def test_request_with_body_buffer(self):
x = AsyncHTTPSConnection('aws.vandelay.com')
body = Mock(name='body')
body.read.return_value = 'Vandelay Industries'
x.request('PUT', '/importer-exporter', body)
assert x.method == 'PUT'
assert x.path == '/importer-exporter'
assert x.body == 'Vandelay Industries'
body.read.assert_called_with()
def test_request_with_body_text(self):
x = AsyncHTTPSConnection('aws.vandelay.com')
x.request('PUT', '/importer-exporter', 'Vandelay Industries')
assert x.method == 'PUT'
assert x.path == '/importer-exporter'
assert x.body == 'Vandelay Industries'
def test_request_with_headers(self):
x = AsyncHTTPSConnection()
headers = {'Proxy': 'proxy.vandelay.com'}
x.request('PUT', '/importer-exporter', None, headers)
assert 'Proxy' in dict(x.headers)
assert dict(x.headers)['Proxy'] == 'proxy.vandelay.com'
def assert_request_created_with(self, url, conn):
conn.Request.assert_called_with(
url, method=conn.method,
headers=http.Headers(conn.headers), body=conn.body,
connect_timeout=conn.timeout, request_timeout=conn.timeout,
validate_cert=VALIDATES_CERT,
)
def test_getresponse(self):
client = Mock(name='client')
client.add_request = passthrough(name='client.add_request')
x = AsyncHTTPSConnection(http_client=client)
x.Response = Mock(name='x.Response')
request = x.getresponse()
x.http_client.add_request.assert_called_with(request)
assert isinstance(request, Thenable)
assert isinstance(request.on_ready, Thenable)
response = Mock(name='Response')
request.on_ready(response)
x.Response.assert_called_with(response)
def test_getresponse__real_response(self):
client = Mock(name='client')
client.add_request = passthrough(name='client.add_request')
callback = PromiseMock(name='callback')
x = AsyncHTTPSConnection(http_client=client)
request = x.getresponse(callback)
x.http_client.add_request.assert_called_with(request)
buf = WhateverIO()
buf.write('The quick brown fox jumps')
headers = http.Headers({'X-Foo': 'Hello', 'X-Bar': 'World'})
response = http.Response(request, 200, headers, buf)
request.on_ready(response)
callback.assert_called()
wresponse = callback.call_args[0][0]
assert wresponse.read() == 'The quick brown fox jumps'
assert wresponse.status == 200
assert wresponse.getheader('X-Foo') == 'Hello'
headers_dict = wresponse.getheaders()
assert dict(headers_dict) == headers
assert wresponse.msg
assert repr(wresponse)
def test_repr(self):
assert repr(AsyncHTTPSConnection())
def test_putrequest(self):
x = AsyncHTTPSConnection()
x.putrequest('UPLOAD', '/new')
assert x.method == 'UPLOAD'
assert x.path == '/new'
def test_putheader(self):
x = AsyncHTTPSConnection()
x.putheader('X-Foo', 'bar')
assert x.headers == [('X-Foo', 'bar')]
x.putheader('X-Bar', 'baz')
assert x.headers == [
('X-Foo', 'bar'),
('X-Bar', 'baz'),
]
def test_send(self):
x = AsyncHTTPSConnection()
x.send('foo')
assert x.body == 'foo'
x.send('bar')
assert x.body == 'foobar'
def test_interface(self):
x = AsyncHTTPSConnection()
assert x.set_debuglevel(3) is None
assert x.connect() is None
assert x.close() is None
assert x.endheaders() is None
class test_AsyncHTTPResponse(AWSCase):
def test_with_error(self):
r = Mock(name='response')
r.error = HttpError(404, 'NotFound')
x = AsyncHTTPResponse(r)
assert x.reason == 'NotFound'
r.error = None
assert not x.reason
class test_AsyncConnection(AWSCase):
def test_client(self):
sqs = Mock(name='sqs')
x = AsyncConnection(sqs)
assert x._httpclient is http.get_client()
client = Mock(name='client')
y = AsyncConnection(sqs, http_client=client)
assert y._httpclient is client
def test_get_http_connection(self):
sqs = Mock(name='sqs')
x = AsyncConnection(sqs)
assert isinstance(
x.get_http_connection(),
AsyncHTTPSConnection,
)
conn = x.get_http_connection()
assert conn.http_client is x._httpclient
class test_AsyncAWSQueryConnection(AWSCase):
def setup(self):
session = boto3.session.Session(
aws_access_key_id='AAA',
aws_secret_access_key='AAAA',
region_name='us-west-2',
)
sqs_client = session.client('sqs')
self.x = AsyncAWSQueryConnection(sqs_client,
http_client=Mock(name='client'))
def test_make_request(self):
_mexe, self.x._mexe = self.x._mexe, Mock(name='_mexe')
Conn = self.x.get_http_connection = Mock(name='get_http_connection')
callback = PromiseMock(name='callback')
self.x.make_request(
'action', {'foo': 1}, 'https://foo.com/', 'GET', callback=callback,
)
self.x._mexe.assert_called()
request = self.x._mexe.call_args[0][0]
parsed = urlparse(request.url)
params = parse_qs(parsed.query)
assert params['Action'][0] == 'action'
ret = _mexe(request, callback=callback)
assert ret is callback
Conn.return_value.request.assert_called()
Conn.return_value.getresponse.assert_called_with(
callback=callback,
)
def test_make_request__no_action(self):
self.x._mexe = Mock(name='_mexe')
self.x.get_http_connection = Mock(name='get_http_connection')
callback = PromiseMock(name='callback')
self.x.make_request(
None, {'foo': 1}, 'http://foo.com/', 'GET', callback=callback,
)
self.x._mexe.assert_called()
request = self.x._mexe.call_args[0][0]
parsed = urlparse(request.url)
params = parse_qs(parsed.query)
assert 'Action' not in params
def Response(self, status, body):
r = Mock(name='response')
r.status = status
r.read.return_value = body
return r
@contextmanager
def mock_make_request(self):
self.x.make_request = Mock(name='make_request')
callback = PromiseMock(name='callback')
yield callback
def assert_make_request_called(self):
self.x.make_request.assert_called()
return self.x.make_request.call_args[1]['callback']
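# Hedged recap (not part of the test module) of the request flow the tests
# above exercise; `client`, `callback` and `http_response` stand for the mocks
# used in those tests.
#
#   conn = AsyncHTTPSConnection('aws.vandelay.com', http_client=client)
#   conn.request('PUT', '/importer-exporter', 'Vandelay Industries')
#   promise = conn.getresponse(callback)    # Thenable handed to client.add_request()
#   promise.on_ready(http_response)         # wraps the response and fires `callback`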
|
|
# jsb.plugs.common/forward.py
#
#
""" forward incoming trafic on a bot to another bot through xmpp. """
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.callbacks import callbacks, remote_callbacks, last_callbacks, first_callbacks
from jsb.lib.eventbase import EventBase
from jsb.lib.persist import PlugPersist
from jsb.utils.lazydict import LazyDict
from jsb.lib.examples import examples
from jsb.lib.fleet import getfleet
from jsb.lib.container import Container
from jsb.lib.errors import NoProperDigest
from jsb.utils.exception import handle_exception
from jsb.utils.locking import locked
from jsb.utils.generic import strippedtxt, stripcolor
## jsb.plugs.common imports
from jsb.plugs.common.twitter import postmsg
## xmpp import
from jsb.contrib.xmlstream import NodeBuilder, XMLescape, XMLunescape
## basic imports
import logging
import copy
import time
import types
import hmac
import hashlib
## defines
forward = PlugPersist("forward-core")
if not forward.data.allowin:
forward.data.allowin = []
if not forward.data.channels:
forward.data.channels = {}
if not forward.data.outs:
forward.data.outs = {}
if not forward.data.whitelist:
forward.data.whitelist = {}
cpy = copy.deepcopy
## forward precondition
def forwardoutpre(bot, event):
""" preconditon to check if forward callbacks is to be fired. """
if event.how == "background": return False
chan = unicode(event.channel).lower()
if not chan: return
logging.debug("forward - pre - %s" % event.channel)
if chan in forward.data.channels and not event.isremote() and not event.forwarded:
if event.how != u"background": return True
return False
## forward callbacks
def forwardoutcb(bot, event):
""" forward callback. """
e = cpy(event)
logging.debug("forward - cbtype is %s - %s" % (event.cbtype, event.how))
e.forwarded = True
e.source = bot.jid
e.botname = bot.server or bot.name
if not event.chan: event.bind(bot)
if event.chan: e.allowwatch = event.chan.data.allowwatch
fleet = getfleet()
for jid in forward.data.channels[event.channel.lower()]:
logging.info("forward - sending to %s" % jid)
if jid == "twitter":
try: postmsg(forward.data.outs[jid], e.txt)
except Exception, ex: handle_exception()
continue
outbot = fleet.getfirstjabber(bot.isgae)
if not outbot and bot.isgae: outbot = fleet.makebot('xmpp', 'forwardbot')
if outbot:
e.source = outbot.jid
txt = outbot.normalize(e.tojson())
txt = stripcolor(txt)
#txt = e.tojson()
container = Container(outbot.jid, txt)
outbot.outnocb(jid, container.tojson())
else: logging.info("forward - no xmpp bot found in fleet".upper())
first_callbacks.add('BLIP_SUBMITTED', forwardoutcb, forwardoutpre)
first_callbacks.add('MESSAGE', forwardoutcb, forwardoutpre)
#first_callbacks.add('PRESENCE', forwardoutcb, forwardoutpre)
first_callbacks.add('PRIVMSG', forwardoutcb, forwardoutpre)
first_callbacks.add('JOIN', forwardoutcb, forwardoutpre)
first_callbacks.add('PART', forwardoutcb, forwardoutpre)
first_callbacks.add('QUIT', forwardoutcb, forwardoutpre)
first_callbacks.add('NICK', forwardoutcb, forwardoutpre)
first_callbacks.add('CONSOLE', forwardoutcb, forwardoutpre)
first_callbacks.add('WEB', forwardoutcb, forwardoutpre)
first_callbacks.add('DISPATCH', forwardoutcb, forwardoutpre)
first_callbacks.add('OUTPUT', forwardoutcb, forwardoutpre)
## forward-add command
def handle_forwardadd(bot, event):
""" add a new forward. """
if not event.rest:
event.missing('<JID>')
return
if "@" in event.rest:
forward.data.outs[event.rest] = event.user.data.name
forward.save()
if not event.rest in event.chan.data.forwards: event.chan.data.forwards.append(event.rest)
if event.rest:
event.chan.save()
event.done()
cmnds.add("forward-add", handle_forwardadd, 'OPER')
examples.add("forward-add" , "add an JID to forward to", "forward-add jsoncloud@appspot.com")
## forward-del command
def handle_forwarddel(bot, event):
""" delete a forward. """
if not event.rest:
event.missing('<JID>')
return
try: del forward.data.outs[event.rest]
except KeyError: event.reply("no forward out called %s" % event.rest) ; return
forward.save()
if event.rest in event.chan.data.forwards: event.chan.data.forwards.remove(event.rest) ; event.chan.save()
event.done()
cmnds.add("forward-del", handle_forwarddel, 'OPER')
examples.add("forward-del" , "delete an JID to forward to", "forward-del jsoncloud@appspot.com")
## forward-allow command
def handle_forwardallow(bot, event):
""" allow a remote bot to forward to us. """
if not event.rest:
event.missing("<JID>")
return
if forward.data.whitelist.has_key(event.rest):
forward.data.whitelist[event.rest] = bot.type
forward.save()
event.done()
cmnds.add("forward-allow", handle_forwardallow, 'OPER')
examples.add("forward-allow" , "allow an JID to forward to us", "forward-allow jsoncloud@appspot.com")
## forward-list command
def handle_forwardlist(bot, event):
""" list forwards. """
try: event.reply("forwards for %s: " % event.channel, forward.data.channels[event.channel])
except KeyError: event.reply("no forwards for %s" % event.channel)
cmnds.add("forward-list", handle_forwardlist, 'OPER')
examples.add("forward-list" , "list all forwards of a channel", "forward-list")
## forward command
def handle_forward(bot, event):
""" forward the channel tot another bot. """
if not event.args:
event.missing("<JID>")
return
forward.data.channels[event.channel.lower()] = event.args
for jid in event.args:
forward.data.outs[jid] = event.user.data.name
if not jid in event.chan.data.forwards: event.chan.data.forwards = event.args
if event.args: event.chan.save()
forward.save()
event.done()
cmnds.add("forward", handle_forward, 'OPER')
examples.add("forward" , "forward a channel to provided JIDS", "forward jsoncloud@appspot.com")
## forward-stop command
def handle_forwardstop(bot, event):
""" stop forwarding the channel to another bot. """
if not event.args:
event.missing("<JID>")
return
try:
for jid in event.args:
try:
forward.data.channels[event.channel].remove(jid)
del forward.data.outs[jid]
if jid in event.chan.data.forwards: event.chan.data.forwards.remove(jid)
except ValueError: pass
forward.save()
event.done()
except KeyError, ex: event.reply("we are not forwarding %s" % str(ex))
cmnds.add("forward-stop", handle_forwardstop, 'OPER')
examples.add("forward-stop" , "stop forwarding a channel to provided JIDS", "forward-stop jsoncloud@appspot.com")
|
|
# Author:
# Rudiger Birkner (Networked Systems Group ETH Zurich)
from ss_lib import vmac_next_hop_mask, vmac_participant_match, vmac_participant_mask, vmac_next_hop_match, vmac_part_port_match
# PRIORITIES (Values can be in [0,65535], 0 is miss)
FLOW_MISS_PRIORITY = 0
# outbound switch priorities
OUTBOUND_HIT_PRIORITY = 2
# inbound switch priorities
INBOUND_HIT_PRIORITY = 4
INBOUND_MISS_PRIORITY = 1
# create new outbound rules in response to superset changes
def update_outbound_rules(sdx_msgs, policies, ss_instance, my_mac):
supersets = ss_instance.supersets
rules = []
if 'outbound' not in policies:
return rules
outbound = policies['outbound']
# map each participant to a list of our policies which forward to them
part_2_policy = {}
# build this mapping
for policy in outbound:
if "fwd" in policy["action"]:
part = int(policy["action"]["fwd"])
if part not in part_2_policy:
part_2_policy[part] = []
part_2_policy[part].append(policy)
updates = sdx_msgs["changes"]
for update in updates:
part = int(update["participant_id"])
superset_id = int(update["superset"])
bit_position = int(update["position"])
# if we have no rules regarding this participant, skip
if part not in part_2_policy:
continue
# for all policies involving this participant
for policy in part_2_policy[part]:
# vmac and mask which check if part is reachable
vmac = vmac_participant_match(superset_id, bit_position, ss_instance)
vmac_bitmask = vmac_participant_mask(bit_position, ss_instance)
# the vmac which will be written on a policy match
next_hop_mac = vmac_next_hop_match(part, ss_instance, inbound_bit = True)
match_args = policy["match"]
match_args["eth_dst"] = (vmac, vmac_bitmask)
match_args["eth_src"] = my_mac
actions = {"set_eth_dst":next_hop_mac, "fwd":["inbound"]}
rule = {"rule_type":"outbound", "priority":OUTBOUND_HIT_PRIORITY,
"match":match_args , "action":actions, "mod_type":"insert",
"cookie":(policy["cookie"],2**16-1)}
rules.append(rule)
return rules
def build_outbound_rules_for(out_policies, ss_instance, my_mac):
"Given a subset of outbound policies, return all the resulting rules."
rules = []
part_2_superset = {}
for ss_id, superset in enumerate(ss_instance.supersets):
for part_index, part in enumerate(superset):
if part not in part_2_superset:
part_2_superset[part] = []
part_2_superset[part].append((ss_id, part_index))
for policy in out_policies:
if "fwd" not in policy["action"]:
continue
part = policy["action"]["fwd"]
for ss_id, part_index in part_2_superset[part]:
vmac = vmac_participant_match(ss_id,
part_index, ss_instance)
vmac_bitmask = vmac_participant_mask(part_index, ss_instance)
match_args = policy["match"]
match_args["eth_dst"] = (vmac, vmac_bitmask)
match_args["eth_src"] = my_mac
actions = {"set_eth_dst":next_hop_mac, "fwd":["inbound"]}
rule = {"rule_type":"outbound", "priority":OUTBOUND_HIT_PRIORITY,
"match":match_args , "action":actions, "mod_type":"insert",
"cookie":(policy["cookie"],2**16-1)}
rules.append(rule)
return rules
def build_inbound_rules_for(participant_id, in_policies, ss_instance, final_switch):
"Given a subset of inbound policies, return all the resulting rules."
rules = []
for policy in in_policies:
match_args = policy["match"]
actions = {}
# match on the next-hop
vmac_bitmask = vmac_next_hop_mask(ss_instance)
vmac = vmac_next_hop_match(participant_id, ss_instance)
match_args["eth_dst"] = (vmac, vmac_bitmask)
if "fwd" in policy["action"]:
port_num = policy["action"]["fwd"]
port_num = policy["action"]["fwd"]
new_vmac = vmac_part_port_match(participant_id, port_num, ss_instance)
actions = {"set_eth_dst":new_vmac, "fwd":[final_switch]}
rule = {"rule_type":"inbound", "priority":INBOUND_HIT_PRIORITY,
"match":match_args, "action":actions, "mod_type":"insert",
"cookie":(policy["cookie"],2**16-1)}
rules.append(rule)
# Build rule for dropping traffic
if "drop" in policy["action"]:
rule = {"rule_type":"inbound", "priority":INBOUND_HIT_PRIORITY+1,
"match":match_args, "action":actions, "mod_type":"insert",
"cookie":(policy["cookie"],2**16-1)}
rules.append(rule)
return rules
# initialize all inbound rules
def init_inbound_rules(participant_id, policies, ss_instance, final_switch):
dp_msgs = {"type": "new",
"changes": []}
# do we even have inbound policies?
if ('inbound' not in policies):
return {}
else:
in_policies = policies['inbound']
rules = build_inbound_rules_for(participant_id, in_policies,
ss_instance, final_switch)
dp_msgs["changes"] = rules
return dp_msgs
# initialize all outbound rules
def init_outbound_rules(pctrl, participant_id, policies, ss_instance, final_switch):
dp_msgs = {"type": "new", "changes": []}
if ('outbound' not in policies):
return {}
else:
sdx_msgs = ss_instance.initial_computation(pctrl)
if len(sdx_msgs['changes']) > 0:
flow_msgs = update_outbound_rules(sdx_msgs, policies,
ss_instance, pctrl.port0_mac)
dp_msgs["changes"] = flow_msgs
return dp_msgs
def msg_clear_all_outbound(policies, port0_mac):
"Construct and return a flow mod which removes all our outbound rules"
mods = []
if 'outbound' not in policies:
return mods
# compile all cookies used by our policies
cookies = []
for policy in policies['outbound']:
cookies.append(policy['cookie'])
match_args = {"eth_src":port0_mac}
for cookie in cookies:
mod = {"rule_type":"outbound", "priority":0,
"match":match_args , "action":{},
"cookie":(cookie, 2**16-1), "mod_type":"remove"}
mods.append(mod)
return mods
def ss_process_policy_change(supersets, add_policies, remove_policies, policies, port_count, port0_mac):
"Process the changes in participants' policies"
return 0
# TODO: Implement the logic of dynamically changing participants' outbound and inbound policy
# Partially done. Need to handle expansion of active set
# has the set of active participants expanded?
#-old_rulecounts = supersets.recompute_rulecounts(self.policies)
#-new_rulecounts = supersets.recompute_rulecounts(complete_policies)
#-new_active = set(new_rulecounts.keys())
# new_parts will contain all participants that now appear that did not appear previously
#-new_parts = new_active.difference(old_rulecounts.keys())
port_count = len(self.participant_2_portmac[self.id])
# we remove rules first, because the supersets might change when adding rules
removal_rules = []
if 'outbound' in remove_policies:
removal_out = build_outbound_rules_for(remove_policies['outbound'],
self.supersets, self.port0_mac)
removal_rules.extend(removal_out)
if 'inbound' in remove_policies:
removal_in = build_inbound_rules_for(self.id, remove_policies['outbound'],
self.supersets, port_count)
removal_rules.extend(removal_in)
# set the mod type of these rules to make them deletions, not additions
for rule in removal_rules:
rule['mod_type'] = "remove"
self.dp_queued.extend(removal_rules)
addition_rules = []
if 'outbound' in add_policies:
addition_out = build_outbound_rules_for(add_policies['outbound'],
self.supersets, self.port0_mac)
addition_rules.extend(addition_out)
if 'inbound' in add_policies:
addition_in = build_inbound_rules_for(self.id, add_policies['outbound'],
self.supersets, port_count)
addition_rules.extend(addition_in)
return 0
def ss_process_policy_change_dev(self, remove_policies):
# TODO: Implement the logic of dynamically changing participants' outbound and inbound policy
# Partially done. Need to handle expansion of active set
# has the set of active participants expanded?
#-old_rulecounts = supersets.recompute_rulecounts(self.policies)
#-new_rulecounts = supersets.recompute_rulecounts(complete_policies)
#-new_active = set(new_rulecounts.keys())
# new_parts will contain all participants that now appear that did not appear previously
#-new_parts = new_active.difference(old_rulecounts.keys())
port_count = len(self.participant_2_portmac[self.id])
self.logger.debug("Policy change port count: %s" %port_count)
# we remove rules first, because the supersets might change when adding rules
removal_rules = []
if 'outbound' in remove_policies:
removal_out = build_outbound_rules_for(remove_policies['outbound'],
self.supersets, self.port0_mac)
removal_rules.extend(removal_out)
if 'inbound' in remove_policies:
removal_in = build_inbound_rules_for(self.id, remove_policies['outbound'],
self.supersets, port_count)
removal_rules.extend(removal_in)
# set the mod type of these rules to make them deletions, not additions
for rule in removal_rules:
rule['mod_type'] = "remove"
self.dp_queued.extend(removal_rules)
'''
addition_rules = []
if 'outbound' in add_policies:
addition_out = build_outbound_rules_for(add_policies['outbound'],
self.supersets, self.port0_mac)
addition_rules.extend(removal_out)
if 'inbound' in add_policies:
addition_in = build_inbound_rules_for(self.id, add_policies['outbound'],
self.supersets, port_count)
addition_rules.extend(addition_in)
'''
return 0
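# Hedged illustration (not part of the original module): the shape of a
# flow-mod dict produced by update_outbound_rules() above; concrete values
# are placeholders.
#
#   {
#       "rule_type": "outbound",
#       "priority": OUTBOUND_HIT_PRIORITY,
#       "match": {
#           "eth_dst": (vmac, vmac_bitmask),   # reachability check for the peer
#           "eth_src": my_mac,                 # traffic sent by this participant
#           # ... plus the participant's own fields from policy["match"]
#       },
#       "action": {"set_eth_dst": next_hop_mac, "fwd": ["inbound"]},
#       "mod_type": "insert",
#       "cookie": (policy_cookie, 2**16 - 1),
#   }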
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for problem/dataset definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
# Dependency imports
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import data_reader
from tensor2tensor.utils import metrics
import tensorflow as tf
class DatasetSplit(object):
TRAIN = tf.estimator.ModeKeys.TRAIN
EVAL = tf.estimator.ModeKeys.EVAL
TEST = "test"
class SpaceID(object):
"""Input and target space ids. Add more as needed."""
# Generic / unknown output space (default)
GENERIC = 0
# Image labels
IMAGE_LABEL = 1
# English characters
EN_CHR = 2
# English tokens
EN_TOK = 3
# English bpe tokens
EN_BPE_TOK = 4
# French characters
FR_CHR = 5
# French tokens
FR_TOK = 6
# German characters
DE_CHR = 7
# German tokens
DE_TOK = 8
# German bpe tokens
DE_BPE_TOK = 9
# Digit cipher lexicon 0
DIGIT_0 = 10
# Digit cipher lexicon 1
DIGIT_1 = 11
# Audio waveform domain
AUDIO_WAV = 12
# Audio spectral domain
AUDIO_SPECTRAL = 13
# Parse characters
PARSE_CHR = 14
# Parse tokens
PARSE_TOK = 15
# Chinese tokens
ZH_TOK = 16
# Icelandic characters
ICE_CHAR = 17
# Icelandic tokens
ICE_TOK = 18
# Icelandic parse tokens
ICE_PARSE_TOK = 19
# Macedonian tokens
MK_TOK = 20
# Czech tokens
CS_TOK = 21
# Czech characters
CS_CHR = 22
# Genetic bases (ACTG)
DNA = 23
# Real numbers
REAL = 24
# Images
IMAGE = 25
# Peptide
PEPTIDE = 26
# Python
PY_TOK = 27
# C++
CPP_TOK = 28
# Strokes
STROKES = 29
# Pickled Python
PICKLED_PYTHON = 30
def default_model_hparams():
return tf.contrib.training.HParams(
max_input_seq_length=0,
max_target_seq_length=0,
prepend_mode="none",
split_to_length=0,
data_dir=None)
def preprocess_example_common(example, hparams, mode):
"""Preprocessing steps common to all models."""
if hparams.max_input_seq_length > 0:
example["inputs"] = example["inputs"][:hparams.max_input_seq_length]
if hparams.max_target_seq_length > 0:
example["targets"] = example["targets"][:hparams.max_target_seq_length]
if hparams.prepend_mode != "none":
if mode == tf.estimator.ModeKeys.PREDICT:
example["partial_targets"] = tf.concat([example["inputs"], [0]], 0)
else:
example["targets"] = tf.concat(
[example["inputs"], [0], example["targets"]], 0)
if hparams.split_to_length:
example["targets"] = tf.reshape(example["targets"],
[-1, hparams.split_to_length, 1, 1])
if len(example) != 1:
raise ValueError("split_to_length only works for LM problems")
return tf.data.Dataset.from_tensor_slices(example)
return example
def _file_num_records_cached(filename):
"""Return the number of TFRecords in a file."""
# Cache the result, as this is expensive to compute
if filename in _file_num_records_cache:
return _file_num_records_cache[filename]
ret = 0
for _ in tf.python_io.tf_record_iterator(filename):
ret += 1
_file_num_records_cache[filename] = ret
return ret
_file_num_records_cache = {}
class Problem(object):
"""Problem base class. Specifies a T2T problem.
Problems unify the specification of a problem for data generation, training,
and inference.
New problems are specified by the following methods:
Data generation:
* generate_data(data_dir, tmp_dir)
- Generate training and dev datasets into data_dir.
- Additional files, e.g. vocabulary files, should also be written to
data_dir. Vocab files are newline-separated files with each line
containing a token. The standard convention for the filename is to
set it to be
${Problem.vocab_name}.${Problem.targeted_vocab_size}
- Downloads and other files can be written to tmp_dir
- If you have a training and dev generator, you can generate the
training and dev datasets with
generator_utils.generate_dataset_and_shuffle.
- Use the self.training_filepaths and self.dev_filepaths functions to
get sharded filenames. If shuffled=False, the filenames will contain
an "unshuffled" suffix; you should then shuffle the data
shard-by-shard with generator_utils.shuffle_dataset.
- The number of shards can optionally be specified (it can be omitted).
- Subclasses must override
* dataset_filename()
- Base filename for problem.
- Defaults to registered name (self.name).
Training:
* hparams(defaults, model_hparams)
- Specify the problem hyperparameters (see _default_hparams)
- Mutate defaults as needed
* example_reading_spec
- Specify the names and types of the features on disk.
- Specify tf.contrib.slim.tfexample_decoder
* preprocess_example(example, mode)
- Preprocess the example feature dict from feature name to Tensor or
SparseTensor.
- Used in training, eval, and inference (specified by mode).
Eval:
* eval_metrics
- Specify the set of evaluation metrics for this problem.
Inference:
* feature_encoders(data_dir)
- Return a dict of <feature name, TextEncoder> for encoding and decoding
inference input/output.
- Defaults to TextEncoder for inputs and targets.
"""
# ============================================================================
# BEGIN SUBCLASS INTERFACE
# ============================================================================
def generate_data(self, data_dir, tmp_dir, task_id=-1):
raise NotImplementedError()
@property
def multiprocess_generate(self):
"""Whether to generate the data in multiple parallel processes."""
return False
@property
def num_generate_tasks(self):
"""Needed if multiprocess_generate is True."""
raise NotImplementedError()
def prepare_to_generate(self, data_dir, tmp_dir):
"""Prepare to generate data in parallel on different processes.
This function is called if multiprocess_generate is True.
Some things that might need to be done once are downloading the data
if it is not yet downloaded, and building the vocabulary.
Args:
data_dir: a string
tmp_dir: a string
"""
raise NotImplementedError()
def hparams(self, defaults, model_hparams):
pass
def max_length(self, model_hparams):
"""Maximum sequence length.
Problems with fixed length should override.
Args:
model_hparams: model hyperparameters
Returns:
an integer
"""
return (model_hparams.split_to_length or model_hparams.max_length or
model_hparams.batch_size)
def tpu_batch_size_per_shard(self, model_hparams):
"""Batch size in examples per TPU core.
Args:
model_hparams: model hyperparameters
Returns:
an integer
"""
if self.batch_size_means_tokens:
return model_hparams.batch_size // self.max_length(model_hparams)
else:
return model_hparams.batch_size
@property
def batch_size_means_tokens(self):
"""Do we specify hparams.batch_size in tokens per datashard per batch.
This is generally done for text problems.
If False, we assume that batch sizes are specified in examples per
datashard per batch.
TODO(noam): we should be more explicit and replace the hyperparameter
batch size with two hyperparameters:
hparams.examples_per_batch_per_datashard
hparams.tokens_per_batch_per_datashard
Returns:
a boolean
"""
return False
def dataset_filename(self):
return self.name
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder()
}
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def preprocess_example(self, example, mode, hparams):
"""Runtime preprocessing.
Return a dict or a tf.data.Dataset.from_tensor_slices (if you want each
example to turn into multiple).
Args:
example: dict, features
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
Returns:
dict or Dataset
"""
return preprocess_example_common(example, hparams, mode)
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
# ============================================================================
# END SUBCLASS INTERFACE
# ============================================================================
def training_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.train_data_filenames(file_basename, data_dir,
num_shards)
def dev_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.dev_data_filenames(file_basename, data_dir,
num_shards)
def test_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.test_data_filenames(file_basename, data_dir,
num_shards)
def filepattern(self, data_dir, mode, shard=None):
"""Get filepattern for data files for mode.
Matches mode to a suffix.
* DatasetSplit.TRAIN: train
* DatasetSplit.EVAL: dev
* DatasetSplit.TEST: test
* tf.estimator.ModeKeys.PREDICT: dev
Args:
data_dir: str, data directory.
mode: DatasetSplit
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
"""
path = os.path.join(data_dir, self.dataset_filename())
shard_str = "-%05d" % shard if shard is not None else ""
if mode == DatasetSplit.TRAIN:
suffix = "train"
elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:
suffix = "dev"
else:
assert mode == DatasetSplit.TEST
suffix = "test"
return "%s-%s%s*" % (path, suffix, shard_str)
def __init__(self, was_reversed=False, was_copy=False):
"""Create a Problem.
Args:
was_reversed: bool, whether to reverse inputs and targets.
was_copy: bool, whether to copy inputs to targets. Can be composed with
was_reversed so that if both are true, the targets become the inputs,
which are then copied to targets so that the task is targets->targets.
"""
self._was_reversed = was_reversed
self._was_copy = was_copy
self._encoders = None
self._hparams = None
self._feature_info = None
def get_feature_encoders(self, data_dir=None):
if self._encoders is None:
self._encoders = self.feature_encoders(data_dir)
return self._encoders
def get_hparams(self, model_hparams=None):
"""Returns problem_hparams."""
if self._hparams is not None:
return self._hparams
if self._encoders is None:
data_dir = (model_hparams and model_hparams.data_dir) or None
self.get_feature_encoders(data_dir)
hp = _default_hparams()
ret = self.hparams(hp, model_hparams)
if ret is not None:
raise ValueError("The Problem subclass hparams function should mutate "
"the defaults passed in and return None.")
hp.add_hparam("vocabulary", self._encoders)
hp.add_hparam("was_reversed", self._was_reversed)
hp.add_hparam("was_copy", self._was_copy)
if self._was_reversed:
_reverse_problem_hparams(hp)
if self._was_copy:
_copy_problem_hparams(hp)
self._hparams = hp
return self._hparams
def maybe_reverse_features(self, feature_map):
if not self._was_reversed:
return
inputs, targets = feature_map["inputs"], feature_map["targets"]
feature_map["inputs"], feature_map["targets"] = targets, inputs
def maybe_copy_features(self, feature_map):
if not self._was_copy:
return
feature_map["targets"] = feature_map["inputs"]
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None,
partition_id=0,
num_partitions=1):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: tf.contrib.training.HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
ValueError: if num_partitions is greater than the number of data files.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
shuffle_files = shuffle_files or shuffle_files is None and is_training
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern)
# Functions used in dataset transforms below
def _load_records(filename):
# Load records from file with an 8MiB read buffer.
return tf.data.TFRecordDataset(filename, buffer_size=8 * 1024 * 1024)
def _preprocess(example):
examples = self.preprocess_example(example, mode, hparams)
if not isinstance(examples, tf.data.Dataset):
examples = tf.data.Dataset.from_tensors(examples)
return examples
def _maybe_reverse_and_copy(example):
self.maybe_reverse_features(example)
self.maybe_copy_features(example)
return example
if len(data_files) < num_partitions:
raise ValueError(
"number of data files (%d) must be at least the number of hosts (%d)"
% (len(data_files), num_partitions))
data_files = [f for (i, f) in enumerate(data_files)
if i % num_partitions == partition_id]
tf.logging.info(
"partition: %d num_data_files: %d" % (partition_id, len(data_files)))
if shuffle_files:
random.shuffle(data_files)
dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))
if hasattr(tf.contrib.data, "parallel_interleave"):
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
_load_records, sloppy=is_training, cycle_length=8))
else:
dataset = dataset.interleave(_load_records, cycle_length=8,
block_length=16)
dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)
if preprocess:
if hasattr(tf.contrib.data, "parallel_interleave"):
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
_preprocess, sloppy=is_training, cycle_length=8))
else:
dataset = dataset.interleave(_preprocess, cycle_length=8,
block_length=16)
dataset = dataset.map(
_maybe_reverse_and_copy, num_parallel_calls=num_threads)
if output_buffer_size:
dataset = dataset.prefetch(output_buffer_size)
return dataset
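# Usage sketch (illustrative; the problem name and the registry lookup are
# assumptions, not defined in this file):
#
#   problem = registry.problem("my_problem")
#   train_dataset = problem.dataset(
#       tf.estimator.ModeKeys.TRAIN, data_dir="/data", hparams=my_hparams)
#   features = train_dataset.make_one_shot_iterator().get_next()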
def decode_example(self, serialized_example):
"""Return a dict of Tensors from a serialized tensorflow.Example."""
data_fields, data_items_to_decoders = self.example_reading_spec()
if data_items_to_decoders is None:
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(data_items_to_decoders)
decoded = decoder.decode(serialized_example, items=decode_items)
return dict(zip(decode_items, decoded))
@property
def has_inputs(self):
return "inputs" in self.get_feature_encoders()
@property
def feature_info(self):
"""Retrieve dict<feature name, FeatureInfo>.
Must first call Problem.get_hparams or Problem.dataset to have the problem's
internal hparams already constructed.
Returns:
dict<feature name, FeatureInfo>
"""
if self._feature_info is not None:
return self._feature_info
assert self._hparams is not None
hp = self.get_hparams()
input_mods = hp.input_modality
target_mod = hp.target_modality
vocabs = hp.vocabulary
if self.has_inputs:
in_id = hp.input_space_id
out_id = hp.target_space_id
features = collections.defaultdict(FeatureInfo)
for name, mod_spec in six.iteritems(input_mods):
mod, vocab_size = mod_spec
finfo = features[name]
finfo.modality = mod
finfo.vocab_size = vocab_size
mod, vocab_size = target_mod
features["targets"].modality = mod
features["targets"].vocab_size = vocab_size
for name, encoder in six.iteritems(vocabs):
features[name].encoder = encoder
if self.has_inputs:
features["inputs"].space_id = in_id
features["targets"].space_id = out_id
self._feature_info = features
return features
def make_estimator_input_fn(self,
mode,
hparams,
data_dir=None,
dataset_kwargs=None):
"""Return input_fn wrapped for Estimator."""
def estimator_input_fn(params, config):
return self.input_fn(
mode,
hparams,
data_dir=data_dir,
params=params,
config=config,
dataset_kwargs=dataset_kwargs)
return estimator_input_fn
def _dataset_partition(self, mode, config):
"""Which part of the training data to read.
If there are multiple parallel calls to input_fn (multiple TPU hosts),
then we want each one to read from a separate partition of the training
data.
Args:
mode: tf.estimator.ModeKeys
config: RunConfig
Returns:
partition_id: an integer
num_partitions: an integer
"""
if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, "tpu_config"):
# Reset in the case when using TPU but alternating TRAIN and EVAL.
self._next_partition_id = 0
return 0, 1
if config.tpu_config.per_host_input_for_training:
num_partitions = max(config.tpu_config.num_shards // 8, 1)
else:
num_partitions = config.tpu_config.num_shards
partition_id = getattr(self, "_next_partition_id", 0)
self._next_partition_id = partition_id + 1
tf.logging.info("num_partitions = %d partition_id = %d" %
(num_partitions, partition_id))
assert partition_id < num_partitions
return partition_id, num_partitions
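# Worked example (illustrative): with per_host_input_for_training=True and
# tpu_config.num_shards=16, num_partitions = max(16 // 8, 1) = 2, so the
# first input_fn call reads partition 0 and the next reads partition 1.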
def input_fn(self,
mode,
hparams,
data_dir=None,
params=None,
config=None,
dataset_kwargs=None):
"""Builds input pipeline for problem.
Args:
mode: tf.estimator.ModeKeys
hparams: HParams, model hparams
data_dir: str, data directory; if None, will use hparams.data_dir
params: dict, may include "batch_size"
config: RunConfig; should have the data_parallelism attribute if not using
TPU
dataset_kwargs: dict, if passed, will pass as kwargs to self.dataset
method when called
Returns:
(features_dict<str name, Tensor feature>, Tensor targets)
"""
partition_id, num_partitions = self._dataset_partition(mode, config)
is_training = mode == tf.estimator.ModeKeys.TRAIN
if config and config.use_tpu:
num_threads = 64
else:
num_threads = 4 if is_training else 1
max_length = self.max_length(hparams)
def tpu_valid_size(example):
return data_reader.example_valid_size(example, hparams.min_length,
max_length)
def gpu_valid_size(example):
drop_long_sequences = is_training or hparams.eval_drop_long_sequences
return data_reader.example_valid_size(example, hparams.min_length,
max_length
if drop_long_sequences else 10**9)
def define_shapes(example):
batch_size = config and config.use_tpu and params["batch_size"]
return standardize_shapes(example, batch_size=batch_size)
# Read and preprocess
data_dir = data_dir or hparams.data_dir
dataset_kwargs = dataset_kwargs or {}
dataset_kwargs.update({
"mode": mode,
"data_dir": data_dir,
"num_threads": num_threads,
"hparams": hparams,
"partition_id": partition_id,
"num_partitions": num_partitions,
})
dataset = self.dataset(**dataset_kwargs)
if is_training:
# Repeat and skip a random number of records
dataset = dataset.repeat()
data_files = tf.contrib.slim.parallel_reader.get_data_files(
self.filepattern(data_dir, mode))
dataset = skip_random_fraction(dataset, data_files[0])
dataset = dataset.map(
data_reader.cast_int64_to_int32, num_parallel_calls=num_threads)
if self.batch_size_means_tokens:
batch_size_means_tokens = True
else:
if _are_shapes_fully_defined(dataset.output_shapes):
batch_size_means_tokens = False
else:
tf.logging.warning(
"Shapes are not fully defined. Assuming batch_size means tokens. "
"Override batch_size_means_tokens() "
"in your problem subclass if this is undesired behavior.")
batch_size_means_tokens = True
# Batching
if not batch_size_means_tokens:
# Batch size means examples per datashard.
if config and config.use_tpu:
# on TPU, we use params["batch_size"], which specifies the number of
# examples across all datashards
batch_size = params["batch_size"]
dataset = dataset.apply(
tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
num_shards = (config and config.data_parallelism.n) or 1
batch_size = hparams.batch_size * num_shards
dataset = dataset.batch(batch_size)
else:
# batch_size means tokens per datashard
if config and config.use_tpu:
dataset = dataset.filter(tpu_valid_size)
padded_shapes = self._pad_for_tpu(dataset.output_shapes, hparams)
# on TPU, we use params["batch_size"], which specifies the number of
# examples across all datashards
batch_size = params["batch_size"]
dataset = dataset.apply(
tf.contrib.data.padded_batch_and_drop_remainder(
batch_size, padded_shapes))
else:
# On GPU, bucket by length
dataset = dataset.filter(gpu_valid_size)
batching_scheme = data_reader.hparams_to_batching_scheme(
hparams,
shard_multiplier=(config and config.data_parallelism.n) or 1,
length_multiplier=self.get_hparams().batch_size_multiplier)
if hparams.use_fixed_batch_size:
# Here batch_size really means examples per datashard.
batching_scheme["batch_sizes"] = [hparams.batch_size]
batching_scheme["boundaries"] = []
dataset = data_reader.bucket_by_sequence_length(
dataset, data_reader.example_length, batching_scheme["boundaries"],
batching_scheme["batch_sizes"])
if not is_training:
def _pad_batch(features):
if not config or config.data_parallelism.n <= 1:
return features
tf.logging.warn(
"Padding the batch to ensure that remainder eval batches have "
"a batch size divisible by the number of data shards. This may "
"lead to incorrect metrics for non-zero-padded features, e.g. "
"images. Use a single datashard (i.e. 1 GPU) in that case.")
return pad_batch(features, config.data_parallelism.n)
dataset = dataset.map(_pad_batch, num_parallel_calls=num_threads)
dataset = dataset.map(define_shapes, num_parallel_calls=num_threads)
dataset = dataset.prefetch(2)
features = dataset.make_one_shot_iterator().get_next()
if not config or not config.use_tpu:
_summarize_features(features, (config and config.data_parallelism.n) or 1)
if mode == tf.estimator.ModeKeys.PREDICT:
features["infer_targets"] = features["targets"]
features["targets"] = None
# This is because of a bug in the Estimator that short-circuits prediction
# if it doesn't see a QueueRunner. DummyQueueRunner implements the
# minimal expected interface but does nothing.
tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS,
data_reader.DummyQueueRunner())
return features, features["targets"]
def serving_input_fn(self, hparams):
"""Input fn for serving export, starting from serialized example."""
mode = tf.estimator.ModeKeys.PREDICT
serialized_example = tf.placeholder(
dtype=tf.string, shape=[None], name="serialized_example")
dataset = tf.data.Dataset.from_tensor_slices(serialized_example)
dataset = dataset.map(self.decode_example)
dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))
dataset = dataset.map(data_reader.cast_int64_to_int32)
dataset = dataset.padded_batch(1000, dataset.output_shapes)
dataset = dataset.map(standardize_shapes)
features = tf.contrib.data.get_single_element(dataset)
if self.has_inputs:
features.pop("targets", None)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
def _pad_for_tpu(self, shapes_dict, hparams):
"""Pads unknown features' dimensions for TPU."""
max_length = self.max_length(hparams)
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_seq_length)
targets_none_filler = get_filler(hparams.max_target_seq_length)
def pad_one_shape(shape, none_filler):
return [
(dim if dim is not None else none_filler) for dim in shape.as_list()
]
for key, shape in six.iteritems(shapes_dict):
if key == "inputs":
padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
elif key == "targets":
padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
else:
padded_shapes[key] = pad_one_shape(shape, max_length)
return padded_shapes
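# Worked example (illustrative): with max_length(hparams)=256, an unset
# hparams.max_input_seq_length and an "inputs" shape of [None, 1, 1],
# the padded shape becomes [256, 1, 1].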
class FeatureInfo(object):
def __init__(self,
encoder=None,
modality=None,
vocab_size=None,
space_id=None):
self.encoder = encoder
self.modality = modality
self.vocab_size = vocab_size
self.space_id = space_id
def _copy_problem_hparams(p_hparams):
"""Use input modality, vocab, and space id for target."""
p = p_hparams
# Duplicate input modality.
p.target_modality = p.input_modality["inputs"]
# Duplicate input vocabulary.
p.vocabulary["targets"] = p.vocabulary["inputs"]
# Duplicate input space ids.
p.target_space_id = p.input_space_id
# Mark that p was copied.
p.was_copy = True
def _reverse_problem_hparams(p_hparams):
"""Swap input/output modalities, vocab, and space ids."""
p = p_hparams
# Swap modalities.
input_modality = p.input_modality["inputs"]
target_modality = p.target_modality
p.input_modality["inputs"] = target_modality
p.target_modality = input_modality
# Swap vocabularies.
input_vocabulary = p.vocabulary["inputs"]
target_vocabulary = p.vocabulary["targets"]
p.vocabulary["inputs"] = target_vocabulary
p.vocabulary["targets"] = input_vocabulary
# Swap input/target space ids.
input_space_id = p.input_space_id
target_space_id = p.target_space_id
p.input_space_id = target_space_id
p.target_space_id = input_space_id
# Mark that p was reversed.
p.was_reversed = True
def _default_hparams():
"""A set of basic model hyperparameters."""
return tf.contrib.training.HParams(
# Use this parameter to get comparable perplexity numbers across different
# tokenizations. Set it to the ratio of (test-set tokens under the
# tokenization actually used) to (test-set tokens under the "official"
# tokenization). For example, if we are using a word-piece based model and
# want per-word perplexity, set loss_multiplier to the average number of
# wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will
# be used to judge the sequence length.
batch_size_multiplier=1,
# During inference for autoregressive problems, if the batch_size is 1,
# inference stops when the model predicts a text_encoder.EOS_ID token.
stop_at_eos=False,
# Modalities used to map from input features to a space compatible with
# chosen model architecture. One modality spec (which is a 2-tuple,
# (modality_full_name, vocab_size)) per feature key. modality_full_name
# is a string type:name, e.g. class_label:class_label_2d. Leaving off
# the name uses the default modality for that type (e.g. class_label ==
# class_label:default).
input_modality={},
# Modality used to map from hidden representation to the target space.
# Specified as a modality spec, a 2-tuple described above.
target_modality=None,
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. Spaces defined as constants in SpaceID
# class.
input_space_id=SpaceID.GENERIC,
target_space_id=SpaceID.GENERIC)
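# Example for loss_multiplier (illustrative): if the wordpiece tokenization
# yields 1.3 tokens per word on the test set while the "official" tokenization
# is word-level, setting loss_multiplier=1.3 makes the reported perplexity
# comparable per word.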
def _are_shapes_fully_defined(shapes_dict):
for shape in shapes_dict.values():
if not shape.is_fully_defined():
return False
return True
def _summarize_features(features, num_shards=1):
with tf.name_scope("input_stats"):
for (k, v) in six.iteritems(features):
if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
def standardize_shapes(features, batch_size=None):
"""Set the right shapes for the features."""
for fname in ["inputs", "targets"]:
if fname not in features:
continue
f = features[fname]
while len(f.get_shape()) < 4:
f = tf.expand_dims(f, axis=-1)
features[fname] = f
if batch_size:
# Ensure batch size is set on all features
for _, t in six.iteritems(features):
shape = t.get_shape().as_list()
shape[0] = batch_size
t.set_shape(t.get_shape().merge_with(shape))
# Assert shapes are fully known
t.get_shape().assert_is_fully_defined()
return features
def pad_batch(features, batch_multiple):
"""Pad batch dim of features to nearest multiple of batch_multiple."""
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_features = {}
for k, feature in features.items():
rank = len(feature.shape)
paddings = []
for _ in range(rank):
paddings.append([0, 0])
paddings[0][1] = batch_padding
padded_feature = tf.pad(feature, paddings)
padded_features[k] = padded_feature
return padded_features
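# Worked example (illustrative): with batch_multiple=4 and an incoming batch
# of 5 examples, mod = 5 % 4 = 1, has_mod = 1, batch_padding = 4 * 1 - 1 = 3,
# so every feature is zero-padded along the batch dimension up to 8 examples.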
def problem_hparams_to_features(problem_hparams):
input_space_id, target_space_id = 0, 0
if problem_hparams:
input_space_id = problem_hparams.input_space_id
target_space_id = problem_hparams.target_space_id
return {
"problem_choice": 0,
"input_space_id": input_space_id,
"target_space_id": target_space_id,
}
def skip_random_fraction(dataset, data_file):
# Skip a random fraction at the beginning of the stream. The skip is
# essential for synchronous highly-parallel training to avoid multiple
# replicas reading the same data in lock-step.
num_skip = random.randint(0, _file_num_records_cached(data_file))
return dataset.skip(num_skip)
|
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet.timeout import Timeout
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common.strategies.cluster import base
from trove.common.strategies.cluster.experimental.vertica.api import \
VerticaCluster
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.taskmanager import api as task_api
import trove.taskmanager.models as task_models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds.
class VerticaTaskManagerStrategy(base.BaseTaskManagerStrategy):
@property
def task_manager_api_class(self):
return VerticaTaskManagerAPI
@property
def task_manager_cluster_tasks_class(self):
return VerticaClusterTasks
class VerticaClusterTasks(task_models.ClusterTasks):
def create_cluster(self, context, cluster_id):
LOG.debug("Begin create_cluster for id: %s." % cluster_id)
def _create_cluster():
# Fetch instances by cluster_id against instances table.
db_instances = DBInstance.find_all(cluster_id=cluster_id,
deleted=False).all()
instance_ids = [db_instance.id for db_instance in db_instances]
# Wait for cluster members to get to cluster-ready status.
if not self._all_instances_ready(instance_ids, cluster_id):
return
LOG.debug("All members ready, proceeding for cluster setup.")
instances = [Instance.load(context, instance_id) for instance_id
in instance_ids]
member_ips = [self.get_ip(instance) for instance in instances]
guests = [self.get_guest(instance) for instance in instances]
# Users to be configured for password-less SSH.
authorized_users_without_password = ['root', 'dbadmin']
# Configuring password-less SSH for cluster members.
# Strategy for setting up SSH:
# get public keys for user from member-instances in cluster,
# combine them, finally push it back to all instances,
# and member instances add them to authorized keys.
LOG.debug("Configuring password-less SSH on cluster members.")
try:
for user in authorized_users_without_password:
pub_key = [guest.get_public_keys(user) for guest in guests]
for guest in guests:
guest.authorize_public_keys(user, pub_key)
LOG.debug("Installing cluster with members: %s." % member_ips)
for db_instance in db_instances:
if db_instance['type'] == 'master':
master_instance = Instance.load(context,
db_instance.id)
self.get_guest(master_instance).install_cluster(
member_ips)
break
LOG.debug("Finalizing cluster configuration.")
for guest in guests:
guest.cluster_complete()
except Exception:
LOG.exception(_("Error creating cluster."))
self.update_statuses_on_failure(cluster_id)
timeout = Timeout(CONF.cluster_usage_timeout)
try:
_create_cluster()
self.reset_task()
except Timeout as t:
if t is not timeout:
raise # not my timeout
LOG.exception(_("Timeout for building cluster."))
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
LOG.debug("End create_cluster for id: %s." % cluster_id)
def grow_cluster(self, context, cluster_id, new_instance_ids):
def _grow_cluster():
LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id)
db_instances = DBInstance.find_all(cluster_id=cluster_id,
deleted=False).all()
instance_ids = [db_instance.id for db_instance in db_instances]
# Wait for new cluster members to get to cluster-ready status.
if not self._all_instances_ready(new_instance_ids, cluster_id):
return
new_insts = [Instance.load(context, instance_id)
for instance_id in new_instance_ids]
existing_instances = [Instance.load(context, instance_id)
for instance_id
in instance_ids
if instance_id not in new_instance_ids]
existing_guests = [self.get_guest(i) for i in existing_instances]
new_guests = [self.get_guest(i) for i in new_insts]
all_guests = new_guests + existing_guests
authorized_users_without_password = ['root', 'dbadmin']
new_ips = [self.get_ip(instance) for instance in new_insts]
for user in authorized_users_without_password:
pub_key = [guest.get_public_keys(user) for guest in all_guests]
for guest in all_guests:
guest.authorize_public_keys(user, pub_key)
for db_instance in db_instances:
if db_instance['type'] == 'master':
LOG.debug("Found 'master' instance, calling grow on guest")
master_instance = Instance.load(context,
db_instance.id)
self.get_guest(master_instance).grow_cluster(new_ips)
break
for guest in new_guests:
guest.cluster_complete()
timeout = Timeout(CONF.cluster_usage_timeout)
try:
_grow_cluster()
self.reset_task()
except Timeout as t:
if t is not timeout:
raise # not my timeout
LOG.exception(_("Timeout for growing cluster."))
self.update_statuses_on_failure(cluster_id)
except Exception:
LOG.exception(_("Error growing cluster %s.") % cluster_id)
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
def shrink_cluster(self, context, cluster_id, instance_ids):
def _shrink_cluster():
db_instances = DBInstance.find_all(cluster_id=cluster_id,
deleted=False).all()
all_instance_ids = [db_instance.id for db_instance in db_instances]
remove_instances = [Instance.load(context, instance_id)
for instance_id in instance_ids]
left_instances = [Instance.load(context, instance_id)
for instance_id
in all_instance_ids
if instance_id not in instance_ids]
remove_member_ips = [self.get_ip(instance)
for instance in remove_instances]
k = VerticaCluster.k_safety(len(left_instances))
for db_instance in db_instances:
if db_instance['type'] == 'master':
master_instance = Instance.load(context,
db_instance.id)
if self.get_ip(master_instance) in remove_member_ips:
raise RuntimeError(_("Cannot remove master instance!"))
LOG.debug(_("Marking cluster k-safety: %s") % k)
self.get_guest(master_instance).mark_design_ksafe(k)
self.get_guest(master_instance).shrink_cluster(
remove_member_ips)
break
for r in remove_instances:
Instance.delete(r)
timeout = Timeout(CONF.cluster_usage_timeout)
try:
_shrink_cluster()
self.reset_task()
except Timeout as t:
if t is not timeout:
raise
LOG.exception(_("Timeout for shrinking cluster."))
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
LOG.debug("end shrink_cluster for Vertica cluster id %s" % self.id)
class VerticaTaskManagerAPI(task_api.API):
def _cast(self, method_name, version, **kwargs):
LOG.debug("Casting %s" % method_name)
cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, method_name, **kwargs)
|
|
# Copyright 2020 The Tilt Brush Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import itertools
import json
import os
import struct
SIZES = {
# accessor.type
'SCALAR': 1, 'VEC2': 2, 'VEC3': 3, 'VEC4': 4,
# accessor.componentType
5120: 1, 5121: 1, # BYTE, UBYTE
5122: 2, 5123: 2, # SHORT, USHORT
5124: 4, 5125: 4, # INT, UINT
5126: 4 # FLOAT
}
# struct format characters, for accessor.componentType
STRUCT_FORMAT = {
5120: 'b', 5121: 'B', # BYTE, UBYTE
5122: 'h', 5123: 'H', # SHORT, USHORT
5124: 'i', 5125: 'I', # INT, UINT
5126: 'f' # FLOAT
}
# From itertools docs
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
class binfile(object):
# Helper for parsing
def __init__(self, inf):
self.inf = inf
def read(self, n):
data = self.inf.read(n)
if len(data) < n:
raise Exception("Short read %s < %s" % (len(data), n))
return data
def write(self, data):
return self.inf.write(data)
def read_length_prefixed(self):
n, = self.unpack("<I")
return self.read(n)
def write_length_prefixed(self, data):
self.pack("<I", len(data))
self.inf.write(data)
def unpack(self, fmt):
n = struct.calcsize(fmt)
data = self.read(n)
return struct.unpack(fmt, data)
def pack(self, fmt, *args):
data = struct.pack(fmt, *args)
return self.inf.write(data)
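# Example (illustrative): bf.write_length_prefixed('abc') writes the 4-byte
# little-endian length 3 followed by the payload, and read_length_prefixed()
# reads it back the same way.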
class BaseGltf(object):
"""Abstract subclass for classes that parse:
- gltf+bin
- glb version 1
- glb version 2"""
# Irregular plural suffixes for object type names (default suffix is 's').
PLURAL_SUFFIX = { 'mesh': 'es' }
@staticmethod
def create(filename):
"""Returns a Gltf, Glb1, or Glb2 instance."""
bf = binfile(open(filename, 'rb'))
first_bytes = bf.read(4)
if first_bytes == 'glTF':
version, = bf.unpack("I")
if version == 1: return Glb1(filename)
elif version == 2: return Glb2(filename)
raise Exception("Bad version %d" % version)
elif filename.lower().endswith('.gltf') or first_bytes.startswith("{"):
return Gltf(filename)
else:
raise Exception("Unknown format")
def __init__(self, filename):
self.filename = filename
# subclass will init version, json_chunk, json, and bin_chunk
def dereference(self):
"""Converts (some) inter-object references from ints/strings to
actual Python references. The Python reference will have a '_' appended.
For example, accessor['bufferView_']."""
def deref_property(obj, prop, dest_type=None):
# Deref obj[prop]
dest_type = dest_type or prop # prop name is usually the obj type
lookup_table_name = dest_type + 's'
try: idx_or_name = obj[prop]
except KeyError: pass
else: obj[prop+'_'] = self.json[lookup_table_name][idx_or_name]
def deref_all(source_type, prop, dest_type=None):
# Deref obj[prop] for all objs of type source_type
for name_or_idx, obj in self.iter_objs(source_type):
deref_property(obj, prop, dest_type)
deref_all('accessor', 'bufferView')
deref_all('bufferView', 'buffer')
for _, mesh in self.iter_objs('mesh'):
for prim in mesh['primitives']:
attrs = prim['attributes']
for attr_name in attrs.keys():
deref_property(attrs, attr_name, 'accessor')
deref_property(prim, 'indices', 'accessor')
deref_property(prim, 'material')
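# Example (illustrative): in a gltf2 file where accessor['bufferView'] == 3,
# dereferencing adds accessor['bufferView_'] = self.json['bufferViews'][3];
# in gltf1 the same lookup happens by name rather than by index.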
def iter_objs(self, obj_type):
"""Yields (key, value) tuples.
In gltf1 the keys are names; in gltf2 the keys are indices."""
if self.version == 1:
plural = self.PLURAL_SUFFIX.get(obj_type, 's')
return self.json[obj_type + plural].items()
elif self.version == 2:
plural = self.PLURAL_SUFFIX.get(obj_type, 's')
return enumerate(self.json[obj_type + plural])
else:
raise Exception("Unknown gltf version; cannot iterate objects")
# backwards-compat
def get_json(self): return self.json_chunk
def get_mesh_by_name(self, name):
if self.version == 1:
return self.json['meshes'][name]
else:
for m in self.json['meshes']:
if m['name'] == name: return m
raise LookupError(name)
def get_bufferView_data(self, buffer_view):
"""Returns a hunk of bytes."""
start = buffer_view['byteOffset']
end = start + buffer_view['byteLength']
return self.bin_chunk[start:end]
def get_accessor_data(self, accessor):
"""Returns accessor data, decoded according to accessor.componentType,
and grouped according to accessor.type."""
componentType = accessor['componentType']
start = accessor['byteOffset']
count_per_element = SIZES[accessor['type']] # eg 2 for VEC2
# Parse a flat array of components, then group it if the type is a vector.
flat_count = accessor['count'] * count_per_element
byte_length = flat_count * SIZES[componentType]
bufferview_data = self.get_bufferView_data(accessor['bufferView_'])
attr_data = bufferview_data[start : start + byte_length]
struct_format = '<' + str(flat_count) + STRUCT_FORMAT[componentType]
flat = struct.unpack(struct_format, attr_data)
if count_per_element == 1: return flat
else: return list(grouper(count_per_element, flat))
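# Worked example (illustrative): a VEC2 accessor with componentType 5126
# (FLOAT) and count=3 reads 3 * 2 = 6 floats using struct format '<6f' and
# returns them grouped into three (u, v) pairs.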
class Gltf(BaseGltf):
def __init__(self, filename):
super(Gltf, self).__init__(filename)
# Not fully general; just good enough to work for TB .gltf/bin pairs
bin_name = os.path.splitext(filename)[0] + '.bin'
if not os.path.exists(bin_name):
raise Exception('No %s to go with %s' % (bin_name, filename))
self.total_len = None # Only meaningful for glb files
self.json_chunk = open(filename, 'rb').read()
self.bin_chunk = open(bin_name, 'rb').read()
self.json = json.loads(self.json_chunk)
version_str = self.json['asset'].get('version', "0")
self.version = int(float(version_str))
class Glb1(BaseGltf):
def __init__(self, filename):
super(Glb1, self).__init__(filename)
bf = binfile(open(self.filename, 'rb'))
assert bf.read(4) == 'glTF'
self.version, self.total_len, json_len, json_fmt = bf.unpack("<4I")
assert self.version == 1 and json_len % 4 == 0 and json_fmt == 0
self.json_chunk = bf.read(json_len)
self.bin_chunk = bf.inf.read()
self.json = json.loads(self.json_chunk)
class Glb2(BaseGltf):
def __init__(self, filename):
self.filename = filename
bf = binfile(open(self.filename, 'rb'))
assert bf.read(4) == 'glTF'
self.version, self.total_len = bf.unpack("II")
assert self.version == 2
assert self.total_len == os.stat(self.filename).st_size
self.json_chunk = self._read_chunk(bf, 'JSON')
self.bin_chunk = self._read_chunk(bf, 'BIN\0')
self.json = json.loads(self.json_chunk)
def _read_chunk(self, bf, expect_tag):
length, = bf.unpack("I")
tag = bf.read(4)
assert tag == expect_tag, tag
data = bf.read(length)
return data
#
# Testing
#
def load(version, name):
ROOT = os.path.expanduser('~/Documents/Tilt Brush/Exports/Baseline 22.0b4')
formatname = 'glb1' if (version == 1) else 'glb'
return BaseGltf.create(os.path.join(ROOT, name, formatname, name+'.glb'))
def test(version):
# It's CelVinyl texcoord 0 that has the NaNs
glb = load(version, 'ET_All')
glb.dereference()
mesh = glb.get_mesh_by_name("mesh_CelVinyl_700f3aa8-9a7c-2384-8b8a-ea028905dd8c_0_i0")
bad_accessor = mesh['primitives'][0]['attributes']['TEXCOORD_0_']
print(glb.get_accessor_data(bad_accessor)[0:3])
if __name__ == '__main__':
test(2)
|
|
import uuid
import unittest
import pickle
import datetime
from .. import connector
from .. import queue
from .. import encoder
class QueueTestCase:
@classmethod
def setUpClass(
cls,
):
cls.test_queue_name = 'test_queue_name'
cls.enqueued_value_first = {
'str': 'string1',
'date': datetime.datetime.utcnow().timestamp(),
'array': [
1,
2,
3,
4,
],
}
cls.enqueued_value_second = {
'str': 'string2',
'date': datetime.datetime.utcnow().timestamp(),
'array': [
2,
3,
4,
5,
],
}
cls.enqueued_value_third = {
'str': 'string3',
'date': datetime.datetime.utcnow().timestamp(),
'array': [
3,
4,
5,
6,
],
}
cls.test_result_id = str(uuid.uuid4())
def test_no_compression_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_zlib_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='zlib',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_gzip_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='gzip',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_bzip2_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='bzip2',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_lzma_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='lzma',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_pickle_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_msgpack_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='dummy',
serializer_name='msgpack',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_msgpack_compressed_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='zlib',
serializer_name='msgpack',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def test_pickle_compressed_queue(
self,
):
test_queue = queue.Queue(
connector=self.connector,
encoder=encoder.encoder.Encoder(
compressor_name='zlib',
serializer_name='pickle',
),
)
self.queue_functionality(
test_queue=test_queue,
)
pickled_queue = pickle.dumps(test_queue)
pickled_queue = pickle.loads(pickled_queue)
self.queue_functionality(
test_queue=pickled_queue,
)
def queue_functionality(
self,
test_queue,
):
test_queue.flush(
queue_name=self.test_queue_name,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=0,
)
test_queue.enqueue(
queue_name=self.test_queue_name,
items=[self.enqueued_value_first],
priority='NORMAL',
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=1,
)
test_queue.enqueue(
queue_name=self.test_queue_name,
items=[self.enqueued_value_second],
priority='NORMAL',
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=2,
)
test_queue.enqueue(
queue_name=self.test_queue_name,
items=[self.enqueued_value_third],
priority='NORMAL',
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=3,
)
returned_value_first = test_queue.dequeue(
queue_name=self.test_queue_name,
number_of_items=1,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=2,
)
returned_value_second = test_queue.dequeue(
queue_name=self.test_queue_name,
number_of_items=1,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=1,
)
returned_value_third = test_queue.dequeue(
queue_name=self.test_queue_name,
number_of_items=1,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=0,
)
self.assertIn(
member=returned_value_first[0],
container=[
self.enqueued_value_first,
self.enqueued_value_second,
self.enqueued_value_third,
],
)
self.assertIn(
member=returned_value_second[0],
container=[
self.enqueued_value_first,
self.enqueued_value_second,
self.enqueued_value_third,
],
)
self.assertIn(
member=returned_value_third[0],
container=[
self.enqueued_value_first,
self.enqueued_value_second,
self.enqueued_value_third,
],
)
for i in range(100):
test_queue.enqueue(
queue_name=self.test_queue_name,
items=[self.enqueued_value_first],
priority='NORMAL',
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=100,
)
test_queue.flush(
queue_name=self.test_queue_name,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=0,
)
test_queue.enqueue(
queue_name=self.test_queue_name,
items=[self.enqueued_value_first] * 1000,
priority='NORMAL',
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=1000,
)
values = test_queue.dequeue(
queue_name=self.test_queue_name,
number_of_items=100,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=900,
)
self.assertEqual(
first=values,
second=[self.enqueued_value_first] * 100,
)
values = test_queue.dequeue(
queue_name=self.test_queue_name,
number_of_items=900,
)
queue_length = test_queue.length(
queue_name=self.test_queue_name,
)
self.assertEqual(
first=queue_length,
second=0,
)
self.assertEqual(
first=values,
second=[self.enqueued_value_first] * 900,
)
test_queue.remove_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
result_exists = test_queue.has_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertFalse(
expr=result_exists,
)
result_added = test_queue.add_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertTrue(
expr=result_added,
)
result_added = test_queue.add_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertFalse(
expr=result_added,
)
result_exists = test_queue.has_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertTrue(
expr=result_exists,
)
result_removed = test_queue.remove_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertTrue(
expr=result_removed,
)
result_exists = test_queue.has_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertFalse(
expr=result_exists,
)
result_removed = test_queue.remove_result(
queue_name=self.test_queue_name,
result_id=self.test_result_id,
)
self.assertFalse(
expr=result_removed,
)
class SingleMongoQueueTestCase(
QueueTestCase,
unittest.TestCase,
):
def setUp(
self,
):
self.connector = connector.mongo.Connector(
mongodb_uri='mongodb://localhost:27030/',
)
class SingleRedisQueueTestCase(
QueueTestCase,
unittest.TestCase,
):
def setUp(
self,
):
self.connector = connector.redis.Connector(
host='127.0.0.1',
port=6379,
password='e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
database=0,
)
class RedisClusterMultipleServersQueueTestCase(
QueueTestCase,
unittest.TestCase,
):
def setUp(
self,
):
self.connector = connector.redis_cluster.Connector(
nodes=[
{
'host': '127.0.0.1',
'port': 6379,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
{
'host': '127.0.0.1',
'port': 6380,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
]
)
class RedisClusterSingleServerQueueTestCase(
QueueTestCase,
unittest.TestCase,
):
def setUp(
self,
):
self.connector = connector.redis_cluster.Connector(
nodes=[
{
'host': '127.0.0.1',
'port': 6379,
'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',
'database': 0,
},
]
)
class TaskerServerQueueTestCase(
QueueTestCase,
unittest.TestCase,
):
def setUp(
self,
):
self.connector = connector.tasker.Connector(
host='127.0.0.1',
port=50001,
)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations:
"""RouteFiltersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
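# Usage sketch (illustrative, not generated code; assumes this operation group
# is reached through an azure.mgmt.network NetworkManagementClient that exposes
# it as `route_filters`):
#
#   poller = await client.route_filters.begin_delete(
#       resource_group_name="my-rg", route_filter_name="my-filter")
#   await poller.result()  # returns once the route filter is deleted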
async def get(
self,
resource_group_name: str,
route_filter_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteFilter":
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_12_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.RouteFilter",
**kwargs: Any
) -> "_models.RouteFilter":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.RouteFilter",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilter"]:
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2016_12_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_12_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
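# A minimal usage sketch for the long-running operation above (illustrative only; the
# client and operations-group names below are assumptions, not taken from this file):
#
#     poller = await client.route_filters.begin_create_or_update(
#         resource_group_name="my-rg",              # hypothetical resource group
#         route_filter_name="my-route-filter",      # hypothetical filter name
#         route_filter_parameters=route_filter,     # a _models.RouteFilter instance
#     )
#     result = await poller.result()                # wait for the LRO to complete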
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.PatchRouteFilter",
**kwargs: Any
) -> "_models.RouteFilter":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'PatchRouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.PatchRouteFilter",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilter"]:
"""Updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the update route filter operation.
:type route_filter_parameters: ~azure.mgmt.network.v2016_12_01.models.PatchRouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_12_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterListResult"]:
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterListResult"]:
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
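# A minimal usage sketch for the paging operations above (illustrative only; `client` is an
# assumed management-client instance exposing these operations as `client.route_filters`).
# Both list() and list_by_resource_group() return AsyncItemPaged, consumed with `async for`:
#
#     async for route_filter in client.route_filters.list_by_resource_group("my-rg"):
#         print(route_filter.name)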
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import os
import sys
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig
from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
DEFAULT_TIMEOUT,
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
LABEL_CONFIG_HASH,
)
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash, parallel_execute
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'log_opt',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
class NoSuchImageError(Exception):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, name):
raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
if 'image' in options and 'build' in options:
raise ConfigError('Service %s has both an image and build path specified. A service can either be built from a build path or use an existing image, not both.' % name)
if 'image' not in options and 'build' not in options:
raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.net = net or None
self.options = options
def containers(self, stopped=False, one_off=False):
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
# TODO: remove these functions; the project takes care of starting/stopping.
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
# end TODO
def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name() and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name()))
if self.specifies_host_port():
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
container = service.create_container(number=number, quiet=True)
container.start()
return container
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
if desired_num == num_running:
# do nothing as we already have the desired number
log.info('Desired container number already achieved')
return
if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
# we have some stopped containers, let's start them up again
stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))
num_stopped = len(stopped_containers)
if num_stopped + num_running > desired_num:
num_to_start = desired_num - num_running
containers_to_start = stopped_containers[:num_to_start]
else:
containers_to_start = stopped_containers
parallel_execute(
objects=containers_to_start,
obj_callable=lambda c: c.start(),
msg_index=lambda c: c.name,
msg="Starting"
)
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
self.remove_stopped()
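# Illustrative behaviour of scale(), derived from the logic above: for a Service with two
# running containers, scale(4) first restarts any stopped containers and then creates and
# starts new ones numbered from _next_container_number(); scale(1) stops the
# highest-numbered running containers and finally removes all stopped containers.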
def remove_stopped(self, **options):
containers = [c for c in self.containers(stopped=True) if not c.is_running]
parallel_execute(
objects=containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def create_container(self,
one_off=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s..." % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
try:
self.image()
return
except NoSuchImageError:
pass
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull()
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self,
allow_recreate=True,
force_recreate=False):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if not allow_recreate:
return ConvergencePlan('start', containers)
if force_recreate or self._containers_have_diverged(containers):
return ConvergencePlan('recreate', containers)
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
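# Plans produced by convergence_plan(), derived from the branches above:
#   no containers exist                    -> ConvergencePlan('create', [])
#   recreation forced or config diverged   -> ConvergencePlan('recreate', containers)
#   some containers stopped                -> ConvergencePlan('start', stopped)
#   everything running and up to date      -> ConvergencePlan('noop', containers)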
def _containers_have_diverged(self, containers):
config_hash = None
try:
config_hash = self.config_hash()
except NoSuchImageError as e:
log.debug(
'Service %s has diverged: %s',
self.name, six.text_type(e),
)
return True
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s..." % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
for c in self.duplicate_containers():
log.info('Removing %s...' % c.name)
c.stop(timeout=timeout)
c.remove()
def duplicate_containers(self):
containers = sorted(
self.containers(stopped=True),
key=lambda c: c.get('Created'),
)
numbers = set()
for c in containers:
if c.number in numbers:
yield c
else:
numbers.add(c.number)
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
}
def get_dependency_names(self):
net_name = self.get_net_name()
return (self.get_linked_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def get_volumes_from_names(self):
return [s.name for s in self.volumes_from if isinstance(s, Service)]
def get_net_name(self):
if isinstance(self.net, Service):
return self.net.name
else:
return
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
numbers = [
Container.from_ps(self.client, container).number
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
return volumes_from
def _get_net(self):
if not self.net:
return None
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
if self.custom_container_name() and not one_off:
container_options['name'] = self.custom_container_name()
else:
container_options['name'] = self.get_container_name(number, one_off)
if add_config_hash:
config_hash = self.config_hash()
if 'labels' not in container_options:
container_options['labels'] = {}
container_options['labels'][LABEL_CONFIG_HASH] = config_hash
log.debug("Added config hash: %s" % config_hash)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
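# e.g. a hostname of 'web.example.com' with no explicit domainname becomes
# hostname='web' and domainname='example.com' (derived from the partition above).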
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port in all_ports:
port = str(port)
if ':' in port:
port = port.split(':')[-1]
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(
type=options.get('log_driver', 'json-file'),
config=options.get('log_opt', None)
)
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
return create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self._get_net(),
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt
)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
path = six.binary_type(self.options['build'])
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
pull=False,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
raise BuildError(self, six.text_type(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def custom_container_name(self):
return self.options.get('container_name')
def specifies_host_port(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return True
return False
def pull(self):
if 'image' not in self.options:
return
repo, tag = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
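# Illustrative examples, derived from the joining logic above:
#   build_container_name('myproject', 'web', 1)                -> 'myproject_web_1'
#   build_container_name('myproject', 'web', 1, one_off=True)  -> 'myproject_web_run_1'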
# Images
def parse_repository_tag(s):
if ":" not in s:
return s, ""
repo, tag = s.rsplit(":", 1)
if "/" in tag:
return s, ""
return repo, tag
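# Illustrative examples, derived from the parsing logic above (a ':' that belongs to a
# registry port is not treated as a tag):
#   parse_repository_tag('ubuntu:14.04')        -> ('ubuntu', '14.04')
#   parse_repository_tag('ubuntu')              -> ('ubuntu', '')
#   parse_repository_tag('localhost:5000/web')  -> ('localhost:5000/web', '')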
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
return volume_bindings.values()
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
for volume in set(volumes_option + image_volumes.keys()):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = parts[2] if len(parts) == 3 else 'rw'
return VolumeSpec(external, internal, mode)
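# Illustrative examples, derived from the parsing logic above:
#   parse_volume_spec('/host/logs:/var/log:ro') -> VolumeSpec('/host/logs', '/var/log', 'ro')
#   parse_volume_spec('/var/log')               -> VolumeSpec(None, '/var/log', 'rw')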
# Ports
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port, external = split_port(port)
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
return port_bindings
def split_port(port):
parts = str(port).split(':')
if not 1 <= len(parts) <= 3:
raise ConfigError('Invalid port "%s", should be '
'[[remote_ip:]remote_port:]port[/protocol]' % port)
if len(parts) == 1:
internal_port, = parts
return internal_port, None
if len(parts) == 2:
external_port, internal_port = parts
return internal_port, external_port
external_ip, external_port, internal_port = parts
return internal_port, (external_ip, external_port or None)
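# Illustrative examples, derived from the splitting logic above:
#   split_port('8000')                 -> ('8000', None)
#   split_port('8080:8000')            -> ('8000', '8080')
#   split_port('127.0.0.1:8080:8000')  -> ('8000', ('127.0.0.1', '8080'))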
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
labels = label_options or {}
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
return labels
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
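# Illustrative examples, derived from the parsing logic above:
#   parse_restart_spec('always')        -> {'Name': 'always', 'MaximumRetryCount': 0}
#   parse_restart_spec('on-failure:5')  -> {'Name': 'on-failure', 'MaximumRetryCount': 5}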
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
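# Illustrative examples, derived from the handling above:
#   build_extra_hosts(['somehost:162.242.195.82'])    -> {'somehost': '162.242.195.82'}
#   build_extra_hosts({'somehost': '162.242.195.82'}) -> {'somehost': '162.242.195.82'}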
|
|
'''
Copyright 2016, EMC, Inc.
Purpose: This test script consists of tests that verify the data returned by the task service API.
'''
import os
import sys
import subprocess
import json
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/common")
import fit_common
# Utilities for this script
def print_taskid_data(taskid, taskid_json):
"""
This utility displays the task json data for the user
:param taskid: the task id whose data is displayed
:param taskid_json: valid task json structure for that task id
"""
print "\n\tTaskId: ", taskid
print "\tSystem ID: ", taskid_json["Oem"]["RackHD"].get('SystemId', "")
print "\tTask State ", taskid_json.get('TaskState', "")
print "\tTask Status ", taskid_json.get('TaskStatus', "")
print "\tStartTime: ", taskid_json.get('StartTime', "")
print "\tEndTime: ", taskid_json.get('EndTime', "")
print "\tName: ", taskid_json.get('Name', "")
def get_task_srv_tasklist():
"""
This utility returns the list of all tasks currently in the system
reported by the Onrack API /redfish/v1/TaskService/Tasks
:return:
List of task ids on success
Otherwise empty on failure or error
"""
on_url = "/redfish/v1/TaskService/Tasks"
on_data = fit_common.rackhdapi(url_cmd=on_url)
tasklist = []
if on_data['status'] == 200:
members = on_data['json']["Members"]
for member in members:
tasklist.append(member['Id'])
else:
if fit_common.VERBOSITY >= 2:
print "Error in API command. Task Service command returned error."
return tasklist
def get_node_tasklist(nodeid):
"""
This utility returns the list of all tasks for a given node id
reported by the Onrack API /redfish/v1/TaskService/Oem/Tasks
:return:
List of task ids on success
Otherwise empty on failure or error
"""
on_url = "/redfish/v1/TaskService/Oem/Tasks/"+nodeid
on_data = fit_common.rackhdapi(url_cmd=on_url)
tasklist = []
if on_data['status'] == 200:
members = on_data['json']["Members"]
for member in members:
tasklist.append(member['Id'])
else:
if fit_common.VERBOSITY >= 2:
print "Error in API command. TaskService/Oem/Tasks/"+nodeid+" returned error."
return tasklist
def get_taskid_data(taskid):
"""
This utility returns the data associated with the taskid
reported by the Onrack API /redfish/v1/TaskService/Tasks/<taskid>
:param taskid: task id returned by a command
:return:
taskid dictionary
Otherwise empty on failure or error
"""
taskid_json = {}
on_url = "/redfish/v1/TaskService/Tasks/"+taskid
on_data = fit_common.rackhdapi(url_cmd=on_url)
if on_data['status'] == 200:
try:
taskid_json = on_data['json']
except ValueError:
print "No TaskID data returned"
else:
if fit_common.VERBOSITY >= 2:
print "Error in API command. TaskService/Oem/Tasks/"+taskid+" returned error."
return taskid_json
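# A minimal usage sketch of the utilities above (illustrative only):
#
#     for taskid in get_task_srv_tasklist():
#         print_taskid_data(taskid, get_taskid_data(taskid))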
# Test Cases
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class redfish10_api_task_suite(fit_common.unittest.TestCase):
def test_redfish_v1_taskservice_tasklist(self):
# The API /redfish/v1/TaskService will display the list of tasks in the system
tasklist = []
# This test checks the TaskService API
if fit_common.VERBOSITY >= 2:
msg = "Description: Get TaskService data"
print("\t{0}".format(msg))
on_url = "/redfish/v1/TaskService"
on_data = fit_common.rackhdapi(url_cmd=on_url)
self.assertEqual(on_data['status'], 200,
"Incorrect HTTP return code, expecting 200, received {0}".format(on_data['status']))
for item in ["CompletedTaskOverWritePolicy", "DateTime", "Id", "LifeCycleEventOnTaskStateChange",
"Name", "ServiceEnabled", "Status", "Tasks"]:
if fit_common.VERBOSITY >= 2:
print "Checking:", item
self.assertIn(item, on_data['json'], item + ' field not present')
# Check a few fields
self.assertEqual(on_data['json']['Id'], "TaskService",
"Id field set incorrectly in response data: {0}".format(on_data['json']['Id']))
writepolicy = on_data['json']['CompletedTaskOverWritePolicy']
if fit_common.VERBOSITY >= 2:
print "Checking CompletedTaskOverWritePolicy field"
self.assertIn(writepolicy, ["Manual", "Oldest"], "Unexpected policy per specification {0}".format(writepolicy))
tasklist = []
members = on_data['json']['Tasks']['Members']
for member in members:
tasklist.append(member['Id'])
if fit_common.VERBOSITY >= 2:
print ("Task Service contains {0} tasks.".format(len(tasklist)))
def test_redfish_v1_taskservice_check_all_tasks(self):
# The API TaskService/Tasks will display a list of all the tasks
# that were run and are active in the system. This includes tasks for everything:
# managed and unmanaged nodes, as well as non-node-specific tasks.
tasklist = []
if fit_common.VERBOSITY >= 2:
msg = "Description: Display the list of Tasks in the System."
print("\n\t{0}".format(msg))
on_data = fit_common.rackhdapi('/redfish/v1/TaskService/Tasks')
self.assertIn(on_data['status'], [200], "Incorrect HTTP return code")
for item in ["@odata.id", "Name", "@odata.type"]:
if fit_common.VERBOSITY >= 2:
print "Checking:", item
self.assertIn(item, on_data['json'], item + ' field not present')
if item not in ["Members@odata.count", "Members"]:
self.assertGreater(len(on_data['json'][item]), 0,
"Required field {0} empty".format(item))
tasklist = []
if fit_common.VERBOSITY >= 2:
print("\tTaskIDs: ")
members = on_data['json']["Members"]
for member in members:
taskid = member['Id']
tasklist.append(taskid)
if fit_common.VERBOSITY >= 2:
print(taskid)
self.assertNotEqual(tasklist, [], 'No Tasks listed in system.')
#Check reported Member count equals number of task ids in the list
membercount = int(on_data['json']['Members@odata.count'])
listcount = len(tasklist)
self.assertEqual(membercount, listcount,
"Reported member count of {0} not equal length of tasklist {1}".format(membercount, listcount))
if fit_common.VERBOSITY >= 2:
print "\tNumber of tasks in the system", membercount
def test_redfish_v1_taskservice_tasks_per_node(self):
# The API TaskService/Oem/Tasks/<systemid> will display a list of all tasks that
# are associated with the specified node id for managed systems.
# if verbose
if fit_common.VERBOSITY >= 2:
msg = "Description: Display the list of Tasks for each System"
print("\n\t{0}".format(msg))
nodelist = fit_common.node_select()
if fit_common.VERBOSITY >= 2:
print "Nodelist: "
print json.dumps(nodelist, indent=4)
self.assertNotEqual(nodelist, [], 'No Nodes reported for this stack.')
for node in nodelist:
tasklist = get_node_tasklist(node)
self.assertNotEqual(tasklist, [], 'No Tasks listed for node.')
for taskid in tasklist:
taskdata = get_taskid_data(taskid)
if fit_common.VERBOSITY >= 2:
print_taskid_data(taskid, taskdata)
def test_redfish_v1_taskservice_task_count_per_node(self):
# The API /redfish/v1/TaskService/Oem/Tasks/<id> will check the count for each list of tasks
# associated with all node ids.
if fit_common.VERBOSITY >= 2:
msg = "Description: Check the reported task count in the list of Tasks for each System"
print("\n\t{0}".format(msg))
nodelist = fit_common.node_select()
self.assertNotEqual(nodelist, [], 'No Nodes reported for this stack.')
for node in nodelist:
on_url = "/redfish/v1/TaskService/Oem/Tasks/"+node
on_data = fit_common.rackhdapi(url_cmd=on_url)
tasklist = []
if on_data['status'] == 200:
members = on_data['json']["Members"]
for member in members:
taskid = member['Id']
tasklist.append(taskid)
taskcount = int(on_data['json']['Members@odata.count'])
listcount = len(tasklist)
self.assertEqual(taskcount, listcount,
"Reported task count {0} not equal length of tasklist {1}".format(taskcount, listcount))
if fit_common.VERBOSITY >= 2:
print("\tNodeID: {0} Number of tasks reported {1}".format(node, taskcount))
def test_redfish_v1_taskservice_check_task_data_fields(self):
# The API TaskService/Tasks/<taskid> will display the task data associated with the specified task.
if fit_common.VERBOSITY >= 2:
msg = "Description: Display the data for each taskid contained in the System."
print("\n\t{0}".format(msg))
tasklist = get_task_srv_tasklist()
self.assertNotEqual(tasklist, [], 'No Tasks found in the system')
for task in tasklist:
on_data = fit_common.rackhdapi('/redfish/v1/TaskService/Tasks/'+task)
self.assertIn(on_data['status'], [200], "Incorrect HTTP return code")
# check if required fields exist
for item in ["@odata.id", "Name", "@odata.type", "TaskState", "TaskStatus", "StartTime", "Id"]:
if fit_common.VERBOSITY >= 2:
print ("Task: {} Checking: {}".format(task,item))
self.assertIn(item, on_data['json'], item + ' field not present')
# check if task completed, endtime should be populated
taskstates = ["Completed", "Exception", "Killed"]
taskstate = on_data['json'].get('TaskState', "")
if taskstate in taskstates:
for item in ["EndTime"]:
if fit_common.VERBOSITY >= 2:
print ("Task: {} Checking: {}".format(task,item))
self.assertIn(item, on_data['json'], item + ' field not present')
if fit_common.VERBOSITY >= 3:
print_taskid_data(task, on_data['json'])
def test_redfish_v1_taskservice_check_task_return_status_validity(self):
# Check the return status in the tasks to be in the valid list
# Mapping of RackHD 1.1 to Redfish v1 status is:
# running : Running
# succeeded : Completed
# finished : Completed
# failed : Exception
# timeout : Exception
# cancelled : Killed
# pending : Pending
status = []
def task_code_check(tasklist):
# Following is list as stated by OnRack developers
validtaskstatus = ["Running", "Pending", "Completed", "Exception", "Killed"]
#validtaskstatus = ["Running", "Cancelled", "Aborted", "Completed", "Exception", "Killed", "Pending"]
# Following is list defined in Redfish specs
#validtaskstatus = ["New", "Starting", "Running", "Suspended", "Interrupted", "Pending",
# "Stopping", "Completed", "Killed", "Exception", "Service"]
#Enumeration Description for TaskStates from Redfish 1.0.0 spec:
#New: A new task
#Starting: Task is starting
#Running: Task is running normally
#Suspended: Task has been suspended
#Interrupted: Task has been interrupted
#Pending: Task is pending and has not started
#Stopping: Task is in the process of stopping
#Completed: Task has completed
#Killed: Task was terminated
#Exception: Task has stopped due to an exception condition
#Service: Task is running as a service
errorlist = []
if fit_common.VERBOSITY >= 2:
print("\tValid Task States per Redfish 1.0 {0}".format(validtaskstatus))
# Check the task id task state is in list of valid task status codes
for task in tasklist:
on_data = fit_common.rackhdapi('/redfish/v1/TaskService/Tasks/'+task)
if on_data['status'] != 200:
errorlist.append("TaskId: {} Incorrect HTTP return code, expecting 200, received {}".format(task,on_data['status']))
if on_data['json']['TaskState'] not in validtaskstatus:
print_taskid_data(task, on_data['json'])
errorlist.append("TaskID: {} Invalid Task State of : {}".format(task,on_data['json']['TaskState']))
return errorlist
if fit_common.VERBOSITY >= 2:
msg = "Description: Check the return status codes are in list of valid status"
print("\n\t{0}".format(msg))
tasklist = get_task_srv_tasklist()
self.assertNotEqual(tasklist, [], 'No Tasks listed.')
status = task_code_check(tasklist)
if status != []:
print ("Errors reported {} ".format(json.dumps(status,indent=4)))
self.assertEqual(status, [], "Errors in Returned Task Status.")
def test_redfish_v1_taskservice_check_library_test_list(self):
# Return the workflow task library list from RackHD
if fit_common.VERBOSITY >= 2:
msg = "Description: Get list of supported tasks via monorail workflow task library"
print("\n\t{0}".format(msg))
supported_tasks = []
get_task_url = "/api/1.1/workflows/tasks/library"
mon_data = fit_common.rackhdapi(url_cmd=get_task_url)
if mon_data['status'] != 200:
print 'No data returned from monorail, status = {0}'.format(mon_data['status'])
else:
for task in mon_data['json']:
# handle key error if injectableName not in json
if task.get('injectableName') != None:
supported_tasks.append(task['injectableName'])
self.assertNotEqual(supported_tasks, [], 'No tasks listed in task library.')
if fit_common.VERBOSITY >= 2:
for key in supported_tasks:
print("Key: {}".format(key))
if __name__ == '__main__':
fit_common.unittest.main()
|
|
from xmltodict import parse, ParsingInterrupted
import collections
import unittest
try:
from io import BytesIO as StringIO
except ImportError:
from xmltodict import StringIO
from xml.parsers.expat import ParserCreate
from xml.parsers import expat
def _encode(s):
try:
return bytes(s, 'ascii')
except (NameError, TypeError):
return s
class XMLToDictTestCase(unittest.TestCase):
def test_string_vs_file(self):
xml = '<a>data</a>'
self.assertEqual(parse(xml),
parse(StringIO(_encode(xml))))
def test_minimal(self):
self.assertEqual(parse('<a/>'),
{'a': None})
self.assertEqual(parse('<a/>', force_cdata=True),
{'a': None})
def test_simple(self):
self.assertEqual(parse('<a>data</a>'),
{'a': 'data'})
def test_force_cdata(self):
self.assertEqual(parse('<a>data</a>', force_cdata=True),
{'a': {'#text': 'data'}})
def test_custom_cdata(self):
self.assertEqual(parse('<a>data</a>',
force_cdata=True,
cdata_key='_CDATA_'),
{'a': {'_CDATA_': 'data'}})
def test_list(self):
self.assertEqual(parse('<a><b>1</b><b>2</b><b>3</b></a>'),
{'a': {'b': ['1', '2', '3']}})
def test_attrib(self):
self.assertEqual(parse('<a href="xyz"/>'),
{'a': {'@href': 'xyz'}})
def test_skip_attrib(self):
self.assertEqual(parse('<a href="xyz"/>', xml_attribs=False),
{'a': None})
def test_custom_attrib(self):
self.assertEqual(parse('<a href="xyz"/>',
attr_prefix='!'),
{'a': {'!href': 'xyz'}})
def test_attrib_and_cdata(self):
self.assertEqual(parse('<a href="xyz">123</a>'),
{'a': {'@href': 'xyz', '#text': '123'}})
def test_semi_structured(self):
self.assertEqual(parse('<a>abc<b/>def</a>'),
{'a': {'b': None, '#text': 'abcdef'}})
self.assertEqual(parse('<a>abc<b/>def</a>',
cdata_separator='\n'),
{'a': {'b': None, '#text': 'abc\ndef'}})
def test_nested_semi_structured(self):
self.assertEqual(parse('<a>abc<b>123<c/>456</b>def</a>'),
{'a': {'#text': 'abcdef', 'b': {
'#text': '123456', 'c': None}}})
def test_skip_whitespace(self):
xml = """
<root>
<emptya> </emptya>
<emptyb attr="attrvalue">
</emptyb>
<value>hello</value>
</root>
"""
self.assertEqual(
parse(xml),
{'root': {'emptya': None,
'emptyb': {'@attr': 'attrvalue'},
'value': 'hello'}})
def test_keep_whitespace(self):
xml = "<root> </root>"
self.assertEqual(parse(xml), dict(root=None))
self.assertEqual(parse(xml, strip_whitespace=False),
dict(root=' '))
def test_streaming(self):
def cb(path, item):
cb.count += 1
self.assertEqual(path, [('a', {'x': 'y'}), ('b', None)])
self.assertEqual(item, str(cb.count))
return True
cb.count = 0
parse('<a x="y"><b>1</b><b>2</b><b>3</b></a>',
item_depth=2, item_callback=cb)
self.assertEqual(cb.count, 3)
def test_streaming_interrupt(self):
cb = lambda path, item: False
self.assertRaises(ParsingInterrupted,
parse, '<a>x</a>',
item_depth=1, item_callback=cb)
def test_streaming_generator(self):
def cb(path, item):
cb.count += 1
self.assertEqual(path, [('a', {'x': 'y'}), ('b', None)])
self.assertEqual(item, str(cb.count))
return True
cb.count = 0
parse((n for n in '<a x="y"><b>1</b><b>2</b><b>3</b></a>'),
item_depth=2, item_callback=cb)
self.assertEqual(cb.count, 3)
def test_postprocessor(self):
def postprocessor(path, key, value):
try:
return key + ':int', int(value)
except (ValueError, TypeError):
return key, value
self.assertEqual({'a': {'b:int': [1, 2], 'b': 'x'}},
parse('<a><b>1</b><b>2</b><b>x</b></a>',
postprocessor=postprocessor))
def test_postprocessor_attribute(self):
def postprocessor(path, key, value):
try:
return key + ':int', int(value)
except (ValueError, TypeError):
return key, value
self.assertEqual({'a': {'@b:int': 1}},
parse('<a b="1"/>',
postprocessor=postprocessor))
def test_postprocessor_skip(self):
def postprocessor(path, key, value):
if key == 'b':
value = int(value)
if value == 3:
return None
return key, value
self.assertEqual({'a': {'b': [1, 2]}},
parse('<a><b>1</b><b>2</b><b>3</b></a>',
postprocessor=postprocessor))
def test_unicode(self):
try:
value = unichr(39321)
except NameError:
value = chr(39321)
self.assertEqual({'a': value},
parse('<a>%s</a>' % value))
def test_encoded_string(self):
try:
value = unichr(39321)
except NameError:
value = chr(39321)
xml = '<a>%s</a>' % value
self.assertEqual(parse(xml),
parse(xml.encode('utf-8')))
def test_namespace_support(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x a:attr="val">1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
d = {
'http://defaultns.com/:root': {
'http://defaultns.com/:x': {
'@xmlns': {
'': 'http://defaultns.com/',
'a': 'http://a.com/',
'b': 'http://b.com/',
},
'@http://a.com/:attr': 'val',
'#text': '1',
},
'http://a.com/:y': '2',
'http://b.com/:z': '3',
}
}
res = parse(xml, process_namespaces=True)
self.assertEqual(res, d)
def test_namespace_collapse(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x a:attr="val">1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
namespaces = {
'http://defaultns.com/': '',
'http://a.com/': 'ns_a',
}
d = {
'root': {
'x': {
'@xmlns': {
'': 'http://defaultns.com/',
'a': 'http://a.com/',
'b': 'http://b.com/',
},
'@ns_a:attr': 'val',
'#text': '1',
},
'ns_a:y': '2',
'http://b.com/:z': '3',
},
}
res = parse(xml, process_namespaces=True, namespaces=namespaces)
self.assertEqual(res, d)
def test_namespace_collapse_all(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x a:attr="val">1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
namespaces = collections.defaultdict(lambda: None)
d = {
'root': {
'x': {
'@xmlns': {
'': 'http://defaultns.com/',
'a': 'http://a.com/',
'b': 'http://b.com/',
},
'@attr': 'val',
'#text': '1',
},
'y': '2',
'z': '3',
},
}
res = parse(xml, process_namespaces=True, namespaces=namespaces)
self.assertEqual(res, d)
def test_namespace_ignore(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x>1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
d = {
'root': {
'@xmlns': 'http://defaultns.com/',
'@xmlns:a': 'http://a.com/',
'@xmlns:b': 'http://b.com/',
'x': '1',
'a:y': '2',
'b:z': '3',
},
}
self.assertEqual(parse(xml), d)
def test_force_list_basic(self):
xml = """
<servers>
<server>
<name>server1</name>
<os>os1</os>
</server>
</servers>
"""
expectedResult = {
'servers': {
'server': [
{
'name': 'server1',
'os': 'os1',
},
],
}
}
self.assertEqual(parse(xml, force_list=('server',)), expectedResult)
def test_force_list_callable(self):
xml = """
<config>
<servers>
<server>
<name>server1</name>
<os>os1</os>
</server>
</servers>
<skip>
<server></server>
</skip>
</config>
"""
def force_list(path, key, value):
"""Only return True for servers/server, but not for skip/server."""
if key != 'server':
return False
return path and path[-1][0] == 'servers'
expectedResult = {
'config': {
'servers': {
'server': [
{
'name': 'server1',
'os': 'os1',
},
],
},
'skip': {
'server': None,
},
},
}
self.assertEqual(parse(xml, force_list=force_list, dict_constructor=dict), expectedResult)
def test_disable_entities_true_ignores_xmlbomb(self):
xml = """
<!DOCTYPE xmlbomb [
<!ENTITY a "1234567890" >
<!ENTITY b "&a;&a;&a;&a;&a;&a;&a;&a;">
<!ENTITY c "&b;&b;&b;&b;&b;&b;&b;&b;">
]>
<bomb>&c;</bomb>
"""
expectedResult = {'bomb': None}
try:
parse_attempt = parse(xml, disable_entities=True)
except expat.ExpatError:
self.assertTrue(True)
else:
self.assertEqual(parse_attempt, expectedResult)
def test_disable_entities_false_returns_xmlbomb(self):
xml = """
<!DOCTYPE xmlbomb [
<!ENTITY a "1234567890" >
<!ENTITY b "&a;&a;&a;&a;&a;&a;&a;&a;">
<!ENTITY c "&b;&b;&b;&b;&b;&b;&b;&b;">
]>
<bomb>&c;</bomb>
"""
bomb = "1234567890" * 64
expectedResult = {'bomb': bomb}
self.assertEqual(parse(xml, disable_entities=False), expectedResult)
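    # Worked expansion for the assertion above: &c; expands to 8 copies of
    # &b;, each &b; to 8 copies of &a;, and each &a; is the 10-character
    # string "1234567890", so the parsed text is 8 * 8 = 64 repetitions of it.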
def test_disable_entities_true_ignores_external_dtd(self):
xml = """
<!DOCTYPE external [
<!ENTITY ee SYSTEM "http://www.python.org/">
]>
<root>ⅇ</root>
"""
expectedResult = {'root': None}
try:
parse_attempt = parse(xml, disable_entities=True)
except expat.ExpatError:
self.assertTrue(True)
else:
self.assertEqual(parse_attempt, expectedResult)
def test_disable_entities_true_attempts_external_dtd(self):
xml = """
<!DOCTYPE external [
<!ENTITY ee SYSTEM "http://www.python.org/">
]>
<root>ⅇ</root>
"""
def raising_external_ref_handler(*args, **kwargs):
parser = ParserCreate(*args, **kwargs)
parser.ExternalEntityRefHandler = lambda *x: 0
try:
feature = "http://apache.org/xml/features/disallow-doctype-decl"
parser._reader.setFeature(feature, True)
except AttributeError:
pass
return parser
expat.ParserCreate = raising_external_ref_handler
# Using this try/catch because a TypeError is thrown before
# the ExpatError, and Python 2.6 is confused by that.
try:
parse(xml, disable_entities=False, expat=expat)
except expat.ExpatError:
self.assertTrue(True)
else:
self.assertTrue(False)
expat.ParserCreate = ParserCreate
def test_comments(self):
xml = """
<a>
<b>
<!-- b comment -->
<c>
<!-- c comment -->
1
</c>
<d>2</d>
</b>
</a>
"""
expectedResult = {
'a': {
'b': {
'#comment': 'b comment',
'c': {
'#comment': 'c comment',
'#text': '1',
},
'd': '2',
},
}
}
self.assertEqual(parse(xml, process_comments=True), expectedResult)
|
|
from itertools import product
import glob
import os
import os.path as op
import pickle
import shutil
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal)
import pytest
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_adjacency,
read_surface, random_parcellation, morph_labels,
labels_to_stc)
from mne.label import (Label, _blend_colors, label_sign_flip, _load_vert_pos,
select_sources)
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, check_version)
from mne.label import _n_colors
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
# XXX : this was added for backward compatibility and to keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
        If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : int
Number of smoothing iterations.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
    Returns
    -------
    labels : list
        One entry per hemisphere ('lh', 'rh'): a Label built from the
        non-zero sources of that hemisphere (smoothed ``smooth`` times),
        or None if that hemisphere has no active source.
    """
src = stc.subject if src is None else src
if isinstance(src, str):
subject = src
else:
subject = stc.subject
if isinstance(src, str):
subjects_dir = get_subjects_dir(subjects_dir)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
labels = []
cnt = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
this_labels = None
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
for k in range(smooth):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
this_labels.append(label)
this_labels = this_labels[0]
labels.append(this_labels)
return labels
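# Hedged usage sketch of the backward-compat helper above, mirroring how
# test_stc_to_label calls it (both forms should give equivalent labels):
#   labels = _stc_to_label(stc, src='sample', smooth=3)   # subject name
#   labels = _stc_to_label(stc, src=src, smooth=3)        # SourceSpaces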
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
"""Assert two labels are equal."""
if comment:
assert_equal(l0.comment, l1.comment)
if color:
assert_equal(l0.color, l1.color)
for attr in ['hemi', 'subject']:
attr0 = getattr(l0, attr)
attr1 = getattr(l1, attr)
msg = "label.%s: %r != %r" % (attr, attr0, attr1)
assert_equal(attr0, attr1, msg)
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_copy():
"""Test label copying."""
label = read_label(label_fname)
label_2 = label.copy()
label_2.pos += 1
assert_array_equal(label.pos, label_2.pos - 1)
def test_label_subject():
"""Test label subject name extraction."""
label = read_label(label_fname)
assert label.subject is None
assert ('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert (label.subject == 'fsaverage')
assert ('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition."""
pos = np.random.RandomState(0).rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
assert_equal(len(l0), len(idx0))
l_good = l0.copy()
l_good.subject = 'sample'
l_bad = l1.copy()
l_bad.subject = 'foo'
pytest.raises(ValueError, l_good.__add__, l_bad)
pytest.raises(TypeError, l_good.__add__, 'foo')
pytest.raises(ValueError, l_good.__sub__, l_bad)
pytest.raises(TypeError, l_good.__sub__, 'foo')
# adding non-overlapping labels
l01 = l0 + l1
assert_equal(len(l01), len(l0) + len(l1))
assert_array_equal(l01.values[:len(l0)], l0.values)
assert_equal(l01.color, l0.color)
# subtraction
assert_labels_equal(l01 - l0, l1, comment=False, color=False)
assert_labels_equal(l01 - l1, l0, comment=False, color=False)
# adding overlapping labels
l02 = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l02.vertices == 6)[0][0]
assert_equal(l02.values[i], l0.values[i0] + l2.values[i2])
assert_equal(l02.values[0], l0.values[0])
assert_array_equal(np.unique(l02.vertices), np.unique(idx0 + idx2))
assert_equal(l02.color, _blend_colors(l0.color, l2.color))
# adding lh and rh
l2.hemi = 'rh'
bhl = l0 + l2
assert_equal(bhl.hemi, 'both')
assert_equal(len(bhl), len(l0) + len(l2))
assert_equal(bhl.color, l02.color)
assert ('BiHemiLabel' in repr(bhl))
# subtraction
assert_labels_equal(bhl - l0, l2)
assert_labels_equal(bhl - l2, l0)
bhl2 = l1 + bhl
assert_labels_equal(bhl2.lh, l01)
assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices) # rh label
assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
pytest.raises(TypeError, bhl.__add__, 5)
# subtraction
bhl_ = bhl2 - l1
assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
assert_labels_equal(bhl_.rh, bhl.rh)
assert_labels_equal(bhl2 - l2, l0 + l1)
assert_labels_equal(bhl2 - l1 - l0, l2)
bhl_ = bhl2 - bhl2
assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
@pytest.mark.parametrize('fname', (real_label_fname, v1_label_fname))
def test_label_fill_restrict(fname):
"""Test label in fill and restrict."""
src = read_source_spaces(src_fname)
label = read_label(fname)
# construct label from source space vertices
label_src = label.restrict(src)
vert_in_src = label_src.vertices
values_in_src = label_src.values
if check_version('scipy', '1.3') and fname == real_label_fname:
# Check that we can auto-fill patch info quickly for one condition
for s in src:
s['nearest'] = None
with pytest.warns(None):
label_src = label_src.fill(src)
else:
label_src = label_src.fill(src)
assert src[0]['nearest'] is not None
# check label vertices
vertices_status = np.in1d(src[0]['nearest'], label.vertices)
vertices_in = np.nonzero(vertices_status)[0]
vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
assert_array_equal(label_src.vertices, vertices_in)
assert_array_equal(np.in1d(vertices_out, label_src.vertices), False)
# check values
value_idx = np.digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
assert_array_equal(label_src.values, values_in_src[value_idx])
# test exception
vertices = np.append([-1], vert_in_src)
with pytest.raises(ValueError, match='does not contain all of the label'):
Label(vertices, hemi='lh').fill(src)
# test filling empty label
label = Label([], hemi='lh')
label.fill(src)
assert_array_equal(label.vertices, np.array([], int))
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files."""
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert (len(stc_label.times) == stc_label.data.shape[1])
assert (len(stc_label.vertices[0]) == stc_label.data.shape[0])
@testing.requires_testing_data
def test_label_io():
"""Test IO of label files."""
tempdir = _TempDir()
label = read_label(label_fname)
# label attributes
assert_equal(label.name, 'test-lh')
assert label.subject is None
assert label.color is None
# save and reload
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Ensure two sets of labels are equal."""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert (label_a.name == label_b.name)
assert (label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
"""Test I/O from and to *.annot files."""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
with pytest.raises(IOError, match='\nPALS_B12_Lobes$'):
read_labels_from_annot(subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [label for label in labels if label.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [label for label in parc1 if not label.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for lt, rt in zip(parc1, parc):
assert_labels_equal(lt, rt)
# test saving only one hemisphere
parc = [label for label in labels if label.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert os.path.isfile(annot_fname % 'l')
assert not os.path.isfile(annot_fname % 'r')
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [label for label in parc if label.name.endswith('lh')]
for lt, rt in zip(parc1, parc_lh):
assert_labels_equal(lt, rt)
# test that the annotation is complete (test Label() support)
rr = read_surface(op.join(surf_dir, 'lh.white'))[0]
label = sum(labels, Label(hemi='lh', subject='fsaverage')).lh
assert_array_equal(label.vertices, np.arange(len(rr)))
@testing.requires_testing_data
def test_morph_labels():
"""Test morph_labels."""
# Just process the first 5 labels for speed
parc_fsaverage = read_labels_from_annot(
'fsaverage', 'aparc', subjects_dir=subjects_dir)[:5]
parc_sample = read_labels_from_annot(
'sample', 'aparc', subjects_dir=subjects_dir)[:5]
parc_fssamp = morph_labels(
parc_fsaverage, 'sample', subjects_dir=subjects_dir)
for lf, ls, lfs in zip(parc_fsaverage, parc_sample, parc_fssamp):
assert lf.hemi == ls.hemi == lfs.hemi
assert lf.name == ls.name == lfs.name
perc_1 = np.in1d(lfs.vertices, ls.vertices).mean() * 100
perc_2 = np.in1d(ls.vertices, lfs.vertices).mean() * 100
# Ideally this would be 100%, but we do not use the same algorithm
# as FreeSurfer ...
assert perc_1 > 92
assert perc_2 > 88
with pytest.raises(ValueError, match='wrong and fsaverage'):
morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir,
subject_from='wrong')
with pytest.raises(RuntimeError, match='Number of surface vertices'):
_load_vert_pos('sample', subjects_dir, 'white', 'lh', 1)
for label in parc_fsaverage:
label.subject = None
with pytest.raises(ValueError, match='subject_from must be provided'):
morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_labels_to_stc():
"""Test labels_to_stc."""
labels = read_labels_from_annot(
'sample', 'aparc', subjects_dir=subjects_dir)
values = np.random.RandomState(0).randn(len(labels))
with pytest.raises(ValueError, match='1 or 2 dim'):
labels_to_stc(labels, values[:, np.newaxis, np.newaxis])
with pytest.raises(ValueError, match=r'values\.shape'):
labels_to_stc(labels, values[np.newaxis])
stc = labels_to_stc(labels, values)
for value, label in zip(values, labels):
stc_label = stc.in_label(label)
assert (stc_label.data == value).all()
stc = read_source_estimate(stc_fname, 'sample')
@testing.requires_testing_data
def test_read_labels_from_annot():
"""Test reading labels from FreeSurfer parcellation."""
# test some invalid inputs
pytest.raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
subjects_dir=subjects_dir)
pytest.raises(ValueError, read_labels_from_annot, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
# read labels using hemi specification
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
for label in labels_lh:
assert label.name.endswith('-lh')
assert label.hemi == 'lh'
assert label.color is not None
# read labels using annot_fname
annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
subjects_dir=subjects_dir)
for label in labels_rh:
assert label.name.endswith('-rh')
assert label.hemi == 'rh'
assert label.color is not None
# combine the lh, rh, labels and sort them
labels_lhrh = list()
labels_lhrh.extend(labels_lh)
labels_lhrh.extend(labels_rh)
names = [label.name for label in labels_lhrh]
labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
# read all labels at once
labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result
_assert_labels_equal(labels_lhrh, labels_both)
# aparc has 68 cortical labels
assert (len(labels_both) == 68)
# test regexp
label = read_labels_from_annot('sample', parc='aparc.a2009s',
regexp='Angu', subjects_dir=subjects_dir)[0]
assert (label.name == 'G_pariet_inf-Angular-lh')
# silly, but real regexp:
label = read_labels_from_annot('sample', 'aparc.a2009s',
regexp='.*-.{4,}_.{3,3}-L',
subjects_dir=subjects_dir)[0]
assert (label.name == 'G_oc-temp_med-Lingual-lh')
pytest.raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
annot_fname=annot_fname, regexp='JackTheRipper',
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_labels_from_annot_annot2labels():
"""Test reading labels from parc. by comparing with mne_annot2labels."""
label_fnames = glob.glob(label_dir + '/*.label')
label_fnames.sort()
labels_mne = [read_label(fname) for fname in label_fnames]
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result, mne does not fill pos, so ignore it
_assert_labels_equal(labels, labels_mne, ignore_pos=True)
@testing.requires_testing_data
def test_write_labels_to_annot():
"""Test writing FreeSurfer parcellation from labels."""
tempdir = _TempDir()
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# create temporary subjects-dir skeleton
surf_dir = op.join(subjects_dir, 'sample', 'surf')
temp_surf_dir = op.join(tempdir, 'sample', 'surf')
os.makedirs(temp_surf_dir)
shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
os.makedirs(op.join(tempdir, 'sample', 'label'))
# test automatic filenames
dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
assert (op.exists(dst % ('lh', 'test1')))
assert (op.exists(dst % ('rh', 'test1')))
# lh only
for label in labels:
if label.hemi == 'lh':
break
write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
assert (op.exists(dst % ('lh', 'test2')))
assert (op.exists(dst % ('rh', 'test2')))
# rh only
for label in labels:
if label.hemi == 'rh':
break
write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
assert (op.exists(dst % ('lh', 'test3')))
assert (op.exists(dst % ('rh', 'test3')))
# label alone
pytest.raises(TypeError, write_labels_to_annot, labels[0], 'sample',
'test4', subjects_dir=tempdir)
# write left and right hemi labels with filenames:
fnames = [op.join(tempdir, hemi + '-myparc') for hemi in ['lh', 'rh']]
for fname in fnames:
with pytest.warns(RuntimeWarning, match='subjects_dir'):
write_labels_to_annot(labels, annot_fname=fname)
# read it back
labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels2.extend(labels22)
names = [label.name for label in labels2]
for label in labels:
idx = names.index(label.name)
assert_labels_equal(label, labels2[idx])
# same with label-internal colors
for fname in fnames:
write_labels_to_annot(labels, 'sample', annot_fname=fname,
overwrite=True, subjects_dir=subjects_dir)
labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels3.extend(labels33)
names3 = [label.name for label in labels3]
for label in labels:
idx = names3.index(label.name)
assert_labels_equal(label, labels3[idx])
# make sure we can't overwrite things
pytest.raises(ValueError, write_labels_to_annot, labels, 'sample',
annot_fname=fnames[0], subjects_dir=subjects_dir)
# however, this works
write_labels_to_annot(labels, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# label without color
labels_ = labels[:]
labels_[0] = labels_[0].copy()
labels_[0].color = None
write_labels_to_annot(labels_, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# duplicate color
labels_[0].color = labels_[2].color
pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# invalid color inputs
labels_[0].color = (1.1, 1., 1., 1.)
pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# overlapping labels
labels_ = labels[:]
cuneus_lh = labels[6]
precuneus_lh = labels[50]
labels_.append(precuneus_lh + cuneus_lh)
pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# unlabeled vertices
labels_lh = [label for label in labels if label.name.endswith('lh')]
write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
subjects_dir=subjects_dir)
assert_equal(len(labels_lh), len(labels_reloaded))
label0 = labels_lh[0]
label1 = labels_reloaded[-1]
assert_equal(label1.name, "unknown-lh")
assert (np.all(np.in1d(label0.vertices, label1.vertices)))
# unnamed labels
labels4 = labels[:]
labels4[0].name = None
pytest.raises(ValueError, write_labels_to_annot, labels4,
annot_fname=fnames[0])
@requires_sklearn
@testing.requires_testing_data
def test_split_label():
"""Test splitting labels."""
aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
regexp='lingual', subjects_dir=subjects_dir)
lingual = aparc[0]
# Test input error
pytest.raises(ValueError, lingual.split, 'bad_input_string')
# split with names
parts = ('lingual_post', 'lingual_ant')
post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
# check output names
assert_equal(post.name, parts[0])
assert_equal(ant.name, parts[1])
# check vertices add up
lingual_reconst = post + ant
lingual_reconst.name = lingual.name
lingual_reconst.comment = lingual.comment
lingual_reconst.color = lingual.color
assert_labels_equal(lingual_reconst, lingual)
# compare output of Label.split() method
post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
assert_labels_equal(post1, post)
assert_labels_equal(ant1, ant)
# compare fs_like split with freesurfer split
antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
assert_array_equal(antmost.vertices, fs_vert)
# check default label name
assert_equal(antmost.name, "lingual_div40-lh")
# Apply contiguous splitting to DMN label from parcellation in Yeo, 2011
label_default_mode = read_label(op.join(subjects_dir, 'fsaverage', 'label',
'lh.7Networks_7.label'))
DMN_sublabels = label_default_mode.split(parts='contiguous',
subject='fsaverage',
subjects_dir=subjects_dir)
assert_equal([len(label.vertices) for label in DMN_sublabels],
[16181, 7022, 5965, 5300, 823] + [1] * 23)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_sklearn
def test_stc_to_label():
"""Test stc_to_label."""
src = read_source_spaces(fwd_fname)
src_bad = read_source_spaces(src_bad_fname)
stc = read_source_estimate(stc_fname, 'sample')
os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
labels1 = _stc_to_label(stc, src='sample', smooth=3)
labels2 = _stc_to_label(stc, src=src, smooth=3)
assert_equal(len(labels1), len(labels2))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
with pytest.warns(RuntimeWarning, match='have holes'):
labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
connected=True)
pytest.raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
connected=True)
pytest.raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
connected=True)
assert_equal(len(labels_lh), 1)
assert_equal(len(labels_rh), 1)
# test getting tris
tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
pytest.raises(ValueError, spatial_tris_adjacency, tris,
remap_vertices=False)
adjacency = spatial_tris_adjacency(tris, remap_vertices=True)
assert (adjacency.shape[0] == len(stc.vertices[0]))
# "src" as a subject name
pytest.raises(TypeError, stc_to_label, stc, src=1, smooth=False,
connected=False, subjects_dir=subjects_dir)
pytest.raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
smooth=False, connected=False, subjects_dir=subjects_dir)
pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
connected=True, subjects_dir=subjects_dir)
pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
connected=False, subjects_dir=subjects_dir)
labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
connected=False,
subjects_dir=subjects_dir)
assert (len(labels_lh) > 1)
assert (len(labels_rh) > 1)
# with smooth='patch'
with pytest.warns(RuntimeWarning, match='have holes'):
labels_patch = stc_to_label(stc, src=src, smooth=True)
assert len(labels_patch) == len(labels1)
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morph():
"""Test inter-subject label morphing."""
label_orig = read_label(real_label_fname)
label_orig.subject = 'sample'
# should work for specifying vertices for both hemis, or just the
# hemi of the given label
vals = list()
for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
label = label_orig.copy()
# this should throw an error because the label has all zero values
pytest.raises(ValueError, label.morph, 'sample', 'fsaverage')
label.values.fill(1)
label = label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1)
label = label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2)
assert (np.in1d(label_orig.vertices, label.vertices).all())
assert (len(label.vertices) < 3 * len(label_orig.vertices))
vals.append(label.vertices)
assert_array_equal(vals[0], vals[1])
# make sure label smoothing can run
assert_equal(label.subject, 'sample')
verts = [np.arange(10242), np.arange(10242)]
for hemi in ['lh', 'rh']:
label.hemi = hemi
with pytest.warns(None): # morph map maybe missing
label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
pytest.raises(TypeError, label.morph, None, 1, 5, verts,
subjects_dir, 2)
pytest.raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
subjects_dir, 2)
with pytest.warns(None): # morph map maybe missing
label.smooth(subjects_dir=subjects_dir) # make sure this runs
@testing.requires_testing_data
def test_grow_labels():
"""Test generation of circular source labels."""
seeds = [0, 50000]
# these were chosen manually in mne_analyze
should_be_in = [[49, 227], [51207, 48794]]
hemis = [0, 1]
names = ['aneurism', 'tumor']
labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
tgt_names = ['aneurism-lh', 'tumor-rh']
tgt_hemis = ['lh', 'rh']
for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
should_be_in, tgt_names):
assert (np.any(label.vertices == seed))
assert (np.all(np.in1d(sh, label.vertices)))
assert_equal(label.hemi, hemi)
assert_equal(label.name, name)
# grow labels with and without overlap
seeds = [57532, [58887, 6304]]
l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
seeds = [57532, [58887, 6304]]
l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False)
# test label naming
assert_equal(l01.name, 'Label_0-lh')
assert_equal(l02.name, 'Label_1-lh')
assert_equal(l11.name, 'Label_0-lh')
assert_equal(l12.name, 'Label_1-lh')
# make sure set 1 does not overlap
overlap = np.intersect1d(l11.vertices, l12.vertices, True)
assert_array_equal(overlap, [])
# make sure both sets cover the same vertices
l0 = l01 + l02
l1 = l11 + l12
assert_array_equal(l1.vertices, l0.vertices)
@testing.requires_testing_data
def test_random_parcellation():
"""Test generation of random cortical parcellation."""
hemi = 'both'
n_parcel = 50
surface = 'sphere.reg'
subject = 'sample_ds'
rng = np.random.RandomState(0)
# Parcellation
labels = random_parcellation(subject, n_parcel, hemi, subjects_dir,
surface=surface, random_state=rng)
# test number of labels
assert_equal(len(labels), n_parcel)
if hemi == 'both':
hemi = ['lh', 'rh']
hemis = np.atleast_1d(hemi)
for hemi in set(hemis):
vertices_total = []
for label in labels:
if label.hemi == hemi:
# test that labels are not empty
assert (len(label.vertices) > 0)
# vertices of hemi covered by labels
vertices_total = np.append(vertices_total, label.vertices)
# test that labels don't intersect
assert_equal(len(np.unique(vertices_total)), len(vertices_total))
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert, _ = read_surface(surf_fname)
# Test that labels cover whole surface
assert_array_equal(np.sort(vertices_total), np.arange(len(vert)))
@testing.requires_testing_data
def test_label_sign_flip():
"""Test label sign flip computation."""
src = read_source_spaces(src_fname)
label = Label(vertices=src[0]['vertno'][:5], hemi='lh')
src[0]['nn'][label.vertices] = np.array(
[[1., 0., 0.],
[0., 1., 0.],
[0, 0, 1.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.]])
known_flips = np.array([1, 1, np.nan, 1, 1])
    idx = [0, 1, 3, 4]  # indices that are usable (third row is orthogonal)
flip = label_sign_flip(label, src)
assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), len(idx))
bi_label = label + Label(vertices=src[1]['vertno'][:5], hemi='rh')
src[1]['nn'][src[1]['vertno'][:5]] = -src[0]['nn'][label.vertices]
flip = label_sign_flip(bi_label, src)
known_flips = np.array([1, 1, np.nan, 1, 1, 1, 1, np.nan, 1, 1])
idx = [0, 1, 3, 4, 5, 6, 8, 9]
assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), 0.)
src[1]['nn'][src[1]['vertno'][:5]] *= -1
flip = label_sign_flip(bi_label, src)
assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), len(idx))
@testing.requires_testing_data
def test_label_center_of_mass():
"""Test computing the center of mass of a label."""
stc = read_source_estimate(stc_fname)
stc.lh_data[:] = 0
vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
assert_equal(vertex_stc, 124791)
label = Label(stc.vertices[1], pos=None, values=stc.rh_data.mean(axis=1),
hemi='rh', subject='sample')
vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
assert_equal(vertex_label, vertex_stc)
labels = read_labels_from_annot('sample', parc='aparc.a2009s',
subjects_dir=subjects_dir)
src = read_source_spaces(src_fname)
# Try a couple of random ones, one from left and one from right
# Visually verified in about the right place using mne_analyze
for label, expected in zip([labels[2], labels[3], labels[-5]],
[141162, 145221, 55979]):
label.values[:] = -1
pytest.raises(ValueError, label.center_of_mass,
subjects_dir=subjects_dir)
label.values[:] = 0
pytest.raises(ValueError, label.center_of_mass,
subjects_dir=subjects_dir)
label.values[:] = 1
assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=label.vertices),
expected)
# restrict to source space
idx = 0 if label.hemi == 'lh' else 1
# this simple nearest version is not equivalent, but is probably
# close enough for many labels (including the test ones):
pos = label.pos[np.where(label.vertices == expected)[0][0]]
pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
pos = np.argmin(np.sum(pos * pos, axis=1))
src_expected = src[idx]['vertno'][pos]
# see if we actually get the same one
src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src_restrict),
src_expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src),
src_expected)
# degenerate cases
pytest.raises(ValueError, label.center_of_mass, subjects_dir=subjects_dir,
restrict_vertices='foo')
pytest.raises(TypeError, label.center_of_mass, subjects_dir=subjects_dir,
surf=1)
pytest.raises(IOError, label.center_of_mass, subjects_dir=subjects_dir,
surf='foo')
@testing.requires_testing_data
def test_select_sources():
"""Test the selection of sources for simulation."""
subject = 'sample'
label_file = op.join(subjects_dir, subject, 'label', 'aparc',
'temporalpole-rh.label')
    # Regardless of other parameters, using extent 0 should always yield
    # a single source.
tp_label = read_label(label_file)
tp_label.values[:] = 1
labels = ['lh', tp_label]
locations = ['random', 'center']
for label, location in product(labels, locations):
label = select_sources(
subject, label, location, extent=0, subjects_dir=subjects_dir)
assert (len(label.vertices) == 1)
# As we increase the extent, the new region should contain the previous
# one.
label = select_sources(subject, 'lh', 0, extent=0,
subjects_dir=subjects_dir)
for extent in range(1, 3):
new_label = select_sources(subject, 'lh', 0, extent=extent * 2,
subjects_dir=subjects_dir)
assert (set(new_label.vertices) > set(label.vertices))
assert (new_label.hemi == 'lh')
label = new_label
# With a large enough extent and not allowing growing outside the label,
# every vertex of the label should be in the region.
label = select_sources(subject, tp_label, 0, extent=30,
grow_outside=False, subjects_dir=subjects_dir)
assert (set(label.vertices) == set(tp_label.vertices))
# Without this restriction, we should get new vertices.
label = select_sources(subject, tp_label, 0, extent=30,
grow_outside=True, subjects_dir=subjects_dir)
assert (set(label.vertices) > set(tp_label.vertices))
# Other parameters are taken into account.
label = select_sources(subject, tp_label, 0, extent=10,
grow_outside=False, subjects_dir=subjects_dir,
name='mne')
assert (label.name == 'mne')
    assert (label.hemi == 'rh')
run_tests_if_main()
|
|
import os
import re
import sys
import time
import logging
import subprocess
if sys.version_info[:2] >= (3, 0):
# pylint: disable=E0611,F0401,I0011
from urllib.request import build_opener
from urllib.error import HTTPError, URLError
from urllib.parse import parse_qs, urlparse
uni, pyver = str, 3
else:
from urllib2 import build_opener, HTTPError, URLError
from urlparse import parse_qs, urlparse
uni, pyver = unicode, 2
early_py_version = sys.version_info[:2] < (2, 7)
from . import __version__, g
from .pafy import call_gdata
from .playlist import get_playlist2
from .util import xenc
dbg = logging.debug
def extract_video_id(url):
""" Extract the video id from a url, return video id as str. """
idregx = re.compile(r'[\w-]{11}$')
url = str(url)
if idregx.match(url):
return url # ID of video
if '://' not in url:
url = '//' + url
parsedurl = urlparse(url)
if parsedurl.netloc in ('youtube.com', 'www.youtube.com', 'm.youtube.com', 'gaming.youtube.com'):
query = parse_qs(parsedurl.query)
if 'v' in query and idregx.match(query['v'][0]):
return query['v'][0]
elif parsedurl.netloc in ('youtu.be', 'www.youtu.be'):
vidid = parsedurl.path.split('/')[-1] if parsedurl.path else ''
if idregx.match(vidid):
return vidid
err = "Need 11 character video id or the URL of the video. Got %s"
raise ValueError(err % url)
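# Hedged usage sketch (the 11-character id below is made up for illustration):
#   extract_video_id('abcdefghijk')                                  -> 'abcdefghijk'
#   extract_video_id('https://www.youtube.com/watch?v=abcdefghijk')  -> 'abcdefghijk'
#   extract_video_id('youtu.be/abcdefghijk')                         -> 'abcdefghijk'
#   extract_video_id('not a video url')                              -> raises ValueError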
class BasePafy(object):
""" Class to represent a YouTube video. """
def __init__(self, video_url, basic=True, gdata=False,
size=False, callback=None, ydl_opts=None):
""" Set initial values. """
self.version = __version__
self.videoid = extract_video_id(video_url)
self.watchv_url = g.urls['watchv'] % self.videoid
self.callback = callback or (lambda x: None)
self._have_basic = False
self._have_gdata = False
self._description = None
self._likes = None
self._dislikes = None
self._category = None
self._published = None
self._username = None
self._streams = []
self._oggstreams = []
self._m4astreams = []
self._allstreams = []
self._videostreams = []
self._audiostreams = []
self._title = None
self._rating = None
self._length = None
self._author = None
self._duration = None
self._keywords = None
self._bigthumb = None
self._viewcount = None
self._bigthumbhd = None
self._mix_pl = None
self.expiry = None
if basic:
self._fetch_basic()
if gdata:
self._fetch_gdata()
if size:
for s in self.allstreams:
# pylint: disable=W0104
s.get_filesize()
def _fetch_basic(self):
""" Fetch basic data and streams. """
raise NotImplementedError
def _fetch_gdata(self):
""" Extract gdata values, fetch gdata if necessary. """
raise NotImplementedError
def _get_video_gdata(self, video_id):
""" Return json string containing video metadata from gdata api. """
self.callback("Fetching video gdata")
query = {'part': 'id,snippet,statistics',
'maxResults': 1,
'id': video_id}
gdata = call_gdata('videos', query)
dbg("Fetched video gdata")
self.callback("Fetched video gdata")
return gdata
def _process_streams(self):
""" Create Stream object lists from internal stream maps. """
raise NotImplementedError
def __repr__(self):
""" Print video metadata. Return utf8 string. """
if self._have_basic:
keys = "Title Author ID Duration Rating Views Thumbnail"
keys = keys.split(" ")
keywords = ", ".join(self.keywords)
info = {"Title": self.title,
"Author": self.author,
"Views": self.viewcount,
"Rating": self.rating,
"Duration": self.duration,
"ID": self.videoid,
"Thumbnail": self.thumb}
nfo = "\n".join(["%s: %s" % (k, info.get(k, "")) for k in keys])
else:
nfo = "Pafy object: %s [%s]" % (self.videoid,
self.title[:45] + "..")
return nfo.encode("utf8", "replace") if pyver == 2 else nfo
@property
def streams(self):
""" The streams for a video. Returns list."""
if not self._streams:
self._process_streams()
return self._streams
@property
def allstreams(self):
""" All stream types for a video. Returns list. """
if not self._allstreams:
self._process_streams()
return self._allstreams
@property
def audiostreams(self):
""" Return a list of audio Stream objects. """
if not self._audiostreams:
self._process_streams()
return self._audiostreams
@property
def videostreams(self):
""" The video streams for a video. Returns list. """
if not self._videostreams:
self._process_streams()
return self._videostreams
@property
def oggstreams(self):
""" Return a list of ogg encoded Stream objects. """
if not self._oggstreams:
self._process_streams()
return self._oggstreams
@property
def m4astreams(self):
""" Return a list of m4a encoded Stream objects. """
if not self._m4astreams:
self._process_streams()
return self._m4astreams
@property
def title(self):
""" Return YouTube video title as a string. """
if not self._title:
self._fetch_basic()
return self._title
@property
def author(self):
""" The uploader of the video. Returns str. """
if not self._author:
self._fetch_basic()
return self._author
@property
def rating(self):
""" Rating for a video. Returns float. """
if not self._rating:
self._fetch_basic()
return self._rating
@property
def length(self):
""" Length of a video in seconds. Returns int. """
if not self._length:
self._fetch_basic()
return self._length
@property
def viewcount(self):
""" Number of views for a video. Returns int. """
if not self._viewcount:
self._fetch_basic()
return self._viewcount
@property
def bigthumb(self):
""" Large thumbnail image url. Returns str. """
self._fetch_basic()
return self._bigthumb
@property
def bigthumbhd(self):
""" Extra large thumbnail image url. Returns str. """
self._fetch_basic()
return self._bigthumbhd
@property
def thumb(self):
""" Thumbnail image url. Returns str. """
return g.urls['thumb'] % self.videoid
@property
def duration(self):
""" Duration of a video (HH:MM:SS). Returns str. """
if not self._length:
self._fetch_basic()
self._duration = time.strftime('%H:%M:%S', time.gmtime(self._length))
self._duration = uni(self._duration)
return self._duration
@property
def keywords(self):
""" Return keywords as list of str. """
if not self._keywords:
self._fetch_gdata()
return self._keywords
@property
def category(self):
""" YouTube category of the video. Returns string. """
if not self._category:
self._fetch_gdata()
return self._category
@property
def description(self):
""" Description of the video. Returns string. """
if not self._description:
self._fetch_gdata()
return self._description
@property
def username(self):
""" Return the username of the uploader. """
if not self._username:
self._fetch_basic()
return self._username
@property
def published(self):
""" The upload date and time of the video. Returns string. """
if not self._published:
self._fetch_gdata()
return self._published.replace(".000Z", "").replace("T", " ")
@property
def likes(self):
""" The number of likes for the video. Returns int. """
if not self._likes:
self._fetch_basic()
return self._likes
@property
def dislikes(self):
""" The number of dislikes for the video. Returns int. """
if not self._dislikes:
self._fetch_basic()
return self._dislikes
@property
def mix(self):
""" The playlist for the related YouTube mix. Returns a Playlist object. """
if self._mix_pl is None:
try:
self._mix_pl = get_playlist2("RD" + self.videoid)
except IOError:
return None
return self._mix_pl
def _getbest(self, preftype="any", ftypestrict=True, vidonly=False):
"""
Return the highest resolution video available.
Select from video-only streams if vidonly is True
"""
streams = self.videostreams if vidonly else self.streams
if not streams:
return None
def _sortkey(x, key3d=0, keyres=0, keyftype=0):
""" sort function for max(). """
key3d = "3D" not in x.resolution
keyres = int(x.resolution.split("x")[0])
keyftype = preftype == x.extension
strict = (key3d, keyftype, keyres)
nonstrict = (key3d, keyres, keyftype)
return strict if ftypestrict else nonstrict
r = max(streams, key=_sortkey)
if ftypestrict and preftype != "any" and r.extension != preftype:
return None
else:
return r
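    # Hedged note on the ordering used by _getbest above: candidate streams
    # are ranked by (not 3D, extension matches preftype, resolution) when
    # ftypestrict is True, and by (not 3D, resolution, extension matches)
    # otherwise; with ftypestrict the winner is returned only if its
    # extension actually equals preftype, else None.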
def getbestvideo(self, preftype="any", ftypestrict=True):
"""
Return the best resolution video-only stream.
set ftypestrict to False to return a non-preferred format if that
has a higher resolution
"""
return self._getbest(preftype, ftypestrict, vidonly=True)
def getbest(self, preftype="any", ftypestrict=True):
"""
Return the highest resolution video+audio stream.
set ftypestrict to False to return a non-preferred format if that
has a higher resolution
"""
return self._getbest(preftype, ftypestrict, vidonly=False)
def getbestaudio(self, preftype="any", ftypestrict=True):
""" Return the highest bitrate audio Stream object."""
if not self.audiostreams:
return None
def _sortkey(x, keybitrate=0, keyftype=0):
""" Sort function for max(). """
keybitrate = int(x.rawbitrate)
keyftype = preftype == x.extension
strict, nonstrict = (keyftype, keybitrate), (keybitrate, keyftype)
return strict if ftypestrict else nonstrict
r = max(self.audiostreams, key=_sortkey)
if ftypestrict and preftype != "any" and r.extension != preftype:
return None
else:
return r
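    # Hedged usage sketch (`p` is an illustrative Pafy-like object):
    #   p.getbestaudio(preftype="m4a")                     # best m4a, or None
    #   p.getbestaudio(preftype="m4a", ftypestrict=False)  # highest bitrate overall;
    #                                                      # extension only breaks ties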
def populate_from_playlist(self, pl_data):
""" Populate Pafy object with items fetched from playlist data. """
self._title = pl_data.get("title")
self._author = pl_data.get("author")
self._length = int(pl_data.get("length_seconds", 0))
self._rating = pl_data.get("rating", 0.0)
self._viewcount = "".join(re.findall(r"\d", pl_data.get("views", "0")))
self._viewcount = int(self._viewcount)
self._description = pl_data.get("description")
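    # Worked example for the view-count normalisation above:
    #   "".join(re.findall(r"\d", "1,234,567 views")) == "1234567"
    # so self._viewcount ends up as the int 1234567 even when the playlist
    # data reports a formatted string.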
class BaseStream(object):
""" YouTube video stream class. """
def __init__(self, parent):
""" Set initial values. """
self._itag = None
self._mediatype = None
self._threed = None
self._rawbitrate = None
self._resolution = None
self._quality = None
self._dimensions = None
self._bitrate = None
self._extension = None
self.encrypted = None
self._notes = None
self._url = None
self._rawurl = None
self._parent = parent
self._filename = None
self._fsize = None
self._active = False
def generate_filename(self, meta=False, max_length=None):
""" Generate filename. """
ok = re.compile(r'[^/]')
if os.name == "nt":
ok = re.compile(r'[^\\/:*?"<>|]')
filename = "".join(x if ok.match(x) else "_" for x in self.title)
if meta:
filename += " - %s - %s" % (self._parent.videoid, self.itag)
if max_length:
max_length = max_length + 1 + len(self.extension)
if len(filename) > max_length:
filename = filename[:max_length-3] + '...'
filename += "." + self.extension
return xenc(filename)
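    # Hedged example of the sanitising above (title and extension are made
    # up): a video titled 'Intro: Part 1/2?' with a webm stream becomes
    # 'Intro_ Part 1_2_.webm' on Windows (every blacklisted character is
    # replaced by '_'), while on POSIX only the '/' is replaced; with
    # meta=True the video id and itag are appended before the extension.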
@property
def rawbitrate(self):
""" Return raw bitrate value. """
return self._rawbitrate
@property
def threed(self):
""" Return bool, True if stream is 3D. """
return self._threed
@property
def itag(self):
""" Return itag value of stream. """
return self._itag
@property
def resolution(self):
""" Return resolution of stream as str. 0x0 if audio. """
return self._resolution
@property
def dimensions(self):
""" Return dimensions of stream as tuple. (0, 0) if audio. """
return self._dimensions
@property
def quality(self):
""" Return quality of stream (bitrate or resolution).
eg, 128k or 640x480 (str)
"""
return self._quality
@property
def title(self):
""" Return YouTube video title as a string. """
return self._parent.title
@property
def extension(self):
""" Return appropriate file extension for stream (str).
Possible values are: 3gp, m4a, m4v, mp4, webm, ogg
"""
return self._extension
@property
def bitrate(self):
""" Return bitrate of an audio stream. """
return self._bitrate
@property
def mediatype(self):
""" Return mediatype string (normal, audio or video).
(normal means a stream containing both video and audio.)
"""
return self._mediatype
@property
def notes(self):
""" Return additional notes regarding the stream format. """
return self._notes
@property
def filename(self):
""" Return filename of stream; derived from title and extension. """
if not self._filename:
self._filename = self.generate_filename()
return self._filename
@property
def url(self):
""" Return the url, decrypt if required. """
return self._url
@property
def url_https(self):
""" Return https url. """
return self.url.replace("http://", "https://")
def __repr__(self):
""" Return string representation. """
out = "%s:%s@%s" % (self.mediatype, self.extension, self.quality)
return out
def get_filesize(self):
""" Return filesize of the stream in bytes. Set member variable. """
if not self._fsize:
try:
dbg("Getting stream size")
cl = "content-length"
self._fsize = int(g.opener.open(self.url).headers[cl])
dbg("Got stream size")
except (AttributeError, HTTPError, URLError):
self._fsize = 0
return self._fsize
def cancel(self):
""" Cancel an active download. """
if self._active:
self._active = False
return True
def download(self, filepath="", quiet=False, callback=lambda *x: None,
meta=False, remux_audio=False):
""" Download. Use quiet=True to supress output. Return filename.
Use meta=True to append video id and itag to generated filename
Use remax_audio=True to remux audio file downloads
"""
# pylint: disable=R0912,R0914
# Too many branches, too many local vars
savedir = filename = ""
if filepath and os.path.isdir(filepath):
savedir, filename = filepath, self.generate_filename(max_length=256-len('.temp'))
elif filepath:
savedir, filename = os.path.split(filepath)
else:
filename = self.generate_filename(meta=meta, max_length=256-len('.temp'))
filepath = os.path.join(savedir, filename)
temp_filepath = filepath + ".temp"
status_string = (' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} '
'KB/s]. ETA: [{:.0f} secs]')
if early_py_version:
status_string = (' {0:} Bytes [{1:.2%}] received. Rate:'
' [{2:4.0f} KB/s]. ETA: [{3:.0f} secs]')
response = g.opener.open(self.url)
total = int(response.info()['Content-Length'].strip())
chunksize, bytesdone, t0 = 16384, 0, time.time()
fmode, offset = "wb", 0
if os.path.exists(temp_filepath):
if os.stat(temp_filepath).st_size < total:
offset = os.stat(temp_filepath).st_size
fmode = "ab"
outfh = open(temp_filepath, fmode)
if offset:
# partial file exists, resume download
resuming_opener = build_opener()
resuming_opener.addheaders = [('User-Agent', g.user_agent),
("Range", "bytes=%s-" % offset)]
response = resuming_opener.open(self.url)
bytesdone = offset
self._active = True
while self._active:
chunk = response.read(chunksize)
outfh.write(chunk)
elapsed = time.time() - t0
bytesdone += len(chunk)
if elapsed:
rate = ((float(bytesdone) - float(offset)) / 1024.0) / elapsed
eta = (total - bytesdone) / (rate * 1024)
else: # Avoid ZeroDivisionError
rate = 0
eta = 0
progress_stats = (bytesdone, bytesdone * 1.0 / total, rate, eta)
if not chunk:
outfh.close()
break
if not quiet:
status = status_string.format(*progress_stats)
sys.stdout.write("\r" + status + ' ' * 4 + "\r")
sys.stdout.flush()
if callback:
callback(total, *progress_stats)
if self._active:
if remux_audio and self.mediatype == "audio":
remux(temp_filepath, filepath, quiet=quiet, muxer=remux_audio)
else:
os.rename(temp_filepath, filepath)
return filepath
else: # download incomplete, return temp filepath
outfh.close()
return temp_filepath
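    # Hedged usage sketch of download() (paths and names are illustrative):
    #   stream = some_pafy_video.getbest(preftype="mp4")
    #   path = stream.download(filepath="/tmp", quiet=True)
    # Data is first written to '<filename>.temp'; if an earlier, smaller
    # .temp file exists the transfer resumes from its size via an HTTP Range
    # header, and the file is renamed only once the download completes.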
def remux(infile, outfile, quiet=False, muxer="ffmpeg"):
""" Remux audio. """
muxer = muxer if isinstance(muxer, str) else "ffmpeg"
for tool in set([muxer, "ffmpeg", "avconv"]):
cmd = [tool, "-y", "-i", infile, "-acodec", "copy", "-vn", outfile]
try:
with open(os.devnull, "w") as devnull:
subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)
except OSError:
dbg("Failed to remux audio using %s", tool)
else:
os.unlink(infile)
dbg("remuxed audio file using %s" % tool)
if not quiet:
sys.stdout.write("\nAudio remuxed.\n")
break
else:
logging.warning("audio remux failed")
os.rename(infile, outfile)
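# Hedged note on remux() above: it tries the requested muxer plus ffmpeg and
# avconv in turn, deleting the input and stopping at the first tool that can
# be spawned; if none of them can be run it falls back to simply renaming the
# input file to the requested output path.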
|