Dataset columns: gt (stringclasses, 1 value); context (stringlengths, 2.49k to 119k).
# -*- coding: utf-8 -*-

import collections
import datetime
import sys

if sys.version_info < (3, 0):
    import httplib as HttpStatusCode
else:
    import http.client as HttpStatusCode

import flask
import flask_restful
import sqlalchemy
import werkzeug.exceptions
from sqlalchemy import orm

import mfit

__all__ = ['Base', 'DBContext', 'DBContextFactory']


class DBContext:

    def __init__(self, session):
        """
        Decorator class that manages persistence operations for
        ORM-mapped objects.

        Parameters
        ----------
        session : sqlalchemy.orm.session.Session
            Session instance.

        See Also
        --------
        sqlalchemy.orm.session.Session
        """
        # Composition must be used instead of inheritance because
        # SQLAlchemy Sessions are always accessed through a factory.
        self._session = session

    def add(self, entity, created_by=None, updated_by=None):
        """
        Decorator method.

        Extends the SQLAlchemy Session's `add()` to require specifying
        the `created_by` or `updated_by` information given the
        respective condition. The appropriate `created_at` or
        `updated_at` field is set to the current UTC date and time.

        Parameters
        ----------
        entity : models.Base subclass
            Domain model instance.
        created_by : int, optional
            Unique identifier for the user who created the entity. This
            parameter is required only when the entity is being created.
            Defaults to `None`.
        updated_by : int, optional
            Unique identifier for the user who updated the entity. This
            parameter is required only when the entity is being updated.
            Defaults to `None`.

        Returns
        -------
        None

        Raises
        ------
        TypeError
            If the `created_by` or `updated_by` information was not
            specified given the respective condition.

        See Also
        --------
        sqlalchemy.orm.session.Session
        """
        should_be_persisted = True
        message = 'add() missing 1 required positional argument: "{}"'

        entity_state = sqlalchemy.inspect(entity)
        if entity_state.transient:
            if created_by is None:
                raise TypeError(message.format('created_by'))
            else:
                entity.created_at = datetime.datetime.utcnow()
                entity.created_by = created_by
        elif entity_state.persistent:
            if entity not in self._session.dirty:
                should_be_persisted = False
            elif updated_by is None:
                raise TypeError(message.format('updated_by'))
            else:
                entity.updated_at = datetime.datetime.utcnow()
                entity.updated_by = updated_by

        if should_be_persisted:
            self._session.add(entity)

    def __getattr__(self, name):
        return getattr(self._session, name)


class DBContextFactory:

    def __init__(self, connection_string):
        """
        Factory class for producing DBContexts.

        Parameters
        ----------
        connection_string : str
            Formatted string containing host and authentication
            information.
        """
        engine = sqlalchemy.create_engine(connection_string)
        SessionFactory = orm.sessionmaker()
        SessionFactory.configure(bind=engine)
        self._SessionFactory = orm.scoped_session(SessionFactory)

    def create(self):
        """
        Produce an object configured as specified.

        Returns
        -------
        resources.base.DBContext

        References
        ----------
        See the Stack Overflow answer for more details [1].

        .. [1] zzzeek, "SQLAlchemy: Creating vs. Reusing a Session",
           http://stackoverflow.com/a/12223711.
        """
        # Should this dispose the engine, close the connection, and / or
        # close the session?
        session = self._SessionFactory()
        return DBContext(session=session)


class _Base(flask_restful.Resource):

    def __init__(self):
        super().__init__()

        self._db_context_factory = DBContextFactory(
            connection_string=mfit.configuration['repositories']
                                                 ['PostgreSQL']
                                                 ['connection_string'])
        self._db_context = self._db_context_factory.create()


class Base(_Base):

    _model = None
    _resource = None
    _view = None

    def get(self, id):
        # How can this code duplication be avoided?
        try:
            entity = self._get_or_404(id=id)
        except werkzeug.exceptions.NotFound:
            return dict(), HttpStatusCode.NOT_FOUND
        return self.to_json(entity=entity)

    def put(self, id):
        try:
            entity = self._get_or_404(id=id)
        except werkzeug.exceptions.NotFound:
            return dict(), HttpStatusCode.NOT_FOUND

        for attribute, value in flask.request.get_json().items():
            setattr(entity, attribute, value)
        self._db_context.add(entity, updated_by=192)
        self._db_context.commit()

        body = self._resource.to_json(entity=entity)
        self._db_context.close()
        return body

    def delete(self, id):
        matched_entities_count = (
            self._db_context.query(self._model)
                            .filter_by(id=id)
                            .delete(synchronize_session=False))
        if matched_entities_count == 0:
            return dict(), HttpStatusCode.NOT_FOUND

        self._db_context.commit()
        self._db_context.close()
        return dict(), HttpStatusCode.NO_CONTENT

    def _get_or_404(self, id):
        """
        Parameters
        ----------
        id : int
            Unique identifier.

        Returns
        -------
        models.Base subclass
            Entity.

        Raises
        ------
        werkzeug.exceptions.HTTPException
            If no entities match the given condition or if more than 1
            entity matches the given condition.
        """
        try:
            return self._db_context.query(self._model) \
                                   .filter_by(id=id) \
                                   .one()
        except (orm.exc.NoResultFound, orm.exc.MultipleResultsFound):
            flask_restful.abort(404)

    @classmethod
    def get_self_url(cls, entity):
        return mfit.api.url_for(cls._resource, id=entity.id, _external=True)

    @classmethod
    def to_json(cls, entity):
        data = cls._view().dump(entity).data
        urls = {
            'self': cls.get_self_url(entity=entity)
        }
        return collections.OrderedDict([('data', data), ('urls', urls)])
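The wrapper above is easy to exercise in isolation. Below is a minimal, hedged sketch (assuming SQLAlchemy 1.4 or newer and the `DBContext` class defined above; the `Habit` model and user ids are invented for illustration) showing how `add()` stamps `created_*` on a transient instance and `updated_*` on a dirty persistent one:

from sqlalchemy import Column, DateTime, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

ModelBase = declarative_base()

class Habit(ModelBase):                      # hypothetical audited model
    __tablename__ = 'habits'
    id = Column(Integer, primary_key=True)
    created_at = Column(DateTime)
    created_by = Column(Integer)
    updated_at = Column(DateTime)
    updated_by = Column(Integer)

engine = create_engine('sqlite://')          # throwaway in-memory database
ModelBase.metadata.create_all(engine)
db_context = DBContext(session=sessionmaker(bind=engine)())

habit = Habit()
db_context.add(habit, created_by=1)          # transient -> created_* stamped
db_context.commit()

habit.created_by = 2                         # make the persistent row dirty
db_context.add(habit, updated_by=1)          # dirty persistent -> updated_* stamped
db_context.commit()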
""" Support for the Uber API. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.uber/ """ import logging from datetime import timedelta from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ["uber_rides==0.2.4"] ICON = "mdi:taxi" # Return cached results if last scan was less then this time ago. MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Uber sensor.""" if None in (config.get("start_latitude"), config.get("start_longitude")): _LOGGER.error( "You must set start latitude and longitude to use the Uber sensor!" ) return False if config.get("server_token") is None: _LOGGER.error("You must set a server_token to use the Uber sensor!") return False from uber_rides.session import Session session = Session(server_token=config.get("server_token")) wanted_product_ids = config.get("product_ids") dev = [] timeandpriceest = UberEstimate(session, config["start_latitude"], config["start_longitude"], config.get("end_latitude"), config.get("end_longitude")) for product_id, product in timeandpriceest.products.items(): if (wanted_product_ids is not None) and \ (product_id not in wanted_product_ids): continue dev.append(UberSensor("time", timeandpriceest, product_id, product)) if (product.get("price_details") is not None) and \ product["price_details"]["estimate"] is not "Metered": dev.append(UberSensor("price", timeandpriceest, product_id, product)) add_devices(dev) # pylint: disable=too-few-public-methods class UberSensor(Entity): """Implementation of an Uber sensor.""" def __init__(self, sensorType, products, product_id, product): """Initialize the Uber sensor.""" self.data = products self._product_id = product_id self._product = product self._sensortype = sensorType self._name = "{} {}".format(self._product["display_name"], self._sensortype) if self._sensortype == "time": self._unit_of_measurement = "min" time_estimate = self._product.get("time_estimate_seconds", 0) self._state = int(time_estimate / 60) elif self._sensortype == "price": if self._product.get("price_details") is not None: price_details = self._product["price_details"] self._unit_of_measurement = price_details.get("currency_code") if price_details.get("low_estimate") is not None: statekey = "minimum" else: statekey = "low_estimate" self._state = int(price_details.get(statekey, 0)) else: self._state = 0 self.update() @property def name(self): """Return the name of the sensor.""" if "uber" not in self._name.lower(): self._name = "Uber{}".format(self._name) return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return the state attributes.""" time_estimate = self._product.get("time_estimate_seconds") params = { "Product ID": self._product["product_id"], "Product short description": self._product["short_description"], "Product display name": self._product["display_name"], "Product description": self._product["description"], "Pickup time estimate (in seconds)": time_estimate, "Trip duration (in seconds)": self._product.get("duration"), "Vehicle Capacity": self._product["capacity"] } if self._product.get("price_details") is not None: price_details = self._product["price_details"] dunit 
= price_details.get("distance_unit") distance_key = "Trip distance (in {}s)".format(dunit) distance_val = self._product.get("distance") params["Cost per minute"] = price_details.get("cost_per_minute") params["Distance units"] = price_details.get("distance_unit") params["Cancellation fee"] = price_details.get("cancellation_fee") cpd = price_details.get("cost_per_distance") params["Cost per distance"] = cpd params["Base price"] = price_details.get("base") params["Minimum price"] = price_details.get("minimum") params["Price estimate"] = price_details.get("estimate") params["Price currency code"] = price_details.get("currency_code") params["High price estimate"] = price_details.get("high_estimate") params["Low price estimate"] = price_details.get("low_estimate") params["Surge multiplier"] = price_details.get("surge_multiplier") else: distance_key = "Trip distance (in miles)" distance_val = self._product.get("distance") params[distance_key] = distance_val return {k: v for k, v in params.items() if v is not None} @property def icon(self): """Icon to use in the frontend, if any.""" return ICON # pylint: disable=too-many-branches def update(self): """Get the latest data from the Uber API and update the states.""" self.data.update() self._product = self.data.products[self._product_id] if self._sensortype == "time": time_estimate = self._product.get("time_estimate_seconds", 0) self._state = int(time_estimate / 60) elif self._sensortype == "price": price_details = self._product.get("price_details") if price_details is not None: min_price = price_details.get("minimum") self._state = int(price_details.get("low_estimate", min_price)) else: self._state = 0 # pylint: disable=too-few-public-methods class UberEstimate(object): """The class for handling the time and price estimate.""" # pylint: disable=too-many-arguments def __init__(self, session, start_latitude, start_longitude, end_latitude=None, end_longitude=None): """Initialize the UberEstimate object.""" self._session = session self.start_latitude = start_latitude self.start_longitude = start_longitude self.end_latitude = end_latitude self.end_longitude = end_longitude self.products = None self.update() @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest product info and estimates from the Uber API.""" from uber_rides.client import UberRidesClient client = UberRidesClient(self._session) self.products = {} products_response = client.get_products( self.start_latitude, self.start_longitude) products = products_response.json.get("products") for product in products: self.products[product["product_id"]] = product if self.end_latitude is not None and self.end_longitude is not None: price_response = client.get_price_estimates( self.start_latitude, self.start_longitude, self.end_latitude, self.end_longitude) prices = price_response.json.get("prices", []) for price in prices: product = self.products[price["product_id"]] product["duration"] = price.get("duration", "0") product["distance"] = price.get("distance", "0") price_details = product.get("price_details") if product.get("price_details") is None: price_details = {} price_details["estimate"] = price.get("estimate", "0") price_details["high_estimate"] = price.get("high_estimate", "0") price_details["low_estimate"] = price.get("low_estimate", "0") price_details["currency_code"] = price.get("currency_code") surge_multiplier = price.get("surge_multiplier", "0") price_details["surge_multiplier"] = surge_multiplier product["price_details"] = price_details estimate_response = 
client.get_pickup_time_estimates( self.start_latitude, self.start_longitude) estimates = estimate_response.json.get("times") for estimate in estimates: self.products[estimate["product_id"]][ "time_estimate_seconds"] = estimate.get("estimate", "0")
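The sensor leans on `Throttle` so that the many entities sharing one `UberEstimate` do not each hit the API on every poll. As a rough, standalone illustration of that idea (not Home Assistant's actual implementation), a cooldown decorator can be sketched like this:

import time
from datetime import timedelta

def simple_throttle(min_interval):
    """Hypothetical stand-in for homeassistant.util.Throttle: skip calls made
    within `min_interval` of the previous successful call."""
    def wrapper(func):
        last_call = [None]
        def throttled(*args, **kwargs):
            now = time.monotonic()
            if last_call[0] is not None and \
               now - last_call[0] < min_interval.total_seconds():
                return None          # within the cooldown window: do nothing
            last_call[0] = now
            return func(*args, **kwargs)
        return throttled
    return wrapper

@simple_throttle(timedelta(seconds=60))
def update():
    print("hitting the Uber API")

update()   # runs
update()   # skipped: returns None because the cooldown has not elapsed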
# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from tempest.lib.services.identity.v3 import policies_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestPoliciesClient(base.BaseServiceTest): FAKE_CREATE_POLICY = { "policy": { "blob": "{'foobar_user': 'role:compute-user'}", "project_id": "0426ac1e48f642ef9544c2251e07e261", "type": "application/json", "user_id": "0ffd248c55b443eaac5253b4e9cbf9b5" } } FAKE_POLICY_INFO = { "policy": { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717273", "links": { "self": "http://example.com/identity/v3/policies/717273" }, "project_id": "456789", "type": "application/json", "user_id": "616263" } } FAKE_LIST_POLICIES = { "links": { "next": None, "previous": None, "self": "http://example.com/identity/v3/policies" }, "policies": [ { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717273", "links": { "self": "http://example.com/identity/v3/policies/717273" }, "project_id": "456789", "type": "application/json", "user_id": "616263" }, { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717274", "links": { "self": "http://example.com/identity/v3/policies/717274" }, "project_id": "456789", "type": "application/json", "user_id": "616263" } ] } FAKE_ENDPOINT_ID = "234789" FAKE_SERVICE_ID = "556782" FAKE_POLICY_ID = "717273" FAKE_REGION_ID = "73" def setUp(self): super(TestPoliciesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = policies_client.PoliciesClient(fake_auth, 'identity', 'regionOne') def _test_create_policy(self, bytes_body=False): self.check_service_client_function( self.client.create_policy, 'tempest.lib.common.rest_client.RestClient.post', self.FAKE_CREATE_POLICY, bytes_body, status=201) def _test_show_policy(self, bytes_body=False): self.check_service_client_function( self.client.show_policy, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_POLICY_INFO, bytes_body, policy_id="717273") def _test_list_policies(self, bytes_body=False): self.check_service_client_function( self.client.list_policies, 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_LIST_POLICIES, bytes_body) def _test_update_policy(self, bytes_body=False): self.check_service_client_function( self.client.update_policy, 'tempest.lib.common.rest_client.RestClient.patch', self.FAKE_POLICY_INFO, bytes_body, policy_id="717273") def test_create_policy_with_str_body(self): self._test_create_policy() def test_create_policy_with_bytes_body(self): self._test_create_policy(bytes_body=True) def test_show_policy_with_str_body(self): self._test_show_policy() def test_show_policy_with_bytes_body(self): self._test_show_policy(bytes_body=True) def test_list_policies_with_str_body(self): self._test_list_policies() def test_list_policies_with_bytes_body(self): self._test_list_policies(bytes_body=True) def test_update_policy_with_str_body(self): self._test_update_policy() def test_update_policy_with_bytes_body(self): 
self._test_update_policy(bytes_body=True) def test_delete_policy(self): self.check_service_client_function( self.client.delete_policy, 'tempest.lib.common.rest_client.RestClient.delete', {}, policy_id="717273", status=204) def test_update_policy_association_for_endpoint(self): self.check_service_client_function( self.client.update_policy_association_for_endpoint, 'tempest.lib.common.rest_client.RestClient.put', {}, policy_id=self.FAKE_POLICY_ID, endpoint_id=self.FAKE_ENDPOINT_ID, status=204) def test_show_policy_association_for_endpoint(self): self.check_service_client_function( self.client.show_policy_association_for_endpoint, 'tempest.lib.common.rest_client.RestClient.get', {}, policy_id=self.FAKE_POLICY_ID, endpoint_id=self.FAKE_ENDPOINT_ID, status=204) def test_delete_policy_association_for_endpoint(self): self.check_service_client_function( self.client.delete_policy_association_for_endpoint, 'tempest.lib.common.rest_client.RestClient.delete', {}, policy_id=self.FAKE_POLICY_ID, endpoint_id=self.FAKE_ENDPOINT_ID, status=204) def test_update_policy_association_for_service(self): self.check_service_client_function( self.client.update_policy_association_for_service, 'tempest.lib.common.rest_client.RestClient.put', {}, policy_id=self.FAKE_POLICY_ID, service_id=self.FAKE_SERVICE_ID, status=204) def test_show_policy_association_for_service(self): self.check_service_client_function( self.client.show_policy_association_for_service, 'tempest.lib.common.rest_client.RestClient.get', {}, policy_id=self.FAKE_POLICY_ID, service_id=self.FAKE_SERVICE_ID, status=204) def test_delete_policy_association_for_service(self): self.check_service_client_function( self.client.delete_policy_association_for_service, 'tempest.lib.common.rest_client.RestClient.delete', {}, policy_id=self.FAKE_POLICY_ID, service_id=self.FAKE_SERVICE_ID, status=204) def test_update_policy_association_for_region_and_service(self): self.check_service_client_function( self.client.update_policy_association_for_region_and_service, 'tempest.lib.common.rest_client.RestClient.put', {}, policy_id=self.FAKE_POLICY_ID, service_id=self.FAKE_SERVICE_ID, region_id=self.FAKE_REGION_ID, status=204) def test_show_policy_association_for_region_and_service(self): self.check_service_client_function( self.client.show_policy_association_for_region_and_service, 'tempest.lib.common.rest_client.RestClient.get', {}, policy_id=self.FAKE_POLICY_ID, service_id=self.FAKE_SERVICE_ID, region_id=self.FAKE_REGION_ID, status=204) def test_delete_policy_association_for_region_and_service(self): self.check_service_client_function( self.client.delete_policy_association_for_region_and_service, 'tempest.lib.common.rest_client.RestClient.delete', {}, policy_id=self.FAKE_POLICY_ID, service_id=self.FAKE_SERVICE_ID, region_id=self.FAKE_REGION_ID, status=204)
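Each test above delegates to `check_service_client_function`, which patches the named `RestClient` HTTP method, feeds back the canned `FAKE_*` body, and asserts the service client returns it parsed. A simplified, self-contained rendering of that pattern, using `unittest.mock` and a hypothetical client rather than tempest's real helper:

import json
import unittest
from unittest import mock

class FakePoliciesClient(object):
    """Hypothetical minimal client used only to illustrate the test pattern."""
    def __init__(self, http_get):
        self._get = http_get
    def show_policy(self, policy_id):
        status, body = self._get('/policies/%s' % policy_id)
        return json.loads(body)

class TestPattern(unittest.TestCase):
    def test_show_policy(self):
        canned = {"policy": {"id": "717273"}}
        # Patch the low-level HTTP call with a canned (status, body) pair.
        http_get = mock.Mock(return_value=(200, json.dumps(canned)))
        client = FakePoliciesClient(http_get)
        # The client should hand the parsed body back unchanged.
        self.assertEqual(canned, client.show_policy("717273"))
        http_get.assert_called_once_with('/policies/717273')

if __name__ == '__main__':
    unittest.main()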
import script from script import * import shlex import edition import layout import query import player import test import graph import opendns class Color(script.Script): def __init__(self, console): super(Color, self).__init__(console) self.colors = { "red" : [ 1.0, 0.0, 0.0, 1.0 ], "green" : [ 0.0, 1.0, 0.0, 1.0 ], "blue" : [ 0.0, 0.0, 1.0, 1.0 ], "yellow" : [ 1.0, 1.0, 0.0, 1.0 ], "cyan" : [ 0.0, 1.0, 1.0, 1.0 ], "magenta" : [ 1.0, 0.0, 1.0, 1.0 ], "white" : [ 1.0, 1.0, 1.0, 1.0 ], "gray" : [ 0.5, 0.5, 0.5, 1.0 ], "black" : [ 0.0, 0.0, 0.0, 1.0 ], "orange" : [ 1.0, 0.4, 0.0, 1.0 ], "purple" : [ 0.5, 0, 0.5, 1.0], "pink" : [ 1.0, 0.75, 0.79, 1.0 ], "brown" : [ 0.64, 0.16, 0.16, 1.0 ] } self.color_map = None self.color_masks = { "rgba" : [ True, True, True, True ], "rgb" : [ True, True, True, False ], "alpha" : [ False, False, False, True ] } def random_color(self): return [ random.random(), random.random(), random.random(), 1.0 ] def parse_color(self, s): if s in self.colors: return std.vec4_to_str(self.colors[s]) else: return std.vec4_to_str(self.colors["black"]) def lambda_assign(self, element_type, element_id, color): if element_type == "node": og.set_node_attribute(element_id, "og:space:color", "vec4", color) elif element_type == "edge": og.set_edge_attribute(element_id, "og:space:color", "vec4", color) def lambda_by(self, element_type, element_id, attr, color_map): if element_type not in color_map: color_map[element_type] = dict() if element_type == "node": value = og.get_node_attribute(element_id, attr) elif element_type == "edge": value = og.get_edge_attribute(element_id, attr) if value is None: color = std.vec4_to_str(self.colors["gray"]) else: value = "{0}".format(value) if value not in color_map[element_type]: color_map[element_type][value] = self.random_color() color = std.vec4_to_str(color_map[element_type][value]) if element_type == "node": og.set_node_attribute(element_id, "og:space:color", "vec4", color) elif element_type == "edge": og.set_edge_attribute(element_id, "og:space:color", "vec4", color) def lambda_op(self, element_type, element_id, op, color_mask, factor): def calculate(op, v1, v2, mask): if op == "add": r = [ v1[i] + v2[i] for i in xrange(4) ] elif op == "sub": r = [ v1[i] - v2[i] for i in xrange(4) ] elif op == "mul": r = [ v1[i] * v2[i] for i in xrange(4) ] elif op == "div": r = [ v1[i] / v2[i] for i in xrange(4) ] elif op == "set": r = v2 else: self.console.log("Error: '{0}': Unknown operator!") return for i in xrange(4): if not mask[i]: r[i] = v1[i] return r if element_type == "node": color = og.get_node_attribute(element_id, "og:space:color") og.set_node_attribute(element_id, "og:space:color", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask))) elif element_type == "edge": color = og.get_edge_attribute(element_id, "og:space:color1") og.set_edge_attribute(element_id, "og:space:color1", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask))) color = og.get_edge_attribute(element_id, "og:space:color2") og.set_edge_attribute(element_id, "og:space:color2", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask))) def run(self, args): query = self.console.query if query is None: self.console.log("Error: Query is empty!") return if len(args) == 2: color = self.parse_color(args[1]) if 'nodes' in query: [ self.lambda_assign("node", nid, color) for nid in query['nodes'] ] if 'edges' in query: [ self.lambda_assign("edge", eid, color) for eid in query['edges'] ] elif len(args) == 3 and args[1] == "by": attr = args[2] color_map = 
dict() if 'nodes' in query: [ self.lambda_by("node", nid, attr, color_map) for nid in query['nodes'] ] if 'edges' in query: [ self.lambda_by("edge", eid, attr, color_map) for eid in query['edges'] ] elif len(args) >= 4 and args[1] in [ "mul", "div", "add", "sub", "set" ]: if args[2] not in self.color_masks: self.console.log("Error: '{0}': Unknown color mask!".format(args[2])) return array = [ float(i) for i in " ".join(args[3:]).split() ] if len(array) == 1: factor = [ array[0], array[0], array[0], array[0] ] elif len(array) == 3: factor = [ array[0], array[1], array[2], 1.0 ] elif len(array) == 4: factor = [ array[0], array[1], array[2], array[3] ] else: self.console.log("Error: Can't parse color factor!") return if 'nodes' in query: [ self.lambda_op("node", nid, args[1], self.color_masks[args[2]], factor) for nid in query['nodes'] ] if 'edges' in query: [ self.lambda_op("edge", eid, args[1], self.color_masks[args[2]], factor) for eid in query['edges'] ] class Help(script.Script): def __init__(self, console): super(Help, self).__init__(console) def run(self, args): self.console.log("Avalailable commands:") self.console.log(", ".join(self.console.context['scripts'].keys())) class Quit(script.Script): def __init__(self, console): super(Quit, self).__init__(console) def run(self, args): self.console.log("Terminating OpenGraphiti...") og.quit() class Native(script.Script): def __init__(self, console): super(Native, self).__init__(console) def run(self, args): exec(" ".join(args[1:])) # ----- Callbacks ----- class OpenGraphiti(object): def __init__(self): self.ids = { "node" : og.get_node_ids, "edge" : og.get_edge_ids } self.setters = { "graph" : og.set_attribute, "node" : og.set_node_attribute, "edge" : og.set_edge_attribute, } self.getters = { "graph" : og.get_attribute, "node" : og.get_node_attribute, "edge" : og.get_edge_attribute, } def get_ids(self, entity_type): if entity_type in self.ids: return self.ids[entity_type]() raise Exception("{0}: Unknown entity type!".format(entity_type)) def set_attribute(self, entity_type, entity_id, attr_name, attr_type, attr_value): if entity_type in self.setters: return self.setters[entity_type](entity_id, attr_name, attr_type, attr_value) raise Exception("{0}: Unknown entity type!".format(entity_type)) def get_attribute(self, entity_type, entity_id, attr_name): if entity_type in self.getters: return self.getters[entity_type](entity_id, attr_name) raise Exception("{0}: Unknown entity type!".format(entity_type)) class Console(object): def __init__(self): self.context = { "scripts" : { "info" : edition.Info(self), "load" : edition.Load(self), "save" : edition.Save(self), "screenshot" : edition.Screenshot(self), "set" : edition.Set(self), "get" : edition.Get(self), "remove" : edition.Remove(self), "map" : edition.Map(self), "clear" : edition.Clear(self), "select" : query.Select(self), "filter" : query.Filter(self), "query" : query.Query(self), "layout" : layout.Layout(self), "play" : player.Play(self), "stop" : player.Stop(self), "topo" : graph.Topology(self), "test" : test.Test(self), "help" : Help(self), "color" : Color(self), "quit" : Quit(self), "opendns" : opendns.OpenDNS(self), "py" : Native(self) } } self.query = dict() self.api = OpenGraphiti() def log(self, text): og.console({ 'log' : text }) def print_query(self): s = "Entities: " key_count = 0 for key in self.query.keys(): if key_count > 0: s += ", " s += "#{0}={1}".format(key, len(self.query[key])) key_count += 1 self.log(s) def execute(self, command): lex = shlex.shlex(command, posix=True) 
lex.whitespace_split = True args = list(lex) if 'scripts' in self.context and args[0] in self.context['scripts']: self.context['scripts'][args[0]].run(args) else: # TODO: og.console("{0}: Command not found!".format(args[0])) self.log("{0}: Command not found!".format(args[0]))
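`Console.execute()` is a thin shlex-based dispatcher: the first token names a registered script and the whole token list is handed to its `run()`. A minimal standalone sketch of the same mechanism, with a stand-in handler instead of the real `script.Script` subclasses:

import shlex

def make_dispatcher(scripts):
    """Return an execute() that splits a command line with shlex and routes it
    to the handler registered under the first token."""
    def execute(command):
        lex = shlex.shlex(command, posix=True)
        lex.whitespace_split = True
        args = list(lex)
        handler = scripts.get(args[0])
        if handler is None:
            print("{0}: Command not found!".format(args[0]))
            return
        handler(args)
    return execute

execute = make_dispatcher({"color": lambda args: print("color args:", args[1:])})
execute('color by og:dns:score')      # -> color args: ['by', 'og:dns:score']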
"""This file contains all calculations related to spatial explicit calculations of technology/innovation penetration.""" import logging from collections import defaultdict import numpy as np def spatial_diffusion_values( regions, real_values, speed_con_max, low_congruence_crit, p_outlier ): """Generate spatial diffusion values from real data Arguments --------- regions : dict Regions p_outlier : float (percentage) Percentage of outliers which are capped at both ends of the value spectrum of the real data Returns ------- diffusion_values : dict Spatial diffusion values based on speed assumptions Example ------- This function calculates the values which already incorporate different speeds in diffusion. For example based on real values (e.g. population density) congruence values are calculated. Then, the congruence values are linked to diffusion speed differentes. """ diffusion_values = {} # Diffusion speed assumptions speed_con_min = 1 # Speed at val_con == 0 speed_con_max = speed_con_max # Speed at con_val == 1 if speed_con_max == 1: # No regional difference for region in regions: diffusion_values[region] = 1 #100% congruence else: # ---------------- # plot real values to check for outliers # ---------------- # Select number of outliers to remove lower and higher extremes nr_of_outliers = int(100 / len(regions) * p_outlier) sorted_vals = list(real_values.values()) sorted_vals.sort() # Get value of largest outlier treshold_upper_real_value = sorted_vals[-nr_of_outliers] treshold_lower_real_value = sorted_vals[nr_of_outliers] for reg, val in real_values.items(): if val > treshold_upper_real_value: real_values[reg] = treshold_upper_real_value if val < treshold_lower_real_value: real_values[reg] = treshold_lower_real_value # --------------------------------- # Congruence calculations # ---------------------------------- # Max congruence value con_max = max(real_values.values()) for region in regions: # Multiply speed of diffusion of concept with concept congruence value try: real_value = real_values[region] except KeyError: real_value = np.average(real_values.values()) logging.warning("Set average real data for region %s", region) # Calculate congruence value congruence_value = real_value / con_max # If the assignement is thoe other way round (lowest value has highest congruence value) if low_congruence_crit: congruence_value = 1 - congruence_value else: pass # Calculate diffusion value lower_concept_val = (1 - congruence_value) * speed_con_min higher_concept_val = congruence_value * speed_con_max diffusion_values[region] = lower_concept_val + higher_concept_val return diffusion_values def calc_diffusion_f(regions, f_reg, spatial_diff_values, fuels): """From spatial diffusion values calculate diffusion factor for every region (which needs to sum up to one across all regions) and end use. With help of these calculation diffusion factors, a spatial explicit diffusion of innovations can be implemented. Arguments ---------- regions : dict Regions f_reg : dict Regional not weighted diffusion factors spatial_diff_values : dict Spatial diffusion index values fuels : array Fuels per enduse or fuel per sector and enduse Example ------- If the national assumption of a technology diffusion of 50% is defined (e.g. 50% of service are heat pumps), this percentage can be changed per region, i.e. in some regions with higher diffusion factors, a larger percentage adopt the technology on the expense of other regions, where a lower percentage adopt this technology. 
In sum however, for all regions, the total service still sums up to 50%. Note ----- The total sum can be higher than 1 in case of high values. Therefore the factors need to be capped. TODO MORE INFO """ # Calculate fraction of energy demand of every region of total demand reg_enduse_p = defaultdict(dict) fuels_enduse = {} for fuel_submodel in fuels: # ----------------------------------- # Sum fuel across sectors # ----------------------------------- fuel_submodel_new = defaultdict(dict) for region, entries in fuel_submodel.items(): enduses = entries.keys() try: for enduse in entries: for sector in entries[enduse]: fuel_submodel_new[region][enduse] = 0 for enduse in entries: for sector in entries[enduse]: fuel_submodel_new[region][enduse] += np.sum(entries[enduse][sector]) fuel_submodel = fuel_submodel_new except IndexError: enduses = entries.keys() break # -------------------- # Calculate fraction of fuel for each region # -------------------- for enduse in enduses: fuels_enduse[enduse] = 0 # Total uk fuel of enduse tot_enduse_uk = 0 for region in regions: tot_enduse_uk += np.sum(fuel_submodel[region][enduse]) # Calculate regional % of enduse for region in regions: reg_enduse_p[enduse][region] = np.sum(fuel_submodel[region][enduse]) / tot_enduse_uk fuels_enduse[enduse] += np.sum(fuel_submodel[region][enduse]) # ---------- # Norm spatial factor (f_reg_norm) with population (does not sum upt to 1.p Eq. 7 Appendix) # ---------- f_reg_norm = {} for enduse, regions_fuel_p in reg_enduse_p.items(): # Sum across all regs (factor * fuel_p) sum_p_f_all_regs = 0 for region in regions: sum_p_f_all_regs += f_reg[region] * regions_fuel_p[region] f_reg_norm[enduse] = {} for region, fuel_p in regions_fuel_p.items(): f_reg_norm[enduse][region] = f_reg[region] / sum_p_f_all_regs # ---------- # Norm which sums up to 1 (f_reg_norm_abs) (e.g. distriubte 200 units across space) # ---------- f_reg_norm_abs = {} for enduse, regions_fuel_p in reg_enduse_p.items(): f_reg_norm_abs[enduse] = {} for region, fuel_p in regions_fuel_p.items(): f_reg_norm_abs[enduse][region] = fuel_p * spatial_diff_values[region] #----------- # Normalize f_reg_norm_abs #----------- for enduse in f_reg_norm_abs: sum_enduse = sum(f_reg_norm_abs[enduse].values()) for region in f_reg_norm_abs[enduse]: f_reg_norm_abs[enduse][region] = f_reg_norm_abs[enduse][region] / sum_enduse # Testing for enduse in f_reg_norm_abs: np.testing.assert_almost_equal( sum(f_reg_norm_abs[enduse].values()), 1, decimal=2) return f_reg_norm_abs, f_reg_norm def calc_regional_services( enduse, uk_techs_service_p, regions, spatial_factors, fuel_disaggregated, techs_affected_spatial_f, capping_val=1 ): """Calculate regional specific end year service shares of technologies (rs_reg_enduse_tech_p_ey) Arguments --------- uk_techs_service_p : dict Service shares per technology for future year regions : dict Regions spatial_factors : dict Spatial factor per enduse and region fuel_disaggregated : dict Fuels per region techs_affected_spatial_f : list List with technologies where spatial diffusion is affected capping_val : float Maximum service share (1.0). This is needed in case of spatial explicit diffusion modelling where the diffusion speed is very large and thus would lead to areas with largher shares than 1 Returns ------- rs_reg_enduse_tech_p_ey : dict Regional specific model end year service shares of techs Modelling steps ----- A.) Calculation national end use service to reduce (e.g. 50% heat pumps for all regions) (uk_tech_service_ey_p) B.) 
Distribute this service according to spatial index for techs where the spatial explicit diffusion applies (techs_affected_spatial_f). Otherwise disaggregated according to fuel C.) Convert regional service reduction to ey % in region """ reg_enduse_tech_p_ey = defaultdict(dict) # ------------------------------------ # Calculate national total enduse fuel and service # ------------------------------------ uk_enduse_fuel = 0 for region in regions: reg_enduse_tech_p_ey[region] = {} uk_enduse_fuel += np.sum(fuel_disaggregated[region][enduse]) # ---- # Service of enduse for all regions # ---- for region in regions: # Disaggregation factor f_fuel_disagg = np.sum(fuel_disaggregated[region][enduse]) / uk_enduse_fuel # Calculate fraction of regional service for tech, uk_tech_service_ey_p in uk_techs_service_p.items(): global_tech_service_ey_p = uk_tech_service_ey_p # --------------------------------------------- # B.) Calculate regional service for technology # --------------------------------------------- if tech in techs_affected_spatial_f: # Use spatial factors reg_service_tech = global_tech_service_ey_p * spatial_factors[enduse][region] else: # If not specified, use fuel disaggregation for enduse factor reg_service_tech = global_tech_service_ey_p #* f_fuel_disagg reg_enduse_tech_p_ey[region][tech] = reg_service_tech # --------------------------------------------- # C.) Calculate regional fraction # --------------------------------------------- for tech, service_tech in reg_enduse_tech_p_ey[region].items(): # ---------------------------------- # Capping value in case larger than 1.0 # ---------------------------------- service_share = service_tech if service_share > capping_val: reg_enduse_tech_p_ey[region][tech] = capping_val logging.info("Maximum value is capped: {} {} {}".format( region, service_share, tech)) else: reg_enduse_tech_p_ey[region][tech] = service_share return dict(reg_enduse_tech_p_ey) def calc_spatially_diffusion_factors( regions, fuel_disagg, real_values, low_congruence_crit, speed_con_max, p_outlier ): """ Calculate spatial diffusion values Arguments --------- regions : dict Regions fuel_disagg : dict Disaggregated fuel per region real_values : dict Real values p_outlier : float Percentage of min and max outliers are flattened Returns ------- f_reg_norm_abs : dict Diffusion values with normed population. If no value is larger than 1, the total sum of all shares calculated for every region is identical to the defined scenario variable. spatial_diff_values : dict Spatial diffusion values (not normed, only considering differences in speed and congruence values) Explanation ============ (I) Load diffusion values (II) Calculate diffusion factors (III) Calculate sigmoid diffusion values for technology specific enduse service shares for every region """ # ----- # I. Diffusion diffusion values # ----- spatial_diff_values = spatial_diffusion_values( regions=regions, real_values=real_values, speed_con_max=speed_con_max, low_congruence_crit=low_congruence_crit, p_outlier=p_outlier) # ----- # II. 
Calculation of diffusion factors (Not weighted with demand) # ----- # Not weighted with demand max_value_diffusion = max(list(spatial_diff_values.values())) f_reg = {} for region in regions: f_reg[region] = spatial_diff_values[region] / max_value_diffusion # Weighted with demand f_reg_norm_abs, f_reg_norm = calc_diffusion_f( regions, f_reg, spatial_diff_values, [fuel_disagg['residential'], fuel_disagg['service'], fuel_disagg['industry']]) return f_reg, f_reg_norm, f_reg_norm_abs '''def spatially_differentiated_modelling( regions, fuel_disagg, rs_share_s_tech_ey_p, ss_share_s_tech_ey_p, is_share_s_tech_ey_p, techs_affected_spatial_f, spatial_diffusion_factor, spatial_explicit_diffusion=False ): """ Regional diffusion shares of technologies is calculated based on calcualted spatial diffusion factors Arguments --------- regions : dict Regions fuel_disagg : dict Fuel per region rs_share_s_tech_ey_p : dict Global technology service shares ss_share_s_tech_ey_p : dict Global technology service shares is_share_s_tech_ey_p : dict Global technology service shares techs_affected_spatial_f : list Technologies which are affected by spatially heterogeneous diffusion spatial_diffusion_factor : dict Spatial diffusion factor Returns -------- XX_reg_share_s_tech_ey_p : Technology specific service shares for every region (residential) considering differences in diffusion speed. If the calculate regional shares are larger than 1.0, the diffusion is set to the maximum criteria (`cap_max`). This means that if some regions reach the maximum defined value, thes cannot futher increase their share. This means that other regions diffuse slower and do not reach such high leves (and because the faster regions cannot over-compensate, the total sum is not identical). Calculate sigmoid diffusion values for technology specific enduse service shares for every region """ # Residential spatial explicit modelling rs_reg_share_s_tech_ey_p = {} for enduse, uk_techs_service_p in rs_share_s_tech_ey_p.items(): rs_reg_share_s_tech_ey_p[enduse] = calc_regional_services( enduse, uk_techs_service_p, regions, spatial_diffusion_factor, fuel_disagg['residential'], techs_affected_spatial_f) ss_reg_share_s_tech_ey_p = {} for sector, uk_techs_service_enduses_p in ss_share_s_tech_ey_p.items(): ss_reg_share_s_tech_ey_p[sector] = {} for enduse, uk_techs_service_p in uk_techs_service_enduses_p.items(): ss_reg_share_s_tech_ey_p[sector][enduse] = calc_regional_services( enduse, uk_techs_service_p, regions, spatial_diffusion_factor, fuel_disagg['ss_fuel_disagg_sum_all_sectors'], techs_affected_spatial_f) is_reg_share_s_tech_ey_p = {} for sector, uk_techs_service_enduses_p in is_share_s_tech_ey_p.items(): is_reg_share_s_tech_ey_p[sector] = {} for enduse, uk_techs_service_p in uk_techs_service_enduses_p.items(): is_reg_share_s_tech_ey_p[sector][enduse] = calc_regional_services( enduse, uk_techs_service_p, regions, spatial_diffusion_factor, fuel_disagg['is_aggr_fuel_sum_all_sectors'], techs_affected_spatial_f) return rs_reg_share_s_tech_ey_p, ss_reg_share_s_tech_ey_p, is_reg_share_s_tech_ey_p''' def factor_improvements_single( factor_uk, regions, f_reg, f_reg_norm, f_reg_norm_abs, fuel_regs_enduse ): """Calculate regional specific end year service shares of technologies (rs_reg_enduse_tech_p_ey) Arguments ========= factor_uk : float Improvement of either an enduse or a variable for the whole UK regions : dict Regions f_reg : dict Regional spatial factors not normed with fuel demand f_reg_norm : dict Regional spatial factors normed with fuel demand 
(sum is not 1) f_reg_norm_abs : dict Regional spatial factors normed with fuel demand and normed that sum is 1 spatial_diff_values : dict Spatial diffusion values fuel_regs_enduse : dict Fuels per region and end use Returns ------- rs_reg_enduse_tech_p_ey : dict Regional specific model end year service shares of techs Modelling steps ----- A.) Calculation national end use service to reduce (e.g. 50% heat pumps for all regions) (uk_tech_service_ey_p) B.) Distribute this service according to spatial index for techs where the spatial explicit diffusion applies (techs_affected_spatial_f). Otherwise disaggregated according to fuel C.) Convert regional service reduction to ey % in region """ reg_enduse_tech_p_ey = {} # Check which factors is to be used # if only distribute: f_reg_norm_abs # if max 1: f_reg_nrm # if not intersted in correct sum: f_reg if fuel_regs_enduse == {}: logging.info("spatial_factor: fuel_regs_enduse") spatial_factor = f_reg else: logging.info("spatial_factor: f_reg_norm_abs") spatial_factor = f_reg_norm_abs # Sum fuel for all regions uk_enduse_fuel = sum(fuel_regs_enduse.values()) test = 0 for region in regions: try: test += (reg_enduse_tech_p_ey[region] * np.sum(fuel_regs_enduse[region])) logging.info( "FUEL FACTOR reg: {} val: {}, fuel: {} fuel: {} ".format( region, round(reg_enduse_tech_p_ey[region], 3), round(uk_enduse_fuel, 3), round(np.sum(fuel_regs_enduse[region]), 3))) except: pass reg_enduse_tech_p_ey[region] = factor_uk * spatial_factor[region] logging.info("spatial single factor reg: {} val: {}".format( region, round(reg_enduse_tech_p_ey[region], 3))) # --------- # PROBLEM THAT MORE THAN 100 percent could be reached if nt normed # --------- reg_enduse_tech_p_ey_capped = {} # Cap regions which have already reached and are larger than 1.0 cap_max_crit = 1.0 #100% demand_lost = 0 for region, region_factor in reg_enduse_tech_p_ey.items(): if region_factor > cap_max_crit: logging.warning("INFO: FOR A REGION THE SHARE OF IMPROVEMENTIS LARGER THAN 1.0.") # Demand which is lost and capped diff_to_cap = region_factor - cap_max_crit demand_lost += diff_to_cap * np.sum(fuel_regs_enduse[region]) reg_enduse_tech_p_ey_capped[region] = cap_max_crit else: reg_enduse_tech_p_ey_capped[region] = region_factor # Replace reg_enduse_tech_p_ey = reg_enduse_tech_p_ey_capped #logging.warning("FAKTOR UK :" + str(factor_uk)) #logging.warning("Lost demand: " + str(demand_lost)) #logging.warning("TESTDUM a " + str(test)) #logging.warning("TESTDUM b " + str(uk_enduse_fuel * factor_uk)) return reg_enduse_tech_p_ey def get_enduse_regs( enduse, fuels_disagg): """ Get a specific enduse for all regions Arguments --------- enduse : str Enduse to sum fuels_disagg : list Fuels per disaggregated regions Returns ------- fuels_enduse : dict Fuels of an enduse for all regions {'reg': np.array(enduse_fuel)} """ fuels_enduse = {} for fuel_submodel in fuels_disagg: for reg, enduse_fuels in fuel_submodel.items(): for enduse_to_match, fuels_regs in enduse_fuels.items(): if enduse == enduse_to_match: fuels_enduse[reg] = fuels_regs return fuels_enduse
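For the `f_reg_norm_abs` factors described above, the arithmetic is simply: weight each region's fuel share by its spatial diffusion value, then rescale so the factors sum to one per enduse. A toy numeric example with made-up regions and values:

regions = ['A', 'B', 'C']
fuel_p = {'A': 0.5, 'B': 0.3, 'C': 0.2}            # regional share of national fuel
spatial_diff_values = {'A': 1.0, 'B': 2.0, 'C': 4.0}

# Weight fuel shares by diffusion values, then normalise to sum to 1.
raw = {r: fuel_p[r] * spatial_diff_values[r] for r in regions}
total = sum(raw.values())
f_reg_norm_abs = {r: raw[r] / total for r in regions}

print(f_reg_norm_abs)                  # {'A': ~0.263, 'B': ~0.316, 'C': ~0.421}
assert abs(sum(f_reg_norm_abs.values()) - 1.0) < 1e-9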
#!/usr/bin/env python # # Copyright (C) 2014 Brian Caswell <bmc@lungetech.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # import sys import ansi_x931_aes128 import random import string def encode(data): """ Encodes a string to the 'cstring' encoding supported by the replay DTD. Args: data: string value to be encoded Returns: String containing the encoded value Raises: None """ chars = string.letters + string.digits + " ?!:." return ''.join([x if x in chars else "\\x%02x" % ord(x) for x in data]) class Variable(object): def __init__(self, name): assert name.isalnum() self._name = name self._method = None self._begin = None self._end = None self._re = None self._value = None def set_value(self, value): """ Sets the 'Variable' value, useful for Actions.set(). Args: value: Specify data that should be set in the 'var' instance, as specified by the DTD. Returns: None Raises: Exception if value is not a string """ assert isinstance(value, str) self._value = value self._method = 'value' def set_re(self, value, group=None): """ Sets the 'Variable' value to be a regular expression Args: value: Specify the regular expression used in an 'assign' instance, as specified by the DTD. Returns: None Raises: Exception if value is not a string Exception if value is empty """ assert isinstance(value, str) assert len(value) if group is not None: assert isinstance(group, int) else: group = 0 self._re = value self._re_group = group self._method = 're' def set_slice(self, begin, end=None): """ Sets the 'Variable' value to be a slice Args: value: Specify the slice used in an 'assign' instance, as specified by the DTD. 
Returns: None Raises: Exception if begin is not an integer Exception if end is not None or an integer """ assert isinstance(begin, int) assert isinstance(end, (type(None), int)) self._method = 'slice' self._begin = begin self._end = end def get_read(self): """ Get the 'assign' XML element Args: None Returns: String defining the '<assign>' XML element Raises: Exception if the method has not been defined as 're' or 'slice' """ # <assign> <var>foo</var> <slice begin="1" end="10" /> </assign> # <assign> <var>bar</var> <pcre>(.*)</pcre> </assign> assert self._method in ['re', 'slice'] xml = '' if self._method == 'slice': if self._end is None: xml = '<slice begin="%d" />' % (self._begin) else: xml = '<slice begin="%d" end="%d" />' % (self._begin, self._end ) elif self._method == 're': if self._re_group != 0: xml = '<pcre group="%d">%s</pcre>' % (self._re_group, self._re) else: xml = '<pcre>%s</pcre>' % (self._re) return '<assign> <var>%s</var> %s </assign>' % (self._name, xml) def get_set(self): """ Get the 'decl' XML element Args: None Returns: String defining the '<decl>' XML element Raises: Exception if the method has not been defined as 'value' """ assert self._method == 'value' return '<decl> <var>%s</var> <value> <data>%s</data> '\ '</value> </decl>' % (self._name, encode(self._value)) def get_write(self): """ Get the XML element that defines writing the variable Args: None Returns: String defining the '<var>' XML element Raises: None """ assert self._method is not None return '<var>%s</var>' % (self._name) class Actions(object): """Actions - Define the interactions for a CB This class implements the basic methods to interact with a CB, in terms of XML generation for use with 'cb-replay'. Usage: a = Actions(): a.write('foo') a.read(delim='\n') a.xml() Attributes: state: Dict of state values, to be reset upon each iteration. used_magic_page: Has the magic page been used """ def __init__(self): self._actions = [] self.state = {} self._seed = None self._magic_page = None self.used_magic_page = False self._matched = False self._sent = False self._recv = False @property def magic_page(self): if not self.used_magic_page: self.used_magic_page = True self._seed = self.random_buffer(48) prng = ansi_x931_aes128.PRNG(self._seed) self._magic_page = prng.get(0x1000) return self._magic_page @classmethod def chance(cls, value=0.5): """ Randomly return True or False, with the likelyhood defined by specifying a percentage argument. Args: value: A float between 0.0 and 1.0 Returns: True or False Raises: Exception: if 'value' is not a float between 0.0 and 1.0 """ assert isinstance(value, float) assert value > 0.0 and value < 1.0 return random.random() < value @staticmethod def random_buffer(size, chars=None): """ Create a buffer of random data of a length specified by the the size argument. 
Args: size: Size of buffer to create chars: Optional sequence of characters (If none is provided, all characters are used) Raises: None """ if chars is None: chars = ''.join([chr(x) for x in range(256)]) return ''.join(random.choice(chars) for _ in range(size)) def reset(self): """ Re-initialize the instance of the class Args: None Returns: None Raises: None """ self.__init__() def xml(self, store_seed=None): """ Returns the XML of the existing actions following the replay.dtd spec Args: store_xml: Should the seed be stored in the XML Returns: String containing XML of the existing actions Raises: None """ lines = [ '<?xml version="1.0" standalone="no" ?>', '<!DOCTYPE pov SYSTEM "/usr/share/cgc-docs/replay.dtd">' ] out = '\n' actions = '\n ' + '\n '.join(self._actions) + '\n' out += self._wrap('cbid', 'service') + '\n' assert self._matched, 'polls must match or assign content from the server' assert self._sent, 'polls must send data to the server' assert self._recv, 'polls must recv data from the server' if store_seed: seed = self._seed if seed is None: seed = self.random_buffer(48) out += self._wrap('seed', seed.encode('hex')) + '\n' out += self._wrap('replay', actions) + '\n' lines.append(self._wrap('pov', out)) return '\n'.join(lines) @classmethod def _wrap(cls, tag, value, **options): """ Creates an XML eliment Args: tag: The tag name value: The value of the tag **options: arbitrary named arguments, used as attributes for the tag Returns: String containing XML element Raises: None """ opt_string = '' if len(options): opts = [] for key in options: opts.append('%s="%s"' % (key, options[key])) opt_string = ' ' + ' '.join(opts) return '<%s%s>%s</%s>' % (tag, opt_string, value, tag) def read(self, delim=None, length=None, expect=None, expect_format=None, timeout=None, assign=None): """ Create a 'read' interaction for a challenge binary as supported by the replay DTD. Args: delim: Specify data should be read until the string has been received from the CB length: Specify the amount of data to be read from the CB. expect: Specify the expected value that should be returned from the CB expect_format: Specify the format of the 'expect' value, allowed values are 'pcre', 'asciic', or 'variable'. Unless a value is specified, the default format is 'asciic'. assign: An optional 'Variable' instance used to specify XML 'assign' arguments. 
Returns: None Raises: Exception if delim AND length are specified Exception if expect_format is specified and expect is not specified Exception if expect_format is not 'pcre' or 'asciic' Exception if timeout is not an integer Exception if length is not an integer Exception if the delimiter is not a string Exception if the delimiter is an empty string Exception if assign is set and is not a 'Variable' instance """ assert length is not None or delim is not None if expect_format is not None: assert expect is not None assert expect_format in ['pcre', 'asciic', 'variable'] xml = '' if timeout is not None: assert isinstance(timeout, int) xml += self._wrap('timeout', str(timeout)) if length is not None: assert isinstance(length, int) assert length > 0 xml += self._wrap('length', str(length)) if delim is not None: assert isinstance(delim, str) assert len(delim) > 0 delim = encode(delim) xml += self._wrap('delim', delim) if expect is not None: match = None if expect_format == 'variable': assert isinstance(expect, Variable) match = expect.get_write() elif expect_format == 'pcre': match = self._wrap('pcre', expect) else: match = self._wrap('data', encode(expect)) xml += self._wrap('match', match) self._matched = True if assign is not None: assert isinstance(assign, Variable) xml += assign.get_read() self._matched = True self._recv = True xml = self._wrap('read', xml) self._actions.append(xml) def comment(self, msg, *args): """ Create an XML comment of 'msg % args' Args: msg: Message to be logged args: Arguments to be interpreted via str formatting Returns None Raises: None """ data = '<!-- %s -->' % encode(msg % args) self._actions.append(data) def delay(self, value): """ Create a 'delay' element, to cause a sleep in the interaction with the challenge binary, as supported by the replay DTD. Args: value: An integer to specify how much to sleep Returns None Raises: Exception: if value is not a number greater than zero """ assert isinstance(value, int) assert value >= 0 self._actions.append(self._wrap('delay', '%d' % value)) def set(self, value): """ Declare a variable as supported by the replay DTD. Args: value: A Variable instance Returns None Raises: Exception: if value is not an instance of Variable """ assert isinstance(value, Variable) self._actions.append(value.get_set()) def write(self, *values): """ Create a 'write' interaction for a challenge binary as supported by the replay DTD. Args: values: Specify the data that should be sent to the CB. Returns None Raises: None """ xml = [] for value in values: if isinstance(value, Variable): xml.append(value.get_write()) else: xml.append(self._wrap('data', encode(value))) self._sent = True self._actions.append(self._wrap('write', ''.join(xml)))
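Tying the pieces together, a poll is built by declaring variables, interleaving write/read actions, and emitting the XML. A hedged usage sketch follows; it assumes the module above is importable under Python 2 with the `ansi_x931_aes128` package present (encode() relies on `string.letters`), and the protocol strings are invented for illustration:

a = Actions()
v = Variable('cookie')
v.set_re(r'id=(\w+)', group=1)               # capture part of the server response

a.write('LOGIN guest\n')                     # <write> element
a.read(delim='\n', expect='OK', assign=v)    # <read> with <match> and <assign>
a.write('WHOAMI ', v, '\n')                  # re-use the captured variable
a.read(delim='\n', expect='guest')

print a.xml()                                # emits the replay.dtd-conformant poll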
# Copyright 2011 Denali Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from lxml import etree import webob from nova.api.openstack.volume import snapshots from nova import exception from nova import flags from nova import log as logging from nova import test from nova import volume from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) def _get_default_snapshot_param(): return { 'id': 123, 'volume_id': 12, 'status': 'available', 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', } def stub_snapshot_create(self, context, volume_id, name, description): snapshot = _get_default_snapshot_param() snapshot['volume_id'] = volume_id snapshot['display_name'] = name snapshot['display_description'] = description return snapshot def stub_snapshot_delete(self, context, snapshot): if snapshot['id'] != 123: raise exception.NotFound def stub_snapshot_get(self, context, snapshot_id): if snapshot_id != 123: raise exception.NotFound param = _get_default_snapshot_param() return param def stub_snapshot_get_all(self, context): param = _get_default_snapshot_param() return [param] class SnapshotApiTest(test.TestCase): def setUp(self): super(SnapshotApiTest, self).setUp() self.controller = snapshots.SnapshotsController() self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) self.stubs.Set(volume.api.API, "get_all_snapshots", stub_snapshot_get_all) def test_snapshot_create(self): self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create) self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get) snapshot = {"volume_id": '12', "force": False, "display_name": "Snapshot Test Name", "display_description": "Snapshot Test Desc"} body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v1/snapshots') resp_dict = self.controller.create(req, body) self.assertTrue('snapshot' in resp_dict) self.assertEqual(resp_dict['snapshot']['display_name'], snapshot['display_name']) self.assertEqual(resp_dict['snapshot']['display_description'], snapshot['display_description']) def test_snapshot_create_force(self): self.stubs.Set(volume.api.API, "create_snapshot_force", stub_snapshot_create) self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get) snapshot = {"volume_id": '12', "force": True, "display_name": "Snapshot Test Name", "display_description": "Snapshot Test Desc"} body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v1/snapshots') resp_dict = self.controller.create(req, body) self.assertTrue('snapshot' in resp_dict) self.assertEqual(resp_dict['snapshot']['display_name'], snapshot['display_name']) self.assertEqual(resp_dict['snapshot']['display_description'], snapshot['display_description']) def test_snapshot_delete(self): self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) snapshot_id = 123 req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id) resp = self.controller.delete(req, snapshot_id) 
self.assertEqual(resp.status_int, 202) def test_snapshot_delete_invalid_id(self): self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) snapshot_id = 234 req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, snapshot_id) def test_snapshot_show(self): req = fakes.HTTPRequest.blank('/v1/snapshots/123') resp_dict = self.controller.show(req, 123) self.assertTrue('snapshot' in resp_dict) self.assertEqual(resp_dict['snapshot']['id'], '123') def test_snapshot_show_invalid_id(self): snapshot_id = 234 req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, snapshot_id) def test_snapshot_detail(self): req = fakes.HTTPRequest.blank('/v1/snapshots/detail') resp_dict = self.controller.detail(req) self.assertTrue('snapshots' in resp_dict) resp_snapshots = resp_dict['snapshots'] self.assertEqual(len(resp_snapshots), 1) resp_snapshot = resp_snapshots.pop() self.assertEqual(resp_snapshot['id'], '123') class SnapshotSerializerTest(test.TestCase): def _verify_snapshot(self, snap, tree): self.assertEqual(tree.tag, 'snapshot') for attr in ('id', 'status', 'size', 'created_at', 'display_name', 'display_description', 'volume_id'): self.assertEqual(str(snap[attr]), tree.get(attr)) def test_snapshot_show_create_serializer(self): serializer = snapshots.SnapshotTemplate() raw_snapshot = dict( id='snap_id', status='snap_status', size=1024, created_at=datetime.datetime.now(), display_name='snap_name', display_description='snap_desc', volume_id='vol_id', ) text = serializer.serialize(dict(snapshot=raw_snapshot)) print text tree = etree.fromstring(text) self._verify_snapshot(raw_snapshot, tree) def test_snapshot_index_detail_serializer(self): serializer = snapshots.SnapshotsTemplate() raw_snapshots = [dict( id='snap1_id', status='snap1_status', size=1024, created_at=datetime.datetime.now(), display_name='snap1_name', display_description='snap1_desc', volume_id='vol1_id', ), dict( id='snap2_id', status='snap2_status', size=1024, created_at=datetime.datetime.now(), display_name='snap2_name', display_description='snap2_desc', volume_id='vol2_id', )] text = serializer.serialize(dict(snapshots=raw_snapshots)) print text tree = etree.fromstring(text) self.assertEqual('snapshots', tree.tag) self.assertEqual(len(raw_snapshots), len(tree)) for idx, child in enumerate(tree): self._verify_snapshot(raw_snapshots[idx], child)
# Copyright (c) 2016 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields from neutron.db import api as db_api from neutron.db import dns_db from neutron.db.models import segment as segment_model from neutron.db import models_v2 from neutron.db.port_security import models as ps_models from neutron.db.qos import models as qos_models from neutron.db import rbac_db_models from neutron.extensions import availability_zone as az_ext from neutron.objects import base from neutron.objects import common_types from neutron.objects.db import api as obj_db_api from neutron.objects.extensions import port_security as base_ps from neutron.objects import rbac_db @obj_base.VersionedObjectRegistry.register class NetworkSegment(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = segment_model.NetworkSegment fields = { 'id': obj_fields.UUIDField(), 'network_id': obj_fields.UUIDField(), 'name': obj_fields.StringField(), 'network_type': obj_fields.StringField(), 'physical_network': obj_fields.StringField(nullable=True), 'segmentation_id': obj_fields.IntegerField(nullable=True), 'is_dynamic': obj_fields.BooleanField(default=False), 'segment_index': obj_fields.IntegerField(default=0) } foreign_keys = { 'Network': {'network_id': 'id'}, 'PortBindingLevel': {'id': 'segment_id'}, } @classmethod def get_objects(cls, context, _pager=None, **kwargs): if not _pager: _pager = base.Pager() if not _pager.sorts: # (NOTE) True means ASC, False is DESC _pager.sorts = [ (field, True) for field in ('network_id', 'segment_index') ] return super(NetworkSegment, cls).get_objects(context, _pager, **kwargs) @obj_base.VersionedObjectRegistry.register class NetworkPortSecurity(base_ps._PortSecurity): # Version 1.0: Initial version VERSION = "1.0" db_model = ps_models.NetworkSecurityBinding fields_need_translation = {'id': 'network_id'} @obj_base.VersionedObjectRegistry.register class Network(rbac_db.NeutronRbacObject): # Version 1.0: Initial version VERSION = '1.0' rbac_db_model = rbac_db_models.NetworkRBAC db_model = models_v2.Network fields = { 'id': obj_fields.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'status': obj_fields.StringField(nullable=True), 'admin_state_up': obj_fields.BooleanField(nullable=True), 'vlan_transparent': obj_fields.BooleanField(nullable=True), # TODO(ihrachys): consider converting to a field of stricter type 'availability_zone_hints': obj_fields.ListOfStringsField( nullable=True), 'shared': obj_fields.BooleanField(default=False), 'mtu': obj_fields.IntegerField(nullable=True), # TODO(ihrachys): consider exposing availability zones # TODO(ihrachys): consider converting to boolean 'security': obj_fields.ObjectField( 'NetworkPortSecurity', nullable=True), 'segments': obj_fields.ListOfObjectsField( 'NetworkSegment', nullable=True), 'dns_domain': common_types.DomainNameField(nullable=True), 
'qos_policy_id': obj_fields.UUIDField(nullable=True, default=None), # TODO(ihrachys): add support for tags, probably through a base class # since it's a feature that will probably later be added for other # resources too # TODO(ihrachys): expose external network attributes } synthetic_fields = [ 'dns_domain', # MTU is not stored in the database any more, it's a synthetic field # that may be used by plugins to provide a canonical representation for # the resource 'mtu', 'qos_policy_id', 'security', 'segments', ] fields_need_translation = { 'security': 'port_security', } def create(self): fields = self.obj_get_changes() with db_api.autonested_transaction(self.obj_context.session): dns_domain = self.dns_domain qos_policy_id = self.qos_policy_id super(Network, self).create() if 'dns_domain' in fields: self._set_dns_domain(dns_domain) if 'qos_policy_id' in fields: self._attach_qos_policy(qos_policy_id) def update(self): fields = self.obj_get_changes() with db_api.autonested_transaction(self.obj_context.session): super(Network, self).update() if 'dns_domain' in fields: self._set_dns_domain(fields['dns_domain']) if 'qos_policy_id' in fields: self._attach_qos_policy(fields['qos_policy_id']) def _attach_qos_policy(self, qos_policy_id): # TODO(ihrachys): introduce an object for the binding to isolate # database access in a single place, currently scattered between port # and policy objects obj_db_api.delete_objects( self.obj_context, qos_models.QosNetworkPolicyBinding, network_id=self.id, ) if qos_policy_id: obj_db_api.create_object( self.obj_context, qos_models.QosNetworkPolicyBinding, {'network_id': self.id, 'policy_id': qos_policy_id} ) self.qos_policy_id = qos_policy_id self.obj_reset_changes(['qos_policy_id']) def _set_dns_domain(self, dns_domain): obj_db_api.delete_objects( self.obj_context, dns_db.NetworkDNSDomain, network_id=self.id, ) if dns_domain: obj_db_api.create_object( self.obj_context, dns_db.NetworkDNSDomain, {'network_id': self.id, 'dns_domain': dns_domain} ) self.dns_domain = dns_domain self.obj_reset_changes(['dns_domain']) @classmethod def modify_fields_from_db(cls, db_obj): result = super(Network, cls).modify_fields_from_db(db_obj) if az_ext.AZ_HINTS in result: result[az_ext.AZ_HINTS] = ( az_ext.convert_az_string_to_list(result[az_ext.AZ_HINTS])) return result @classmethod def modify_fields_to_db(cls, fields): result = super(Network, cls).modify_fields_to_db(fields) if az_ext.AZ_HINTS in result: result[az_ext.AZ_HINTS] = ( az_ext.convert_az_list_to_string(result[az_ext.AZ_HINTS])) return result def from_db_object(self, *objs): super(Network, self).from_db_object(*objs) for db_obj in objs: # extract domain name if db_obj.get('dns_domain'): self.dns_domain = ( db_obj.dns_domain.dns_domain ) else: self.dns_domain = None self.obj_reset_changes(['dns_domain']) # extract qos policy binding if db_obj.get('qos_policy_binding'): self.qos_policy_id = ( db_obj.qos_policy_binding.policy_id ) else: self.qos_policy_id = None self.obj_reset_changes(['qos_policy_id']) @classmethod def get_bound_tenant_ids(cls, context, policy_id): # TODO(ihrachys): provide actual implementation return set()
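
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes an
# admin request context (e.g. neutron.context.get_admin_context()) and a
# working database behind it; names, field values, and `demo_policy_id` are
# examples only, not a definitive recipe.
#
#   ctx = context.get_admin_context()
#   net = Network(ctx, name='demo-net', admin_state_up=True, status='ACTIVE')
#   net.create()                              # also writes dns/qos bindings if set
#
#   fetched = Network.get_object(ctx, id=net.id)
#   fetched.qos_policy_id = demo_policy_id    # hypothetical existing policy id
#   fetched.update()                          # re-attaches the QoS policy binding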
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility funtions for the graph_editor. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six import iteritems from tensorflow.python.framework import ops as tf_ops from tensorflow.python.ops import array_ops as tf_array_ops __all__ = [ "make_list_of_op", "get_tensors", "make_list_of_t", "get_generating_ops", "get_consuming_ops", "ControlOutputs", "placeholder_name", "make_placeholder_from_tensor", "make_placeholder_from_dtype_and_shape", ] def concatenate_unique(la, lb): """Add all the elements of lb in la if they are not there already.""" for l in lb: if l not in la: la.append(l) return la # TODO(fkp): very generic code, it should be moved in a more generic place. class ListView(object): """Immutable list wrapper. This class is strongly inspired by the one in tf.Operation. """ def __init__(self, list_): if not isinstance(list_, list): raise TypeError("Expected a list, got: {}.".format(type(list_))) self._list = list_ def __iter__(self): return iter(self._list) def __len__(self): return len(self._list) def __bool__(self): return bool(self._list) # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __getitem__(self, i): return self._list[i] def __add__(self, other): if not isinstance(other, list): other = list(other) return list(self) + other # TODO(fkp): very generic code, it should be moved in a more generic place. def is_iterable(obj): """Return true if the object is iterable.""" try: _ = iter(obj) except Exception: # pylint: disable=broad-except return False return True def flatten_tree(tree, leaves=None): """Flatten a tree into a list. Args: tree: iterable or not. If iterable, its elements (child) can also be iterable or not. leaves: list to which the tree leaves are appended (None by default). Returns: A list of all the leaves in the tree. """ if leaves is None: leaves = [] if is_iterable(tree): for child in tree: flatten_tree(child, leaves) else: leaves.append(tree) return leaves def transform_tree(tree, fn, iterable_type=tuple): """Transform all the nodes of a tree. Args: tree: iterable or not. If iterable, its elements (child) can also be iterable or not. fn: function to apply to each leaves. iterable_type: type use to construct the resulting tree for unknwon iterable, typically `list` or `tuple`. Returns: A tree whose leaves has been transformed by `fn`. The hierarchy of the output tree mimics the one of the input tree. 
""" if is_iterable(tree): if isinstance(tree, list): return [transform_tree(child, fn) for child in tree] elif isinstance(tree, tuple): # this works for named tupled as well: return tree.__new__(type(tree), (transform_tree(child, fn) for child in tree)) elif isinstance(tree, dict): return {k: transform_tree(child, fn) for k, child in iteritems(tree)} else: return iterable_type(transform_tree(child, fn) for child in tree) else: return fn(tree) def check_graphs(*args): """Check that all the element in args belong to the same graph. Args: *args: a list of object with a obj.graph property. Raises: ValueError: if all the elements do not belong to the same graph. """ graph = None for i, sgv in enumerate(args): if graph is None and sgv.graph is not None: graph = sgv.graph elif sgv.graph is not None and sgv.graph is not graph: raise ValueError("Argument[{}]: Wrong graph!".format(i)) def get_unique_graph(tops, check_types=None, none_if_empty=False): """Return the unique graph used by the all the elements in tops. Args: tops: list of elements to check (usually a list of tf.Operation and/or tf.Tensor). Or a tf.Graph. check_types: check that the element in tops are of given type(s). If None, the types (tf.Operation, tf.Tensor) are used. none_if_empty: don't raise an error if tops is an empty list, just return None. Returns: The unique graph used by all the tops. Raises: TypeError: if tops is not a iterable of tf.Operation. ValueError: if the graph is not unique. """ if isinstance(tops, tf_ops.Graph): return tops if not is_iterable(tops): raise TypeError("{} is not iterable".format(type(tops))) if check_types is None: check_types = (tf_ops.Operation, tf_ops.Tensor) elif not is_iterable(check_types): check_types = (check_types,) g = None for op in tops: if not isinstance(op, check_types): raise TypeError("Expected a type in ({}), got: {}".format(", ".join([str( t) for t in check_types]), type(op))) if g is None: g = op.graph elif g is not op.graph: raise ValueError("Operation {} does not belong to given graph".format(op)) if g is None and not none_if_empty: raise ValueError("Can't find the unique graph of an empty list") return g def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False): """Convert ops to a list of `tf.Operation`. Args: ops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single operation. check_graph: if `True` check if all the operations belong to the same graph. allow_graph: if `False` a `tf.Graph` cannot be converted. ignore_ts: if True, silently ignore `tf.Tensor`. Returns: A newly created list of `tf.Operation`. Raises: TypeError: if ops cannot be converted to a list of `tf.Operation` or, if `check_graph` is `True`, if all the ops do not belong to the same graph. """ if isinstance(ops, tf_ops.Graph): if allow_graph: return ops.get_operations() else: raise TypeError("allow_graph is False: cannot convert a tf.Graph.") else: if not is_iterable(ops): ops = [ops] if not ops: return [] if check_graph: check_types = None if ignore_ts else tf_ops.Operation get_unique_graph(ops, check_types=check_types) return [op for op in ops if isinstance(op, tf_ops.Operation)] # TODO(fkp): move this function in tf.Graph? def get_tensors(graph): """get all the tensors which are input or output of an op in the graph. Args: graph: a `tf.Graph`. Returns: A list of `tf.Tensor`. Raises: TypeError: if graph is not a `tf.Graph`. 
""" if not isinstance(graph, tf_ops.Graph): raise TypeError("Expected a graph, got: {}".format(type(graph))) ts = [] for op in graph.get_operations(): ts += op.outputs return ts def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False): """Convert ts to a list of `tf.Tensor`. Args: ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor. check_graph: if `True` check if all the tensors belong to the same graph. allow_graph: if `False` a `tf.Graph` cannot be converted. ignore_ops: if `True`, silently ignore `tf.Operation`. Returns: A newly created list of `tf.Tensor`. Raises: TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or, if `check_graph` is `True`, if all the ops do not belong to the same graph. """ if isinstance(ts, tf_ops.Graph): if allow_graph: return get_tensors(ts) else: raise TypeError("allow_graph is False: cannot convert a tf.Graph.") else: if not is_iterable(ts): ts = [ts] if not ts: return [] if check_graph: check_types = None if ignore_ops else tf_ops.Tensor get_unique_graph(ts, check_types=check_types) return [t for t in ts if isinstance(t, tf_ops.Tensor)] def get_generating_ops(ts): """Return all the generating ops of the tensors in `ts`. Args: ts: a list of `tf.Tensor` Returns: A list of all the generating `tf.Operation` of the tensors in `ts`. Raises: TypeError: if `ts` cannot be converted to a list of `tf.Tensor`. """ ts = make_list_of_t(ts, allow_graph=False) return [t.op for t in ts] def get_consuming_ops(ts): """Return all the consuming ops of the tensors in ts. Args: ts: a list of `tf.Tensor` Returns: A list of all the consuming `tf.Operation` of the tensors in `ts`. Raises: TypeError: if ts cannot be converted to a list of `tf.Tensor`. """ ts = make_list_of_t(ts, allow_graph=False) ops = [] for t in ts: for op in t.consumers(): if op not in ops: ops.append(op) return ops class ControlOutputs(object): """The control outputs topology.""" def __init__(self, graph): """Create a dictionary of control-output dependencies. Args: graph: a `tf.Graph`. Returns: A dictionary where a key is a `tf.Operation` instance and the corresponding value is a list of all the ops which have the key as one of their control-input dependencies. Raises: TypeError: graph is not a `tf.Graph`. 
""" if not isinstance(graph, tf_ops.Graph): raise TypeError("Expected a tf.Graph, got: {}".format(type(graph))) self._control_outputs = {} self._graph = graph self._version = None self._build() def update(self): """Update the control outputs if the graph has changed.""" if self._version != self._graph.version: self._build() return self def _build(self): """Build the control outputs dictionary.""" self._control_outputs.clear() ops = self._graph.get_operations() for op in ops: for control_input in op.control_inputs: if control_input not in self._control_outputs: self._control_outputs[control_input] = [] if op not in self._control_outputs[control_input]: self._control_outputs[control_input].append(op) self._version = self._graph.version def get_all(self): return self._control_outputs def get(self, op): """return the control outputs of op.""" if op in self._control_outputs: return self._control_outputs[op] else: return () @property def graph(self): return self._graph def scope_finalize(scope): if scope and scope[-1] != "/": scope += "/" return scope def scope_dirname(scope): slash = scope.rfind("/") if slash == -1: return "" return scope[:slash + 1] def scope_basename(scope): slash = scope.rfind("/") if slash == -1: return scope return scope[slash + 1:] def placeholder_name(t=None, scope=None): """Create placeholder name for the graph editor. Args: t: optional tensor on which the placeholder operation's name will be based on scope: absolute scope with which to prefix the placeholder's name. None means that the scope of t is preserved. "" means the root scope. Returns: A new placeholder name prefixed by "geph". Note that "geph" stands for Graph Editor PlaceHolder. This convention allows to quickly identify the placeholder generated by the Graph Editor. Raises: TypeError: if t is not None or a tf.Tensor. """ if scope is not None: scope = scope_finalize(scope) if t is not None: if not isinstance(t, tf_ops.Tensor): raise TypeError("Expected a tf.Tenfor, got: {}".format(type(t))) op_dirname = scope_dirname(t.op.name) op_basename = scope_basename(t.op.name) if scope is None: scope = op_dirname if op_basename.startswith("geph__"): ph_name = op_basename else: ph_name = "geph__{}_{}".format(op_basename, t.value_index) return scope + ph_name else: if scope is None: scope = "" return scope + "geph" def make_placeholder_from_tensor(t, scope=None): """Create a `tf.placeholder` for the Graph Editor. Note that the correct graph scope must be set by the calling function. Args: t: a `tf.Tensor` whose name will be used to create the placeholder (see function placeholder_name). scope: absolute scope within which to create the placeholder. None means that the scope of `t` is preserved. `""` means the root scope. Returns: A newly created `tf.placeholder`. Raises: TypeError: if `t` is not `None` or a `tf.Tensor`. """ return tf_array_ops.placeholder(dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(t, scope=scope)) def make_placeholder_from_dtype_and_shape(dtype, shape=None, scope=None): """Create a tf.placeholder for the Graph Editor. Note that the correct graph scope must be set by the calling function. The placeholder is named using the function placeholder_name (with no tensor argument). Args: dtype: the tensor type. shape: the tensor shape (optional). scope: absolute scope within which to create the placeholder. None means that the scope of t is preserved. "" means the root scope. Returns: A newly created tf.placeholder. 
""" return tf_array_ops.placeholder(dtype=dtype, shape=shape, name=placeholder_name(scope=scope))
""" Copyright (c) 2016 Keith Sterling Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from programy.utils.parsing.linenumxml import LineNumberingParser import xml.etree.ElementTree as ET import yaml import json import logging from abc import ABCMeta, abstractmethod class BaseConfigurationData(object): __metaclass__ = ABCMeta def __init__(self, name): self.section_name = name def sub_bot_root(self, text, root): return text.replace('$BOT_ROOT', root) @abstractmethod def load_config_section(self, config_file, bot_root): """ Never Implemented """ class BrainFileConfiguration(object): def __init__(self, files, extension=".aiml", directories=False): self._files = files self._extension = extension self._directories = directories @property def files(self): return self._files @property def extension(self): return self._extension @property def directories(self): return self._directories class BrainServiceConfiguration(object): def __init__(self, name, data=None): self._name = name.upper() self._params = {} if data is not None: for key in data.keys(): self._params[key.upper()] = data[key] @property def name(self): return self._name @property def path(self): return self._params['PATH'] def parameters(self): return self._params.keys() def set_parameter(self, key, value): self._params[key] = value def parameter(self, name): if name in self._params: return self._params[name] else: return None class BrainConfiguration(BaseConfigurationData): def __init__(self): self._supress_warnings = False self._allow_system_aiml = True self._allow_learn_aiml = True self._allow_learnf_aiml = True self._aiml_files = None self._set_files = None self._map_files = None self._denormal = None self._normal = None self._gender = None self._person = None self._person2 = None self._predicates = None self._pronouns = None self._properties = None self._triples = None self._preprocessors = None self._postprocessors = None self._services = [] BaseConfigurationData.__init__(self, "brain") def _get_file_option(self, config_file, option_name, section, bot_root): option = config_file.get_option(option_name, section) if option is not None: option = self.sub_bot_root(option, bot_root) return option def _get_brain_file_configuration(self, config_file, section, bot_root): files = config_file.get_option("files", section) files = self.sub_bot_root(files, bot_root) extension = config_file.get_option("extension", section) directories = config_file.get_option("directories", section) return BrainFileConfiguration(files, extension, directories) def load_config_section(self, config_file, 
bot_root): brain = config_file.get_section(self.section_name) if brain is not None: self._supress_warnings = config_file.get_option("supress_warnings", brain, False) self._allow_system_aiml = config_file.get_option("allow_system_aiml", brain, False) self._allow_learn_aiml = config_file.get_option("allow_learn_aiml", brain, False) self._allow_learnf_aiml = config_file.get_option("allow_learnf_aiml", brain, False) files = config_file.get_section("files", brain) if files is not None: aiml = config_file.get_section("aiml", files) self._aiml_files = self._get_brain_file_configuration(config_file, aiml, bot_root) sets = config_file.get_section("sets", files) self._set_files = self._get_brain_file_configuration(config_file, sets, bot_root) maps = config_file.get_section("maps", files) self._map_files = self._get_brain_file_configuration(config_file, maps, bot_root) self._denormal = self._get_file_option(config_file, "denormal", files, bot_root) self._normal = self._get_file_option(config_file, "normal", files, bot_root) self._gender = self._get_file_option(config_file, "gender", files, bot_root) self._person = self._get_file_option(config_file, "person", files, bot_root) self._person2 = self._get_file_option(config_file, "person2", files, bot_root) self._predicates = self._get_file_option(config_file, "predicates", files, bot_root) self._pronouns = self._get_file_option(config_file, "pronouns", files, bot_root) self._properties = self._get_file_option(config_file, "properties", files, bot_root) self._triples = self._get_file_option(config_file, "triples", files, bot_root) self._preprocessors = self._get_file_option(config_file, "preprocessors", files, bot_root) self._postprocessors = self._get_file_option(config_file, "postprocessors", files, bot_root) else: logging.warning("Config section [files] missing from Brain") services = config_file.get_section("services", brain) if services is not None: service_keys = config_file.get_child_section_keys("services", brain) for name in service_keys: service_data = config_file.get_section_data(name, services) self._services.append(BrainServiceConfiguration(name, service_data)) else: logging.warning("Config section [services] missing from Brain") else: logging.warning("Config section [%s] missing", self.section_name) @property def supress_warnings(self): return self._supress_warnings @property def allow_system_aiml(self): return self._allow_system_aiml @property def allow_learn_aiml(self): return self._allow_learn_aiml @property def allow_learnf_aiml(self): return self._allow_learnf_aiml @property def aiml_files(self): return self._aiml_files @property def set_files(self): return self._set_files @property def map_files(self): return self._map_files @property def denormal(self): return self._denormal @property def normal(self): return self._normal @property def gender(self): return self._gender @property def person(self): return self._person @property def person2(self): return self._person2 @property def predicates(self): return self._predicates @property def pronouns(self): return self._pronouns @property def properties(self): return self._properties @property def triples(self): return self._triples @property def preprocessors(self): return self._preprocessors @property def postprocessors(self): return self._postprocessors @property def services(self): return self._services class BotConfiguration(BaseConfigurationData): def __init__(self): self.bot_root = "." 
self._prompt = ">>> " self._default_response = "Sorry, I don't have an answer for that right now" self._exit_response = "Bye!" self._initial_question = "Hello" self._override_predicates = True BaseConfigurationData.__init__(self, "bot") def load_config_section(self, config_file, bot_root): bot = config_file.get_section(self.section_name) if bot is not None: self._prompt = config_file.get_option("prompt", bot) self._default_response = config_file.get_option("default_response", bot) self._exit_response = config_file.get_option("exit_response", bot) self._initial_question = config_file.get_option("initial_question", bot) else: logging.warning("Config section [%s] missing", self.section_name) @property def prompt(self): return self._prompt @prompt.setter def prompt(self, text): self._prompt = text @property def default_response(self): return self._default_response @default_response.setter def default_response(self, text): self._default_response = text @property def exit_response(self): return self._exit_response @exit_response.setter def exit_response(self, text): self._exit_response = text @property def initial_question(self): return self._initial_question @initial_question.setter def initial_question(self, text): self._initial_question = text @property def override_predicates(self): return self._override_predicates @override_predicates.setter def override_predicates(self, override): self._override_predicates = override class RestConfiguration(BaseConfigurationData): def __init__(self): self._host = "0.0.0.0" self._port = 80 self._debug = False self._use_api_keys = False BaseConfigurationData.__init__(self, "rest") @property def host(self): return self._host @property def port(self): return self._port @property def debug(self): return self._debug @property def use_api_keys(self): return self._use_api_keys def load_config_section(self, config_file, bot_root): rest = config_file.get_section(self.section_name) if rest is not None: self._host = config_file.get_option("host", rest) self._port = config_file.get_option("port", rest) self._debug = config_file.get_bool_option("debug", rest) self._use_api_keys = config_file.get_bool_option("use_api_keys", rest) class ClientConfiguration(object): def __init__(self): self._brain_config = BrainConfiguration() self._bot_config = BotConfiguration() @property def brain_configuration(self): return self._brain_config @property def bot_configuration(self): return self._bot_config def load_config_data(self, config_file, bot_root): self._brain_config.load_config_section(config_file, bot_root) self._bot_config.load_config_section(config_file, bot_root) class RestClientConfiguration(ClientConfiguration): def __init__(self): ClientConfiguration.__init__(self) self._rest_config = RestConfiguration() @property def rest_configuration(self): return self._rest_config def load_config_data(self, config_file, bot_root): super(RestClientConfiguration, self).load_config_data(config_file, bot_root) self._rest_config.load_config_section(config_file, bot_root) class BaseConfigurationFile(object): __metaclass__ = ABCMeta def __init__(self, client_config): self.client_config = client_config @abstractmethod def load_from_text(self, text, bot_root): """ Never Implemented """ @abstractmethod def load_from_file(self, filename, bot_root): """ Never Implemented """ @abstractmethod def get_section(self, section_name, parent_section=None): """ Never Implemented """ @abstractmethod def get_section_data(self, section_name, parent_section=None): """ Never Implemented """ @abstractmethod def 
get_child_section_keys(self, section_name, parent_section=None): """ Never Implemented """ @abstractmethod #TODO option_name and section are the wrong way round to other function calls def get_option(self, option_name, section, missing_value=None): """ Never Implemented """ def _infer_type_from_string(self, text): if text == 'True' or text == 'true': return True elif text == 'False' or text == 'false': return False else: return text class YamlConfigurationFile(BaseConfigurationFile): def __init__(self, client_config): BaseConfigurationFile.__init__(self, client_config) self.yaml_data = None def load_from_text(self, text, bot_root): self.yaml_data = yaml.load(text) self.client_config.load_config_data(self, bot_root) def load_from_file(self, filename, bot_root): with open(filename, 'r+') as yml_data_file: self.yaml_data = yaml.load(yml_data_file) self.client_config.load_config_data(self, bot_root) def get_section(self, section_name, parent_section=None): if parent_section is None: if section_name in self.yaml_data: return self.yaml_data[section_name] else: if section_name in parent_section: return parent_section[section_name] return None def get_section_data(self, section_name, parent_section=None): return self.get_section(section_name, parent_section) def get_child_section_keys(self, section_name, parent_section=None): if parent_section is None: return self.yaml_data[section_name].keys() else: return parent_section[section_name].keys() def get_option(self, option_name, section, missing_value=None): if option_name in section: return section[option_name] else: logging.error("Missing value for [%s] in config section [%s], return default value %s", option_name, section, missing_value) return missing_value def get_bool_option(self, option_name, section, missing_value=False): if option_name in section: value = section[option_name] if isinstance(value, bool): return bool(value) else: raise Exception("Invalid boolean config value") else: logging.error("Missing value for [%s] in config section [%s], return default value %s", option_name, section, missing_value) return missing_value def get_int_option(self, option_name, section, missing_value=0): if option_name in section: value = section[option_name] if isinstance(value, int): return int(value) else: raise Exception("Invalid integer config value") else: logging.error("Missing value for [%s] in config section [%s], return default value %d", option_name, section, missing_value) return missing_value class JSONConfigurationFile(BaseConfigurationFile): def __init__(self, client_config): BaseConfigurationFile.__init__(self, client_config) self.json_data = None def load_from_text(self, text, bot_root): self.json_data = json.loads(text) self.client_config.load_config_data(self, bot_root) def load_from_file(self, filename, bot_root): with open(filename, 'r+') as json_data_file: self.json_data = json.load(json_data_file) self.client_config.load_config_data(self, bot_root) def get_section(self, section_name, parent_section=None): if parent_section is None: return self.json_data[section_name] else: return parent_section[section_name] def get_section_data(self, section_name, parent_section=None): return self.get_section(section_name, parent_section) def get_child_section_keys(self, section_name, parent_section=None): if parent_section is None: return self.json_data[section_name].keys() else: return parent_section[section_name].keys() def get_option(self, option_name, section, missing_value=None): if option_name in section: return section[option_name] else: 
logging.error("Missing value for [%s] in config section [%s], return default value %s", option_name, section, missing_value) return missing_value class XMLConfigurationFile(BaseConfigurationFile): def __init__(self, client_config): BaseConfigurationFile.__init__(self, client_config) self.xml_data = None def load_from_text(self, text, bot_root): tree = ET.fromstring(text) self.xml_data = tree self.client_config.load_config_data(self, bot_root) def load_from_file(self, filename, bot_root): with open(filename, 'r+') as xml_data_file: tree = ET.parse(xml_data_file, parser=LineNumberingParser()) self.xml_data = tree.getroot() self.client_config.load_config_data(self, bot_root) def get_section(self, section_name, parent_section=None): if parent_section is None: return self.xml_data.find(section_name) else: return parent_section.find(section_name) def get_section_data(self, section_name, parent_section=None): if parent_section is None: section = self.xml_data.find(section_name) else: section = parent_section.find(section_name) data = {} for child in section: data[child.tag] = child.text return data def get_child_section_keys(self, section_name, parent_section=None): keys = [] if parent_section is None: for child in self.xml_data.find(section_name): keys.append(child.tag) else: for child in parent_section.find(section_name): keys.append(child.tag) return keys def get_option(self, option_name, section, missing_value=None): child = section.find(option_name) if child is not None: return self._infer_type_from_string(child.text) else: logging.error("Missing value for [%s] in config section [%s], return default value %s", option_name, section, missing_value) return missing_value class ConfigurationFactory(object): @classmethod def load_configuration_from_file(cls, client_config, filename, file_format=None, bot_root="."): if file_format is None or len(file_format) == 0: file_format = ConfigurationFactory.guess_format_from_filename(filename) config_file = ConfigurationFactory.get_config_by_name(client_config, file_format) config_file.load_from_file(filename, bot_root) return config_file @classmethod def guess_format_from_filename(cls, filename): if "." not in filename: raise Exception("No file extension to allow format guessing!") last_dot = filename.rfind(".") file_format = filename[last_dot + 1:] return file_format @classmethod def get_config_by_name(cls, client_config, file_format): file_format = file_format.lower() if file_format == 'yaml': return YamlConfigurationFile(client_config) elif file_format == 'json': return JSONConfigurationFile(client_config) elif file_format == 'xml': return XMLConfigurationFile(client_config) else: raise Exception("Unsupported configuration format:", file_format)
from __future__ import print_function, division, absolute_import import subprocess import re from ..shell_integration import (BEFORE_PROMPT, AFTER_PROMPT, BEFORE_OUTPUT, AFTER_OUTPUT, readline_invisible) import IPython from IPython.testing.tools import get_ipython_cmd IPy5 = IPython.version_info >= (5,) def test_IPython(): ipython = get_ipython_cmd() SMM = b'\x1b[?1034h' commands = b"""\ 1 raise Exception undefined def f(): pass f() """ # First the control (without iterm2_tools) if IPy5: # Skip IPython >= 5 tests for now. I can't get pexpect tests to work. return import pexpect p = pexpect.spawn(' '.join(ipython + ['--quick', '--colors=NoColor', '--no-banner', '--no-simple-prompt', '--no-term-title', '--no-confirm-exit', '--no-color-info'])) p.delaybeforesend = 1 fout = open('mylog.txt','wb') p.logfile = fout # See prompt_toolkit/terminal/vt100_input.py and prompt_toolkit/terminal/vt100_output.py # prompt = ( # b'\r\n' # Newline # b'\x1b[?1l' # Put terminal in cursor mode # b'\x1b[6n' # Ask for cursor position report (CPR) # b'\x1b[?2004h' # Enable bracketed paste # b'\x1b[?25l' # Hide cursor # b'\x1b[?7l' # Disable autowrap # b'\x1b[0m' # Reset attributes # b'\x1b[0m' # Reset attributes # b'\x1b[J' # Erase down # b'\x1b[0m' # Reset attributes # b'In [' # (visible text) # b'\x1b[0m' # Reset attributes # b'1' # (visible text) # b'\x1b[0m' # Reset attributes # b']: ' # (visible text) # b'\x1b[8D' # ??? # b'\x1b[8C' # ??? # b'\x1b[?12l' # Stop blinking cursor # b'\x1b[?25h' # Show cursor # ) # p.expect_exact(prompt, timeout=10) # p.write(b'1\n') for command in commands.split(b'\n'): p.write(command + b'\n') p.sendeof() p.expect(pexpect.EOF, timeout=10) stdout, stderr = p.before, b'' try: assert not p.isalive() finally: p.terminate(force=True) else: p = subprocess.Popen(ipython + ['--quick', '--colors=NoColor', '--no-banner'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate(input=commands) # Different versions of readline do different things with the smm code. 
stdout = stdout.replace(SMM, b'').strip() # Exceptions are printed differently in Python 2 and 3 for some reason stdout = stdout.replace(b'-'*75, b'') stdout = stdout.replace(b' '*33 + b'Traceback', b'Traceback') expected42 = b"""\ In [1]: Out[1]: 1 In [2]: \n\ In [2]: \n\ ExceptionTraceback (most recent call last) <ipython-input-2-fca2ab0ca76b> in <module>() ----> 1 raise Exception Exception: \n\nIn [3]: \n\ NameErrorTraceback (most recent call last) <ipython-input-3-002bcaa7be0e> in <module>() ----> 1 undefined NameError: name 'undefined' is not defined In [4]: ...: ...: \n\ In [5]: \n\ In [6]: \n\ In [6]: \n\ Do you really want to exit ([y]/n)?\ """ expected5 = b"""\ In [1]: Out[1]: 1 In [2]: \n\ In [2]: \n\ ExceptionTraceback (most recent call last) <ipython-input-2-fca2ab0ca76b> in <module>() ----> 1 raise Exception Exception: \n\ In [3]: \n\ NameErrorTraceback (most recent call last) <ipython-input-3-002bcaa7be0e> in <module>() ----> 1 undefined NameError: name 'undefined' is not defined In [4]: ...: ...: \n\ In [5]: \n\ In [6]: \n\ In [6]: \n\ Do you really want to exit ([y]/n)?\ """ if IPython.version_info >= (5,): assert stdout == expected5 else: assert stdout == expected42 assert stderr == b'' # Now the same thing with iterm2_tools.ipython p = subprocess.Popen(ipython + ['--quick', '--colors=NoColor', '--no-banner', '--ext=iterm2_tools.ipython'] + (['--no-simple-prompt'] if IPython.version_info >= (5,) else []), stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) # Things of note here: # - There are 8 prompts (3 with no input, the second, sixth, and # seventh). The sixth is empty because f() returns None, and the eighth # is not empty because of the exit confirmation. # - The color codes are outside of the iterm2 codes. This is because of # the way IPython handles color codes. See the note in ipython.py. # - The D code (after_output) should always go right before the A code # (before_prompt). # - The fourth and fifth D code (after_output), corresponding to the third # and fourth prompt, should have D;1 (exceptions). The rest should have # D;0. # - The A, B, and D codes should be surrounded by \001 and \002 (C, # before_output) does not need it because it is not in the prompt. stdout, stderr = p.communicate(input=commands) # Different versions of readline do different things with the smm code. stdout = stdout.replace(SMM, b'').strip() # Exceptions are printed differently in Python 2 and 3 for some reason stdout = stdout.replace(b'-'*75, b'') stdout = stdout.replace(b' '*33 + b'Traceback', b'Traceback') # Note: this test will fail in versions of IPython < 4.1.0 because of a # bug. See https://github.com/ipython/ipython/issues/8724 and # https://github.com/ipython/ipython/pull/8738. 
expected42 = b"""\ \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [1]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07Out[1]: 1 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [2]: \x01\x1b]133;B\x07\x02 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [2]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07 ExceptionTraceback (most recent call last) <ipython-input-2-fca2ab0ca76b> in <module>() ----> 1 raise Exception Exception: \n\n\x01\x1b]133;D;1\x07\x02\x01\x1b]133;A\x07\x02In [3]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07 NameErrorTraceback (most recent call last) <ipython-input-3-002bcaa7be0e> in <module>() ----> 1 undefined NameError: name 'undefined' is not defined \x01\x1b]133;D;1\x07\x02\x01\x1b]133;A\x07\x02In [4]: \x01\x1b]133;B\x07\x02 ...: ...: \x1b]133;C\x07 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [5]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [6]: \x01\x1b]133;B\x07\x02 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [6]: \x01\x1b]133;B\x07\x02 Do you really want to exit ([y]/n)?\ """ expected5 = b"""\ \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [1]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07Out[1]: 1 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [2]: \x01\x1b]133;B\x07\x02 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [2]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07 ExceptionTraceback (most recent call last) <ipython-input-2-fca2ab0ca76b> in <module>() ----> 1 raise Exception Exception: \n\ \x01\x1b]133;D;1\x07\x02\x01\x1b]133;A\x07\x02In [3]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07 NameErrorTraceback (most recent call last) <ipython-input-3-002bcaa7be0e> in <module>() ----> 1 undefined NameError: name 'undefined' is not defined \x01\x1b]133;D;1\x07\x02\x01\x1b]133;A\x07\x02In [4]: \x01\x1b]133;B\x07\x02 ...: ...: \x1b]133;C\x07 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [5]: \x01\x1b]133;B\x07\x02\x1b]133;C\x07 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [6]: \x01\x1b]133;B\x07\x02 \x01\x1b]133;D;0\x07\x02\x01\x1b]133;A\x07\x02In [6]: \x01\x1b]133;B\x07\x02 Do you really want to exit ([y]/n)?\ """ if IPython.version_info >= (5,): assert stdout == expected5 else: assert stdout == expected42 assert stderr == b'' # Ideally all the codes would be bytes in Python 3, but bytes don't have a # format (even in Python 3.5). 
stdout = stdout.decode('ascii') AFTER_OUTPUT0 = AFTER_OUTPUT.format(command_status=0) AFTER_OUTPUT1 = AFTER_OUTPUT.format(command_status=1) assert (stdout.count(AFTER_OUTPUT0) == stdout.count(readline_invisible(AFTER_OUTPUT0)) == stdout.count(readline_invisible(AFTER_OUTPUT0) + readline_invisible(BEFORE_PROMPT)) == 6) assert (stdout.count(AFTER_OUTPUT1) == stdout.count(readline_invisible(AFTER_OUTPUT1)) == stdout.count(readline_invisible(AFTER_OUTPUT1) + readline_invisible(BEFORE_PROMPT)) == 2) assert (stdout.count(BEFORE_PROMPT) == stdout.count(readline_invisible(BEFORE_PROMPT)) == 8) assert (stdout.count(AFTER_PROMPT) == stdout.count(readline_invisible(AFTER_PROMPT)) == 8) assert stdout.count(BEFORE_OUTPUT) == 5 assert stdout.count(readline_invisible(BEFORE_OUTPUT)) == 0 AFTER_OUTPUT_RE = re.compile(re.escape(AFTER_OUTPUT.format(command_status='DUMMY')).replace("DUMMY", r'\d')) assert re.findall(AFTER_OUTPUT_RE, stdout) == [ AFTER_OUTPUT0, AFTER_OUTPUT0, AFTER_OUTPUT0, AFTER_OUTPUT1, AFTER_OUTPUT1, AFTER_OUTPUT0, AFTER_OUTPUT0, AFTER_OUTPUT0, ] AFTER_PROMPT_RE = re.escape(AFTER_PROMPT) BEFORE_OUTPUT_RE = re.escape(BEFORE_OUTPUT) AFTER_PROMPT_OR_BEFORE_OUTPUT_RE = re.compile('(%s|%s)' % (AFTER_PROMPT_RE, BEFORE_OUTPUT_RE)) assert re.findall(AFTER_PROMPT_OR_BEFORE_OUTPUT_RE, stdout) == [ AFTER_PROMPT, BEFORE_OUTPUT, # non-empty prompt AFTER_PROMPT, # empty prompt AFTER_PROMPT, BEFORE_OUTPUT, AFTER_PROMPT, BEFORE_OUTPUT, AFTER_PROMPT, BEFORE_OUTPUT, AFTER_PROMPT, BEFORE_OUTPUT, AFTER_PROMPT, AFTER_PROMPT, ]
import pandas as pd s = pd.Series([0, 1, 2], index=['a', 'b', 'c']) print(s) # a 0 # b 1 # c 2 # dtype: int64 df = pd.DataFrame(s) print(df) # 0 # a 0 # b 1 # c 2 print(type(df)) # <class 'pandas.core.frame.DataFrame'> df_ = pd.DataFrame([s]) print(df_) # a b c # 0 0 1 2 print(type(df_)) # <class 'pandas.core.frame.DataFrame'> s_name = pd.Series([0, 1, 2], index=['a', 'b', 'c'], name='X') print(s_name) # a 0 # b 1 # c 2 # Name: X, dtype: int64 print(pd.DataFrame(s_name)) # X # a 0 # b 1 # c 2 print(pd.DataFrame([s_name])) # a b c # X 0 1 2 s1 = pd.Series([0, 1, 2], index=['a', 'b', 'c']) print(s1) # a 0 # b 1 # c 2 # dtype: int64 s2 = pd.Series([0.0, 0.1, 0.2], index=['a', 'b', 'c']) print(s2) # a 0.0 # b 0.1 # c 0.2 # dtype: float64 print(pd.DataFrame({'col0': s1, 'col1': s2})) # col0 col1 # a 0 0.0 # b 1 0.1 # c 2 0.2 print(pd.DataFrame([s1, s2])) # a b c # 0 0.0 1.0 2.0 # 1 0.0 0.1 0.2 print(pd.concat([s1, s2], axis=1)) # 0 1 # a 0 0.0 # b 1 0.1 # c 2 0.2 s1_name = pd.Series([0, 1, 2], index=['a', 'b', 'c'], name='X') print(s1_name) # a 0 # b 1 # c 2 # Name: X, dtype: int64 s2_name = pd.Series([0.0, 0.1, 0.2], index=['a', 'b', 'c'], name='Y') print(s2_name) # a 0.0 # b 0.1 # c 0.2 # Name: Y, dtype: float64 print(pd.DataFrame({s1_name.name: s1_name, s2_name.name: s2_name})) # X Y # a 0 0.0 # b 1 0.1 # c 2 0.2 print(pd.DataFrame([s1_name, s2_name])) # a b c # X 0.0 1.0 2.0 # Y 0.0 0.1 0.2 print(pd.concat([s1_name, s2_name], axis=1)) # X Y # a 0 0.0 # b 1 0.1 # c 2 0.2 s3 = pd.Series([0.1, 0.2, 0.3], index=['b', 'c', 'd']) print(s3) # b 0.1 # c 0.2 # d 0.3 # dtype: float64 print(pd.DataFrame({'col0': s1, 'col1': s3})) # col0 col1 # a 0.0 NaN # b 1.0 0.1 # c 2.0 0.2 # d NaN 0.3 print(pd.DataFrame([s1, s3])) # a b c d # 0 0.0 1.0 2.0 NaN # 1 NaN 0.1 0.2 0.3 print(pd.concat([s1, s3], axis=1)) # 0 1 # a 0.0 NaN # b 1.0 0.1 # c 2.0 0.2 # d NaN 0.3 # # /usr/local/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version # of pandas will change to not sort by default. # # To accept the future behavior, pass 'sort=False'. # # To retain the current behavior and silence the warning, pass 'sort=True'. # # """Entry point for launching an IPython kernel. 
print(pd.concat([s1, s3], axis=1, join='inner')) # 0 1 # b 1 0.1 # c 2 0.2 print(s1.values) # [0 1 2] print(type(s1.values)) # <class 'numpy.ndarray'> print(pd.DataFrame({'col0': s1.values, 'col1': s3.values})) # col0 col1 # 0 0 0.1 # 1 1 0.2 # 2 2 0.3 print(pd.DataFrame([s1.values, s3.values])) # 0 1 2 # 0 0.0 1.0 2.0 # 1 0.1 0.2 0.3 # print(pd.concat([s1.values, s3.values], axis=1)) # TypeError: cannot concatenate object of type '<class 'numpy.ndarray'>'; only Series and DataFrame objs are valid print(pd.DataFrame({'col0': s1, 'col1': s3.values})) # col0 col1 # a 0 0.1 # b 1 0.2 # c 2 0.3 print(pd.DataFrame([s1, s3.values])) # a b c # 0 0.0 1.0 2.0 # 1 NaN NaN NaN print(pd.DataFrame({'col0': s1.values, 'col1': s3.values}, index=s1.index)) # col0 col1 # a 0 0.1 # b 1 0.2 # c 2 0.3 print(pd.DataFrame([s1.values, s3.values], columns=s1.index)) # a b c # 0 0.0 1.0 2.0 # 1 0.1 0.2 0.3 s4 = pd.Series([0.1, 0.2], index=['b', 'd']) print(s4) # b 0.1 # d 0.2 # dtype: float64 print(pd.DataFrame({'col0': s1, 'col1': s4})) # col0 col1 # a 0.0 NaN # b 1.0 0.1 # c 2.0 NaN # d NaN 0.2 print(pd.DataFrame([s1, s4])) # a b c d # 0 0.0 1.0 2.0 NaN # 1 NaN 0.1 NaN 0.2 print(pd.concat([s1, s4], axis=1, join='inner')) # 0 1 # b 1 0.1 # print(pd.DataFrame({'col0': s1.values, 'col1': s4.values})) # ValueError: arrays must all be same length print(pd.DataFrame([s1.values, s4.values])) # 0 1 2 # 0 0.0 1.0 2.0 # 1 0.1 0.2 NaN s4.index = ['a', 'b'] print(s4) # a 0.1 # b 0.2 # dtype: float64 print(pd.DataFrame({'col0': s1, 'col1': s4})) # col0 col1 # a 0 0.1 # b 1 0.2 # c 2 NaN print(pd.DataFrame({'col0': s1, 'col1': s4}).fillna(100)) # col0 col1 # a 0 0.1 # b 1 0.2 # c 2 100.0 print(s) # a 0 # b 1 # c 2 # dtype: int64 df = pd.DataFrame(s) print(df) # 0 # a 0 # b 1 # c 2 s[0] = 100 print(s) # a 100 # b 1 # c 2 # dtype: int64 print(df) # 0 # a 100 # b 1 # c 2 df_copy = pd.DataFrame(s, copy=True) print(df_copy) # 0 # a 100 # b 1 # c 2 s[1] = 100 print(s) # a 100 # b 100 # c 2 # dtype: int64 print(df_copy) # 0 # a 100 # b 1 # c 2 df_c = pd.concat([s1, s2], axis=1) print(df_c) # 0 1 # a 0 0.0 # b 1 0.1 # c 2 0.2 s1[0] = 100 print(s1) # a 100 # b 1 # c 2 # dtype: int64 print(df_c) # 0 1 # a 0 0.0 # b 1 0.1 # c 2 0.2 df_c_false = pd.concat([s1, s2], axis=1, copy=False) print(df_c_false) # 0 1 # a 100 0.0 # b 1 0.1 # c 2 0.2 s1[1] = 100 print(s1) # a 100 # b 100 # c 2 # dtype: int64 print(df_c_false) # 0 1 # a 100 0.0 # b 1 0.1 # c 2 0.2
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Momentum.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.python.platform import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf class MomentumOptimizerTest(tf.test.TestCase): def testBasic(self): with self.test_session(): var0 = tf.Variable([1.0, 2.0]) var1 = tf.Variable([3.0, 4.0]) grads0 = tf.constant([0.1, 0.1]) grads1 = tf.constant([0.01, 0.01]) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in tf.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in tf.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0.1, 0.1]), slot0.eval()) self.assertAllClose(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllClose(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllClose(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. 
self.assertAllClose( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) def testTensorLearningRateAndMomentum(self): with self.test_session(): var0 = tf.Variable([1.0, 2.0]) var1 = tf.Variable([3.0, 4.0]) grads0 = tf.constant([0.1, 0.1]) grads1 = tf.constant([0.01, 0.01]) mom_opt = tf.train.MomentumOptimizer( learning_rate=tf.constant(2.0), momentum=tf.constant(0.9)) mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in tf.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in tf.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0.1, 0.1]), slot0.eval()) self.assertAllClose(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllClose(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllClose(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. self.assertAllClose( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) def testFloat64(self): with self.test_session(): opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) # compute_gradients. values = [1.0, 3.0] good_vars = [tf.Variable([v]) for v in values] bad_loss = tf.constant(2.0, tf.float64, name="bad_loss") self.assertRaisesRegexp( ValueError, r"Invalid type.*float64.*bad_loss.*expected.*float32", opt.compute_gradients, bad_loss, good_vars) bad_vars = [ tf.Variable(np.array([v], np.float64), name="bad_var") for v in values] self.assertRaisesRegexp( ValueError, r"Invalid type.*float64.*bad_var.*expected.*float32", opt.compute_gradients, tf.cast(bad_vars[0] + bad_vars[1], tf.float32), bad_vars) opt.compute_gradients(good_vars[0] + good_vars[1], good_vars) # apply_gradients. 
bad_grads = [ tf.constant([0.1], dtype=np.float64, name="bad_grad"), tf.constant([0.01])] self.assertRaisesRegexp( ValueError, r"Invalid type.*float64.*bad_grad.*expected.*float32", opt.apply_gradients, zip(bad_grads, good_vars)) good_grads = [tf.constant([0.01]), tf.constant([0.02])] self.assertRaisesRegexp( ValueError, r"Invalid type.*float64.*bad_var.*expected.*float32", opt.apply_gradients, zip(good_grads, bad_vars)) opt.apply_gradients(zip(good_grads, good_vars)) def _dbParamsMom01(self): """Return dist-belief momentum values. Return values been generated from the dist-belief momentum unittest, running with a learning rate of 0.1 and a momemntum of 0.1. These values record how a parameter vector of size 10, initialized with 0.0, gets updated with 10 consecutive momentum steps. It uses random gradients. Returns: db_grad: The gradients to apply db_out: The parameters after the momentum update. """ db_grad = [[]] * 10 db_out = [[]] * 10 # pylint: disable=line-too-long db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615] db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618] db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521] db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544] db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311] db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189] db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648] db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303] db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287] db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544] db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229] db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717] db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251] db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997] db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922] db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418] db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227] db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781] db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 
0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711] db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295] # pylint: enable=line-too-long return db_grad, db_out def testLikeDistBeliefMom01(self): with self.test_session(): db_grad, db_out = self._dbParamsMom01() num_samples = len(db_grad) var0 = tf.Variable([0.0] * num_samples) grads0 = tf.constant([0.0] * num_samples) mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1) mom_update = mom_opt.apply_gradients(zip([grads0], [var0])) tf.initialize_all_variables().run() for i in xrange(num_samples): mom_update.run(feed_dict={grads0: db_grad[i]}) self.assertAllClose(np.array(db_out[i]), var0.eval()) def testSparse(self): with self.test_session(): var0 = tf.Variable(tf.zeros([4, 2])) var1 = tf.Variable( tf.constant(1.0, tf.float32, [4, 2])) grads0 = tf.IndexedSlices(tf.constant([[.1, .1]]), tf.constant([1]), tf.constant([4, 2])) grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]]), tf.constant([2, 3]), tf.constant([4, 2])) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) # Fetch params to validate initial values self.assertAllClose([0, 0], var0.eval()[0]) self.assertAllClose([0, 0], var0.eval()[1]) self.assertAllClose([1, 1], var1.eval()[2]) # Step 1: the momentum accumulators are 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0, 0]), slot0.eval()[0]) self.assertAllClose(np.array([.1, .1]), slot0.eval()[1]) self.assertAllClose(np.array([.01, .01]), slot1.eval()[2]) # Check that the parameters have been updated. self.assertAllClose(np.array([0, 0]), var0.eval()[0]) self.assertAllClose(np.array([- (0.1 * 2.0), - (0.1 * 2.0)]), var0.eval()[1]) self.assertAllClose(np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), var1.eval()[2]) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0, 0]), slot0.eval()[0]) self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()[1]) self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()[2]) # Check that the parameters have been updated. 
self.assertAllClose(np.array([0, 0]), var0.eval()[0]) self.assertAllClose( np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()[1]) self.assertAllClose(np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()[2]) def testSharing(self): with self.test_session(): var0 = tf.Variable([1.0, 2.0]) var1 = tf.Variable([3.0, 4.0]) grads0 = tf.constant([0.1, 0.1]) grads1 = tf.constant([0.01, 0.01]) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update1 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) mom_update2 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update1.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0.1, 0.1]), slot0.eval()) self.assertAllClose(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllClose(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllClose(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the second momentum accumulators contain the previous update. mom_update2.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. self.assertAllClose( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) if __name__ == "__main__": tf.test.main()
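# -----------------------------------------------------------------------------
# Reference sketch (not part of the original test file): the update rule that
# the assertions above encode, written out in plain NumPy. Names such as
# `momentum_step` are illustrative only; this is a hand-rolled check of
#   accum <- momentum * accum + grad
#   var   <- var - learning_rate * accum
# and it reuses the `np` module imported at the top of this file.
# -----------------------------------------------------------------------------
def momentum_step(var, accum, grad, learning_rate=2.0, momentum=0.9):
  """Apply one momentum update and return the new (var, accum) pair."""
  accum = momentum * accum + grad
  var = var - learning_rate * accum
  return var, accum


def _check_momentum_rule():
  """Recompute the two steps asserted in testBasic for var0/grads0."""
  var = np.array([1.0, 2.0])
  accum = np.zeros_like(var)
  grad = np.array([0.1, 0.1])
  var, accum = momentum_step(var, accum, grad)  # step 1: accum == grad
  var, accum = momentum_step(var, accum, grad)  # step 2: accum == 0.9 * 0.1 + 0.1
  np.testing.assert_allclose(
      var,
      [1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
       2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)])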
# Copyright 2022 The MT3 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dataset configurations.""" import dataclasses from typing import Mapping, Sequence, Union from mt3 import note_sequences import tensorflow as tf @dataclasses.dataclass class InferEvalSplit: # key in dictionary containing all dataset splits name: str # task name suffix (each eval split is a separate task) suffix: str # whether or not to include in the mixture of all eval tasks include_in_mixture: bool = True @dataclasses.dataclass class DatasetConfig: """Configuration for a transcription dataset.""" # dataset name name: str # mapping from split name to path paths: Mapping[str, str] # mapping from feature name to feature features: Mapping[str, Union[tf.io.FixedLenFeature, tf.io.FixedLenSequenceFeature]] # training split name train_split: str # training eval split name train_eval_split: str # list of infer eval split specs infer_eval_splits: Sequence[InferEvalSplit] # list of track specs to be used for metrics track_specs: Sequence[note_sequences.TrackSpec] = dataclasses.field( default_factory=list) MAESTROV1_CONFIG = DatasetConfig( name='maestrov1', paths={ 'train': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-?????-of-00010', 'train_subset': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-00002-of-00010', 'validation': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-?????-of-00010', 'validation_subset': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-0000[06]-of-00010', 'test': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_test.tfrecord-?????-of-00010' }, features={ 'audio': tf.io.FixedLenFeature([], dtype=tf.string), 'sequence': tf.io.FixedLenFeature([], dtype=tf.string), 'id': tf.io.FixedLenFeature([], dtype=tf.string) }, train_split='train', train_eval_split='validation_subset', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train_full', include_in_mixture=False), InferEvalSplit(name='train_subset', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation_full', include_in_mixture=False), InferEvalSplit(name='validation_subset', suffix='validation'), InferEvalSplit(name='test', suffix='test', include_in_mixture=False) ]) MAESTROV3_CONFIG = DatasetConfig( name='maestrov3', paths={ 'train': 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-?????-of-00025', 'train_subset': 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-00004-of-00025', 'validation': 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-?????-of-00025', 'validation_subset': 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-0002?-of-00025', 'test': 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_test.tfrecord-?????-of-00025' }, features={ 'audio': tf.io.FixedLenFeature([], dtype=tf.string), 'sequence': tf.io.FixedLenFeature([], 
dtype=tf.string), 'id': tf.io.FixedLenFeature([], dtype=tf.string) }, train_split='train', train_eval_split='validation_subset', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train_full', include_in_mixture=False), InferEvalSplit(name='train_subset', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation_full', include_in_mixture=False), InferEvalSplit(name='validation_subset', suffix='validation'), InferEvalSplit(name='test', suffix='test', include_in_mixture=False) ]) GUITARSET_CONFIG = DatasetConfig( name='guitarset', paths={ 'train': 'gs://mt3/data/datasets/guitarset/train.tfrecord-?????-of-00019', 'validation': 'gs://mt3/data/datasets/guitarset/validation.tfrecord-?????-of-00006', }, features={ 'sequence': tf.io.FixedLenFeature([], dtype=tf.string), 'audio': tf.io.FixedLenFeature([], dtype=tf.string), 'velocity_range': tf.io.FixedLenFeature([], dtype=tf.string), 'id': tf.io.FixedLenFeature([], dtype=tf.string), }, train_split='train', train_eval_split='validation', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation'), ]) URMP_CONFIG = DatasetConfig( name='urmp', paths={ 'train': 'gs://mt3/data/datasets/urmp/train.tfrecord', 'validation': 'gs://mt3/data/datasets/urmp/validation.tfrecord', }, features={ 'id': tf.io.FixedLenFeature([], dtype=tf.string), 'tracks': tf.io.FixedLenSequenceFeature( [], dtype=tf.int64, allow_missing=True), 'inst_names': tf.io.FixedLenSequenceFeature( [], dtype=tf.string, allow_missing=True), 'audio': tf.io.FixedLenFeature([], dtype=tf.string), 'sequence': tf.io.FixedLenFeature([], dtype=tf.string), 'instrument_sequences': tf.io.FixedLenSequenceFeature( [], dtype=tf.string, allow_missing=True), }, train_split='train', train_eval_split='validation', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation') ]) MUSICNET_CONFIG = DatasetConfig( name='musicnet', paths={ 'train': 'gs://mt3/data/datasets/musicnet/musicnet-train.tfrecord-?????-of-00036', 'validation': 'gs://mt3/data/datasets/musicnet/musicnet-validation.tfrecord-?????-of-00005', 'test': 'gs://mt3/data/datasets/musicnet/musicnet-test.tfrecord-?????-of-00003' }, features={ 'id': tf.io.FixedLenFeature([], dtype=tf.string), 'sample_rate': tf.io.FixedLenFeature([], dtype=tf.float32), 'audio': tf.io.FixedLenSequenceFeature( [], dtype=tf.float32, allow_missing=True), 'sequence': tf.io.FixedLenFeature([], dtype=tf.string) }, train_split='train', train_eval_split='validation', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation'), InferEvalSplit(name='test', suffix='test', include_in_mixture=False) ]) CERBERUS4_CONFIG = DatasetConfig( name='cerberus4', paths={ 'train': 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-?????-of-00286', 'train_subset': 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-00000-of-00286', 'validation': 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-?????-of-00212', 'validation_subset': 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-0000?-of-00212', 'test': 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_test_bass:drums:guitar:piano.tfrecord-?????-of-00106' }, features={ 'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64), 
'inst_names': tf.io.FixedLenSequenceFeature( [], dtype=tf.string, allow_missing=True), 'midi_class': tf.io.FixedLenSequenceFeature( [], dtype=tf.int64, allow_missing=True), 'mix': tf.io.FixedLenSequenceFeature( [], dtype=tf.float32, allow_missing=True), 'note_sequences': tf.io.FixedLenSequenceFeature( [], dtype=tf.string, allow_missing=True), 'plugin_name': tf.io.FixedLenSequenceFeature( [], dtype=tf.int64, allow_missing=True), 'program_num': tf.io.FixedLenSequenceFeature( [], dtype=tf.int64, allow_missing=True), 'slakh_class': tf.io.FixedLenSequenceFeature( [], dtype=tf.int64, allow_missing=True), 'src_ids': tf.io.FixedLenSequenceFeature( [], dtype=tf.string, allow_missing=True), 'stems': tf.io.FixedLenSequenceFeature( [], dtype=tf.float32, allow_missing=True), 'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64), 'target_type': tf.io.FixedLenFeature([], dtype=tf.string), 'track_id': tf.io.FixedLenFeature([], dtype=tf.string), }, train_split='train', train_eval_split='validation_subset', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train_full', include_in_mixture=False), InferEvalSplit(name='train_subset', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation_full', include_in_mixture=False), InferEvalSplit(name='validation_subset', suffix='validation'), InferEvalSplit(name='test', suffix='test', include_in_mixture=False) ], track_specs=[ note_sequences.TrackSpec('bass', program=32), note_sequences.TrackSpec('drums', is_drum=True), note_sequences.TrackSpec('guitar', program=24), note_sequences.TrackSpec('piano', program=0) ]) SLAKH_CONFIG = DatasetConfig( name='slakh', paths={ 'train': 'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-?????-of-02307', 'train_subset': 'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-00000-of-02307', 'validation': 'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-?????-of-00168', 'validation_subset': 'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-0000?-of-00168', 'test': 'gs://mt3/data/datasets/slakh/slakh_multi_full_test_all_inst.tfrecord-?????-of-00109' }, features={ 'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64), 'inst_names': tf.io.FixedLenSequenceFeature([], dtype=tf.string, allow_missing=True), 'midi_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True), 'mix': tf.io.FixedLenSequenceFeature([], dtype=tf.float32, allow_missing=True), 'note_sequences': tf.io.FixedLenSequenceFeature([], dtype=tf.string, allow_missing=True), 'plugin_name': tf.io.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True), 'program_num': tf.io.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True), 'slakh_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True), 'src_ids': tf.io.FixedLenSequenceFeature([], dtype=tf.string, allow_missing=True), 'stems': tf.io.FixedLenSequenceFeature([], dtype=tf.float32, allow_missing=True), 'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64), 'target_type': tf.io.FixedLenFeature([], dtype=tf.string), 'track_id': tf.io.FixedLenFeature([], dtype=tf.string), }, train_split='train', train_eval_split='validation_subset', infer_eval_splits=[ InferEvalSplit(name='train', suffix='eval_train_full', include_in_mixture=False), InferEvalSplit(name='train_subset', suffix='eval_train'), InferEvalSplit(name='validation', suffix='validation_full', include_in_mixture=False), InferEvalSplit(name='validation_subset', 
suffix='validation'), InferEvalSplit(name='test', suffix='test', include_in_mixture=False) ])
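# -----------------------------------------------------------------------------
# Hedged usage sketch, not taken from the MT3 codebase: one plausible way to
# turn a DatasetConfig into a parsed tf.data pipeline. The helper name
# `load_split` and the direct use of tf.data here are assumptions; the real
# training setup may wire these configs up differently (e.g. through seqio
# task definitions).
# -----------------------------------------------------------------------------
def load_split(config: DatasetConfig, split: str) -> tf.data.Dataset:
  """Read the TFRecord shards for `split` and parse them with config.features."""
  filenames = tf.data.Dataset.list_files(config.paths[split], shuffle=False)
  records = tf.data.TFRecordDataset(filenames)
  return records.map(
      lambda record: tf.io.parse_single_example(record, config.features),
      num_parallel_calls=tf.data.AUTOTUNE)


# Example use (assumes the gs:// paths above are readable from the runtime):
#   train = load_split(MAESTROV3_CONFIG, MAESTROV3_CONFIG.train_split)
#   first = next(iter(train.take(1)))
#   print(sorted(first.keys()))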
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import datetime from frappe import _ import frappe import frappe.database import frappe.utils import frappe.utils.user from frappe import conf from frappe.sessions import Session, clear_sessions, delete_session from frappe.modules.patch_handler import check_session_stopped from urllib import quote class HTTPRequest: def __init__(self): # Get Environment variables self.domain = frappe.request.host if self.domain and self.domain.startswith('www.'): self.domain = self.domain[4:] frappe.local.request_ip = frappe.get_request_header('REMOTE_ADDR') \ or frappe.get_request_header('X-Forwarded-For') or '127.0.0.1' # language self.set_lang(frappe.get_request_header('HTTP_ACCEPT_LANGUAGE')) # load cookies frappe.local.cookie_manager = CookieManager() # override request method. All request to be of type POST, but if _type == "POST" then commit if frappe.form_dict.get("_type"): frappe.local.request_method = frappe.form_dict.get("_type") del frappe.form_dict["_type"] # set db self.connect() # login frappe.local.login_manager = LoginManager() # write out latest cookies frappe.local.cookie_manager.init_cookies() # check status check_session_stopped() # load user self.setup_user() # run login triggers if frappe.form_dict.get('cmd')=='login': frappe.local.login_manager.run_trigger('on_session_creation') def set_lang(self, lang): from frappe.translate import guess_language_from_http_header frappe.local.lang = guess_language_from_http_header(lang) def setup_user(self): frappe.local.user = frappe.utils.user.User() def get_db_name(self): """get database name from conf""" return conf.db_name def connect(self, ac_name = None): """connect to db, from ac_name or db_name""" frappe.local.db = frappe.database.Database(user = self.get_db_name(), \ password = getattr(conf,'db_password', '')) class LoginManager: def __init__(self): self.user = None if frappe.local.form_dict.get('cmd')=='login' or frappe.local.request.path=="/api/method/login": self.login() else: self.make_session(resume=True) def login(self): # clear cache frappe.clear_cache(user = frappe.form_dict.get('usr')) self.authenticate() self.post_login() def is_valid_user(self, user_info): login_as = frappe.form_dict.get("login_as") if user_info.access_type != login_as: frappe.throw(_("<center><b>%s</b> can not login as <b>%s</b></center>"%(self.user,login_as)), frappe.AuthenticationError) def post_login(self): self.run_trigger('on_login') self.validate_ip_address() self.validate_hour() self.make_session() # self.is_valid_user() self.set_user_info() def set_user_info(self): frappe.local.cookie_manager.init_cookies() info = frappe.db.get_value("User", self.user, ["user_type", "first_name", "last_name", "user_image","access_type","profile_id"], as_dict=1) vd=frappe.db.get_value("Verification Details",{"email":self.user},["mflag","name","verification_for"],as_dict=1) if info.user_type == "Website User": # check for valid user login details, provider can not login from patient login form and vice-versa # self.is_valid_user(info) # anand frappe.local.response["mob_v_req"] = 'No' # if vd and vd.mflag == 0 and info.access_type == "Patient": # frappe.local.response["mob_v_req"] = 'Yes' # frappe.local.cookie_manager.set_cookie("system_user", "no") # frappe.local.response["message"] = "No App" # if info.access_type == 'Patient': # frappe.local.response["access_link"] = "/patient" # 
frappe.local.cookie_manager.set_cookie("user_type","patient") # elif info.access_type == 'Provider': # frappe.local.response["access_link"] = "/provider" # frappe.local.cookie_manager.set_cookie("user_type","provider") # # # check if provider is verified or not # # is_verified = "No" # # if vd.verification_for == "Provider": # # is_verified = "Yes" if frappe.db.get_value("Provider",{"provider_id":vd.name}, "is_verified") else "No" # # frappe.local.response["is_provider_verified"] = is_verified frappe.local.response["mob_v_req"] = 'No' if vd and vd.mflag == 0 and info.access_type == "Patient": frappe.local.response["mob_v_req"] = 'Yes' frappe.local.cookie_manager.set_cookie("system_user", "no") frappe.local.response["message"] = "No App" if info.access_type == 'Patient': frappe.local.response["access_link"] = "/patient" frappe.local.response["access_role"] = "Patient" frappe.local.cookie_manager.set_cookie("user_type","patient") elif info.access_type == 'Provider': frappe.local.response["access_link"] = "/provider" frappe.local.response["access_role"] = "Provider" frappe.local.cookie_manager.set_cookie("user_type","provider") elif info.access_type == 'Lab': frappe.local.response["access_link"] = "/lab" frappe.local.response["access_role"] = "Lab" frappe.local.cookie_manager.set_cookie("user_type","lab") elif info.access_type == 'Delivery Boy': frappe.local.response["access_link"] = "/delivery_boy" frappe.local.response["access_role"] = "Delivery Boy" frappe.local.cookie_manager.set_cookie("user_type","delivery_boy") db_parent = self.get_db_parent(info.profile_id) if db_parent == "Medical Store": frappe.local.response["check_for_status"] = "Waiting For Patients Confirmation" if db_parent == "Stockist": frappe.local.response["access_link"] = "Waiting For Chemist Confirmation" elif info.access_type == 'Chemist': frappe.local.response["access_role"] = "Chemist" frappe.response["name"] = frappe.db.get_value("Medical Store", {"store_id": info.profile_id}, "name") elif info.access_type == 'Stockist': frappe.local.response["access_role"] = "Stockist" frappe.local.response["name"] = frappe.db.get_value("Stockist", {"stockist_id": info.profile_id}, "name") elif info.access_type == 'Admin': frappe.local.response["access_link"] = "/products" else: frappe.local.cookie_manager.set_cookie("system_user", "yes") frappe.local.response['message'] = 'Logged In' full_name = " ".join(filter(None, [info.first_name, info.last_name])) frappe.response["full_name"] = full_name frappe.local.cookie_manager.set_cookie("full_name", full_name) frappe.local.cookie_manager.set_cookie("user_id", self.user) if vd: frappe.response["profile_id"] = vd.get('name') frappe.local.cookie_manager.set_cookie("profile_id", vd.name) elif info.profile_id: frappe.response["profile_id"] = info.profile_id frappe.local.cookie_manager.set_cookie("profile_id", info.profile_id) frappe.local.cookie_manager.set_cookie("user_image", info.user_image or "") def get_db_parent(Self, profile_id): return frappe.get_doc("Chemist Delivery Team", {"provider_id": profile_id}).parenttype def make_session(self, resume=False): # start session frappe.local.session_obj = Session(user=self.user, resume=resume) # reset user if changed to Guest self.user = frappe.local.session_obj.user print ["data", self.user] frappe.local.session = frappe.local.session_obj.data def authenticate(self, user=None, pwd=None): print ["data",user, pwd] if not (user and pwd): user, pwd = frappe.form_dict.get('usr'), frappe.form_dict.get('pwd') if not (user and pwd): self.fail('Incomplete 
login details') print user self.check_if_enabled(user) self.user = self.check_password(user, pwd) def check_if_enabled(self, user): """raise exception if user not enabled""" from frappe.utils import cint if user=='Administrator': return if not cint(frappe.db.get_value('User', user, 'enabled')): self.fail('User disabled or missing') def check_password(self, user, pwd): """check password""" user = frappe.db.sql("""select `user` from __Auth where `user`=%s and `password`=password(%s)""", (user, pwd)) if not user: self.fail('Incorrect password') else: return user[0][0] # in correct case def fail(self, message): frappe.local.response['message'] = message raise frappe.AuthenticationError def run_trigger(self, event='on_login'): for method in frappe.get_hooks().get(event, []): frappe.call(frappe.get_attr(method), login_manager=self) def validate_ip_address(self): """check if IP Address is valid""" ip_list = frappe.db.get_value('User', self.user, 'restrict_ip', ignore=True) if not ip_list: return ip_list = ip_list.replace(",", "\n").split('\n') ip_list = [i.strip() for i in ip_list] for ip in ip_list: if frappe.local.request_ip.startswith(ip): return frappe.throw(_("Not allowed from this IP Address"), frappe.AuthenticationError) def validate_hour(self): """check if user is logging in during restricted hours""" login_before = int(frappe.db.get_value('User', self.user, 'login_before', ignore=True) or 0) login_after = int(frappe.db.get_value('User', self.user, 'login_after', ignore=True) or 0) if not (login_before or login_after): return from frappe.utils import now_datetime current_hour = int(now_datetime().strftime('%H')) if login_before and current_hour > login_before: frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError) if login_after and current_hour < login_after: frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError) def login_as_guest(self): """login as guest""" self.user = 'Guest' self.post_login() def logout(self, arg='', user=None): if not user: user = frappe.session.user self.run_trigger('on_logout') if user == frappe.session.user: delete_session(frappe.session.sid) self.clear_cookies() else: clear_sessions(user) def clear_cookies(self): clear_cookies() class CookieManager: def __init__(self): self.cookies = {} self.to_delete = [] def init_cookies(self): if not frappe.local.session.get('sid'): return # sid expires in 3 days expires = datetime.datetime.now() + datetime.timedelta(days=3) if frappe.session.sid: self.cookies["sid"] = {"value": frappe.session.sid, "expires": expires} if frappe.session.session_country: self.cookies["country"] = {"value": frappe.session.get("session_country")} def set_cookie(self, key, value, expires=None): self.cookies[key] = {"value": value, "expires": expires} def delete_cookie(self, to_delete): if not isinstance(to_delete, (list, tuple)): to_delete = [to_delete] self.to_delete.extend(to_delete) def flush_cookies(self, response): for key, opts in self.cookies.items(): response.set_cookie(key, quote((opts.get("value") or "").encode('utf-8')), expires=opts.get("expires")) # expires yesterday! 
		expires = datetime.datetime.now() + datetime.timedelta(days=-1)
		for key in set(self.to_delete):
			response.set_cookie(key, "", expires=expires)

def _update_password(user, password):
	frappe.db.sql("""insert into __Auth (user, `password`)
		values (%s, password(%s))
		on duplicate key update `password`=password(%s)""", (user, password, password))
	return "done"

@frappe.whitelist()
def get_logged_user():
	return frappe.session.user

def clear_cookies():
	if hasattr(frappe.local, "session"):
		frappe.session.sid = ""
	frappe.local.cookie_manager.delete_cookie(["full_name", "user_id", "sid", "user_image",
		"system_user", "profile_id", "user_type"])
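# ------------------------------------------------------------------------
# Standalone sketch, not part of frappe: the restricted-hours rule from
# LoginManager.validate_hour above, extracted as a pure function so the
# boundary conditions are easy to see. The name `is_login_hour_allowed` is
# illustrative; a value of 0 for either bound disables that bound, matching
# the `or 0` defaults used in validate_hour.
# ------------------------------------------------------------------------
def is_login_hour_allowed(current_hour, login_before=0, login_after=0):
	"""Return True if a login at `current_hour` (0-23) is permitted."""
	if not (login_before or login_after):
		return True
	if login_before and current_hour > login_before:
		return False
	if login_after and current_hour < login_after:
		return False
	return True

# e.g. with login_after=9 and login_before=18, logins are allowed only
# between 09:xx and 18:xx inclusive:
#   is_login_hour_allowed(23, login_before=18, login_after=9)  -> False
#   is_login_hour_allowed(10, login_before=18, login_after=9)  -> True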
# Copyright (c) 2006-2009 The Trustees of Indiana University. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # - Neither the Indiana University nor the names of its contributors may be used # to endorse or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # SPU Iterator Hierarchy import array import corepy.spre.spe as spe import corepy.lib.extarray as extarray import corepy.arch.spu.platform as env import corepy.arch.spu.isa as spu import corepy.arch.spu.types.spu_types as var import corepy.arch.spu.lib.dma as dma import corepy.arch.spu.lib.util as util def _mi(cls): """ Return the machine order for an instruction. """ return cls.machine_inst._machine_order CTR = 0 DEC = 1 INC = 2 # ------------------------------------------------------------ # Helpers # ------------------------------------------------------------ _array_type = type(array.array('I', [1])) _extarray_type = type(extarray.extarray('I', [1])) def _typecode(a): if type(a) in (_array_type, _extarray_type): return a.typecode elif type(a) is memory_desc: return a.typecode else: raise Exception('Unknown array type ' + type(a)) def _array_address(a): if type(a) in (_array_type, _extarray_type): return a.buffer_info()[0] elif type(a) is memory_desc: return a.addr else: raise Exception('Unknown array type ' + type(a)) # ------------------------------ # PPU Memory # ------------------------------ # TODO - AWF - this isn't use by anything except for spu_vec_iter. Why? # Even then most of this class doesn't appear to be used. class memory_desc(object): def __init__(self, typecode, addr = None, size = None): self.typecode = typecode self.addr = addr self.size = size self.r_addr = None self.r_size = None return def __str__(self): return '<memory_desc typcode = %s addr = 0x%X size = %d r_addr = %s r_size = %s>' % ( self.typecode, self.addr, self.size, str(self.r_addr), str(self.r_size)) def set_addr_reg(self, reg): self.r_addr = reg def set_size_reg(self, reg): self.r_size = reg def __len__(self): return self.size def nbytes(self): return self.size * var.INT_SIZES[self.typecode] def from_buffer(self, b): """ Extract the address and size from a buffer object. Note: this doesn't very well with buffer objects. 
""" l = repr(b).split(' ') self.size = int(l[l.index('size') + 1]) self.addr = int(l[l.index('ptr') + 1][:-1], 0) # print l, self.size, self.addr return def from_ibuffer(self, m): """ Extract the address and size from an object that supports the buffer interface. This should be more flexible than the buffer object. """ self.addr, self.size = synbuffer.buffer_info(m) self.size = self.size / var.INT_SIZES[self.typecode] return def from_array(self, a): self.addr, self.size = a.buffer_info() return def get(self, code, lsa, tag = 1): return self._transfer_data(code, dma.mfc_get, lsa, tag) def put(self, code, lsa, tag = 2): return self._transfer_data(code, dma.mfc_put, lsa, tag) def _transfer_data(self, code, kernel, lsa, tag): """ Load the data into the SPU memory """ # Check the types if not isinstance(code, spe.InstructionStream): raise Exception('Code must be an InstructionStream') if not (isinstance(lsa, int) or issubclass(type(lsa), (spe.Register, spe.Variable))): raise Exception('lsa must be an integer, Register, or Variable') old_code = spu.get_active_code() spu.set_active_code(code) # Acquire registers for address and size, if they were not supplied by the user if self.r_addr is None: r_ea_data = code.prgm.acquire_register() else: r_ea_data = self.r_addr if self.r_size is None: r_size = code.prgm.acquire_register() else: r_size = self.r_size # Create variables ea_addr = var.SignedWord(reg = r_ea_data) aligned_size = var.SignedWord(0) mod_16 = var.SignedWord(0xF) # Initialize the lsa_addr variable. if isinstance(lsa, int): # From a constant ls_addr = var.SignedWord(lsa) elif issubclass(type(lsa), (spe.Register, spe.Variable)): # From a variable ls_addr = var.SignedWord() ls_addr.v = lsa tag_var = var.SignedWord(tag) cmp = var.SignedWord(0) # Load the effective address if self.r_addr is None: if self.addr % 16 != 0: print '[get_memory] Misaligned data' util.load_word(code, ea_addr, self.addr) # Load the size, rounding up as required to be 16-byte aligned if self.r_size is None: rnd_size = self.size * var.INT_SIZES[self.typecode] if rnd_size < 16: rnd_size = 16 elif (rnd_size % 16) != 0: rnd_size += (16 - (rnd_size % 16)) util.load_word(code, aligned_size, rnd_size) else: # TODO: !!! UNIT TEST THIS !!! 
# Same as above, but using SPU arithemtic to round size = var.SignedWord(reg = r_size) sixteen = var.SignedWord(16) cmp.v = ((size & mod_16) == size) aligned_size.v = size + (sixteen - (size & mod_16)) spu.selb(aligned_size.reg, size.reg, aligned_size.reg, cmp.reg, order = _mi(spu.selb)) code.release_register(sixteen.reg) # Use an auxillary register for the moving ea value if the # caller supplied the address register if self.r_addr is not None: ea_load = var.SignedWord(0) ea_load.v = ea_addr else: ea_load = ea_addr # note that this is reference, not .v assignment # Transfer parameters buffer_size = var.SignedWord(16384) remaining = var.SignedWord(0) transfer_size = var.SignedWord(0) remaining.v = aligned_size # Set up the iterators to transfer at most 16k at a time xfer_iter = syn_iter(code, 0, 16384) xfer_iter.set_stop_reg(aligned_size.reg) for offset in xfer_iter: cmp.v = buffer_size > remaining spu.selb(transfer_size, buffer_size, remaining, cmp) # Transfer the data kernel(code, ls_addr, ea_load, transfer_size, tag_var) ls_addr.v = ls_addr + buffer_size ea_load.v = ea_load + buffer_size remaining.v = remaining - buffer_size # Set the tag bit to tag dma.mfc_write_tag_mask(code, 1<<tag); # Wait for the transfer to complete dma.mfc_read_tag_status_all(code); # Release the registers code.release_register(buffer_size.reg) code.release_register(remaining.reg) code.release_register(aligned_size.reg) code.release_register(transfer_size.reg) code.release_register(cmp.reg) code.release_register(ls_addr.reg) code.release_register(tag_var.reg) code.release_register(ea_load.reg) if old_code is not None: spu.set_active_code(old_code) return # ------------------------------------------------------------ # Iterators # ------------------------------------------------------------ class syn_iter(object): def __init__(self, code, count, step = 1, mode = INC, hint = True): object.__init__(self) self.code = code self.mode = mode self.hint = hint self.state = 0 self.n = count self.step = step self.r_count = None self.r_stop = None self.r_step = None self.current_count = None self.start_label = None self.continue_label = None self.r_start = None self._external_start = False self._external_stop = False return def get_acquired_registers(self): """ This is a minor hack that returns a list of the acquired registers. It is intended to allow the caller to re-acquire the registers after the loop completes in cases where 'subroutines' that are called from the loop have not yet been synthesized. By re-requiring the registers, the caller can ensure that the subroutines do not corrupt data in them. TODO: This is a temporary fix until a better resource management scheme is implemented. """ regs = [self.r_count] if self.r_step is not None: regs.append(self.r_step) if not self._external_stop: regs.append(self.r_stop) return regs def set_start_reg(self, reg): self._external_start = True self.r_start = reg return def set_stop_reg(self, reg): self._external_stop = True self.r_stop = reg return def get_start(self): """ Used in INC mode to start the count from somewhere other than zero. Has no effect on CTR or DEC modes. 
""" return 0 def get_count(self): return self.n def n_steps(self): return self.n / self.step def step_size(self): return self.step def start(self, align = True, branch = True): """Do pre-loop iteration initialization""" if self.r_count is None: self.r_count = self.code.prgm.acquire_register() if self.mode == DEC: if self._external_start: self.code.add(spu.ai(self.r_count, self.r_start, 0)) else: util.load_word(self.code, self.r_count, self.get_count()) elif self.mode == INC: if self.r_stop is None and branch: self.r_stop = self.code.prgm.acquire_register() if self._external_start: self.code.add(spu.ai(self.r_count, self.r_start, 0)) else: util.load_word(self.code, self.r_count, self.get_start()) if branch and not self._external_stop: util.load_word(self.code, self.r_stop, self.get_count()) # /end mode if if self.r_count is not None: self.current_count = var.SignedWord(code = self.code, reg = self.r_count) # If the step size doesn't fit in an immediate value, store it in a register # (-512 < word < 511): if not (-512 < self.step_size() < 511): self.r_step = self.code.prgm.acquire_register() util.load_word(self.code, self.r_step, self.step_size()) # Label self.start_label = self.code.prgm.get_unique_label("SYN_ITER_START") self.code.add(self.start_label) # Create continue/branch labels so they can be referenced; they will be # added to the code in their appropriate locations. self.branch_label = self.code.prgm.get_unique_label("SYN_ITER_BRANCH") self.continue_label = self.code.prgm.get_unique_label("SYN_ITER_CONTINUE") return def setup(self): """Do beginning-of-loop iterator setup/initialization""" return def get_current(self): return self.current_count def cleanup(self): """Do end-of-loop iterator code""" # Update the current count if self.mode == DEC: if self.r_step is not None: self.code.add(spu.sf(self.r_count, self.r_step, self.r_count)) else: self.code.add(spu.ai( self.r_count, self.r_count, -self.step_size())) elif self.mode == INC: if self.r_step is not None: self.code.add(spu.a(self.r_count, self.r_count, self.r_step)) else: self.code.add(spu.ai(self.r_count, self.r_count, self.step_size())) return def end(self, branch = True): """Do post-loop iterator code""" if self.hint == True: self.code.add(spu.hbrr(self.branch_label, self.start_label)) if self.mode == DEC: # branch if r_count is not zero (CR) # Note that this relies on someone (e.g. cleanup()) setting the # condition register properly. if branch: self.code.add(self.branch_label) self.code.add(spu.brnz(self.r_count, self.start_label)) # Reset the counter in case this is a nested loop util.load_word(self.code, self.r_count, self.get_count()) elif self.mode == INC: # branch if r_current < r_stop if branch: r_cmp_gt = self.code.prgm.acquire_register() self.code.add(spu.cgt(r_cmp_gt, self.r_stop, self.r_count)) self.code.add(self.branch_label) self.code.add(spu.brnz(r_cmp_gt, self.start_label)) self.code.prgm.release_register(r_cmp_gt) # Reset the the current value in case this is a nested loop if self._external_start: self.code.add(spu.ai(self.r_count, self.r_start, 0)) else: util.load_word(self.code, self.r_count, self.get_start()) if self.r_count is not None: self.code.prgm.release_register(self.r_count) if self.r_stop is not None and not self._external_stop: self.code.prgm.release_register(self.r_stop) return def add_continue(self, code, idx, branch_inst = spu.br): """ Insert a branch instruction to branch to the end of the loop. """ #if self.continue_label is None: # raise Exception('Continue point not set. 
Has the loop been synthesized yet?') #next = (self.continue_label - idx) # print 'Continue:', next, idx, self.continue_label #code[idx] = branch_inst(next) #code[idx] = branch_inst(self.continue_label) code.add(branch_inst(self.continue_label)) return def __iter__(self): self.start() return self def next(self): if self.state == 0: self.state = 1 self.setup() return self.get_current() else: self.code.add(self.continue_label) self.cleanup() self.end() raise StopIteration return class syn_range(syn_iter): """ Purpose: Iterate a set number of times and make the current iteration count available as a variable. """ def __init__(self, code, start, stop = None, step = 1): if stop is None: stop = start start = 0 syn_iter.__init__(self, code, stop, step = step, mode = INC) self.istart = start return def get_start(self): return self.istart def _overlap(lsa, lsb, size): lsa_in_lsb = (lsa < lsb) and (lsa + size) > lsb lsb_in_lsa = (lsb < lsa) and (lsb + size) > lsa return lsa_in_lsb or lsb_in_lsa _strides = {'b':1, 'h':2, 'i':4, 'B':1, 'H':2, 'I':4, 'f':4, 'd':8} _vector_sizes = {'b':16, 'h':8, 'i':4, 'B':16, 'H':8, 'I':4, 'f':4} class spu_vec_iter(syn_iter): """ Purpose: Iterate over the values as vectors. """ def __init__(self, code, data, step = 1, length = None, store_only = False, addr_reg = None, save = True, type_cls = None): self.var_type = type_cls or var.array_spu_lu[data.typecode] if type(data) not in (_array_type, _extarray_type, memory_desc): raise Exception('Unsupported array type') if _typecode(data) not in _vector_sizes.keys(): raise Exception('Unsupported array data type for vector operations: ' + data.typecode) stop = 0 self.data = data self.addr_reg = addr_reg self.store_only = store_only self.save = save if length is None: length = len(data) t = _typecode(data) step = (step * _vector_sizes[_typecode(data)]) * _strides[t] stop = _strides[t] * length # len(data) self.typecode = t syn_iter.__init__(self, code, stop, step, mode = INC) self.r_current = None self.r_addr = None self.current_var = None return def get_acquired_registers(self): """ See comment in syn_iter. 
""" regs = syn_iter.get_acquired_registers(self) regs.append(self.r_current) if self.addr_reg is None: regs.append(self.r_addr) return regs def get_current(self): return self.current_var def load_current(self): return self.code.add(spu.lqx(self.r_current, self.r_addr, self.r_count)) def store_current(self): return self.code.add(spu.stqx(self.r_current, self.r_addr, self.r_count)) def make_current(self): return self.var_type(code = self.code, reg = self.r_current) def init_address(self): if self.addr_reg is None: return util.load_word(self.code, self.r_addr, _array_address(self.data)) def start(self, align = True, branch = True): self.r_current = self.code.prgm.acquire_register() # addr_reg is the user supplied address for the data if self.addr_reg is None: self.r_addr = self.code.prgm.acquire_register() else: self.r_addr = self.addr_reg syn_iter.start(self, align, branch) self.current_var = self.make_current() self.init_address() # print self.r_count, self.r_stop, self.r_current, self.r_addr, self.data.buffer_info()[0] return def setup(self): if not self.store_only: self.load_current() syn_iter.setup(self) return def cleanup(self): if self.current_var.assigned and self.save: self.store_current() syn_iter.cleanup(self) return def end(self, branch = True): if self.r_current is not None: self.code.prgm.release_register(self.r_current) if self.r_addr is not None and self.addr_reg is None: self.code.prgm.release_register(self.r_addr) syn_iter.end(self, branch) return class stream_buffer(syn_range): """ Manage a buffered data stream from main memory. """ def __init__(self, code, ea, data_size, buffer_size, ls, buffer_mode='single', save = False): """Stream buffer. If save is True, buffers will be written back to main memory.""" syn_range.__init__(self, code, ea, ea + data_size, buffer_size) # Buffer addresses if buffer_mode == 'single': self.lsa = ls self.lsb = ls elif buffer_mode == 'double': if type(ls) is list: if _overlap(ls[0], ls[1], buffer_size): raise Exception('Local store buffers overlap') self.lsa, self.lsb = ls else: # Assume contiguous buffers: lsa = ls, lsb = ls + buffer_size self.lsa = ls self.lsb = ls + buffer_size else: raise Exception('Unknown buffering mode: ' + buffer_mode) self.buffer_mode = buffer_mode self.save = save self.ls = None self.tag = None self.buffer_size = None self.ibuffer_size = buffer_size return def set_ea_addr_reg(self, reg): self.set_start_reg(reg) return def set_ea_size_reg(self, reg): self.set_stop_reg(reg) return # ------------------------------ # Buffer management # ------------------------------ def _toggle(self, var): """ Use rotate to toggle between two preferred slot values in a vector. """ if self.buffer_mode == 'double': self.code.add(spu.rotqbyi(var.reg, var.reg, 4)) return def _swap_buffers(self): return def _load_buffer(self): # TODO - AWF - some optimization is possible here. # rather than skipping around the DMA get on the last iteration, short out # of the loop completely. Saves doing the check twice.. # Also as soon as we do this first check, we know we are going to go # through the loop again. Again, no need for a second conditional at the # end, just increment counters and always branch. A hint could be added # right before the DMA get. 
# Don't perform the load the last time through the loop r_cmp = self.code.prgm.acquire_register() # Compare count == step self.code.add(spu.ceq(r_cmp, self.r_stop, self.r_count)) # Create a skip label and add the branch skip_label = self.code.prgm.get_unique_label("STREAM_BUFFER_SKIP") self.code.add(spu.brnz(r_cmp, skip_label)) # Start the DMA get dma.mfc_get(self.code, self.ls, syn_range.get_current(self), self.buffer_size, self.tag) # Add the branch label self.code.add(skip_label) self.code.prgm.release_register(r_cmp) return def _save_buffer(self): dma.mfc_put(self.code, self.ls, syn_range.get_current(self), self.buffer_size, self.tag) return def _wait_buffer(self): # TODO - BUG HERE!! # Here's what happens: a variable 'mask' is created, then used. When this # code finishes with the variable, it calls mask.release_register() to # release the underlying register, which is no longer needed. But, # release_register() sets mask.reg to None. Although it appears mask would # go out of scope here and be garbage collected, it does not! mask is # still referred to by self.code, since instructions have been added that # reference it. The problem is that if these instructions ever need to be # rendered again -- like say, for print_code() -- mask.reg.reg is None, # which makes it impossible to render the instruction. mask = var.SignedWord(1, self.code) mask.v = mask << self.tag dma.mfc_write_tag_mask(self.code, mask) reg = dma.mfc_read_tag_status_all(self.code) self.code.prgm.release_register(reg) #mask.release_register() return # ------------------------------ # Iterator methods # ------------------------------ def get_current(self): """ Overload current to return the local buffer address. Use syn_range.get_current(self) to get the ea/count variable. """ return self.ls def _inc_ea(self): """ Increment the ea/count register by step size. This is used for double buffering. """ if self.r_step is not None: vstep = var.SignedWord(code = self.code, reg = self.r_step) self.current_count.v = self.current_count + vstep else: self.current_count.v = self.current_count + self.step_size() return def _dec_ea(self): """ Decrement the ea/count register by step size. This is used for double buffering. 
""" if self.r_step is not None: vstep = var.SignedWord(code = self.code, reg = self.r_step) self.current_count.v = self.current_count - vstep else: self.current_count.v = self.current_count - self.step_size() return def start(self, align = True, branch = True): """Do pre-loop iteration initialization""" syn_range.start(self, align = align, branch = branch) if not hasattr(self, 'skip_start_post'): self._start_post() return def _start_post(self): # Initialize the buffer size self.buffer_size = var.SignedWord(self.ibuffer_size, self.code) # Initialize the ls and tag vectors with (optionally) alternating values if self.buffer_mode == 'single': self.ls = var.SignedWord(self.lsa, self.code) self.tag = var.SignedWord(1, self.code) else: self.ls = var.SignedWord(array.array('i', [self.lsa, self.lsb, self.lsa, self.lsb]), self.code) self.tag = var.SignedWord(array.array('i', [1, 2, 1, 2]), self.code) # For double buffering, load the first buffer self._load_buffer() # Update the start label (make a new one and add it) self.start_label = self.code.prgm.get_unique_label("STREAM_BUFFER_START") self.code.add(self.start_label) return def setup(self): """Do beginning-of-loop iterator setup/initialization""" syn_range.setup(self) # Toggle the tag and set the ls to next if self.buffer_mode == 'double': self._toggle(self.tag) self._toggle(self.ls) self._inc_ea() # Start the transfer of next if self.save: self._wait_buffer() self._load_buffer() # Reset tag/ls if self.buffer_mode == 'double': self._toggle(self.tag) self._toggle(self.ls) self._dec_ea() # Wait for current to complete self._wait_buffer() return def cleanup(self): """Do end-of-loop iterator code""" # Save current if self.save: self._save_buffer() # Swap buffers self._toggle(self.tag) self._toggle(self.ls) # Update the counter syn_range.cleanup(self) return def end(self, branch = True): """Do post-loop iterator code""" syn_range.end(self, branch = branch) self.code.prgm.release_register(self.ls.reg) self.code.prgm.release_register(self.tag.reg) self.code.prgm.release_register(self.buffer_size.reg) return class zip_iter: pass class parallel(object): def __init__(self, obj): object.__init__(self) self.obj = obj if type(obj.code.prgm) is not env.ParallelProgram: raise Exception("ParallelProgram required") if obj.code.prgm.raw_data_size is not None: print 'Warning (parallel): raw_data_size is already set' if type(self.obj) is zip_iter: self.obj.iters = [parallel(i) for i in self.obj.iters] self.state = 0 return def get_start(self): return self.obj.get_start() def get_count(self): return self.obj.get_count() def n_steps(self): return self.obj.n_steps() def step_size(self): return self.obj.step_size() def setup(self): return self.obj.setup() def get_current(self): return self.obj.get_current() def cleanup(self): return self.obj.cleanup() def end(self, branch = True): return self.obj.end(branch) def _update_inc_count(self): code = self.obj.code code.prgm.acquire_block_registers() r_block_size = code.prgm.r_block_size r_offset = code.prgm.r_offset # Determine the block size for each loop code.prgm.raw_data_size = self.get_count() - self.get_start() # synppc.load_word(code, r_block_size, self.get_count() - self.get_start()) # code.add(synppc.ppc.divw(r_block_size, r_block_size, code.r_size)) # Determine the offset for the current block and update the r_count # (this is primarily for range, which uses different values in r_count # to initialize ranges that don't start at 0) # code.add(synppc.ppc.mullw(r_offset, code.r_rank, r_block_size)) 
code.add(spu.a(self.obj.r_count, r_offset, self.obj.r_count)) # Offset is rank * block_size # Count is count + offset # Stop is count + block_size if self.obj.r_stop is not None: code.add(spu.a(self.obj.r_stop, r_block_size, self.obj.r_count)) # code.prgm.release_register(r_offset) # code.prgm.release_register(r_block_size) return def start(self, align = True, branch = True): # HACK to get double buffering and parallel working together if hasattr(self.obj, '_start_post'): self.obj.skip_start_post = True self.obj.start(align = False, branch = branch) code = self.obj.code # replace count with rank if self.obj.mode == CTR: raise Exception('Parallel CTR loops not supported') elif self.obj.mode == DEC: raise Exception('Parallel DEC loops not supported') elif self.obj.mode == INC: self._update_inc_count() if align and branch: self.obj.code.align(16) # Align the start of the loop on a 16 byte boundary # while (code.size()) % 4 != 0: # if code.size() % 2 == 0: # code.add(spu.nop(0)) # else: # code.add(spu.lnop(0)) # Update the real iterator's label self.obj.start_label = code.prgm.get_unique_label("PARALLEL_START") # HACK end if hasattr(self.obj, '_start_post'): self.obj._start_post() return def end(self, branch = True): self.obj.end(branch) if self.obj.mode == CTR and branch: raise Exception('Parallel CTR loops not supported') elif self.obj.mode == DEC: raise Exception('Parallel DEC loops not supported') elif self.obj.mode == INC: self._update_inc_count() return def init_address(self): # Call syn_iters init self.code self.obj.init_address(self) # Update the address with the offset # For variable iterators, this is the value already computed for r_count self.obj.code.add(spu.a(self.r_addr, self.obj.r_count, self.r_addr)) return def __iter__(self): self.start() return self def next(self): if self.state == 0: self.state = 1 self.setup() return self.get_current() else: self.cleanup() self.end() raise StopIteration return # ------------------------------------------------------------ # Tests # ------------------------------------------------------------ def TestSPUIter(): size = 32 data = extarray.extarray('I', range(size)) prgm = env.Program() code = prgm.get_stream() r_ea_data = prgm.acquire_register() r_ls_data = prgm.acquire_register() r_size = prgm.acquire_register() r_tag = prgm.acquire_register() #print 'array ea: %X' % (data.buffer_info()[0]) #print 'r_zero = %s, ea_data = %s, ls_data = %s, r_size = %s, r_tag = %s' % ( # str(code.r_zero), str(r_ea_data), str(r_ls_data), str(r_size), str(r_tag)) # Load the effective address util.load_word(code, r_ea_data, data.buffer_info()[0]) # Load the size util.load_word(code, r_size, size * 4) # Load the tag code.add(spu.ai(r_tag, code.r_zero, 12)) # Load the lsa code.add(spu.ai(r_ls_data, code.r_zero, 0)) # Load the data into address 0 dma.mfc_get(code, r_ls_data, r_ea_data, r_size, r_tag) # Set the tag bit to 12 dma.mfc_write_tag_mask(code, 1<<12); # Wait for the transfer to complete dma.mfc_read_tag_status_all(code); # Increment the data values by 1 using an unrolled loop (no branches) # r_current = code.acquire_register() current = var.SignedWord(0, code) # Use an SPU iter for lsa in syn_iter(code, size * 4, 16): code.add(spu.lqx(current, code.r_zero, lsa)) # code.add(spu.ai(1, r_current, r_current)) current.v = current + current code.add(spu.stqx(current, code.r_zero, lsa)) # code.prgm.release_register(r_current) #current.release_register(code) # Store the values back to main memory # Load the tag code.add(spu.ai(r_tag, code.r_zero, 13)) # Load the 
data into address 0 dma.mfc_put(code, r_ls_data, r_ea_data, r_size, r_tag) # Set the tag bit to 12 dma.mfc_write_tag_mask(code, 1<<13); # Wait for the transfer to complete dma.mfc_read_tag_status_all(code); # Cleanup prgm.release_register(r_ea_data) prgm.release_register(r_ls_data) prgm.release_register(r_size) prgm.release_register(r_tag) # Stop for debugging # code.add(spu.stop(0xA)) # Execute the code prgm.add(code) proc = env.Processor() r = proc.execute(prgm) for i in range(0, size): assert(data[i] == i + i) return def TestSPUParallelIter(data, size, n_spus = 6, buffer_size = 16, run_code = True): import time # n_spus = 8 # buffer_size = 16 # 16 ints/buffer # n_buffers = 4 # 4 buffers/spu # n_buffers = size / buffer_size # size = buffer_size * n_buffers * n_spus # data = array.array('I', range(size + 2)) #data = env.aligned_memory(n, typecode = 'I') #data.copy_to(data_array.buffer_info()[0], len(data_array)) # print 'Data align: 0x%X, %d' % (data.buffer_info()[0], data.buffer_info()[0] % 16) code = env.ParallelInstructionStream() # code = env.InstructionStream() r_zero = code.acquire_register() r_ea_data = code.acquire_register() r_ls_data = code.acquire_register() r_size = code.acquire_register() r_tag = code.acquire_register() # Load zero util.load_word(code, r_zero, 0) # print 'array ea: 0x%X 0x%X' % (data.buffer_info()[0], long(data.buffer_info()[0])) # print 'r_zero = %d, ea_data = %d, ls_data = %d, r_size = %d, r_tag = %d' % ( # r_zero, r_ea_data, r_ls_data, r_size, r_tag) # Load the effective address if data.buffer_info()[0] % 16 == 0: util.load_word(code, r_ea_data, data.buffer_info()[0]) else: util.load_word(code, r_ea_data, data.buffer_info()[0] + 8) ea_start = data.buffer_info()[0] # Iterate over each buffer for ea in parallel(syn_range(code, ea_start, ea_start + size * 4 , buffer_size * 4)): # ea = var.SignedWord(code = code, reg = r_ea_data) # print 'n_iters:', size / buffer_size # for i in syn_range(code, size / buffer_size): # code.add(spu.stop(0xB)) # Load the size util.load_word(code, r_size, buffer_size * 4) # Load the tag code.add(spu.ai(r_tag, r_zero, 12)) # Load the lsa code.add(spu.ai(r_ls_data, r_zero, 0)) # Load the data into address 0 dma.mfc_get(code, r_ls_data, ea, r_size, r_tag) # Set the tag bit to 12 dma.mfc_write_tag_mask(code, 1<<12); # Wait for the transfer to complete dma.mfc_read_tag_status_all(code); # Increment the data values by 1 using an unrolled loop (no branches) # r_current = code.acquire_register() current = var.SignedWord(0, code) count = var.SignedWord(0, code) # Use an SPU iter for lsa in syn_iter(code, buffer_size * 4, 16): code.add(spu.lqx(current, r_zero, lsa)) # code.add(spu.ai(1, r_current, r_current)) current.v = current + current code.add(spu.stqx(current, r_zero, lsa)) count.v = count + 1 code.add(spu.stqx(count, r_zero, 0)) # code.release_register(r_current) current.release_registers(code) # Store the values back to main memory # Load the tag code.add(spu.ai(r_tag, r_zero, 13)) # Load the data into address 0 dma.mfc_put(code, r_ls_data, ea.reg, r_size, r_tag) # Set the tag bit to 13 dma.mfc_write_tag_mask(code, 1<<13); # Wait for the transfer to complete dma.mfc_read_tag_status_all(code); # code.add(spu.stop(0xB)) # Update ea # ea.v = ea + (buffer_size * 4) # /for ea address # Cleanup code.release_register(r_zero) code.release_register(r_ea_data) code.release_register(r_ls_data) code.release_register(r_size) code.release_register(r_tag) if not run_code: return code # Stop for debugging # code.add(spu.stop(0xA)) # Execute the code 
proc = env.Processor() #data.copy_from(data_array.buffer_info()[0], len(data_array)) def print_blocks(): for i in range(0, size, buffer_size): # print data[i:(i + buffer_size)] print data[i + buffer_size], print '' # print_blocks() s = time.time() r = proc.execute(code, n_spus = n_spus) # r = proc.execute(code) t = time.time() - s # print_blocks() return t # LOG = {1:0, 2:1, 4:2, 8:3} def ParallelTests(): max_exp = 16 max_size = pow(2, max_exp) print 'Creating data...' data = extarray.extarray('I', range(max_size)) print 'Executing Tests...' # t = TestSPUParallelIter(data, 8192, n_spus = 1, buffer_size = 128) # return i = 0 for exponent in range(13, max_exp + 1): size = pow(2, exponent) for n_spus in [1, 2, 4]: # Increase the buffer size until to the largest possible factor for the # number of SPUs or 4096 (*4=16k), whichever is smaller for buffer_exp in range(2, min(exponent - LOG[n_spus] - 2, 12)): buffer_size = pow(2, buffer_exp) # for buffer_size in [4]: t = 0.0 print 'try\t%d\t%d\t%d\t-.-' % (size, n_spus, buffer_size) # for i in range(10): t += TestSPUParallelIter(data, size, n_spus = n_spus, buffer_size = buffer_size) print 'test\t%d\t%d\t%d\t%.8f' % (size, n_spus, buffer_size, t / 10.0) # print 'count:', i i += 1 return def TestStreamBufferSingle(n_spus = 1): n = 1024 a = extarray.extarray('I', range(n)) buffer_size = 128 if n_spus > 1: prgm = env.ParallelProgram() else: prgm = env.Program() code = prgm.get_stream() current = var.SignedWord(0, code) addr = a.buffer_info()[0] stream = stream_buffer(code, addr, n * 4, buffer_size, 0, save = True) if n_spus > 1: stream = parallel(stream) #r_bufsize = code.acquire_register() #r_lsa = code.acquire_register() #r_current = code.acquire_register() for buffer in stream: #util.load_word(code, r_bufsize, buffer_size) #code.add(spu.il(r_lsa, 0)) #loop = code.size() #code.add(spu.lqx(r_current, buffer, r_lsa)) #code.add(spu.a(r_current, r_current, r_current)) #code.add(spu.stqx(r_current, buffer, r_lsa)) #code.add(spu.ai(r_bufsize, r_bufsize, -16)) #code.add(spu.ai(r_lsa, r_lsa, 16)) #code.add(spu.brnz(r_bufsize, loop - code.size())) for lsa in syn_iter(code, buffer_size, 16): code.add(spu.lqx(current, lsa, buffer)) current.v = current + current #current.v = 5 code.add(spu.stqx(current, lsa, buffer)) prgm.add(code) proc = env.Processor() r = proc.execute(prgm, n_spus = n_spus) for i in range(0, n): assert(a[i] == i + i) return def TestVecIter(n_spus = 1): n = 1024 a = extarray.extarray('I', range(n)) buffer_size = 16 if n_spus > 1: prgm = env.ParallelProgram() else: prgm = env.Program() code = prgm.get_stream() current = var.SignedWord(0, code) stream = stream_buffer(code, a.buffer_info()[0], n * 4, buffer_size, 0, save = True) if n_spus > 1: stream = parallel(stream) md = memory_desc('i', 0, buffer_size) for buffer in stream: for current in spu_vec_iter(code, md): current.v = current + current prgm.add(code) proc = env.Processor() r = proc.execute(prgm, n_spus = n_spus) for i in range(0, n): assert(a[i] == i + i) return def TestContinueLabel(n_spus = 1): n = 1024 a = extarray.extarray('I', range(n)) buffer_size = 16 if n_spus > 1: prgm = env.ParallelProgram() else: prgm = env.Program() code = prgm.get_stream() current = var.SignedWord(0, code) test = var.SignedWord(0, code) four = var.SignedWord(4, code) stream = stream_buffer(code, a.buffer_info()[0], n * 4, buffer_size, 0, save = True) if n_spus > 1: stream = parallel(stream) md = memory_desc('i', 0, buffer_size) lsa_iter = spu_vec_iter(code, md) for buffer in stream: for current in 
lsa_iter: current.v = current + current test.v = (current == four) code.add(spu.gbb(test, test)) #lbl_continue = code.add(spu.stop(0xC)) - 1 # Place holder for the continue #lsa_iter.add_continue(code, 0, lambda lbl, reg = test.reg: spu.brz(reg, lbl)) code.add(spu.brz(test.reg, lsa_iter.continue_label)) current.v = current + current #lsa_iter.add_continue(code, lbl_continue, lambda next, reg = test.reg: spu.brz(reg, next)) prgm.add(code) proc = env.Processor() r = proc.execute(prgm, n_spus = n_spus) for i in range(0, n): if i >= 4: assert(a[i] == i + i) else: #print a[i] assert(a[i] == i * 4) return def TestStreamBufferDouble(n_spus = 1): n = 2048 a = extarray.extarray('I', range(n)) buffer_size = 32 if n_spus > 1: prgm = env.ParallelProgram() else: prgm = env.Program() code = prgm.get_stream() current = var.SignedWord(0, code) addr = a.buffer_info()[0] n_bytes = n * 4 #print 'addr 0x%(addr)x %(addr)d' % {'addr':a.buffer_info()[0]}, n_bytes, buffer_size stream = stream_buffer(code, addr, n_bytes, buffer_size, 0, buffer_mode='double', save = True) if n_spus > 1: stream = parallel(stream) for buffer in stream: for lsa in syn_iter(code, buffer_size, 16): code.add(spu.lqx(current, lsa, buffer)) current.v = current + current code.add(spu.stqx(current, lsa, buffer)) prgm.add(code) proc = env.Processor() r = proc.execute(prgm, n_spus = n_spus) for i in range(0, len(a)): assert(a[i] == i + i) return # def TestMemoryMap(n_spus = 1): # import mmap # import os # filename = 'spuiter.TestMemoryMap.dat' # n = 8192 # print 'hello' # # Create a file # fw = open(filename, 'w') # fw.write('-' * (8192 + 32)) # fw.close() # # Open the file again for memory mapping # f = open(filename, 'r+') # size = os.path.getsize(filename) # m = mmap.mmap(f.fileno(), n) # print 'size:', size, n # # Create a memory descriptor # md = memory_desc('I', size = size) # md.from_ibuffer(m) # if n_spus > 1: code = env.ParallelInstructionStream() # else: code = env.InstructionStream() # current = var.SignedWord(0, code) # X = var.SignedWord(0x58585858, code) # buffer_size = 16 # # code.add(spu.stop(0xB)) # stream = stream_buffer(code, md.addr, md.size, buffer_size, 0, buffer_mode='double', save = True) # if n_spus > 1: stream = parallel(stream) # for buff in stream: # for lsa in syn_iter(code, buffer_size, 16): # code.add(spu.lqx(buff.reg, lsa.reg, current.reg)) # current.v = X # code.add(spu.stqx(buff.reg, lsa.reg, current.reg)) # proc = env.Processor() # r = proc.execute(code, n_spus = n_spus) # for i in range(0, n): # , buffer_size / 4): # # print a[i:(i+buffer_size/4)] # # assert(a[i] == i + i) # pass # return # def TestBranchHinting(): # import time # code = env.InstructionStream() # a = var.SignedWord(0, code) # s = time.time() # for i in syn_iter(code, pow(2, 16), hint=False): # a.v = a + a # e = time.time() - s # print "Without hint: ", e # s = time.time() # for i in syn_iter(code, pow(2, 16), hint=True): # a.v = a + a # e = time.time() - s # print "With hint: ", e # return if __name__=='__main__': TestSPUIter() TestVecIter() ## TestMemoryMap(1) TestContinueLabel() TestStreamBufferSingle(1) TestStreamBufferDouble(4) # TestSPUParallelIter() # ParallelTests() # TestZipIter()
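# ------------------------------------------------------------
# Reference sketch (illustrative only)
# ------------------------------------------------------------
# The stream_buffer iterator above implements double buffering: while the
# current buffer is being processed, the transfer of the next buffer is
# already in flight, and cleanup() toggles the tag/ls pair to swap buffers.
# The plain-Python sketch below shows roughly that pattern; it is not CorePy
# code, and dma_get/dma_put/_double_buffer_sketch are hypothetical stand-ins
# for the mfc_get/mfc_put transfers the real iterator synthesizes.
def _double_buffer_sketch(data, buffer_size):
    """Process `data` in chunks, prefetching chunk i+1 while chunk i is used."""
    def dma_get(offset):                    # stand-in for dma.mfc_get
        return list(data[offset:offset + buffer_size])

    def dma_put(offset, chunk):             # stand-in for dma.mfc_put
        data[offset:offset + buffer_size] = chunk

    buffers = [None, None]                  # two "local store" buffers
    current = 0
    buffers[current] = dma_get(0)           # load the first buffer up front
    for offset in range(0, len(data), buffer_size):
        nxt = offset + buffer_size
        if nxt < len(data):                 # start fetching the next buffer
            buffers[1 - current] = dma_get(nxt)
        buffers[current] = [x + x for x in buffers[current]]  # work on current
        dma_put(offset, buffers[current])   # save current back
        current = 1 - current               # toggle buffers, as cleanup() does
    return data

# Mirrors the assertion pattern in TestStreamBufferDouble:
#   assert _double_buffer_sketch(list(range(16)), 4) == [i + i for i in range(16)]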
# # ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image # Copyright (C) 2017 Christian Zimmermann # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import print_function, unicode_literals import tensorflow as tf import os import cv2 from general import * ops = NetworkOps class ColorHandPose3DNetwork(object): """ Network performing 3D pose estimation of a human hand from a single color image. """ def __init__(self): self.crop_size = 256 self.num_kp = 21 def init(self, session, weight_files=None, exclude_var_list=None): """ Initializes weights from pickled python dictionaries. Inputs: session: tf.Session, Tensorflow session object containing the network graph weight_files: list of str, Paths to the pickle files that are used to initialize network weights exclude_var_list: list of str, Weights that should not be loaded """ if exclude_var_list is None: exclude_var_list = list() exclude_var_list.append('PosePrior/conv_pose_0_1/biases') exclude_var_list.append('PosePrior/conv_pose_0_1/weights') exclude_var_list.append('PosePrior/conv_pose_0_2/biases') exclude_var_list.append('PosePrior/conv_pose_0_2/weights') exclude_var_list.append('PosePrior/conv_pose_1_1/biases') exclude_var_list.append('PosePrior/conv_pose_1_1/weights') exclude_var_list.append('PosePrior/conv_pose_1_2/biases') exclude_var_list.append('PosePrior/conv_pose_1_2/weights') exclude_var_list.append('PosePrior/conv_pose_2_1/biases') exclude_var_list.append('PosePrior/conv_pose_2_1/weights') exclude_var_list.append('PosePrior/conv_pose_2_2/biases') exclude_var_list.append('PosePrior/conv_pose_2_2/weights') exclude_var_list.append('PosePrior/fc_rel0/biases') exclude_var_list.append('PosePrior/fc_rel0/weights') exclude_var_list.append('PosePrior/fc_rel1/biases') exclude_var_list.append('PosePrior/fc_rel1/weights') exclude_var_list.append('PosePrior/fc_xyz/biases') exclude_var_list.append('PosePrior/fc_xyz/weights') exclude_var_list.append('ViewpointNet/conv_vp_0_1/biases') exclude_var_list.append('ViewpointNet/conv_vp_0_1/weights') exclude_var_list.append('ViewpointNet/conv_vp_0_2/biases') exclude_var_list.append('ViewpointNet/conv_vp_0_2/weights') exclude_var_list.append('ViewpointNet/conv_vp_1_1/biases') exclude_var_list.append('ViewpointNet/conv_vp_1_1/weights') exclude_var_list.append('ViewpointNet/conv_vp_1_2/biases') exclude_var_list.append('ViewpointNet/conv_vp_1_2/weights') exclude_var_list.append('ViewpointNet/conv_vp_2_1/biases') exclude_var_list.append('ViewpointNet/conv_vp_2_1/weights') exclude_var_list.append('ViewpointNet/conv_vp_2_2/biases') exclude_var_list.append('ViewpointNet/conv_vp_2_2/weights') exclude_var_list.append('ViewpointNet/fc_vp0/biases') exclude_var_list.append('ViewpointNet/fc_vp0/weights') exclude_var_list.append('ViewpointNet/fc_vp1/biases') exclude_var_list.append('ViewpointNet/fc_vp1/weights') exclude_var_list.append('ViewpointNet/fc_vp_ux/biases') 
exclude_var_list.append('ViewpointNet/fc_vp_ux/weights') exclude_var_list.append('ViewpointNet/fc_vp_uy/biases') exclude_var_list.append('ViewpointNet/fc_vp_uy/weights') exclude_var_list.append('ViewpointNet/fc_vp_uz/biases') exclude_var_list.append('ViewpointNet/fc_vp_uz/weights') import pickle if weight_files is None: #weight_files = ['./weights/handsegnet-rhd.pickle', './weights/posenet3d-rhd-stb-slr-finetuned.pickle'] weight_files = ['./weights/posenet3d-rhd-stb-slr-finetuned.pickle'] # Initialize with weights for file_name in weight_files: assert os.path.exists(file_name), "File not found." with open(file_name, 'rb') as fi: weight_dict = pickle.load(fi) weight_dict = {k: v for k, v in weight_dict.items() if not any([x in k for x in exclude_var_list])} if len(weight_dict) > 0: init_op, init_feed = tf.contrib.framework.assign_from_values(weight_dict) session.run(init_op, init_feed) print('Loaded %d variables from %s' % (len(weight_dict), file_name)) def inference(self, image, hand_side, evaluation): """ Full pipeline: HandSegNet + PoseNet + PosePrior. Inputs: image: [B, H, W, 3] tf.float32 tensor, Image with mean subtracted hand_side: [B, 2] tf.float32 tensor, One hot encoding if the image is showing left or right side evaluation: [] tf.bool tensor, True while evaluation false during training (controls dropout) Outputs: hand_scoremap: [B, H, W, 2] tf.float32 tensor, Scores for background and hand class image_crop: [B, 256, 256, 3] tf.float32 tensor, Hand cropped input image scale_crop: [B, 1] tf.float32 tensor, Scaling between input image and image_crop center: [B, 1] tf.float32 tensor, Center of image_crop wrt to image keypoints_scoremap: [B, 256, 256, 21] tf.float32 tensor, Scores for the hand keypoints keypoint_coord3d: [B, 21, 3] tf.float32 tensor, Normalized 3D coordinates """ # use network for hand segmentation for detection #hand_scoremap = self.inference_detection(image) #hand_scoremap = hand_scoremap[-1] #print('Original image.get_shape : ',image.get_shape()) # Intermediate data processing #hand_mask = single_obj_scoremap(hand_scoremap) #center, _, crop_size_best = calc_center_bb(hand_mask) #crop_size_best *= 1.25 #scale_crop = tf.minimum(tf.maximum(self.crop_size / crop_size_best, 0.25), 5.0) #image_crop = crop_image_from_xy(image, center, self.crop_size, scale=scale_crop) # detect keypoints in 2D keypoints_scoremap = self.inference_pose2d(image) #keypoints_scoremap = self.inference_pose2d(image) keypoints_scoremap = keypoints_scoremap[-1] # estimate most likely 3D pose # keypoint_coord3d = self._inference_pose3d(keypoints_scoremap, hand_side, evaluation) # upsample keypoint scoremap s = image.get_shape().as_list() #s = image_crop.get_shape().as_list() keypoints_scoremap = tf.image.resize_images(keypoints_scoremap, (s[1], s[2])) return keypoints_scoremap #return image_crop, scale_crop, center, keypoints_scoremap, keypoint_coord3d def inference2d(self, image): """ Only 2D part of the pipeline: HandSegNet + PoseNet. 
Inputs: image: [B, H, W, 3] tf.float32 tensor, Image with mean subtracted Outputs: image_crop: [B, 256, 256, 3] tf.float32 tensor, Hand cropped input image scale_crop: [B, 1] tf.float32 tensor, Scaling between input image and image_crop center: [B, 1] tf.float32 tensor, Center of image_crop wrt to image keypoints_scoremap: [B, 256, 256, 21] tf.float32 tensor, Scores for the hand keypoints """ # use network for hand segmentation for detection hand_scoremap = self.inference_detection(image) hand_scoremap = hand_scoremap[-1] # Intermediate data processing hand_mask = single_obj_scoremap(hand_scoremap) center, _, crop_size_best = calc_center_bb(hand_mask) crop_size_best *= 1.25 scale_crop = tf.minimum(tf.maximum(self.crop_size / crop_size_best, 0.25), 5.0) image_crop = crop_image_from_xy(image, center, self.crop_size, scale=scale_crop) # detect keypoints in 2D s = image_crop.get_shape().as_list() keypoints_scoremap = self.inference_pose2d(image_crop) keypoints_scoremap = keypoints_scoremap[-1] keypoints_scoremap = tf.image.resize_images(keypoints_scoremap, (s[1], s[2])) return keypoints_scoremap, image_crop, scale_crop, center @staticmethod def inference_detection(image, train=False): """ HandSegNet: Detects the hand in the input image by segmenting it. Inputs: image: [B, H, W, 3] tf.float32 tensor, Image with mean subtracted train: bool, True in case weights should be trainable Outputs: scoremap_list_large: list of [B, 256, 256, 2] tf.float32 tensor, Scores for the hand segmentation classes """ with tf.variable_scope('HandSegNet'): scoremap_list = list() layers_per_block = [2, 2, 4, 4] out_chan_list = [64, 128, 256, 512] pool_list = [True, True, True, False] # learn some feature representation, that describes the image content well x = image for block_id, (layer_num, chan_num, pool) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1): for layer_id in range(layer_num): x = ops.conv_relu(x, 'conv%d_%d' % (block_id, layer_id+1), kernel_size=3, stride=1, out_chan=chan_num, trainable=train) if pool: x = ops.max_pool(x, 'pool%d' % block_id) x = ops.conv_relu(x, 'conv5_1', kernel_size=3, stride=1, out_chan=512, trainable=train) encoding = ops.conv_relu(x, 'conv5_2', kernel_size=3, stride=1, out_chan=128, trainable=train) # use encoding to detect initial scoremap x = ops.conv_relu(encoding, 'conv6_1', kernel_size=1, stride=1, out_chan=512, trainable=train) scoremap = ops.conv(x, 'conv6_2', kernel_size=1, stride=1, out_chan=2, trainable=train) scoremap_list.append(scoremap) # upsample to full size s = image.get_shape().as_list() scoremap_list_large = [tf.image.resize_images(x, (s[1], s[2])) for x in scoremap_list] return scoremap_list_large def inference_pose2d(self, image_crop, train=False): """ PoseNet: Given an image it detects the 2D hand keypoints. The image should already contain a rather tightly cropped hand. 
Inputs: image: [B, H, W, 3] tf.float32 tensor, Image with mean subtracted train: bool, True in case weights should be trainable Outputs: scoremap_list_large: list of [B, 256, 256, 21] tf.float32 tensor, Scores for the hand keypoints """ with tf.variable_scope('PoseNet2D'): scoremap_list = list() layers_per_block = [2, 2, 4, 2] out_chan_list = [64, 128, 256, 512] pool_list = [True, True, True, False] # learn some feature representation, that describes the image content well x = image_crop for block_id, (layer_num, chan_num, pool) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1): for layer_id in range(layer_num): x = ops.conv_relu(x, 'conv%d_%d' % (block_id, layer_id+1), kernel_size=3, stride=1, out_chan=chan_num, trainable=train) if pool: x = ops.max_pool(x, 'pool%d' % block_id) x = ops.conv_relu(x, 'conv4_3', kernel_size=3, stride=1, out_chan=256, trainable=train) x = ops.conv_relu(x, 'conv4_4', kernel_size=3, stride=1, out_chan=256, trainable=train) x = ops.conv_relu(x, 'conv4_5', kernel_size=3, stride=1, out_chan=256, trainable=train) x = ops.conv_relu(x, 'conv4_6', kernel_size=3, stride=1, out_chan=256, trainable=train) encoding = ops.conv_relu(x, 'conv4_7', kernel_size=3, stride=1, out_chan=128, trainable=train) # use encoding to detect initial scoremap x = ops.conv_relu(encoding, 'conv5_1', kernel_size=1, stride=1, out_chan=512, trainable=train) scoremap = ops.conv(x, 'conv5_2', kernel_size=1, stride=1, out_chan=self.num_kp, trainable=train) scoremap_list.append(scoremap) # iterate recurrent part a couple of times layers_per_recurrent_unit = 5 num_recurrent_units = 2 for pass_id in range(num_recurrent_units): x = tf.concat([scoremap_list[-1], encoding], 3) for rec_id in range(layers_per_recurrent_unit): x = ops.conv_relu(x, 'conv%d_%d' % (pass_id+6, rec_id+1), kernel_size=7, stride=1, out_chan=128, trainable=train) x = ops.conv_relu(x, 'conv%d_6' % (pass_id+6), kernel_size=1, stride=1, out_chan=128, trainable=train) scoremap = ops.conv(x, 'conv%d_7' % (pass_id+6), kernel_size=1, stride=1, out_chan=self.num_kp, trainable=train) scoremap_list.append(scoremap) scoremap_list_large = scoremap_list return scoremap_list_large def _inference_pose3d(self, keypoints_scoremap, hand_side, evaluation, train=False): """ PosePrior + Viewpoint: Estimates the most likely normalized 3D pose given 2D detections and hand side. Inputs: keypoints_scoremap: [B, 32, 32, 21] tf.float32 tensor, Scores for the hand keypoints hand_side: [B, 2] tf.float32 tensor, One hot encoding if the image is showing left or right side evaluation: [] tf.bool tensor, True while evaluation false during training (controls dropout) train: bool, True in case weights should be trainable Outputs: coord_xyz_rel_normed: [B, 21, 3] tf.float32 tensor, Normalized 3D coordinates """ # infer coordinates in the canonical frame coord_can = self._inference_pose3d_can(keypoints_scoremap, hand_side, evaluation, train=train) # infer viewpoint rot_mat = self._inference_viewpoint(keypoints_scoremap, hand_side, evaluation, train=train) # flip hand according to hand side cond_right = tf.equal(tf.argmax(hand_side, 1), 1) cond_right_all = tf.tile(tf.reshape(cond_right, [-1, 1, 1]), [1, self.num_kp, 3]) coord_xyz_can_flip = self._flip_right_hand(coord_can, cond_right_all) # rotate view back coord_xyz_rel_normed = tf.matmul(coord_xyz_can_flip, rot_mat) return coord_xyz_rel_normed def _inference_pose3d_can(self, keypoints_scoremap, hand_side, evaluation, train=False): """ Inference of canonical coordinates. 
""" with tf.variable_scope('PosePrior'): # use encoding to detect relative, normed 3d coords x = keypoints_scoremap # this is 28x28x21 s = x.get_shape().as_list() out_chan_list = [32, 64, 128] for i, out_chan in enumerate(out_chan_list): x = ops.conv_relu(x, 'conv_pose_%d_1' % i, kernel_size=3, stride=1, out_chan=out_chan, trainable=train) x = ops.conv_relu(x, 'conv_pose_%d_2' % i, kernel_size=3, stride=2, out_chan=out_chan, trainable=train) # in the end this will be 4x4xC # Estimate relative 3D coordinates out_chan_list = [512, 512] x = tf.reshape(x, [s[0], -1]) x = tf.concat([x, hand_side], 1) for i, out_chan in enumerate(out_chan_list): x = ops.fully_connected_relu(x, 'fc_rel%d' % i, out_chan=out_chan, trainable=train) x = ops.dropout(x, 0.8, evaluation) coord_xyz_rel = ops.fully_connected(x, 'fc_xyz', out_chan=self.num_kp*3, trainable=train) # reshape stuff coord_xyz_rel = tf.reshape(coord_xyz_rel, [s[0], self.num_kp, 3]) return coord_xyz_rel def _inference_viewpoint(self, keypoints_scoremap, hand_side, evaluation, train=False): """ Inference of the viewpoint. """ with tf.variable_scope('ViewpointNet'): # estimate rotation ux, uy, uz = self._rotation_estimation(keypoints_scoremap, hand_side, evaluation, train=train) # assemble rotation matrix rot_mat = self._get_rot_mat(ux, uy, uz) return rot_mat @staticmethod def _rotation_estimation(scoremap2d, hand_side, evaluation, train=False): """ Estimates the rotation from canonical coords to realworld xyz. """ # conv down scoremap to some reasonable length x = tf.concat([scoremap2d], 3) s = x.get_shape().as_list() out_chan_list = [64, 128, 256] for i, out_chan in enumerate(out_chan_list): x = ops.conv_relu(x, 'conv_vp_%d_1' % i, kernel_size=3, stride=1, out_chan=out_chan, trainable=train) x = ops.conv_relu(x, 'conv_vp_%d_2' % i, kernel_size=3, stride=2, out_chan=out_chan, trainable=train) # in the end this will be 4x4x128 # flatten x = tf.reshape(x, [s[0], -1]) # this is Bx2048 x = tf.concat([x, hand_side], 1) # Estimate Viewpoint --> 3 params out_chan_list = [256, 128] for i, out_chan in enumerate(out_chan_list): x = ops.fully_connected_relu(x, 'fc_vp%d' % i, out_chan=out_chan, trainable=train) x = ops.dropout(x, 0.75, evaluation) ux = ops.fully_connected(x, 'fc_vp_ux', out_chan=1, trainable=train) uy = ops.fully_connected(x, 'fc_vp_uy', out_chan=1, trainable=train) uz = ops.fully_connected(x, 'fc_vp_uz', out_chan=1, trainable=train) return ux, uy, uz def _get_rot_mat(self, ux_b, uy_b, uz_b): """ Returns a rotation matrix from axis and (encoded) angle.""" with tf.name_scope('get_rot_mat'): u_norm = tf.sqrt(tf.square(ux_b) + tf.square(uy_b) + tf.square(uz_b) + 1e-8) theta = u_norm # some tmp vars st_b = tf.sin(theta) ct_b = tf.cos(theta) one_ct_b = 1.0 - tf.cos(theta) st = st_b[:, 0] ct = ct_b[:, 0] one_ct = one_ct_b[:, 0] norm_fac = 1.0 / u_norm[:, 0] ux = ux_b[:, 0] * norm_fac uy = uy_b[:, 0] * norm_fac uz = uz_b[:, 0] * norm_fac trafo_matrix = self._stitch_mat_from_vecs([ct+ux*ux*one_ct, ux*uy*one_ct-uz*st, ux*uz*one_ct+uy*st, uy*ux*one_ct+uz*st, ct+uy*uy*one_ct, uy*uz*one_ct-ux*st, uz*ux*one_ct-uy*st, uz*uy*one_ct+ux*st, ct+uz*uz*one_ct]) return trafo_matrix @staticmethod def _flip_right_hand(coords_xyz_canonical, cond_right): """ Flips the given canonical coordinates, when cond_right is true. Returns coords unchanged otherwise. The returned coordinates represent those of a left hand. 
Inputs: coords_xyz_canonical: Nx3 matrix, containing the coordinates for each of the N keypoints """ with tf.variable_scope('flip-right-hand'): expanded = False s = coords_xyz_canonical.get_shape().as_list() if len(s) == 2: coords_xyz_canonical = tf.expand_dims(coords_xyz_canonical, 0) cond_right = tf.expand_dims(cond_right, 0) expanded = True # mirror along y axis coords_xyz_canonical_mirrored = tf.stack([coords_xyz_canonical[:, :, 0], coords_xyz_canonical[:, :, 1], -coords_xyz_canonical[:, :, 2]], -1) # select mirrored in case it was a right hand coords_xyz_canonical_left = tf.where(cond_right, coords_xyz_canonical_mirrored, coords_xyz_canonical) if expanded: coords_xyz_canonical_left = tf.squeeze(coords_xyz_canonical_left, [0]) return coords_xyz_canonical_left @staticmethod def _stitch_mat_from_vecs(vector_list): """ Stitches a given list of vectors into a 3x3 matrix. Input: vector_list: list of 9 tensors, which will be stitched into a matrix. list contains matrix elements in a row-first fashion (m11, m12, m13, m21, m22, m23, m31, m32, m33). Length of the vectors has to be the same, because it is interpreted as batch dimension. """ assert len(vector_list) == 9, "There have to be exactly 9 tensors in vector_list." batch_size = vector_list[0].get_shape().as_list()[0] vector_list = [tf.reshape(x, [1, batch_size]) for x in vector_list] trafo_matrix = tf.dynamic_stitch([[0], [1], [2], [3], [4], [5], [6], [7], [8]], vector_list) trafo_matrix = tf.reshape(trafo_matrix, [3, 3, batch_size]) trafo_matrix = tf.transpose(trafo_matrix, [2, 0, 1]) return trafo_matrix
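# _get_rot_mat() above treats (ux, uy, uz) as an axis-angle vector whose norm
# is the rotation angle, and the 3x3 matrix it stitches together is the
# standard Rodrigues rotation matrix.  The NumPy helper below is a minimal,
# single-sample sketch of the same construction, for reference only; the name
# rodrigues_rot_mat is not part of the network, and the eps term mirrors the
# 1e-8 used in _get_rot_mat.
import numpy as np  # assumed available (TensorFlow already depends on it)


def rodrigues_rot_mat(u, eps=1e-8):
    """Rotation matrix for axis-angle vector u (angle = |u|, axis = u / |u|)."""
    u = np.asarray(u, dtype=np.float64)
    theta = np.sqrt(np.dot(u, u) + eps)
    ux, uy, uz = u / theta
    st, ct = np.sin(theta), np.cos(theta)
    one_ct = 1.0 - ct
    return np.array([
        [ct + ux * ux * one_ct,      ux * uy * one_ct - uz * st,  ux * uz * one_ct + uy * st],
        [uy * ux * one_ct + uz * st, ct + uy * uy * one_ct,       uy * uz * one_ct - ux * st],
        [uz * ux * one_ct - uy * st, uz * uy * one_ct + ux * st,  ct + uz * uz * one_ct],
    ])

# e.g. rodrigues_rot_mat([0.0, 0.0, np.pi / 2]) is (up to eps) a 90 degree
# rotation about z, and the result is always orthonormal:
#   np.allclose(rodrigues_rot_mat(u).dot(rodrigues_rot_mat(u).T), np.eye(3))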
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import argparse import fnmatch import logging import json import os import sys import time import unittest from builtins import str from collections import OrderedDict from autothreadharness.harness_case import HarnessCase from autothreadharness.open_thread_controller import OpenThreadController from autothreadharness import settings logging.basicConfig(level=logging.INFO) logger = logging.getLogger() """Logger: The global logger""" logger.setLevel(logging.INFO) RESUME_SCRIPT_PATH = ( '%appdata%\\Microsoft\\Windows\\Start Menu\\Programs\\' 'Startup\\continue_harness.bat' ) class SimpleTestResult(unittest.TestResult): executions = 0 def __init__( self, path, auto_reboot_args=None, keep_explorer=False, add_all_devices=False, ): """Record test results in json file Args: path (str): File path to record the results auto_reboot (bool): Whether reboot when harness die """ super(SimpleTestResult, self).__init__() self.path = path self.auto_reboot_args = auto_reboot_args self.result = json.load(open(self.path, 'r')) self.log_handler = None self.started = None self.keep_explorer = keep_explorer self.add_all_devices = add_all_devices SimpleTestResult.executions += 1 logger.info('Initial state is %s', json.dumps(self.result, indent=2)) def startTest(self, test): logger.info( '\n========================================\n%s\n========================================', test.__class__.__name__, ) test.add_all_devices = self.add_all_devices # create start up script if auto reboot enabled if self.auto_reboot_args: test.auto_reboot = True os.system( 'echo %s > "%s"' % ( ' '.join( self.auto_reboot_args + ['-c', test.__class__.__name__] ), RESUME_SCRIPT_PATH, ) ) # record start timestamp self.started = time.strftime('%Y-%m-%dT%H:%M:%S') os.system('mkdir %s' % test.result_dir) self.log_handler = logging.FileHandler( '%s\\auto-%s.log' % (test.result_dir, time.strftime('%Y%m%d%H%M%S')) ) self.log_handler.setLevel(logging.DEBUG) self.log_handler.setFormatter( 
logging.Formatter('%(asctime)s %(levelname)s %(message)s') ) logger.addHandler(self.log_handler) def add_result(self, test, passed, error=None): """Record test result into json file Args: test (TestCase): The test just run passed (bool): Whether the case is passed """ self.result[str(test.__class__.__name__)] = { 'started': self.started, 'stopped': time.strftime('%Y-%m-%dT%H:%M:%S'), 'passed': passed, 'error': error, 'executions': SimpleTestResult.executions, } if self.auto_reboot_args: os.system('del "%s"' % RESUME_SCRIPT_PATH) json.dump( OrderedDict(sorted(self.result.items(), key=lambda t: t[0])), open(self.path, 'w'), indent=2, ) # save logs logger.removeHandler(self.log_handler) self.log_handler.close() self.log_handler = None time.sleep(2) # close explorers if not self.keep_explorer: os.system('taskkill /f /im explorer.exe && start explorer.exe') def addSuccess(self, test): logger.info('case[%s] pass', test.__class__.__name__) super(SimpleTestResult, self).addSuccess(test) self.add_result(test, True) def addFailure(self, test, err): logger.warning('case[%s] fail', test.__class__.__name__) super(SimpleTestResult, self).addFailure(test, err) self.add_result(test, False) def addError(self, test, err): logger.error('case[%s] error', test.__class__.__name__, exc_info=err) if err and err[0] is SystemExit: if self.auto_reboot_args: logger.warning('rebooting..') os.system('shutdown /r /t 1') else: logger.warning('exiting..') sys.exit(1) super(SimpleTestResult, self).addError(test, err) self.add_result(test, None, str(err[1])) def list_devices(names=None, continue_from=None, **kwargs): """List devices in settings file and print versions""" if not names: names = [ device for device, _type in settings.GOLDEN_DEVICES if _type == 'OpenThread' ] if continue_from: continue_from = names.index(continue_from) else: continue_from = 0 for port in names[continue_from:]: try: with OpenThreadController(port) as otc: print('%s: %s' % (port, otc.version)) except BaseException: logger.exception('failed to get version of %s' % port) def discover( names=None, pattern=['*.py'], skip='efp', dry_run=False, blacklist=None, name_greps=None, manual_reset=False, delete_history=False, max_devices=0, continue_from=None, result_file='./result.json', auto_reboot=False, keep_explorer=False, add_all_devices=False, ): """Discover all test cases and skip those passed Args: pattern (str): Pattern to match case modules, refer python's unittest documentation for more details skip (str): types cases to skip """ if not os.path.exists(settings.OUTPUT_PATH): os.mkdir(settings.OUTPUT_PATH) if delete_history: os.system('del history.json') if blacklist: try: excludes = [ line.strip('\n') for line in open(blacklist, 'r').readlines() if not line.startswith('#') ] except BaseException: logger.exception('Failed to open test case black list file') raise else: excludes = [] log = None if os.path.isfile(result_file): try: log = json.load(open(result_file, 'r')) except BaseException: logger.exception('Failed to open result file') if not log: log = {} json.dump(log, open(result_file, 'w'), indent=2) suite = unittest.TestSuite() discovered = unittest.defaultTestLoader.discover('cases', pattern) if names and continue_from: names = names[names.index(continue_from):] for s1 in discovered: for s2 in s1: for case in s2: if case.__class__ is HarnessCase: continue case_name = str(case.__class__.__name__) # grep name if name_greps and not any( fnmatch.fnmatch(case_name, name_grep) for name_grep in name_greps ): logger.info('case[%s] skipped by name 
greps', case_name) continue # whitelist if len(names) and case_name not in names: logger.info('case[%s] skipped', case_name) continue # skip cases if case_name in log.keys(): if ( (log[case_name]['passed'] and ('p' in skip)) or ( log[case_name]['passed'] is False and ('f' in skip) ) or (log[case_name]['passed'] is None and ('e' in skip)) ): logger.warning( 'case[%s] skipped for its status[%s]', case_name, log[case_name]['passed'], ) continue # continue from if continue_from: if continue_from != case_name: logger.warning( 'case[%s] skipped for continue from[%s]', case_name, continue_from, ) continue else: continue_from = None # black list if case_name in excludes: logger.warning('case[%s] skipped for blacklist', case_name) continue # max devices if max_devices and case.golden_devices_required > max_devices: logger.warning( 'case[%s] skipped for exceeding max golden devices allowed[%d]', case_name, max_devices, ) continue suite.addTest(case) logger.info('case[%s] added', case_name) if auto_reboot: argv = [] argv.append('"%s"' % os.sep.join([os.getcwd(), 'start.bat'])) argv.extend(['-p', pattern]) argv.extend(['-k', skip]) argv.extend(['-o', result_file]) argv.append('-a') if manual_reset: argv.append('-m') if delete_history: argv.append('-d') auto_reboot_args = argv + names else: auto_reboot_args = None os.system('del "%s"' % RESUME_SCRIPT_PATH) # manual reset if manual_reset: settings.PDU_CONTROLLER_TYPE = 'MANUAL_PDU_CONTROLLER' settings.PDU_CONTROLLER_OPEN_PARAMS = {} settings.PDU_CONTROLLER_REBOOT_PARAMS = {} result = SimpleTestResult( result_file, auto_reboot_args, keep_explorer, add_all_devices ) for case in suite: logger.info(case.__class__.__name__) if dry_run: return suite.run(result) return result def main(): parser = argparse.ArgumentParser( description='Thread harness test case runner' ) parser.add_argument( '--auto-reboot', '-a', action='store_true', default=False, help='restart system when harness service die', ) parser.add_argument( 'names', metavar='NAME', type=str, nargs='*', default=None, help='test case name, omit to test all', ) parser.add_argument( '--blacklist', '-b', metavar='BLACKLIST_FILE', type=str, help='file to list test cases to skip', default=None, ) parser.add_argument( '--continue-from', '-c', type=str, default=None, help='first case to test', ) parser.add_argument( '--delete-history', '-d', action='store_true', default=False, help='clear history on startup', ) parser.add_argument( '--keep-explorer', '-e', action='store_true', default=False, help='do not restart explorer.exe at the end', ) parser.add_argument( '--name-greps', '-g', action='append', default=None, help='grep case by names', ) parser.add_argument( '--list-file', '-i', type=str, default=None, help='file to list cases names to test', ) parser.add_argument( '--skip', '-k', metavar='SKIP', type=str, help='type of results to skip.' 
'e for error, f for fail, p for pass.', default='', ) parser.add_argument( '--list-devices', '-l', action='store_true', default=False, help='list devices', ) parser.add_argument( '--manual-reset', '-m', action='store_true', default=False, help='reset devices manually', ) parser.add_argument( '--dry-run', '-n', action='store_true', default=False, help='just show what to run', ) parser.add_argument( '--result-file', '-o', type=str, default=settings.OUTPUT_PATH + '\\result.json', help='file to store and read current status', ) parser.add_argument( '--pattern', '-p', metavar='PATTERN', type=str, help='file name pattern, default to "*.py"', default='*.py', ) parser.add_argument( '--rerun-fails', '-r', type=int, default=0, help='number of times to rerun failed test cases', ) parser.add_argument( '--add-all-devices', '-t', action='store_true', default=False, help='add all devices to the test bed', ) parser.add_argument( '--max-devices', '-u', type=int, default=0, help='max golden devices allowed', ) args = vars(parser.parse_args()) if args['list_file']: try: names = [ line.strip('\n') for line in open(args['list_file'], 'r').readlines() if not line.startswith('#') ] except BaseException: logger.exception('Failed to open test case list file') raise else: args['names'] = args['names'] + names args.pop('list_file') if args.pop('list_devices', False): list_devices(**args) return rerun_fails = args.pop('rerun_fails') result = discover(**args) if rerun_fails > 0: for i in range(rerun_fails): failed_names = { name for name in result.result if result.result[name]['passed'] is False } if not failed_names: break logger.info('Rerunning failed test cases') logger.info('Rerun #{}:'.format(i + 1)) result = discover( names=failed_names, pattern=args['pattern'], skip='', result_file=args['result_file'], auto_reboot=args['auto_reboot'], keep_explorer=args['keep_explorer'], add_all_devices=args['add_all_devices'], ) if __name__ == '__main__': main()
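# Reference sketch (illustrative only, not used by the runner): the skip
# filter in discover() maps the 'passed' value recorded in result.json by
# SimpleTestResult.add_result (True = pass, False = fail, None = error) onto
# the -k/--skip letters 'p', 'f' and 'e'.  _should_skip is an added helper
# name that restates that decision in isolation.
def _should_skip(previous_passed, skip):
    """Return True if a case with the recorded status should be skipped.

    previous_passed: True (passed), False (failed) or None (errored), as
    stored in result.json; skip: the -k string, e.g. 'pe' to skip passed
    and errored cases so that only failures are re-run.
    """
    if previous_passed is True:
        return 'p' in skip
    if previous_passed is False:
        return 'f' in skip
    return 'e' in skip  # previous_passed is None, i.e. the case errored

# e.g. with skip='pe':
#   _should_skip(True, 'pe') -> True, _should_skip(False, 'pe') -> False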
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Define API Loggers.""" from google.protobuf.json_format import MessageToDict from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud.logging.resource import Resource _GLOBAL_RESOURCE = Resource(type='global', labels={}) class Logger(object): """Loggers represent named targets for log entries. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs :type name: str :param name: the name of the logger :type client: :class:`google.cloud.logging.client.Client` :param client: A client which holds credentials and project configuration for the logger (which requires a project). :type labels: dict :param labels: (optional) mapping of default labels for entries written via this logger. """ def __init__(self, name, client, labels=None): self.name = name self._client = client self.labels = labels @property def client(self): """Clent bound to the logger.""" return self._client @property def project(self): """Project bound to the logger.""" return self._client.project @property def full_name(self): """Fully-qualified name used in logging APIs""" return 'projects/%s/logs/%s' % (self.project, self.name) @property def path(self): """URI path for use in logging APIs""" return '/%s' % (self.full_name,) def _require_client(self, client): """Check client or verify over-ride. :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current logger. :rtype: :class:`google.cloud.logging.client.Client` :returns: The client passed in or the currently bound client. """ if client is None: client = self._client return client def batch(self, client=None): """Return a batch to use as a context manager. :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current topic. :rtype: :class:`Batch` :returns: A batch to use as a context manager. """ client = self._require_client(client) return Batch(self, client) def _make_entry_resource(self, text=None, info=None, message=None, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """Return a log entry resource of the appropriate type. Helper for :meth:`log_text`, :meth:`log_struct`, and :meth:`log_proto`. Only one of ``text``, ``info``, or ``message`` should be passed. :type text: str :param text: (Optional) text payload :type info: dict :param info: (Optional) struct payload :type message: :class:`~google.protobuf.message.Message` :param message: (Optional) The protobuf payload to log. :type labels: dict :param labels: (Optional) labels passed in to calling method. :type insert_id: str :param insert_id: (Optional) unique ID for log entry. :type severity: str :param severity: (Optional) severity of event being logged. 
:type http_request: dict :param http_request: (Optional) info about HTTP request associated with the entry :type timestamp: :class:`datetime.datetime` :param timestamp: (Optional) timestamp of event being logged. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: (Optional) Monitored resource of the entry :rtype: dict :returns: The JSON resource created. """ entry = { 'logName': self.full_name, 'resource': resource._to_dict(), } if text is not None: entry['textPayload'] = text if info is not None: entry['jsonPayload'] = info if message is not None: # NOTE: If ``message`` contains an ``Any`` field with an # unknown type, this will fail with a ``TypeError``. # However, since ``message`` will be provided by a user, # the assumption is that any types needed for the # protobuf->JSON conversion will be known from already # imported ``pb2`` modules. entry['protoPayload'] = MessageToDict(message) if labels is None: labels = self.labels if labels is not None: entry['labels'] = labels if insert_id is not None: entry['insertId'] = insert_id if severity is not None: entry['severity'] = severity if http_request is not None: entry['httpRequest'] = http_request if timestamp is not None: entry['timestamp'] = _datetime_to_rfc3339(timestamp) return entry def log_text(self, text, client=None, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """API call: log a text message via a POST request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write :type text: str :param text: the log message. :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current logger. :type labels: dict :param labels: (optional) mapping of labels for the entry. :type insert_id: str :param insert_id: (optional) unique ID for log entry. :type severity: str :param severity: (optional) severity of event being logged. :type http_request: dict :param http_request: (optional) info about HTTP request associated with the entry :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: Monitored resource of the entry, defaults to the global resource type. :type timestamp: :class:`datetime.datetime` :param timestamp: (optional) timestamp of event being logged. """ client = self._require_client(client) entry_resource = self._make_entry_resource( text=text, labels=labels, insert_id=insert_id, severity=severity, http_request=http_request, timestamp=timestamp, resource=resource) client.logging_api.write_entries([entry_resource]) def log_struct(self, info, client=None, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """API call: log a structured message via a POST request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write :type info: dict :param info: the log entry information :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current logger. :type labels: dict :param labels: (optional) mapping of labels for the entry. :type insert_id: str :param insert_id: (optional) unique ID for log entry. :type severity: str :param severity: (optional) severity of event being logged. :type http_request: dict :param http_request: (optional) info about HTTP request associated with the entry. 
:type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: Monitored resource of the entry, defaults to the global resource type. :type timestamp: :class:`datetime.datetime` :param timestamp: (optional) timestamp of event being logged. """ client = self._require_client(client) entry_resource = self._make_entry_resource( info=info, labels=labels, insert_id=insert_id, severity=severity, http_request=http_request, timestamp=timestamp, resource=resource) client.logging_api.write_entries([entry_resource]) def log_proto(self, message, client=None, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """API call: log a protobuf message via a POST request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list :type message: :class:`~google.protobuf.message.Message` :param message: The protobuf message to be logged. :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current logger. :type labels: dict :param labels: (optional) mapping of labels for the entry. :type insert_id: str :param insert_id: (optional) unique ID for log entry. :type severity: str :param severity: (optional) severity of event being logged. :type http_request: dict :param http_request: (optional) info about HTTP request associated with the entry. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: Monitored resource of the entry, defaults to the global resource type. :type timestamp: :class:`datetime.datetime` :param timestamp: (optional) timestamp of event being logged. """ client = self._require_client(client) entry_resource = self._make_entry_resource( message=message, labels=labels, insert_id=insert_id, severity=severity, http_request=http_request, timestamp=timestamp, resource=resource) client.logging_api.write_entries([entry_resource]) def delete(self, client=None): """API call: delete all entries in a logger via a DELETE request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current logger. """ client = self._require_client(client) client.logging_api.logger_delete(self.project, self.name) def list_entries(self, projects=None, filter_=None, order_by=None, page_size=None, page_token=None): """Return a page of log entries. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list :type projects: list of strings :param projects: project IDs to include. If not passed, defaults to the project bound to the client. :type filter_: str :param filter_: a filter expression. See https://cloud.google.com/logging/docs/view/advanced_filters :type order_by: str :param order_by: One of :data:`~google.cloud.logging.ASCENDING` or :data:`~google.cloud.logging.DESCENDING`. :type page_size: int :param page_size: maximum number of entries to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. :rtype: :class:`~google.api.core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry` accessible to the current logger. 
""" log_filter = 'logName=%s' % (self.full_name,) if filter_ is not None: filter_ = '%s AND %s' % (filter_, log_filter) else: filter_ = log_filter return self.client.list_entries( projects=projects, filter_=filter_, order_by=order_by, page_size=page_size, page_token=page_token) class Batch(object): """Context manager: collect entries to log via a single API call. Helper returned by :meth:`Logger.batch` :type logger: :class:`google.cloud.logging.logger.Logger` :param logger: the logger to which entries will be logged. :type client: :class:`google.cloud.logging.client.Client` :param client: The client to use. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: (Optional) Monitored resource of the batch, defaults to None, which requires that every entry should have a resource specified. Since the methods used to write entries default the entry's resource to the global resource type, this parameter is only required if explicitly set to None. If no entries' resource are set to None, this parameter will be ignored on the server. """ def __init__(self, logger, client, resource=None): self.logger = logger self.entries = [] self.client = client self.resource = resource def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is None: self.commit() def log_text(self, text, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """Add a text entry to be logged during :meth:`commit`. :type text: str :param text: the text entry :type labels: dict :param labels: (optional) mapping of labels for the entry. :type insert_id: str :param insert_id: (optional) unique ID for log entry. :type severity: str :param severity: (optional) severity of event being logged. :type http_request: dict :param http_request: (optional) info about HTTP request associated with the entry. :type timestamp: :class:`datetime.datetime` :param timestamp: (optional) timestamp of event being logged. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: (Optional) Monitored resource of the entry. Defaults to the global resource type. If set to None, the resource of the batch is used for this entry. If both this resource and the Batch resource are None, the API will return an error. """ self.entries.append( ('text', text, labels, insert_id, severity, http_request, timestamp, resource)) def log_struct(self, info, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """Add a struct entry to be logged during :meth:`commit`. :type info: dict :param info: the struct entry :type labels: dict :param labels: (optional) mapping of labels for the entry. :type insert_id: str :param insert_id: (optional) unique ID for log entry. :type severity: str :param severity: (optional) severity of event being logged. :type http_request: dict :param http_request: (optional) info about HTTP request associated with the entry. :type timestamp: :class:`datetime.datetime` :param timestamp: (optional) timestamp of event being logged. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: (Optional) Monitored resource of the entry. Defaults to the global resource type. If set to None, the resource of the batch is used for this entry. If both this resource and the Batch resource are None, the API will return an error. 
""" self.entries.append( ('struct', info, labels, insert_id, severity, http_request, timestamp, resource)) def log_proto(self, message, labels=None, insert_id=None, severity=None, http_request=None, timestamp=None, resource=_GLOBAL_RESOURCE): """Add a protobuf entry to be logged during :meth:`commit`. :type message: protobuf message :param message: the protobuf entry :type labels: dict :param labels: (optional) mapping of labels for the entry. :type insert_id: str :param insert_id: (optional) unique ID for log entry. :type severity: str :param severity: (optional) severity of event being logged. :type http_request: dict :param http_request: (optional) info about HTTP request associated with the entry. :type timestamp: :class:`datetime.datetime` :param timestamp: (optional) timestamp of event being logged. :type resource: :class:`~google.cloud.logging.resource.Resource` :param resource: (Optional) Monitored resource of the entry. Defaults to the global resource type. If set to None, the resource of the batch is used for this entry. If both this resource and the Batch resource are None, the API will return an error. """ self.entries.append( ('proto', message, labels, insert_id, severity, http_request, timestamp, resource)) def commit(self, client=None): """Send saved log entries as a single API call. :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current batch. """ if client is None: client = self.client kwargs = { 'logger_name': self.logger.full_name, } if self.resource is not None: kwargs['resource'] = self.resource._to_dict() if self.logger.labels is not None: kwargs['labels'] = self.logger.labels entries = [] for (entry_type, entry, labels, iid, severity, http_req, timestamp, resource) in self.entries: if entry_type == 'text': info = {'textPayload': entry} elif entry_type == 'struct': info = {'jsonPayload': entry} elif entry_type == 'proto': # NOTE: If ``entry`` contains an ``Any`` field with an # unknown type, this will fail with a ``TypeError``. # However, since ``entry`` was provided by a user in # ``Batch.log_proto``, the assumption is that any types # needed for the protobuf->JSON conversion will be known # from already imported ``pb2`` modules. info = {'protoPayload': MessageToDict(entry)} else: raise ValueError('Unknown entry type: %s' % (entry_type,)) if resource is not None: info['resource'] = resource._to_dict() if labels is not None: info['labels'] = labels if iid is not None: info['insertId'] = iid if severity is not None: info['severity'] = severity if http_req is not None: info['httpRequest'] = http_req if timestamp is not None: info['timestamp'] = _datetime_to_rfc3339(timestamp) entries.append(info) client.logging_api.write_entries(entries, **kwargs) del self.entries[:]
#!/usr/bin/env python


class tree_node(object):
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def create_minimum_BST(data_list, start, end):
    if end < start:
        return None
    mid = (start + end) / 2
    n = tree_node(data_list[mid])
    n.left = create_minimum_BST(data_list, start, mid - 1)
    n.right = create_minimum_BST(data_list, mid + 1, end)
    return n


def in_order_traverse(root):
    if root is None:
        return
    in_order_traverse(root.left)
    print root.data
    in_order_traverse(root.right)


def pre_order_traverse(root):
    if root is None:
        return
    print root.data
    pre_order_traverse(root.left)
    pre_order_traverse(root.right)


def post_order_traverse(root):
    if root is None:
        return
    post_order_traverse(root.left)
    post_order_traverse(root.right)
    print root.data


def dfs(root):
    if root is None:
        return
    stack = [root, ]
    while len(stack) > 0:
        node = stack.pop()
        print node.data
        if node.right is not None:
            stack.append(node.right)
        if node.left is not None:
            stack.append(node.left)


from collections import deque


def bfs(root):
    if root is None:
        return
    queue = deque([root])
    while len(queue) > 0:
        node = queue.popleft()
        print node.data
        if node.left is not None:
            queue.append(node.left)
        if node.right is not None:
            queue.append(node.right)


def copy_tree(root, new_node):
    if root.left is not None:
        new_node.left = tree_node(root.left.data)
        copy_tree(root.left, new_node.left)
    if root.right is not None:
        new_node.right = tree_node(root.right.data)
        copy_tree(root.right, new_node.right)
    return new_node

# Note:
# inorder preorder postorder are all DFS
# if you want to implement preorder, you need to first push right to stack since stack is LIFO
# but in BFS, just use normal left -> right would be fine


def get_height(root):
    if root is None:
        return 0
    return max(get_height(root.left), get_height(root.right)) + 1


def is_balance(root):
    if root is None:
        return True
    if abs(get_height(root.left) - get_height(root.right)) > 1:
        return False
    return is_balance(root.left) and is_balance(root.right)


def covers(root, p):
    if root is None:
        return False
    if root == p:
        return True
    return covers(root.left, p) or covers(root.right, p)


# store the result of covers so that we don't need to calculate it again
def first_common_ancestor(root, p, q):
    # p or q is not in the tree rooted at root
    if not (covers(root, p) and covers(root, q)):
        return None
    p_is_left = covers(root.left, p)
    q_is_left = covers(root.left, q)
    # Different sides: root is the first common ancestor
    if p_is_left != q_is_left:
        return root
    elif p_is_left:
        return first_common_ancestor(root.left, p, q)
    else:
        return first_common_ancestor(root.right, p, q)


def is_subtree(t1, t2):
    # An empty tree is a subtree of any tree.
    if t2 is None:
        return True
    if t1 is None:
        return False
    if t1.data == t2.data:
        if is_match(t1, t2):
            return True
    return is_subtree(t1.left, t2) or is_subtree(t1.right, t2)


def is_match(t1, t2):
    if t1 is None and t2 is None:
        return True
    elif t1 is None or t2 is None:
        return False
    else:
        if t1.data != t2.data:
            return False
        else:
            return is_match(t1.left, t2.left) and is_match(t1.right, t2.right)


def print_path(node, path_list):
    path_list.append(node)
    if node.left is None and node.right is None:
        s = ''
        for key in path_list:
            s += str(key.data) + ' '
        print s
        return
    if node.left is not None:
        print_path(node.left, path_list)
        path_list.pop()
    if node.right is not None:
        print_path(node.right, path_list)
        path_list.pop()


def is_full(root):
    if root is None:
        return True
    if root.left is None and root.right is None:
        return True
    if root.left is None or root.right is None:
        return False
    return is_full(root.left) and is_full(root.right)


def is_complete(root):
    queue = deque([root, ])
    empty = False
    while len(queue) > 0:
        n = queue.popleft()
        if n.left is None:
            empty = True
        else:
            if empty:
                return False
            queue.append(n.left)
        if n.right is None:
            empty = True
        else:
            if empty:
                return False
            queue.append(n.right)
    return True


def is_symmetry_pair(n1, n2):
    if n1 is None and n2 is None:
        return True
    if n1 is None or n2 is None:
        return False
    if n1.data != n2.data:
        return False
    return is_symmetry_pair(n1.left, n2.right) and is_symmetry_pair(n1.right, n2.left)


def is_symmetry(root):
    if root is None:
        return True
    return is_symmetry_pair(root.left, root.right)


def mirror_tree(root):
    if root is None:
        return
    mirror_tree_nodes(root.left, root.right)


# Consider the case that n1 is None and n2 is not
def mirror_tree_nodes(n1, n2):
    if n1 is None or n2 is None:
        return
    tmp = n1.data
    n1.data = n2.data
    n2.data = tmp
    if n1.left is not None and n2.right is not None:
        mirror_tree_nodes(n1.left, n2.right)
    if n1.right is not None and n2.left is not None:
        mirror_tree_nodes(n1.right, n2.left)
# Not going to add more to here. Just add four more checks here


# BFS way
def create_list_level_tree(root):
    result = [[root, ], ]
    prev = [root, ]
    current = []
    while len(prev) > 0:
        for node in prev:
            if node.left is not None:
                current.append(node.left)
            if node.right is not None:
                current.append(node.right)
        if current:
            result.append(current)
        prev = current
        current = []
    return result


# DFS way
def create_list_level_tree_dfs(root):
    pass


# Check if a BT is a BST: use in-order traverse and copy to a list
def check_bst_inorder(root):
    bst_list = []

    def copy_bst(node):
        if node is None:
            return
        copy_bst(node.left)
        bst_list.append(node)
        copy_bst(node.right)

    copy_bst(root)
    if not bst_list:
        return True
    prev = bst_list[0]
    for node in bst_list[1:]:
        if prev.data >= node.data:
            return False
        else:
            prev = node
    return True


# Use left node < current < right node
def check_bst(root, min_value=float('-inf'), max_value=float('inf')):
    if root is None:
        return True
    if root.data < min_value or root.data > max_value:
        return False
    if not check_bst(root.left, min_value, root.data) or not check_bst(root.right, root.data, max_value):
        return False
    return True


def get_rank(root, num):
    pass


def tree_diameter(root, num):
    pass


if __name__ == '__main__':
    data_list = [1, 3, 4, 5, 6, 8, 9, 10, 13, 14, 17, 18]
    root = create_minimum_BST(data_list, 0, len(data_list) - 1)
    print root.data, root.left.data, root.right.data

    print '\nIn Order Traverse'
    in_order_traverse(root)
    print '\nPre Order Traverse'
    pre_order_traverse(root)
    print '\nPost Order Traverse'
    post_order_traverse(root)
    print '\nDFS'
    dfs(root)
    print '\nBFS'
    bfs(root)
    #print '\nCopy Tree'
    print '\nPrint Path'
    path_list = []
    print_path(root, path_list)
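# --- Usage sketch (illustrative) ---
# Exercises the structural checks defined above on a small BST; everything
# referenced here is defined in this file.
if __name__ == '__main__':
    sample = create_minimum_BST([1, 3, 4, 5, 6, 8, 9], 0, 6)
    print '\nChecks'
    print is_balance(sample)                              # heights differ by at most 1
    print is_full(sample), is_complete(sample)            # 0-or-2 children; levels filled left to right
    print check_bst(sample, float('-inf'), float('inf'))  # BST property via min/max bounds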
import psycopg2 import psycopg2.extras import psycopg2.extensions import re from dbobject_cache import DBObjectsCache object_cache = None ''' :type object_cache: DBObjectsCache''' db_credentials = {} config_settings = None psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY) def execute_on_host(hostname, port, dbname, user, password, sql, params=None): data = [] conn = None if user is None and password is None: user, password = db_credentials['{}:{}:{}'.format(dbname, hostname, port)] try: conn = psycopg2.connect(host=hostname, port=port, dbname=dbname, user=user, password=password, connect_timeout='3') cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) cur.execute('set transaction read only') cur.execute(sql, params) data = cur.fetchall() except Exception as e: print 'ERROR execution failed on {}:{} - {}'.format(hostname, port, e.message) finally: if conn and not conn.closed: conn.close() return data def execute_on_db_uniq(db_uniq, sql, params=None, dict=False): """ db_uniq = dbname:hostname:port """ data = [] column_names = [] error = None conn = None user, password = db_credentials[db_uniq] hostname = db_uniq.split(':')[0] port = db_uniq.split(':')[1] dbname = db_uniq.split(':')[2] try: conn = psycopg2.connect(host=hostname, port=port, dbname=dbname, user=user, password=password, connect_timeout='3') if dict: cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) else: cur = conn.cursor() cur.execute('set transaction read only') cur.execute(sql, params) data = cur.fetchall() column_names = [x[0] for x in cur.description] except Exception as e: print 'ERROR execution failed on {}:{} - {}'.format(hostname, port, e.message) error = e.message finally: if conn and not conn.closed: conn.close() return data, column_names, error def get_column_info(dbuniq, table_name, column_names): ret = [] for cn in column_names: ci = {'column_name': cn} for cache_info in object_cache.cache[dbuniq][table_name]['columns']: if cn == cache_info['column_name']: ci = cache_info break ret.append(ci) return ret def get_list_of_dbs_on_instance(host, port, db, user, password): sql = """select datname from pg_database where not datistemplate""" return [x['datname'] for x in execute_on_host(host, port, db, user, password, sql)] def get_children_for_dbuniq_table(db_uniq, table): sql = """ with recursive q_children_oids(oid) as ( select inhrelid as oid from pg_catalog.pg_inherits where inhparent = regclass(%s)::oid union all select i.inhrelid from q_children_oids q join pg_catalog.pg_inherits i on q.oid = i.inhparent ) select quote_ident(n.nspname)||'.'||quote_ident(c.relname) as full_table_name, coalesce((select array_agg(attname::text order by attnum) from pg_attribute where attrelid = c.oid and attnum >= 0 and not attisdropped), '{}'::text[]) as columns, (select count(*) from pg_inherits where inhparent = c.oid) as children_count, (select count(*) from pg_inherits where inhrelid = c.oid) > 0 as is_inherited from pg_class c join pg_namespace n on c.relnamespace = n.oid where c.oid in (select oid from q_children_oids) and c.relkind in ('r', 'v') order by 1 """ data, column_names, error = execute_on_db_uniq(db_uniq, sql, (table,), dict=True) # print data, column_names, error return [(x['full_table_name'], x) for x in data] def add_db_to_object_cache(object_cache, host, port, db, user, password, tables=True, views=False): sql = """ select quote_ident(n.nspname)||'.'||quote_ident(c.relname) as full_table_name, 
coalesce((select array_agg(attname::text order by attnum) from pg_attribute where attrelid = c.oid and attnum >= 0 and not attisdropped), '{}'::text[]) as columns, (select count(*) from pg_inherits where inhparent = c.oid) as children_count, (select count(*) from pg_inherits where inhrelid = c.oid) > 0 as is_inherited from pg_class c join pg_namespace n on c.relnamespace = n.oid where c.relkind = ANY(%s) --c.relkind in ('r', 'v') and not n.nspname in ('information_schema', 'pg_catalog') """ table_type = [] if tables: table_type.append('r') if views: table_type.extend(('v', 'm')) if len(table_type) == 0: raise Exception('Views and/or Tables exposing must be enabled!') data = execute_on_host(host, port, db, user, password, sql, (table_type,)) for td in data: # print td object_cache.add_table_to_cache(host, port, db, td['full_table_name'], DBObjectsCache.formulate_table(td)) # TODO db_credentials['{}:{}:{}'.format(host, port, db)] = (user, password) def apply_regex_filters_to_list(input_list, filter_pattern_list, filter_type): if not filter_pattern_list: return input_list white_ret = set() black_ret = set(input_list) if filter_type not in ['whitelist', 'blacklist']: raise Exception('Invalid input: ' + filter_type) for pattern in filter_pattern_list: p = re.compile(pattern) if filter_type == 'whitelist': white_ret.update(filter(lambda x: p.match(x), input_list)) else: black_ret.difference_update(filter(lambda x: p.match(x), black_ret)) return list(black_ret) if filter_type == 'blacklist' else list(white_ret) def initialize_db_object_cache(settings): """ read and store all tables/columns for all db """ instances = settings['instances'] global config_settings config_settings = settings expose_tables = settings['features'].get('expose_tables', True) expose_views = settings['features'].get('expose_views', False) expose_all_dbs = settings['features'].get('expose_all_dbs', True) dbname_blacklist = settings['dbname_visibility_control'].get('dbname_blacklist', []) dbname_whitelist = settings['dbname_visibility_control'].get('dbname_whitelist', []) global object_cache ''' :type : DBObjectsCache''' if not object_cache: object_cache = DBObjectsCache() for inst_name, inst_data in instances.iteritems(): if not expose_all_dbs and 'databases' not in inst_data: raise Exception('Explicit list of allowed DBs needed for {}'.format(inst_name)) dbs = [] if 'databases' not in inst_data: dbs = get_list_of_dbs_on_instance(inst_data['hostname'], inst_data['port'], 'postgres', inst_data['user'], inst_data['password']) if dbname_whitelist: dbs = apply_regex_filters_to_list(dbs, dbname_whitelist, 'whitelist') if dbname_blacklist: dbs = apply_regex_filters_to_list(dbs, dbname_blacklist, 'blacklist') else: dbs = inst_data['databases'] for db in dbs: print 'initializing cache for cluster {}, db {}'.format(inst_name, db) add_db_to_object_cache(object_cache, inst_data['hostname'], inst_data['port'], db, inst_data['user'], inst_data['password'], expose_tables, expose_views) print 'initializing finished' for db_uniq in object_cache.cache: print 'Found DB:', db_uniq, ', objects:', len(object_cache.cache[db_uniq]) # print object_cache.cache[db_uniq].keys() if __name__ == '__main__': # print get_list_of_dbs_on_instance('localhost', 5432, 'postgres', 'postgres', 'postgres') object_cache = DBObjectsCache() add_db_to_object_cache(object_cache, 'localhost', 5432, 'postgres', 'postgres', 'postgres') # print object_cache # print object_cache.get_dbuniq_and_table_full_name('pos', 'fk') print 
get_children_for_dbuniq_table('postgres:localhost:5432', 'public.p1') # print apply_regex_filters_to_list(['local_db', 'local_db_temp'], ['.*_temp'], 'whitelist') # print apply_regex_filters_to_list(['local_db', 'local_db_temp'], ['.*_temp'], 'blacklist')
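# --- Usage sketch (illustrative) ---
# A minimal settings dict shaped the way initialize_db_object_cache() reads it
# above ('instances', 'features', 'dbname_visibility_control'); the host,
# credentials and filter patterns are placeholders.
def example_initialize_cache():
    example_settings = {
        'instances': {
            'local': {
                'hostname': 'localhost',
                'port': 5432,
                'user': 'postgres',
                'password': 'postgres',
                # omit 'databases' to auto-discover (requires expose_all_dbs)
            },
        },
        'features': {
            'expose_tables': True,
            'expose_views': False,
            'expose_all_dbs': True,
        },
        'dbname_visibility_control': {
            'dbname_blacklist': ['.*_temp'],
            'dbname_whitelist': [],
        },
    }
    initialize_db_object_cache(example_settings)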
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Finds perf trybots that can run telemetry tests.""" import json import logging import os import re import subprocess import sys import urllib2 from telemetry.core import platform from telemetry import decorators from telemetry.internal.browser import possible_browser from telemetry.internal.platform import trybot_device CHROMIUM_CONFIG_FILENAME = 'tools/run-perf-test.cfg' BLINK_CONFIG_FILENAME = 'Tools/run-perf-test.cfg' SUCCESS, NO_CHANGES, ERROR = range(3) # Unsupported Perf bisect bots. EXCLUDED_BOTS = { 'win_xp_perf_bisect', # Goma issues: crbug.com/330900 'linux_perf_tester', 'linux_perf_bisector', 'win_perf_bisect_builder', 'win64_nv_tester', 'winx64_bisect_builder', 'linux_perf_bisect_builder', 'mac_perf_bisect_builder', 'android_perf_bisect_builder', 'android_arm64_perf_bisect_builder' } INCLUDE_BOTS = [ 'trybot-all', 'trybot-all-win', 'trybot-all-mac', 'trybot-all-linux', 'trybot-all-android' ] class TrybotError(Exception): def __str__(self): return '%s\nError running tryjob.' % self.args[0] class PossibleTrybotBrowser(possible_browser.PossibleBrowser): """A script that sends a job to a trybot.""" def __init__(self, browser_type, _): target_os = browser_type.split('-')[1] self._builder_names = _GetBuilderNames(browser_type) super(PossibleTrybotBrowser, self).__init__(browser_type, target_os, True) def Create(self, finder_options): raise NotImplementedError() def SupportsOptions(self, finder_options): if ((finder_options.device and finder_options.device != 'trybot') or finder_options.cros_remote or finder_options.extensions_to_load or finder_options.profile_dir): return False return True def IsRemote(self): return True def _RunProcess(self, cmd): logging.debug('Running process: "%s"', ' '.join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() returncode = proc.poll() return (returncode, out, err) def _UpdateConfigAndRunTryjob(self, bot_platform, cfg_file_path): """Updates perf config file, uploads changes and excutes perf try job. Args: bot_platform: Name of the platform to be generated. cfg_file_path: Perf config file path. Returns: (result, msg) where result is one of: SUCCESS if a tryjob was sent NO_CHANGES if there was nothing to try, ERROR if a tryjob was attempted but an error encountered and msg is an error message if an error was encountered, or rietveld url if success, otherwise throws TrybotError exception. """ config = self._GetPerfConfig(bot_platform) try: config_file = open(cfg_file_path, 'w') except IOError: msg = 'Cannot find %s. Please run from src dir.' % cfg_file_path return (ERROR, msg) config_file.write('config = %s' % json.dumps( config, sort_keys=True, indent=2, separators=(',', ': '))) config_file.close() # Commit the config changes locally. returncode, out, err = self._RunProcess( ['git', 'commit', '-a', '-m', 'bisect config: %s' % bot_platform]) if returncode: raise TrybotError('Could not commit bisect config change for %s,' ' error %s' % (bot_platform, err)) # Upload the CL to rietveld and run a try job. 
returncode, out, err = self._RunProcess([ 'git', 'cl', 'upload', '-f', '--bypass-hooks', '-m', 'CL for perf tryjob on %s' % bot_platform ]) if returncode: raise TrybotError('Could upload to rietveld for %s, error %s' % (bot_platform, err)) match = re.search(r'https://codereview.chromium.org/[\d]+', out) if not match: raise TrybotError('Could not upload CL to rietveld for %s! Output %s' % (bot_platform, out)) rietveld_url = match.group(0) # Generate git try command for available bots. git_try_command = ['git', 'cl', 'try', '-m', 'tryserver.chromium.perf'] for bot in self._builder_names[bot_platform]: git_try_command.extend(['-b', bot]) returncode, out, err = self._RunProcess(git_try_command) if returncode: raise TrybotError('Could not try CL for %s, error %s' % (bot_platform, err)) return (SUCCESS, rietveld_url) def _GetPerfConfig(self, bot_platform): """Generates the perf config for try job. Args: bot_platform: Name of the platform to be generated. Returns: A dictionary with perf config parameters. """ # Generate the command line for the perf trybots target_arch = 'ia32' arguments = sys.argv if any(arg == '--chrome-root' or arg.startswith('--chrome-root=') for arg in arguments): raise ValueError( 'Trybot does not suport --chrome-root option set directly ' 'through command line since it may contain references to your local ' 'directory') if bot_platform in ['win', 'win-x64']: arguments[0] = 'python tools\\perf\\run_benchmark' else: arguments[0] = './tools/perf/run_benchmark' for index, arg in enumerate(arguments): if arg.startswith('--browser='): if bot_platform == 'android': arguments[index] = '--browser=android-chromium' elif any('x64' in bot for bot in self._builder_names[bot_platform]): arguments[index] = '--browser=release_x64' target_arch = 'x64' else: arguments[index] = '--browser=release' command = ' '.join(arguments) return { 'command': command, 'repeat_count': '1', 'max_time_minutes': '120', 'truncate_percent': '0', 'target_arch': target_arch, } def _AttemptTryjob(self, cfg_file_path): """Attempts to run a tryjob from the current directory. This is run once for chromium, and if it returns NO_CHANGES, once for blink. Args: cfg_file_path: Path to the config file for the try job. Returns: Returns SUCCESS if a tryjob was sent, NO_CHANGES if there was nothing to try, ERROR if a tryjob was attempted but an error encountered. """ source_repo = 'chromium' if cfg_file_path == BLINK_CONFIG_FILENAME: source_repo = 'blink' # TODO(prasadv): This method is quite long, we should consider refactor # this by extracting to helper methods. returncode, original_branchname, err = self._RunProcess( ['git', 'rev-parse', '--abbrev-ref', 'HEAD']) if returncode: msg = 'Must be in a git repository to send changes to trybots.' if err: msg += '\nGit error: %s' % err logging.error(msg) return ERROR original_branchname = original_branchname.strip() # Check if the tree is dirty: make sure the index is up to date and then # run diff-index self._RunProcess(['git', 'update-index', '--refresh', '-q']) returncode, out, err = self._RunProcess(['git', 'diff-index', 'HEAD']) if out: logging.error( 'Cannot send a try job with a dirty tree. Commit locally first.') return ERROR # Make sure the tree does have local commits. returncode, out, err = self._RunProcess( ['git', 'log', 'origin/master..HEAD']) if not out: return NO_CHANGES # Create/check out the telemetry-tryjob branch, and edit the configs # for the tryjob there. 
returncode, out, err = self._RunProcess( ['git', 'checkout', '-b', 'telemetry-tryjob']) if returncode: logging.error('Error creating branch telemetry-tryjob. ' 'Please delete it if it exists.\n%s', err) return ERROR try: returncode, out, err = self._RunProcess( ['git', 'branch', '--set-upstream-to', 'origin/master']) if returncode: logging.error('Error in git branch --set-upstream-to: %s', err) return ERROR for bot_platform in self._builder_names: try: results, output = self._UpdateConfigAndRunTryjob( bot_platform, cfg_file_path) if results == ERROR: logging.error(output) return ERROR print ('Uploaded %s try job to rietveld for %s platform. ' 'View progress at %s' % (source_repo, bot_platform, output)) except TrybotError, err: print err logging.error(err) finally: # Checkout original branch and delete telemetry-tryjob branch. # TODO(prasadv): This finally block could be extracted out to be a # separate function called _CleanupBranch. returncode, out, err = self._RunProcess( ['git', 'checkout', original_branchname]) if returncode: logging.error('Could not check out %s. Please check it out and ' 'manually delete the telemetry-tryjob branch. ' ': %s', original_branchname, err) return ERROR # pylint: disable=lost-exception logging.info('Checked out original branch: %s', original_branchname) returncode, out, err = self._RunProcess( ['git', 'branch', '-D', 'telemetry-tryjob']) if returncode: logging.error('Could not delete telemetry-tryjob branch. ' 'Please delete it manually: %s', err) return ERROR # pylint: disable=lost-exception logging.info('Deleted temp branch: telemetry-tryjob') return SUCCESS def RunRemote(self): """Sends a tryjob to a perf trybot. This creates a branch, telemetry-tryjob, switches to that branch, edits the bisect config, commits it, uploads the CL to rietveld, and runs a tryjob on the given bot. """ # First check if there are chromium changes to upload. status = self._AttemptTryjob(CHROMIUM_CONFIG_FILENAME) if status not in [SUCCESS, ERROR]: # If we got here, there are no chromium changes to upload. Try blink. os.chdir('third_party/WebKit/') status = self._AttemptTryjob(BLINK_CONFIG_FILENAME) os.chdir('../..') if status not in [SUCCESS, ERROR]: logging.error('No local changes found in chromium or blink trees. ' 'browser=%s argument sends local changes to the ' 'perf trybot(s): %s.', self.browser_type, self._builder_names.values()) def _InitPlatformIfNeeded(self): if self._platform: return self._platform = platform.GetHostPlatform() # pylint: disable=W0212 self._platform_backend = self._platform._platform_backend def SelectDefaultBrowser(_): return None def CanFindAvailableBrowsers(): return True @decorators.Cache def _GetTrybotList(): f = urllib2.urlopen( 'http://build.chromium.org/p/tryserver.chromium.perf/json') builders = json.loads(f.read()).get('builders', {}).keys() builders = ['trybot-%s' % bot.replace('_perf_bisect', '').replace('_', '-') for bot in builders if bot not in EXCLUDED_BOTS] builders.extend(INCLUDE_BOTS) return sorted(builders) def _GetBuilderNames(browser_type): """ Return platform and its available bot name as dictionary.""" if 'all' not in browser_type: bot = ['%s_perf_bisect' % browser_type.replace( 'trybot-', '').replace('-', '_')] bot_platform = browser_type.split('-')[1] if 'x64' in browser_type: bot_platform += '-x64' return {bot_platform: bot} f = urllib2.urlopen( 'http://build.chromium.org/p/tryserver.chromium.perf/json') builders = json.loads(f.read()).get('builders', {}).keys() # Exclude unsupported bots like win xp and some dummy bots. 
builders = [bot for bot in builders if bot not in EXCLUDED_BOTS] platform_and_bots = {} for os_name in ['linux', 'android', 'mac', 'win']: platform_and_bots[os_name] = [bot for bot in builders if os_name in bot] # Special case for Windows x64, consider it as separate platform # config config should contain target_arch=x64 and --browser=release_x64. win_x64_bots = [platform_and_bots['win'].pop(i) for i, win_bot in enumerate(platform_and_bots['win']) if 'x64' in win_bot] platform_and_bots['win-x64'] = win_x64_bots if 'all-win' in browser_type: return {'win': platform_and_bots['win'], 'win-x64': platform_and_bots['win-x64']} if 'all-mac' in browser_type: return {'mac': platform_and_bots['mac']} if 'all-android' in browser_type: return {'android': platform_and_bots['android']} if 'all-linux' in browser_type: return {'linux': platform_and_bots['linux']} return platform_and_bots def FindAllBrowserTypes(finder_options): # Listing browsers requires an http request; only do this if the user is # running with browser=list or a browser=trybot-* argument. if (finder_options.browser_type and (finder_options.browser_type == 'list' or finder_options.browser_type.startswith('trybot'))): return _GetTrybotList() return [] def FindAllAvailableBrowsers(finder_options, device): """Find all perf trybots on tryserver.chromium.perf.""" if not isinstance(device, trybot_device.TrybotDevice): return [] return [PossibleTrybotBrowser(b, finder_options) for b in FindAllBrowserTypes(finder_options)]
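# --- Usage sketch (illustrative) ---
# How a trybot browser_type string maps to bisect builder names in
# _GetBuilderNames() above; 'trybot-win-x64' is only an example input.
def _ExampleBuilderNames():
  # Returns {'win-x64': ['win_x64_perf_bisect']} for the win-x64 trybot alias,
  # without hitting the tryserver JSON endpoint (non-'all' types short-circuit).
  return _GetBuilderNames('trybot-win-x64')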
# =============================================================================== # Copyright 2011 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # =============enthought library imports======================= from __future__ import absolute_import from chaco.api import AbstractOverlay, BaseTool # =============standard library imports ======================== from numpy import vstack from traits.api import Any, Str # =============local library imports ========================== class RectSelectionOverlay(AbstractOverlay): tool = Any def overlay(self, component, gc, *args, **kw): with gc: sp = self.tool._start_pos ep = self.tool._end_pos if sp and ep: x, y = sp x2, y2 = ep gc.set_fill_color([1, 0, 0, 0.25]) gc.set_stroke_color([1, 0, 0, 0.25]) gc.rect(x, y, x2 - x + 1, y2 - y + 1) gc.draw_path() class RectSelectionTool(BaseTool): """ """ filter_near_edge = False threshold = 5 hover_metadata_name = Str('hover') persistent_hover = False selection_metadata_name = Str('selections') group_id = 0 _start_pos = None _end_pos = None _cached_data = None def select_key_pressed(self, event): if event.character == 'Esc': self._end_select(event) def normal_mouse_enter(self, event): event.window.set_pointer('arrow') def normal_mouse_leave(self, event): event.window.set_pointer('arrow') def _get_selection_token(self, event): return self.component.map_index((event.x, event.y), threshold=self.threshold) def _already_selected(self, token): already = False plot = self.component for name in ('index', 'value'): if not hasattr(plot, name): continue md = getattr(plot, name).metadata if md is None or self.selection_metadata_name not in md: continue if token in md[self.selection_metadata_name]: already = True break return already def normal_left_dclick(self, event): if self._end_pos is None: self.component.index.metadata[self.selection_metadata_name] = [] elif abs(self._end_pos[0] - self._start_pos[0]) < 2 and \ abs(self._end_pos[1] - self._start_pos[1]) < 2: self.component.index.metadata[self.selection_metadata_name] = [] def normal_left_down(self, event): if not event.handled: token = self._get_selection_token(event) if token is None: if not self._near_edge(event): self._start_select(event) else: if self._already_selected(token): self._deselect_token(token) else: self._select_token(token) event.handled = True def select_mouse_leave(self, event): self._end_select(event) def _near_edge(self, event, tol=5): if self.filter_near_edge: ex = event.x ey = event.y x, x2 = self.component.x, self.component.x2 y, y2 = self.component.y, self.component.y2 if abs(ex - x) < tol or abs(ex - x2) < tol: return True elif abs(ey - y) < tol or abs(ey - y2) < tol: return True def _deselect_token(self, token): plot = self.component for name in ('index', 'value'): if not hasattr(plot, name): continue md = getattr(plot, name).metadata if self.selection_metadata_name in md: if token in md[self.selection_metadata_name]: new = md[self.selection_metadata_name][:] 
new.remove(token) md[self.selection_metadata_name] = new def _select_token(self, token, append=True): plot = self.component for name in ('index',): if not hasattr(plot, name): continue md = getattr(plot, name).metadata selection = md.get(self.selection_metadata_name, None) if selection is None: md[self.selection_metadata_name] = [token] else: if append: if token not in md[self.selection_metadata_name]: new_list = md[self.selection_metadata_name] + [token] md[self.selection_metadata_name] = new_list def select_left_up(self, event): self._update_selection() self._end_select(event) self.component.request_redraw() def select_mouse_move(self, event): self._end_pos = (event.x, event.y) self.component.request_redraw() def _update_selection(self): comp = self.component index = comp.index ind = [] if self._start_pos and self._end_pos: x, y = self._start_pos x2, y2 = self._end_pos # normalize points so that x,y is always upper left of selection box if y2 > y: y2, y = y, y2 if x2 < x: x2, x = x, x2 elif x2 < x: x2, x = x, x2 if abs(x - x2) > 3 and abs(y - y2) > 3: dx, dy = comp.map_data([x, y]) dx2, dy2 = comp.map_data([x2, y2]) data = self._cached_data if data is None: datax = index.get_data() datay = comp.value.get_data() data = vstack([datax, datay]).transpose() self._cached_data = data ind = [i for i, (xi, yi) in enumerate(data) if dx <= xi <= dx2 and dy2 <= yi <= dy] selection = index.metadata[self.selection_metadata_name] nind = list(set(ind) ^ set(selection)) index.metadata[self.selection_metadata_name] = nind def _end_select(self, event): self.event_state = 'normal' event.window.set_pointer('arrow') self._end_pos = None self.component.request_redraw() def _start_select(self, event): self._start_pos = (event.x, event.y) # self._end_pos = (event.x, event.y) self.event_state = 'select' event.window.set_pointer('cross') # ============= EOF =====================================
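# --- Usage sketch (illustrative) ---
# One plausible way to wire the tool and overlay above onto a Chaco scatter
# renderer; the ArrayPlotData/Plot scaffolding is standard Chaco, but the exact
# integration in the surrounding application may differ.
def _example_attach(x_values, y_values):
    from chaco.api import ArrayPlotData, Plot

    data = ArrayPlotData(x=x_values, y=y_values)
    container = Plot(data)
    scatter = container.plot(('x', 'y'), type='scatter')[0]

    tool = RectSelectionTool(component=scatter)
    overlay = RectSelectionOverlay(tool=tool, component=scatter)
    scatter.tools.append(tool)       # handles mouse events / selection state
    scatter.overlays.append(overlay) # draws the rubber-band rectangle
    return container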
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 # pylint: disable=ungrouped-imports """Dataset generator.""" __all__ = ['DataLoader'] import pickle import io import sys import signal import multiprocessing import multiprocessing.queues from multiprocessing.reduction import ForkingPickler from multiprocessing.pool import ThreadPool import threading import numpy as np try: import multiprocessing.resource_sharer except ImportError: pass from . import sampler as _sampler from ... import nd, context from ...util import is_np_shape, is_np_array, set_np from ... import numpy as _mx_np # pylint: disable=reimported if sys.platform == 'darwin' or sys.platform == 'win32': def rebuild_ndarray(*args): """Rebuild ndarray from pickled shared memory""" # pylint: disable=no-value-for-parameter return nd.NDArray(nd.ndarray._new_from_shared_mem(*args)) def reduce_ndarray(data): """Reduce ndarray to shared memory handle""" return rebuild_ndarray, data._to_shared_mem() else: def rebuild_ndarray(pid, fd, shape, dtype): """Rebuild ndarray from pickled shared memory""" # pylint: disable=no-value-for-parameter fd = fd.detach() return nd.NDArray(nd.ndarray._new_from_shared_mem(pid, fd, shape, dtype)) def reduce_ndarray(data): """Reduce ndarray to shared memory handle""" # keep a local ref before duplicating fd data = data.as_in_context(context.Context('cpu_shared', 0)) pid, fd, shape, dtype = data._to_shared_mem() fd = multiprocessing.reduction.DupFd(fd) return rebuild_ndarray, (pid, fd, shape, dtype) ForkingPickler.register(nd.NDArray, reduce_ndarray) if sys.platform == 'darwin' or sys.platform == 'win32': def rebuild_np_ndarray(*args): """Rebuild ndarray from pickled shared memory""" # pylint: disable=no-value-for-parameter return _mx_np.ndarray(nd.ndarray._new_from_shared_mem(*args)) def reduce_np_ndarray(data): """Reduce ndarray to shared memory handle""" return rebuild_np_ndarray, data._to_shared_mem() else: def rebuild_np_ndarray(pid, fd, shape, dtype): """Rebuild ndarray from pickled shared memory""" # pylint: disable=no-value-for-parameter fd = fd.detach() return _mx_np.ndarray(nd.ndarray._new_from_shared_mem(pid, fd, shape, dtype)) def reduce_np_ndarray(data): """Reduce ndarray to shared memory handle""" # keep a local ref before duplicating fd data = data.as_in_context(context.Context('cpu_shared', 0)) pid, fd, shape, dtype = data._to_shared_mem() fd = multiprocessing.reduction.DupFd(fd) return rebuild_np_ndarray, (pid, fd, shape, dtype) ForkingPickler.register(_mx_np.ndarray, reduce_np_ndarray) class ConnectionWrapper(object): """Connection wrapper for multiprocessing that supports sending NDArray via shared memory.""" def __init__(self, conn): self._conn = conn def send(self, obj): """Send object""" buf = io.BytesIO() ForkingPickler(buf, 
pickle.HIGHEST_PROTOCOL).dump(obj) self.send_bytes(buf.getvalue()) def recv(self): """Receive object""" buf = self.recv_bytes() return pickle.loads(buf) def __getattr__(self, name): """Emmulate conn""" attr = self.__dict__.get('_conn', None) return getattr(attr, name) class Queue(multiprocessing.queues.Queue): """Wrapper for multiprocessing queue that dumps NDArray with shared memory.""" def __init__(self, *args, **kwargs): super().__init__(*args, ctx=multiprocessing.get_context(), **kwargs) self._reader = ConnectionWrapper(self._reader) self._writer = ConnectionWrapper(self._writer) self._send = self._writer.send self._recv = self._reader.recv class SimpleQueue(multiprocessing.queues.SimpleQueue): """Wrapper for multiprocessing SimpleQueue that dumps NDArray with shared memory. SimpleQueue don't use threading internally. """ def __init__(self, *args, **kwargs): super().__init__(*args, ctx=multiprocessing.get_context(), **kwargs) self._reader = ConnectionWrapper(self._reader) self._writer = ConnectionWrapper(self._writer) self._send = self._writer.send self._recv = self._reader.recv def default_batchify_fn(data): """Collate data into batch.""" if isinstance(data[0], nd.NDArray): return _mx_np.stack(data) if is_np_array() else nd.stack(*data) elif isinstance(data[0], tuple): data = zip(*data) return [default_batchify_fn(i) for i in data] else: data = np.asarray(data) array_fn = _mx_np.array if is_np_array() else nd.array return array_fn(data, dtype=data.dtype) def default_mp_batchify_fn(data): """Collate data into batch. Use shared memory for stacking.""" if isinstance(data[0], nd.NDArray): empty_fn = _mx_np.empty if is_np_array() else nd.empty out = empty_fn((len(data),) + data[0].shape, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0)) if is_np_array(): return _mx_np.stack(data, out=out) else: return nd.stack(*data, out=out) elif isinstance(data[0], tuple): data = zip(*data) return [default_mp_batchify_fn(i) for i in data] else: data = np.asarray(data) array_fn = _mx_np.array if is_np_array() else nd.array return array_fn(data, dtype=data.dtype, ctx=context.Context('cpu_shared', 0)) def _as_in_context(data, ctx): """Move data into new context.""" if isinstance(data, nd.NDArray): return data.as_in_context(ctx) elif isinstance(data, (list, tuple)): return [_as_in_context(d, ctx) for d in data] return data def worker_loop_v1(dataset, key_queue, data_queue, batchify_fn): """Worker loop for multiprocessing DataLoader.""" while True: idx, samples = key_queue.get() if idx is None: break batch = batchify_fn([dataset[i] for i in samples]) data_queue.put((idx, batch)) def fetcher_loop_v1(data_queue, data_buffer, pin_memory=False, pin_device_id=0, data_buffer_lock=None): """Fetcher loop for fetching data from queue and put in reorder dict.""" while True: idx, batch = data_queue.get() if idx is None: break if pin_memory: batch = _as_in_context(batch, context.cpu_pinned(pin_device_id)) else: batch = _as_in_context(batch, context.cpu()) if data_buffer_lock is not None: with data_buffer_lock: data_buffer[idx] = batch else: data_buffer[idx] = batch class _MultiWorkerIterV1(object): """Internal multi-worker iterator for DataLoader.""" def __init__(self, num_workers, dataset, batchify_fn, batch_sampler, pin_memory=False, pin_device_id=0, worker_fn=worker_loop_v1): assert num_workers > 0, "_MultiWorkerIter is not for {} workers".format(num_workers) self._num_workers = num_workers self._dataset = dataset self._batchify_fn = batchify_fn self._batch_sampler = batch_sampler self._key_queue = Queue() 
self._data_queue = SimpleQueue() self._data_buffer = {} self._data_buffer_lock = threading.Lock() self._rcvd_idx = 0 self._sent_idx = 0 self._iter = iter(self._batch_sampler) self._shutdown = False workers = [] for _ in range(self._num_workers): worker = multiprocessing.Process( target=worker_fn, args=(self._dataset, self._key_queue, self._data_queue, self._batchify_fn)) worker.daemon = True worker.start() workers.append(worker) self._workers = workers self._fetcher = threading.Thread( target=fetcher_loop_v1, args=(self._data_queue, self._data_buffer, pin_memory, pin_device_id, self._data_buffer_lock)) self._fetcher.daemon = True self._fetcher.start() # pre-fetch for _ in range(2 * self._num_workers): self._push_next() def __len__(self): return len(self._batch_sampler) def __del__(self): self.shutdown() def _push_next(self): """Assign next batch workload to workers.""" r = next(self._iter, None) if r is None: return self._key_queue.put((self._sent_idx, r)) self._sent_idx += 1 def __next__(self): assert not self._shutdown, "call __next__ after shutdown is forbidden" if self._rcvd_idx == self._sent_idx: assert not self._data_buffer, "Data buffer should be empty at this moment" self.shutdown() raise StopIteration while True: if self._rcvd_idx in self._data_buffer: with self._data_buffer_lock: batch = self._data_buffer.pop(self._rcvd_idx) self._rcvd_idx += 1 self._push_next() return batch def next(self): return self.__next__() def __iter__(self): return self def shutdown(self): """Shutdown internal workers by pushing terminate signals.""" if not self._shutdown: # send shutdown signal to the fetcher and join data queue first # Remark: loop_fetcher need to be joined prior to the workers. # otherwise, the fetcher may fail at getting data self._data_queue.put((None, None)) self._fetcher.join() # send shutdown signal to all worker processes for _ in range(self._num_workers): self._key_queue.put((None, None)) # force shut down any alive worker processes for w in self._workers: if w.is_alive(): w.terminate() self._shutdown = True class DataLoaderV1(object): """Loads data from a dataset and returns mini-batches of data. Parameters ---------- dataset : Dataset Source dataset. Note that numpy and mxnet arrays can be directly used as a Dataset. batch_size : int Size of mini-batch. shuffle : bool Whether to shuffle the samples. sampler : Sampler The sampler to use. Either specify sampler or shuffle, not both. last_batch : {'keep', 'discard', 'rollover'} How to handle the last batch if batch_size does not evenly divide `len(dataset)`. keep - A batch with less samples than previous batches is returned. discard - The last batch is discarded if its incomplete. rollover - The remaining samples are rolled over to the next epoch. batch_sampler : Sampler A sampler that returns mini-batches. Do not specify batch_size, shuffle, sampler, and last_batch if batch_sampler is specified. batchify_fn : callable Callback function to allow users to specify how to merge samples into a batch. Defaults to `default_batchify_fn`:: def default_batchify_fn(data): if isinstance(data[0], nd.NDArray): return nd.stack(*data) elif isinstance(data[0], tuple): data = zip(*data) return [default_batchify_fn(i) for i in data] else: data = np.asarray(data) return nd.array(data, dtype=data.dtype) num_workers : int, default 0 The number of multiprocessing workers to use for data preprocessing. pin_memory : boolean, default False If ``True``, the dataloader will copy NDArrays into pinned memory before returning them. 
Copying from CPU pinned memory to GPU is faster than from normal CPU memory. pin_device_id : int, default 0 The device id to use for allocating pinned memory if pin_memory is ``True`` """ def __init__(self, dataset, batch_size=None, shuffle=False, sampler=None, last_batch=None, batch_sampler=None, batchify_fn=None, num_workers=0, pin_memory=False, pin_device_id=0): self._dataset = dataset self._pin_memory = pin_memory self._pin_device_id = pin_device_id if batch_sampler is None: if batch_size is None: raise ValueError("batch_size must be specified unless " \ "batch_sampler is specified") if sampler is None: if shuffle: sampler = _sampler.RandomSampler(len(dataset)) else: sampler = _sampler.SequentialSampler(len(dataset)) elif shuffle: raise ValueError("shuffle must not be specified if sampler is specified") batch_sampler = _sampler.BatchSampler( sampler, batch_size, last_batch if last_batch else 'keep') elif batch_size is not None or shuffle or sampler is not None or \ last_batch is not None: raise ValueError("batch_size, shuffle, sampler and last_batch must " \ "not be specified if batch_sampler is specified.") self._batch_sampler = batch_sampler self._num_workers = num_workers if num_workers >= 0 else 0 if batchify_fn is None: if num_workers > 0: self._batchify_fn = default_mp_batchify_fn else: self._batchify_fn = default_batchify_fn else: self._batchify_fn = batchify_fn def __iter__(self): if self._num_workers == 0: def same_process_iter(): for batch in self._batch_sampler: ret = self._batchify_fn([self._dataset[idx] for idx in batch]) if self._pin_memory: ret = _as_in_context(ret, context.cpu_pinned(self._pin_device_id)) yield ret return same_process_iter() # multi-worker return _MultiWorkerIterV1(self._num_workers, self._dataset, self._batchify_fn, self._batch_sampler, self._pin_memory, self._pin_device_id) def __len__(self): return len(self._batch_sampler) def _thread_worker_initializer(active_shape, active_array): """Initializer for ThreadPool.""" set_np(shape=active_shape, array=active_array) _worker_dataset = None def _worker_initializer(dataset, active_shape, active_array): """Initialier for processing pool.""" # global dataset is per-process based and only available in worker processes # this is only necessary to handle MXIndexedRecordIO because otherwise dataset # can be passed as argument global _worker_dataset _worker_dataset = dataset set_np(shape=active_shape, array=active_array) def _worker_fn(samples, batchify_fn, dataset=None): """Function for processing data in worker process.""" # pylint: disable=unused-argument # it is required that each worker process has to fork a new MXIndexedRecordIO handle # preserving dataset as global variable can save tons of overhead and is safe in new process global _worker_dataset batch = batchify_fn([_worker_dataset[i] for i in samples]) buf = io.BytesIO() ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch) return buf.getvalue() def _thread_worker_fn(samples, batchify_fn, dataset): """Threadpool worker function for processing data.""" return batchify_fn([dataset[i] for i in samples]) class _MultiWorkerIter(object): """Internal multi-worker iterator for DataLoader.""" def __init__(self, worker_pool, batchify_fn, batch_sampler, pin_memory=False, pin_device_id=0, worker_fn=_worker_fn, prefetch=0, dataset=None, data_loader=None, timeout=120): self._worker_pool = worker_pool self._batchify_fn = batchify_fn self._batch_sampler = batch_sampler self._data_buffer = {} self._rcvd_idx = 0 self._sent_idx = 0 self._iter = iter(self._batch_sampler) 
self._worker_fn = worker_fn self._pin_memory = pin_memory self._pin_device_id = pin_device_id self._dataset = dataset self._data_loader = data_loader self._timeout = timeout # pre-fetch for _ in range(prefetch): self._push_next() def __len__(self): return len(self._batch_sampler) def _push_next(self): """Assign next batch workload to workers.""" r = next(self._iter, None) if r is None: return async_ret = self._worker_pool.apply_async( self._worker_fn, (r, self._batchify_fn, self._dataset)) self._data_buffer[self._sent_idx] = async_ret self._sent_idx += 1 def __next__(self): self._push_next() if self._rcvd_idx == self._sent_idx: assert not self._data_buffer, "Data buffer should be empty at this moment" raise StopIteration assert self._rcvd_idx < self._sent_idx, "rcvd_idx must be smaller than sent_idx" assert self._rcvd_idx in self._data_buffer, "fatal error with _push_next, rcvd_idx missing" ret = self._data_buffer.pop(self._rcvd_idx) try: if self._dataset is None: batch = pickle.loads(ret.get(self._timeout)) else: batch = ret.get(self._timeout) if self._pin_memory: batch = _as_in_context(batch, context.cpu_pinned(self._pin_device_id)) self._rcvd_idx += 1 return batch except multiprocessing.context.TimeoutError: msg = '''Worker timed out after {} seconds. This might be caused by \n - Slow transform. Please increase timeout to allow slower data loading in each worker. '''.format(self._timeout) if not isinstance(self._worker_pool, multiprocessing.pool.ThreadPool): msg += '''- Insufficient shared_memory if `timeout` is large enough. Please consider reduce `num_workers` or increase shared_memory in system. ''' print(msg) raise except Exception: self._worker_pool.terminate() raise def next(self): return self.__next__() def __iter__(self): return self class DataLoader(object): """Loads data from a dataset and returns mini-batches of data. Parameters ---------- dataset : Dataset Source dataset. Note that numpy and mxnet arrays can be directly used as a Dataset. batch_size : int Size of mini-batch. shuffle : bool Whether to shuffle the samples. sampler : Sampler The sampler to use. Either specify sampler or shuffle, not both. last_batch : {'keep', 'discard', 'rollover'} How to handle the last batch if batch_size does not evenly divide `len(dataset)`. keep - A batch with less samples than previous batches is returned. discard - The last batch is discarded if its incomplete. rollover - The remaining samples are rolled over to the next epoch. batch_sampler : Sampler A sampler that returns mini-batches. Do not specify batch_size, shuffle, sampler, and last_batch if batch_sampler is specified. batchify_fn : callable Callback function to allow users to specify how to merge samples into a batch. Defaults to `default_batchify_fn`:: def default_batchify_fn(data): if isinstance(data[0], nd.NDArray): return nd.stack(*data) elif isinstance(data[0], tuple): data = zip(*data) return [default_batchify_fn(i) for i in data] else: data = np.asarray(data) return nd.array(data, dtype=data.dtype) num_workers : int, default 0 The number of multiprocessing workers to use for data preprocessing. pin_memory : boolean, default False If ``True``, the dataloader will copy NDArrays into pinned memory before returning them. Copying from CPU pinned memory to GPU is faster than from normal CPU memory. pin_device_id : int, default 0 The device id to use for allocating pinned memory if pin_memory is ``True`` prefetch : int, default is `num_workers * 2` The number of prefetching batches only works if `num_workers` > 0. 
If `prefetch` > 0, it allow worker process to prefetch certain batches before acquiring data from iterators. Note that using large prefetching batch will provide smoother bootstrapping performance, but will consume more shared_memory. Using smaller number may forfeit the purpose of using multiple worker processes, try reduce `num_workers` in this case. By default it defaults to `num_workers * 2`. thread_pool : bool, default False If ``True``, use threading pool instead of multiprocessing pool. Using threadpool can avoid shared memory usage. If `DataLoader` is more IO bounded or GIL is not a killing problem, threadpool version may achieve better performance than multiprocessing. timeout : int, default is 120 The timeout in seconds for each worker to fetch a batch data. Only modify this number unless you are experiencing timeout and you know it's due to slow data loading. Sometimes full `shared_memory` will cause all workers to hang and causes timeout. In these cases please reduce `num_workers` or increase system `shared_memory` size instead. """ def __init__(self, dataset, batch_size=None, shuffle=False, sampler=None, last_batch=None, batch_sampler=None, batchify_fn=None, num_workers=0, pin_memory=False, pin_device_id=0, prefetch=None, thread_pool=False, timeout=120): self._dataset = dataset self._pin_memory = pin_memory self._pin_device_id = pin_device_id self._thread_pool = thread_pool self._timeout = timeout assert timeout > 0, "timeout must be positive, given {}".format(timeout) if batch_sampler is None: if batch_size is None: raise ValueError("batch_size must be specified unless " \ "batch_sampler is specified") if sampler is None: if shuffle: sampler = _sampler.RandomSampler(len(dataset)) else: sampler = _sampler.SequentialSampler(len(dataset)) elif shuffle: raise ValueError("shuffle must not be specified if sampler is specified") batch_sampler = _sampler.BatchSampler( sampler, batch_size, last_batch if last_batch else 'keep') elif batch_size is not None or shuffle or sampler is not None or \ last_batch is not None: raise ValueError("batch_size, shuffle, sampler and last_batch must " \ "not be specified if batch_sampler is specified.") self._batch_sampler = batch_sampler self._num_workers = num_workers if num_workers >= 0 else 0 self._worker_pool = None self._prefetch = max(0, int(prefetch) if prefetch is not None else 2 * self._num_workers) if self._num_workers > 0: if self._thread_pool: self._worker_pool = ThreadPool(self._num_workers, initializer=_thread_worker_initializer, initargs=(is_np_shape(), is_np_array())) else: # set ignore keyboard interupt signal before forking processes original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) self._worker_pool = multiprocessing.Pool( self._num_workers, initializer=_worker_initializer, initargs=[self._dataset, is_np_shape(), is_np_array()]) # resume keyboard interupt signal in main process signal.signal(signal.SIGINT, original_sigint_handler) if batchify_fn is None: if num_workers > 0: self._batchify_fn = default_mp_batchify_fn else: self._batchify_fn = default_batchify_fn else: self._batchify_fn = batchify_fn def __iter__(self): if self._num_workers == 0: def same_process_iter(): for batch in self._batch_sampler: ret = self._batchify_fn([self._dataset[idx] for idx in batch]) if self._pin_memory: ret = _as_in_context(ret, context.cpu_pinned(self._pin_device_id)) yield ret return same_process_iter() # multi-worker return _MultiWorkerIter(self._worker_pool, self._batchify_fn, self._batch_sampler, pin_memory=self._pin_memory, 
pin_device_id=self._pin_device_id, worker_fn=_thread_worker_fn if self._thread_pool else _worker_fn, prefetch=self._prefetch, dataset=self._dataset if self._thread_pool else None, data_loader=self, timeout=self._timeout) def __len__(self): return len(self._batch_sampler) def __del__(self): if self._worker_pool: # manually terminate due to a bug that pool is not automatically terminated # https://bugs.python.org/issue34172 assert isinstance(self._worker_pool, multiprocessing.pool.Pool) self._worker_pool.terminate()
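# --- Usage sketch (illustrative) ---
# Minimal use of the DataLoader defined above with an in-memory dataset;
# assumes the standard mxnet.gluon.data package layout, where ArrayDataset
# lives alongside this module. On Windows, multi-worker loading should be
# driven from under an ``if __name__ == '__main__'`` guard.
def _example_dataloader():
    from mxnet import nd
    from mxnet.gluon.data import ArrayDataset, DataLoader

    features = nd.random.uniform(shape=(10, 3))
    labels = nd.arange(10)
    dataset = ArrayDataset(features, labels)

    # two worker processes, batches of 4, incomplete last batch kept
    loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
    for X, y in loader:
        print(X.shape, y.shape)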
""" Swaggy Jenkins Jenkins API clients generated from Swagger / Open API specification # noqa: E501 The version of the OpenAPI document: 1.1.2-pre.0 Contact: blah@cliffano.com Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from swaggyjenkins.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from swaggyjenkins.exceptions import ApiAttributeError def lazy_import(): from swaggyjenkins.model.user import User globals()['User'] = User class Users(ModelSimple): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'value': ([User],), } @cached_property def discriminator(): return None attribute_map = {} read_only_vars = set() _composed_schemas = None required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): """Users - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. Args: args[0] ([User]): # noqa: E501 Keyword Args: value ([User]): # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): """Users - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. Args: args[0] ([User]): # noqa: E501 Keyword Args: value ([User]): # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) self = super(OpenApiModel, cls).__new__(cls) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) return self
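# --- Usage sketch (illustrative) ---
# The generated Users model wraps a list of User instances; a minimal
# construction, assuming the User objects have already been built or
# deserialized elsewhere in the generated client.
def _example_users(existing_users):
    # existing_users: a list of swaggyjenkins.model.user.User instances
    users = Users(existing_users)  # positional 'value' argument
    return users.value             # the wrapped [User] list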
#coding=utf-8 import math from nose.tools import (assert_equals, assert_dict_equal, assert_not_equals, assert_tuple_equal, assert_true, assert_false, assert_list_equal, assert_is_none, assert_items_equal, assert_almost_equal, raises) import ssdb from ssdb.connection import Connection,ConnectionPool,BlockingConnectionPool from ssdb.client import SSDB class TestClient(object): def setUp(self): pool = BlockingConnectionPool( connection_class=Connection, max_connections=2, timeout=5, host = '127.0.0.1', port = 8888) self.client = SSDB(connection_pool=pool) #self.client = SSDB(host='127.0.0.1', port=8888) print('set UP') def tearDown(self): print('tear down') def test_get(self): a = self.client.set('get_test','321') assert_true(a) b = self.client.get('get_test') assert_equals(b,'321') c = self.client.delete('get_test') assert_true(c) d = self.client.get('get_none') assert_is_none(d) def test_set(self): a = self.client.set('set_test','123') assert_true(a) c = self.client.exists('set_test') assert_true(c) b = self.client.delete('set_test') assert_true(b) c = self.client.exists('set_test') assert_false(c) def test_getset(self): self.client.delete('getset_test') a = self.client.getset('getset_test','abc') assert_is_none(a) b = self.client.get('getset_test') assert_equals(b,'abc') c = self.client.set('getset_test','abc') assert_true(c) d = self.client.getset('getset_test','defg') assert_equals(d,'abc') e = self.client.getset('getset_test','hijk') assert_equals(e,'defg') f = self.client.delete('getset_test') assert_true(f) g = self.client.exists('getset_test') assert_false(g) def test_setnx(self): self.client.delete('setnx_test') a = self.client.setnx('setnx_test','abc') assert_true(a) b = self.client.get('setnx_test') assert_equals(b,'abc') c = self.client.setnx('setnx_test','def') assert_false(c) f = self.client.delete('setnx_test') assert_true(f) g = self.client.exists('setnx_test') assert_false(g) def test_bit(self): self.client.delete('bit_test') self.client.set('bit_test',1) a = self.client.countbit('bit_test') assert_equals(a,3) a = self.client.setbit('bit_test', 1, 1) assert_false(a) a = self.client.getbit('bit_test', 1) assert_true(a) b = self.client.get('bit_test') assert_equals(b,'3') c = self.client.setbit('bit_test', 2, 1) assert_false(c) b = self.client.get('bit_test') assert_equals(b,'7') c = self.client.setbit('bit_test', 2, 0) assert_true(c) c = self.client.getbit('bit_test', 2) assert_false(c) c = self.client.set('bit_test', '1234567890') c = self.client.countbit('bit_test', 0, 1) assert_equals(c,3) c = self.client.countbit('bit_test', 3, -3) assert_equals(c,16) f = self.client.delete('bit_test') assert_true(f) def test_str(self): self.client.delete('str_test') self.client.set('str_test',"abc12345678") a = self.client.substr('str_test', 2, 4) assert_equals(a, "c123") a = self.client.substr('str_test', -2, 2) assert_equals(a, "78") a = self.client.substr('str_test', 1, -1) assert_equals(a, "bc1234567") a = self.client.strlen('str_test') assert_equals(a, 11) f = self.client.delete('str_test') assert_true(f) @raises(ValueError) def test_incr(self): a = self.client.delete('incr0') assert_true(a) a = self.client.set('incr0',10) assert_true(a) a = self.client.get('incr0') assert_equals(a,'10') a = self.client.incr('incr0',2) assert_equals(a,12) a = self.client.incr('incr0',-2) assert_equals(a,10) b = self.client.get('incr0') assert_equals(int(b),a) a = self.client.delete('incr0') assert_true(a) c = self.client.incr('incr0', 'abc') @raises(ValueError) def test_decr(self): a = 
self.client.delete('decr0') assert_true(a) a = self.client.set('decr0',10) assert_true(a) a = self.client.get('decr0') assert_equals(a,'10') a = self.client.decr('decr0',3) assert_equals(a,7) b = self.client.get('decr0') assert_equals(int(b),a) a = self.client.delete('decr0') assert_true(a) c = self.client.decr('decr0', -2) def test_multi_set(self): params = { 'aa':1, 'bb':2, 'cc':3, 'dd':4, } a = self.client.multi_set(**params) assert_equals(a,4) b = self.client.get('aa') assert_equals(b,'1') b = self.client.get('bb') assert_equals(b,'2') b = self.client.get('cc') assert_equals(b,'3') b = self.client.get('dd') assert_equals(b,'4') d = self.client.delete('aa') assert_true(d) d = self.client.delete('bb') assert_true(d) d = self.client.delete('cc') assert_true(d) d = self.client.delete('dd') assert_true(d) def test_multi_get(self): params = { 'aa': 'a1', 'bb': 'b2', 'cc': 'c3', 'dd': 'd4', } a = self.client.multi_set(**params) assert_equals(a,len(params)) r = self.client.multi_get(*params.keys()) assert_dict_equal(r,params) d = self.client.multi_del(*params.keys()) assert_equals(d,len(params)) def test_keys(self): params = { 'uuu0': 'a1', 'uuu1': 'b2', 'uuu2': 'c3', 'uuu3': 'd4', 'uuu4': 'e5', 'uuu5': 'f6', 'uuu6': 'g7', 'uuu7': 'h8', } a = self.client.multi_set(**params) assert_equals(a,len(params)) b = self.client.keys('uuu ','uuuu',10) assert_items_equal(b,params.keys()) d = self.client.multi_del(*params.keys()) assert_equals(d,len(params)) def test_scan(self): keys = [ 'zzz0', 'zzz1', 'zzz2', 'zzz3', 'zzz4', 'zzz5', 'zzz6', 'zzz7' ] values = [ 'a1', 'b2', 'c3', 'd4', 'e5', 'f6', 'g7', 'h8' ] params = {} for i in range(len(keys)): params[keys[i]] = values[i] a = self.client.multi_set(**params) assert_equals(a,len(params)) b = self.client.scan('zzz ','zzzz',10) assert_dict_equal(b,params) index = 0 for k,v in b.items(): assert_equals(k, keys[index]) assert_equals(v, values[index]) index += 1 d = self.client.multi_del(*params.keys()) assert_equals(d,len(params)) def test_rscan(self): keys = [ 'zzzz0', 'zzzz1', 'zzzz2', 'zzzz3', 'zzzz4', 'zzzz5', 'zzzz6', 'zzzz7' ] values = [ 'aa1', 'bb2', 'cc3', 'dd4', 'ee5', 'ff6', 'gg7', 'hh8' ] params = {} for i in range(len(keys)): params[keys[i]] = values[i] a = self.client.multi_set(**params) assert_equals(a,len(params)) b = self.client.rscan('zzzzz','zzzz ',10) assert_dict_equal(b,params) index = 0 c = len(keys) for k,v in b.items(): assert_equals(k, keys[c-index-1]) assert_equals(v, values[c-index-1]) index += 1 d = self.client.multi_del(*params.keys()) assert_equals(d,len(params)) def test_hset(self): a = self.client.hset('test_hset', 'keya', 'abc123') assert_true(a) a = self.client.hset('test_hset', 'keyb', 'def456') assert_true(a) b = self.client.hget('test_hset', 'keya') assert_equals(b, 'abc123') b = self.client.hget('test_hset', 'keyb') assert_equals(b, 'def456') d = self.client.hclear('test_hset') assert_true(d) def test_hdel(self): a = self.client.hset('test_hdel', 'keya', 'abc123') assert_true(a) a = self.client.hset('test_hdel', 'keyb', 'def456') assert_true(a) e = self.client.hexists('test_hdel', 'keya') assert_true(e) e = self.client.hexists('test_hdel', 'keyb') assert_true(e) b = self.client.hexists('test_hdel', 'keyc') assert_false(b) b = self.client.hdel('test_hdel', 'keya') assert_true(b) b = self.client.hdel('test_hdel', 'keyb') assert_true(b) b = self.client.hexists('test_hdel', 'keyb') assert_false(b) c = self.client.hget('test_hdel', 'keya') assert_is_none(c) b = self.client.hget('test_hdel', 'keyb') assert_is_none(c) #d = 
self.client.hclear('test_hdel') #assert_false(d) def test_hincr(self): self.client.hclear('test_counter') a = self.client.hset('test_counter', 'hincr', 100) assert_true(a) b = self.client.hincr('test_counter', 'hincr', 10) assert_equals(b, 110) b = self.client.hincr('test_counter', 'hincr') assert_equals(b, 111) b = self.client.hdecr('test_counter', 'hincr', 10) assert_equals(b, 101) b = self.client.hdecr('test_counter', 'hincr') assert_equals(b, 100) d = self.client.hclear('test_counter') assert_true(d) def test_hsize(self): b = self.client.hsize('test_hsize') assert_equals(b, 0) a = self.client.hset('test_hsize', 'a', 'a1') assert_true(a) a = self.client.hset('test_hsize', 'b', 'b1') assert_true(a) a = self.client.hset('test_hsize', 'c', 'c1') assert_true(a) b = self.client.hsize('test_hsize') assert_equals(b, 3) d = self.client.hclear('test_hsize') assert_true(d) def test_hgetall(self): self.client.hclear('test_hgetall') self.client.delete('test_hgetall') dct = { 'a':"AA", 'b':"BB", 'c':"CC", 'd':"DD" } a = self.client.multi_hset('test_hgetall', **dct) assert_equals(a,4) a = self.client.hgetall('test_hgetall') assert_dict_equal(a,dct) b = self.client.delete('test_hgetall') d = self.client.hclear('test_hgetall') assert_true(d) self.client.delete('test_hgetall') def test_hmulti(self): params = { 'uuu0': 'a1', 'uuu1': 'b2', 'uuu2': 'c3', 'uuu3': 'd4', 'uuu4': 'e5', 'uuu5': 'f6', 'uuu6': 'g7', 'uuu7': 'h8', } a = self.client.multi_hset('multi', **params) assert_equals(a,8) a1 = self.client.hget('multi', 'uuu0') assert_equals(a1, 'a1') b2 = self.client.hget('multi', 'uuu1') assert_equals(b2, 'b2') c3 = self.client.hget('multi', 'uuu2') assert_equals(c3, 'c3') d4 = self.client.hget('multi', 'uuu3') assert_equals(d4, 'd4') e5 = self.client.hget('multi', 'uuu4') assert_equals(e5, 'e5') f6 = self.client.hget('multi', 'uuu5') assert_equals(f6, 'f6') g7 = self.client.hget('multi', 'uuu6') assert_equals(g7, 'g7') h8 = self.client.hget('multi', 'uuu7') assert_equals(h8, 'h8') keys = self.client.hkeys('multi', 'uuu ', 'uuuu', 10) assert_items_equal(keys,params.keys()) kvs = self.client.multi_hget('multi', 'uuu0', 'uuu7') assert_dict_equal(kvs,{ "uuu0": 'a1', "uuu7": 'h8', }) kvs = self.client.hscan('multi', 'uuu ', 'uuuu', 10) assert_dict_equal(kvs, params) kvs = self.client.hrscan('multi', 'uuu4', 'uuu0', 10) assert_dict_equal(kvs,{ "uuu3": 'd4', "uuu2": 'c3', "uuu1": 'b2', "uuu0": 'a1', }) r = self.client.multi_hget('multi', *params.keys()) assert_dict_equal(r,params) d = self.client.multi_hdel('multi', *params.keys()) assert_equals(d,len(params)) #d = self.client.hclear('multi') #assert_true(d) def test_hlist(self): params = { 'hash_a': { 'a': 1, 'b': 2, 'c': 3, 'd': 4, }, 'hash_b': { 'h': 11, 'i': 12, 'j': 13, 'k': 14, }, 'hash_c': { 'o': 21, 'p': 22, 'q': 23, }, 'hash_d': { 'r': 31, 's': 32, 't': 33, }, } for k,v in params.items(): a = self.client.hclear(k) for k,v in params.items(): a = self.client.multi_hset(k, **v) assert_equals(a,len(v)) c = self.client.hlist('hash_ ', 'hash_z', 10) assert_items_equal(c,params.keys()) lst = ['hash_a','hash_b','hash_c','hash_d'] for index,item in enumerate(c): assert_equals(item,lst[index]) c = self.client.hrlist('hash_z', 'hash_ ', 10) lst.reverse() for index,item in enumerate(c): assert_equals(item,lst[index]) for k,v in params.items(): a = self.client.hclear(k) assert_true(a) def test_zset(self): params = { 'zset_a': { 'a': 1, 'b': 2, 'c': 3, 'd': 4, }, 'zset_b': { 'h': 11, 'i': 12, 'j': 13, 'k': 14, }, 'zset_c': { 'o': 21, 'p': 22, 'q': 23, }, 'zset_d': { 
'r': 31, 's': 32, 't': 33, }, } for k,v in params.items(): a = self.client.multi_zset(k, **v) assert_equals(a, len(v)) a = self.client.zlist('zset_ ', 'zset_z', 10) assert_items_equal(a,params.keys()) a = self.client.zkeys('zset_b', 'h', 11, 20, 10) zset_b = params['zset_b'].copy() zset_b.pop('h') assert_items_equal(a,zset_b.keys()) a = self.client.zscan('zset_a', 'a', 1, 3, 10) zset_a = params['zset_a'].copy() zset_a.pop('a') zset_a.pop('d') assert_dict_equal(a, zset_a) a = self.client.zrscan('zset_a', 'd', 4, 1, 10) zset_a['a'] = params['zset_a']['a'] assert_dict_equal(a, zset_a) a = self.client.zrank('zset_a', 'a') assert_equals(a, 0) a = self.client.zrank('zset_a', 'b') assert_equals(a, 1) a = self.client.zrank('zset_a', 'c') assert_equals(a, 2) a = self.client.zrank('zset_a', 'd') assert_equals(a, 3) a = self.client.zrrank('zset_a', 'd') assert_equals(a, 0) a = self.client.zrrank('zset_a', 'c') assert_equals(a, 1) a = self.client.zrrank('zset_a', 'b') assert_equals(a, 2) a = self.client.zrrank('zset_a', 'a') assert_equals(a, 3) a = self.client.zrange('zset_b', 0, 2) zset_b = params['zset_b'].copy() zset_b.pop('j') zset_b.pop('k') assert_dict_equal(a, zset_b) a = self.client.zrange('zset_b', 2, 2) zset_b = params['zset_b'].copy() zset_b.pop('h') zset_b.pop('i') assert_dict_equal(a, zset_b) a = self.client.zrrange('zset_b', 0, 2) zset_b = params['zset_b'].copy() zset_b.pop('h') zset_b.pop('i') assert_dict_equal(a, zset_b) a = self.client.zrrange('zset_b', 2, 2) zset_b = params['zset_b'].copy() zset_b.pop('j') zset_b.pop('k') assert_dict_equal(a, zset_b) for k,v in params.items(): a = self.client.multi_zget(k, *v.keys()) assert_dict_equal(a, v) for k,v in params.items(): d = self.client.multi_zdel(k, *v.keys()) assert_equals(d, len(v)) for k,v in params['zset_a'].items(): a = self.client.zset('zset_a', k, v) assert_true(a) for k,v in params['zset_a'].items(): a = self.client.zget('zset_a', k) assert_equals(a,v) for k,v in params['zset_a'].items(): a = self.client.zdel('zset_a', k) assert_true(a) for k,v in params['zset_b'].items(): a = self.client.zset('zset_b', k, v) assert_true(a) for k,v in params['zset_b'].items(): a = self.client.zexists('zset_b', k) assert_true(a) a = self.client.zexists('zset_b', k+"1") assert_false(a) c = self.client.zsize('zset_b') assert_equals(c, len(params['zset_b'])) c = self.client.zincr('zset_b', 'h', 3) assert_equals(c, params['zset_b']['h']+3) c = self.client.zdecr('zset_b', 'h', 5) assert_equals(c, params['zset_b']['h']-5+3) d = self.client.zclear('zset_b') assert_true(d) def test_zset(self): zset_1 = { 'a': 30, 'b': 20, 'c': 100, 'd': 1, 'e': 64, 'f': -3, 'g': 0 } self.client.zclear('zset_1') self.client.delete('zset_1') a = self.client.multi_zset('zset_1', **zset_1) assert_equals(a, len(zset_1)) b = self.client.zcount('zset_1', 20, 70) assert_equals(b, 3) c = self.client.zcount('zset_1', 0, 100) assert_equals(c, 6) d = self.client.zcount('zset_1', 2, 3) assert_equals(d, 0) b = self.client.zsum('zset_1', 20, 70) assert_equals(b, 114) c = self.client.zsum('zset_1', 0, 100) assert_equals(c, 215) d = self.client.zsum('zset_1', 2, 3) assert_equals(d, 0) b = self.client.zavg('zset_1', 20, 70) assert_equals(b, 38.0) c = self.client.zavg('zset_1', 0, 100) assert_equals(round(abs(c-215.0/6),4),0) d = self.client.zavg('zset_1', 2, 3) assert_true(math.isnan(float('nan'))) b = self.client.zremrangebyrank('zset_1', 0, 2) assert_equals(b, 3) b = self.client.zremrangebyrank('zset_1', 1, 2) assert_equals(b, 2) a = self.client.multi_zset('zset_1', **zset_1) b = 
self.client.zremrangebyscore('zset_1', 20, 70) assert_equals(b, 3) b = self.client.zremrangebyscore('zset_1', 0, 100) assert_equals(b, 3) self.client.zclear('zset_1') self.client.delete('zset_1') def test_queue(self): self.client.qclear('queue_1') self.client.qclear('queue_2') queue_1 = ['a','b','c','d','e','f','g'] queue_2 = ['test1','test2','test3','test4','test5','test6'] #qpush a = self.client.qpush('queue_1',*queue_1) assert_equals(a,len(queue_1)) a = self.client.qpush('queue_2',*queue_2) assert_equals(a,len(queue_2)) #qsize a = self.client.qsize('queue_1') assert_equals(a,len(queue_1)) #qlist a = self.client.qlist('queue_1', 'queue_2', 10) assert_equals(a,['queue_2']) a = self.client.qlist('queue_', 'queue_2', 10) assert_equals(a,['queue_1', 'queue_2']) a = self.client.qlist('z', '', 10) assert_equals(a,[]) #qrlist a = self.client.qrlist('queue_2', 'queue_1', 10) assert_equals(a,['queue_1']) a = self.client.qrlist('queue_z', 'queue_', 10) assert_equals(a,['queue_2', 'queue_1']) a = self.client.qrlist('z', '', 10) assert_equals(a,['queue_2', 'queue_1']) #qfront a = self.client.qfront('queue_1') assert_equals(a,'a') #qback a = self.client.qback('queue_1') assert_equals(a,'g') #qget a = self.client.qget('queue_1',2) assert_equals(a,'c') a = self.client.qget('queue_1',0) assert_equals(a,'a') a = self.client.qget('queue_1',-1) assert_equals(a,'g') #qset a = self.client.qset('queue_1',0,'aaa') a = self.client.qget('queue_1',0) assert_equals(a,'aaa') a = self.client.qset('queue_1',0,'a') #qrange a = self.client.qrange('queue_1', 2, 2) assert_list_equal(a,['c','d']) a = self.client.qrange('queue_1', 2, 10) assert_list_equal(a,['c','d','e','f','g']) a = self.client.qrange('queue_1', -1, 1) assert_list_equal(a,['g']) #qslice a = self.client.qslice('queue_1', 2, 2) assert_list_equal(a,['c']) a = self.client.qslice('queue_1', 2, 3) assert_list_equal(a,['c','d']) a = self.client.qslice('queue_1', 2, 10) assert_list_equal(a,['c','d','e','f','g']) a = self.client.qslice('queue_1', -3, 5) assert_list_equal(a,['e','f']) #qpush a = self.client.qpush_back('queue_1','h') assert_equals(a,8) a = self.client.qpop_back('queue_1') assert_list_equal(a,['h']) a = self.client.qpush_back('queue_1','h','i','j','k') assert_equals(a,11) a = self.client.qpop_back('queue_1',4) assert_list_equal(a,['k','j','i','h']) a = self.client.qpush('queue_1','h') assert_equals(a,8) a = self.client.qpop_back('queue_1') assert_list_equal(a,['h']) a = self.client.qpush('queue_1','h','i','j','k') assert_equals(a,11) a = self.client.qpop_back('queue_1',4) assert_list_equal(a,['k','j','i','h']) a = self.client.qpush_front('queue_1','0') assert_equals(a,8) a = self.client.qpop_front('queue_1') assert_list_equal(a,['0']) a = self.client.qpush_front('queue_1','0','1','2','3') assert_equals(a,11) a = self.client.qpop_front('queue_1',4) assert_list_equal(a,['3','2','1','0']) a = self.client.qpush_front('queue_1','0') assert_equals(a,8) a = self.client.qpop('queue_1') assert_list_equal(a,['0']) a = self.client.qpush_front('queue_1','0','1','2','3') assert_equals(a,11) a = self.client.qpop('queue_1',4) assert_list_equal(a,['3','2','1','0']) #qrem_front a = self.client.qrem_front('queue_1',3) assert_equals(a,3) a = self.client.qpop('queue_1',10) assert_list_equal(a,['d','e','f','g']) a = self.client.qpush('queue_1',*queue_1) assert_equals(a,len(queue_1)) #qrem_back a = self.client.qrem_back('queue_1',3) assert_equals(a,3) a = self.client.qpop('queue_1',10) assert_list_equal(a,['a','b','c','d']) a = self.client.qpush('queue_1',*queue_1) 
        assert_equals(a, len(queue_1))
        #qpop
        a = self.client.qpop('queue_1', len(queue_1))
        assert_list_equal(a, queue_1)
        #qclear
        a = self.client.qclear('queue_2')
        assert_equals(a, 6)
        b = self.client.qclear('queue_1')
        assert_equals(b, 0)
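# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test suite above): wiring up the
# same BlockingConnectionPool / SSDB client that setUp() builds, outside of
# nose.  It assumes an ssdb-server reachable at 127.0.0.1:8888, exactly like
# the tests, and only exercises calls already shown above (set/get/delete).
# The demo_* names and 'demo_key' are introduced for this example only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    demo_pool = BlockingConnectionPool(
        connection_class=Connection,
        max_connections=2,
        timeout=5,
        host='127.0.0.1',
        port=8888)
    demo_client = SSDB(connection_pool=demo_pool)
    demo_client.set('demo_key', 'demo_value')   # returns True on success
    assert demo_client.get('demo_key') == 'demo_value'
    demo_client.delete('demo_key')              # clean up after the demo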
#------------------------------------------------------------------- # Copyright (c) 2013, Scott D. Peckham # # Apr 2013. New time interpolator class from/for framework3.py. # #------------------------------------------------------------------- # # class time_interp_data() # __init__() # update() # # class time_interpolator() # __init__() # initialize() # update() # update_all() # get_values() # convert_time_units() # #------------------------------------------------------------------- import numpy as np #------------------------------------------------------------------- class time_interp_data(): #-------------------------------------------------------- # Note: This is a small "utility class". We create an # instance of this class for every long_var_name # that is shared between components. #-------------------------------------------------------- # Note: Additional arguments will need to be added in # order to perform time interpolation by a method # other than "Linear" (or a new class?). #-------------------------------------------------------- def __init__( self, v1=None, t1=None, long_var_name=None ): ## interp_method='Linear'): #------------------------------------------- # Save (v1,t1) to (v2,t2) because update() # first sets (v1,t1) from old (v2,t2). #------------------------------------------- self.v2 = v1 self.t2 = t1 self.long_var_name = long_var_name ## self.interp_method = interp_method #-------------------------------------- # Need this, too, for in-place updates #-------------------------------------- self.v1 = v1.copy() self.t1 = t1.copy() #-------------- # For testing #-------------- ## if (self.long_var_name == 'atmosphere_water__precipitation_leq-volume_flux'): ## print 'In __init__():' ## print '(P1,P2, t1,t2) =', self.v1, self.v2, self.t1, self.t2 # __init__() #---------------------------------------------------------- def update( self, v2=None, t2=None ): #---------------------------------------------------- # Note: v1 and v2 could be 0D, 1D, 2D or 3D arrays. # However, since they are NumPy ndarrays, the # equations used below will work regardless # of the array's rank. #---------------------------------------------------- # Note: We can use "in-place" assignments for v1 # and v2 as long as their rank is > 0. #---------------------------------------------------- #--------------------------------------------- # Update the "start values" (old end values) # (in-place, if possible) # Note: try/except is slightly faster. # Note: Need to use "copy()" as shown. #--------------------------------------------- self.t1 = self.t2.copy() try: self.v1[:] = self.v2.copy() except: self.v1 = self.v2.copy() #----------------------------------- ## if (np.ndim( self.v1 ) > 0): ## self.v1[:] = self.v2.copy() ## else: ## self.v1 = self.v2.copy() #-------------------------- # Update the "end values" # (in-place, if possible) # Note: Need to use "copy()" as shown. #--------------------------------------------- self.t2 = t2 try: self.v2[:] = v2.copy() ## NEED THIS! except: self.v2 = v2.copy() ## NEED THIS! #----------------------------------- ## if (np.ndim( self.v2 ) > 0): ## self.v2[:] = v2 ## else: ## self.v2 = v2 #--------------------------------------------- # Update the interpolation parameters, a & b # They are used in get_values2(). 
#--------------------------------------------- # This would also work: # v1_ne_v2 = (v2 - self.v1) != 0 # if np.any( v1_ne_v2 ) and (t2 != self.t1): #---------------------------------------------------- dv = np.abs(v2 - self.v1) dv_min = dv.min() if (dv_min != 0) and (t2 != self.t1): self.a = (v2 - self.v1) / (t2 - self.t1) self.b = v2 - (self.a * t2) else: #------------------------------------------ # Variables that don't vary in time will # have v1 = v2, but t2 > t1. # Disabled TopoFlow components will have # v2 = v1 and t2 = t1, but they may still # provide default values (e.g. precip=0). #------------------------------------------ # This a and b gives "no interpolation", # that is, v[t] = v1 = v2. #------------------------------------------ self.a = np.float64(0) self.b = v2 #-------------- # For testing #-------------- ## if (self.long_var_name == 'atmosphere_water__precipitation_leq-volume_flux'): ## print '(P1,P2, t1,t2) =', self.v1, self.v2, self.t1, self.t2 # update() #---------------------------------------------------------- # time_interp_data() (class) #----------------------------------------------------------------------- #----------------------------------------------------------------------- class time_interpolator(): #---------------------------------------- # Define some unit-conversion constants #---------------------------------------- secs_per_min = 60 secs_per_hour = 60 * secs_per_min secs_per_day = 24 * secs_per_hour secs_per_year = 365 * secs_per_day secs_per_month = secs_per_year / 12 ######### #---------------------------------------------------------- def __init__( self, comp_set, port_names, vars_provided, method='Linear' ): #------------------------------------------------------- # Note: These are currently passed in from framework. # # comp_set = a dictionary that takes a port_name # key and returns a reference to a # BMI model instance. # # port_names = a list of all port_names that # provide vars to other components # (essentially "component type") # (and all of the keys in comp_set) # # vars_provided = a dictionary that takes a # port_name key and returns a # list of all the long_var_names # that the port actually provides # to another component in the set # # method = 'None' or 'Linear' (so far) #------------------------------------------------------- self.comp_set = comp_set self.provider_port_list = port_names self.vars_provided = vars_provided self.interpolation_method = method print 'Time interpolation method =', method print ' ' # __init__() #---------------------------------------------------------- def initialize( self ): #------------------------------------------------------------ # Note: This function initializes a dictionary called: # self.time_interp_vars and should be called from the # framework's initialize() function. # # Given "long_var_name" as a key, the dictionary # returns a bundle of variables that are needed to # perform time interpolation for that variable when # it is requested from other components. # # Note that self.vars_provided[ provider_port_name ] # contains a list of vars that are actually provided # by that provider port to some other component in # the current set of components (i.e. "comp_set"). #------------------------------------------------------------- # Note: If we could somehow distinguish between provided # vars that vary in time and those that don't, then # we could avoid some extra work. But this works. 
#------------------------------------------------------------- # Note: provider_port_list always includes the Driver. #------------------------------------------------------------- method = self.interpolation_method #---------------------------- # Case of no interpolation # (i.e. "steps" or "jumps") #---------------------------- if (method == 'None'): self.time_interp_vars = None #### #-------------------------------------------- # For new method, we must call bmi.update() # for every provider here. (4/13/13) #-------------------------------------------- for port_name in self.provider_port_list: bmi = self.comp_set[ port_name ] bmi.update( -1.0 ) ## print 'Updated port: ' + port_name ## print ' ' return #------------------------------- # Case of Linear interpolation #------------------------------- if (method == 'Linear'): self.time_interp_vars = dict() for port_name in self.provider_port_list: bmi = self.comp_set[ port_name ] #--------------------------------------- # Get t1 and convert units, if needed. #--------------------------------------- comp_time_units = bmi.get_time_units() t1 = bmi.get_current_time() t1 = self.convert_time_units( t1, comp_time_units ) #--------------------------------------------------- # Get vars at start of interpolation time interval #--------------------------------------------------- for long_var_name in self.vars_provided[ port_name ]: v1 = bmi.get_values( long_var_name ) data = time_interp_data( v1=v1, t1=t1, \ long_var_name=long_var_name ) self.time_interp_vars[ long_var_name ] = data #-------------------------------------------- # Call this component's update() just once. #--------------------------------------------- # Note: Driver is updated here, too, even if # it doesn't provide vars to others. #--------------------------------------------- bmi.update( -1.0 ) #--------------------------------------- # Get t2 and convert units, if needed. #--------------------------------------- t2 = bmi.get_current_time() t2 = self.convert_time_units( t2, comp_time_units ) #-------------- # For testing #-------------- ## print 'Updated port: ' + port_name ## print ' (t1, t2) =', t1, t2 #------------------------------------------------- # Get vars at end of interpolation time interval #------------------------------------------------- for long_var_name in self.vars_provided[ port_name ]: v2 = bmi.get_values( long_var_name ) #------------------------------------- # Save (v2,t2) and update the time # interpolation parameters a and b. #------------------------------------- self.time_interp_vars[ long_var_name ].update(v2, t2) return #------------------------------------- # Case of Cubic Spline interpolation #----------------------------------------------------- # Note: Cubic spline interpolation with a natural or # clamped boundary condition requires that all # time interval endpoint values are available # (i.e. for an entire model run). # However, during a run with Linear or None # interpolation we could compute the a0 and b0 # that are needed to compute a[n], b[n], c[n] # and d[n] for a subsequent run that uses # cubic spline interpolation. #----------------------------------------------------- # Note: We need to call bmi.update() 3 times here, # and then just once below in update(). 
#----------------------------------------------------- ## if (method == 'Cubic'): ## self.time_interp_vars = dict() ## return # initialize() #------------------------------------------------------------------- def update( self, port_name, time ): #------------------------------------------------------------ # Note: This function provides automatic time-interpolation # for components that have different time steps. #------------------------------------------------------------ # Note: The "framework time step" will be the same as the # component with the smallest time step. # # If follows that if the time argument is "framework # time", then we only need to call bmi.update() once # for any component to make its internal time greater # than the framework time. # # We must make sure that this method works when called # for the component (port_name) that has the smallest # time step. In that case, we don't need to do any # time interpolation and should take the "None" branch # below. #### CHECK THAT THIS HAPPENS. #### #------------------------------------------------------------ # Note: A component's current time is incremented every # time its bmi.update() method is called, as when # done by the initialize() method. #------------------------------------------------------------ DEBUG = False ## DEBUG = True bmi = self.comp_set[ port_name ] #----------------------------------------------------- # Get current time of component with this port_name. # Convert units to framework time units, if needed. #----------------------------------------------------- comp_time_units = bmi.get_time_units() comp_time = bmi.get_current_time() # comp_time0 = comp_time.copy() comp_time = self.convert_time_units( comp_time, comp_time_units ) #-------------- # For testing #-------------- ## print 'port_name =', port_name ## print 'comp_time before =', comp_time0 ## print 'comp_time after =', comp_time ## print ' ' if (DEBUG): print '=============================================' print 'In update_time_interpolation():' print ' time (fmwk) =', time print ' port_name =', port_name print ' comp_time =', comp_time #-------------------------------------------- # Do we need to update interpolation vars ? #------------------------------------------------ # Note: DISABLED components have comp_time = 0. #------------------------------------------------ ### if (time < comp_time): # (This works, too.) if (time <= comp_time): if (DEBUG): print ' NO update for: ' + port_name + ' interp. vars' return #------------------------------------------------ # The current "framework time" has passed this # model component's internal time so we need to # call the model's bmi.update() method and then # update the time interpolation vars. #------------------------------------------------ if (DEBUG): print ' Framework updated: ' + port_name + ' interp. vars' #------------------------------------------------ # Note: We need to check the component status # here because otherwise bmi.update() is # called every time below for DISABLED # components. #------------------------------------------------ # Using (status = 'initialized') works because # the initialize() method caused all other # components to reach "updated" status. 
#------------------------------------------------ comp_status = bmi.get_status() # if (comp_status == 'disabled'): # (not used/ready yet) if (comp_status == 'initialized'): # (this works) return #--------------------------- # Case of no interpolation #--------------------------- if (self.interpolation_method == 'None'): #------------------------------------------------- # Since the framework has the smallest timestep, # we should only need to call bmi.update() once # in order to get comp_time > time. #------------------------------------------------- bmi.update( -1.0 ) ## self.update( port_name ) # (Checks for failure.) return #------------------------------- # Case of Linear interpolation #------------------------------- if (self.interpolation_method == 'Linear'): #-------------------------------------------- # Call this component's update() just once. #-------------------------------------------- bmi.update( -1.0 ) ## self.update( port_name ) # (has error messages) #--------------------------------------- # Get t2 and convert units, if needed. #--------------------------------------- comp_time_units = bmi.get_time_units() t2 = bmi.get_current_time() t2 = self.convert_time_units( t2, comp_time_units ) #--------------------------------------------------- # Get values at end of interpolation time interval #--------------------------------------------------- for long_var_name in self.vars_provided[ port_name ]: #------------------------------------------------ # Note: bmi.get_values() works for any rank. #------------------------------------------------ # self.time_interp_vars is a dictionary that is # initialized in the framework's initialize(). #------------------------------------------------ v2 = bmi.get_values( long_var_name ) #-------------- # For testing #-------------- ## if (long_var_name == 'atmosphere_water__precipitation_leq-volume_flux'): ## print '(time, P) =', t2, v2 i_vars = self.time_interp_vars[ long_var_name ] #------------------------------------- # This also updates v1 and t1 first. #------------------------------------- i_vars.update(v2, t2) #-------------- # For testing #-------------- ## print 'Updated port: ' + port_name ## print ' (t1, t2) =', i_vars.t1, i_vars.t2 return #------------------------------------- # Case of Cubic Spline interpolation #------------------------------------- ## if (self.interpolation_method == 'Cubic'): # update() #------------------------------------------------------------------- def update2( self, port_name ): #------------------------------------------------------------ # Note: This function provides automatic time-interpolation # for components that have different time steps. #------------------------------------------------------------ # Note: The "framework time step" will be the same as the # component with the smallest time step. # # If follows that if the time argument is "framework # time", then we only need to call bmi.update() once # for any component to make its internal time greater # than the framework time. # # We must make sure that this method works when called # for the component (port_name) that has the smallest # time step. In that case, we don't need to do any # time interpolation and should take the "None" branch # below. #### CHECK THAT THIS HAPPENS. #### #------------------------------------------------------------ # Note: A component's current time is incremented every # time its bmi.update() method is called, as when # done by the initialize() method. 
#------------------------------------------------------------ # Note: In this version, we assume that bmi.update() was # already called by caller of this method. (4/18/13) #------------------------------------------------------------ DEBUG = False ## DEBUG = True #--------------------------- # Case of no interpolation #--------------------------- if (self.interpolation_method == 'None'): return #------------------------------------------------ # Note: We need to check the component status # here because otherwise bmi.update() is # called every time below for DISABLED # components. #------------------------------------------------ # Using (status = 'initialized') works because # the initialize() method caused all other # components to reach "updated" status. #------------------------------------------------ bmi = self.comp_set[ port_name ] # (or pass in bmi) comp_status = bmi.get_status() # if (comp_status == 'disabled'): # (not used/ready yet) if (comp_status == 'initialized'): # (this works) return #------------------------------- # Case of Linear interpolation #------------------------------- if (self.interpolation_method == 'Linear'): #--------------------------------------- # Get t2 and convert units, if needed. #--------------------------------------- comp_time_units = bmi.get_time_units() t2 = bmi.get_current_time() t2 = self.convert_time_units( t2, comp_time_units ) #--------------------------------------------------- # Get values at end of interpolation time interval #--------------------------------------------------- for long_var_name in self.vars_provided[ port_name ]: #------------------------------------------------ # Note: bmi.get_values() works for any rank. #------------------------------------------------ # self.time_interp_vars is a dictionary that is # initialized in the framework's initialize(). #------------------------------------------------ v2 = bmi.get_values( long_var_name ) #-------------- # For testing #-------------- ## if (long_var_name == 'atmosphere_water__precipitation_leq-volume_flux'): ## print '(time, P) =', t2, v2 i_vars = self.time_interp_vars[ long_var_name ] #------------------------------------- # This also updates v1 and t1 first. #------------------------------------- i_vars.update(v2, t2) #-------------- # For testing #-------------- ## print 'Updated port: ' + port_name ## print ' (t1, t2) =', i_vars.t1, i_vars.t2 return #------------------------------------- # Case of Cubic Spline interpolation #------------------------------------- ## if (self.interpolation_method == 'Cubic'): # update2() #------------------------------------------------------------------- def update_all( self, time ): for port_name in self.provider_port_list: self.update( port_name, time ) # update_all() #------------------------------------------------------------------- def get_values( self, long_var_name, port_name, time ): #------------------------------------------------------- # Note: This method returns a NumPy "ndarray" object # that Babel is able to pass to other components # as a SIDL generic array. #------------------------------------------------------- # Note: The update() method is called for port_name # before this is called. #------------------------------------------------------- bmi = self.comp_set[ port_name ] # (pass in bmi ?) #------------------------------------------------------- # Has this component been disabled? If so, it doesn't # advance time or update its initial values so time # interpolation is not needed. 
#------------------------------------------------------------ # TopoFlow components currently have a "comp_status" # attribute that is either "Enabled" or "Disabled", set in # their CFG file and read by BMI_base.read_config_file(). # They also have a "status" attribute that is from the # OpenMI status types (e.g. "initialized", "initializing"). # Should we add "disabled" to the OpenMI status types? #------------------------------------------------------------ # If a component has already be finalized, then just get # its current (final) values; do not interpolate. This # is needed for framework.finalize_all() to work. (8/20/13) #------------------------------------------------------------ comp_status = bmi.get_status() if (comp_status == 'initialized') or \ (comp_status == 'finalized'): return bmi.get_values( long_var_name ) #--------------------------- # Case of no interpolation #------------------------------------------------------- # Note that if (time < comp_time) then we are # just returning the same value that all users already # have. Maybe we can avoid doing this somehow. #------------------------------------------------------- if (self.interpolation_method == 'None'): #------------------------------------ # For testing. Is time in interval? #------------------------------------ bmi_time = bmi.get_current_time() if (time > bmi_time): print '#########################################' print ' ERROR: time > bmi_time in get_values().' print ' time, bmi_time =', time, bmi_time print ' port_name =', port_name print '#########################################' print ' ' return bmi.get_values( long_var_name ) #------------------------------- # Case of Linear interpolation #------------------------------- if (self.interpolation_method == 'Linear'): #------------------------------------------------ # Compute and return a time-interpolated value. #------------------------------------------------ i_vars = self.time_interp_vars[ long_var_name ] #------------------------------------ # For testing. Is time in interval? #------------------------------------ if (time > i_vars.t2): print '#######################################' print ' ERROR: time > t2 in get_values().' print ' time, t2 =', time, i_vars.t2 print ' port_name =', port_name print '#######################################' print ' ' value = (i_vars.a * time) + i_vars.b #-------------- # For testing #-------------- ## if (long_var_name == 'atmosphere_water__precipitation_leq-volume_flux'): ## print '(time, P, a, b) =', time, value, i_vars.a, i_vars.b return value #------------------------------------- # Case of Cubic Spline interpolation #------------------------------------- ## if (self.interpolation_method == 'Cubic'): ## #------------------------------------------------ ## # Compute and return a time-interpolated value. ## #------------------------------------------------ ## value = ?????? ## return value # get_values() #------------------------------------------------------------------- def convert_time_units( self, in_time, in_units ): #----------------------------------------------- # Note: Conversion constants are defined just # inside (at top of) class declaration. #----------------------------------------------- #---------------------------------- # Convert "in_units" to "seconds" #---------------------------------- if (in_units in ['years', 'y']): time = in_time * self.secs_per_year elif (in_units == 'months'): ### Use 'm' ???? 
            time = in_time * self.secs_per_month
        elif (in_units in ['days', 'd']):
            time = in_time * self.secs_per_day
        elif (in_units in ['hours', 'h']):
            time = in_time * self.secs_per_hour
        elif (in_units in ['minutes', 'm']):   ### month?
            time = in_time * self.secs_per_min
        else:
            time = in_time.copy()
            ## time = in_time

        return time

    #   convert_time_units()
    #-------------------------------------------------------------------
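# ---------------------------------------------------------------------------
# Worked sketch of the linear interpolation used above (standalone and for
# illustration only; it mirrors the a & b computed in time_interp_data.update()
# and the evaluation in time_interpolator.get_values(), but is not part of the
# framework).  Given endpoint values (v1, t1) and (v2, t2):
#
#     a = (v2 - v1) / (t2 - t1)        # slope
#     b = v2 - (a * t2)                # intercept
#     v(t) = (a * t) + b               # value served for t1 <= t <= t2
#
# The demo_* names below are introduced for this example only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    demo_v1, demo_t1 = np.float64(2.0), np.float64(0.0)    # start of interval
    demo_v2, demo_t2 = np.float64(6.0), np.float64(10.0)   # end of interval
    demo_a = (demo_v2 - demo_v1) / (demo_t2 - demo_t1)     # slope = 0.4
    demo_b = demo_v2 - (demo_a * demo_t2)                  # intercept = 2.0
    demo_v = (demo_a * np.float64(5.0)) + demo_b           # value at t = 5
    assert abs(demo_v - 4.0) < 1e-12                       # halfway -> 4.0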
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.common.types import InvalidCredsError from libcloud.compute.drivers.opsource import OpsourceNodeDriver as Opsource from libcloud.compute.drivers.opsource import OpsourceAPIException from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation from libcloud.test import MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import OPSOURCE_PARAMS class OpsourceTests(unittest.TestCase, TestCaseMixin): def setUp(self): Opsource.connectionCls.conn_classes = (None, OpsourceMockHttp) OpsourceMockHttp.type = None self.driver = Opsource(*OPSOURCE_PARAMS) def test_invalid_creds(self): OpsourceMockHttp.type = 'UNAUTHORIZED' try: self.driver.list_nodes() self.assertTrue( False) # Above command should have thrown an InvalidCredsException except InvalidCredsError: self.assertTrue(True) def test_list_sizes_response(self): OpsourceMockHttp.type = None ret = self.driver.list_sizes() self.assertEqual(len(ret), 1) size = ret[0] self.assertEqual(size.name, 'default') def test_reboot_node_response(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = node.reboot() self.assertTrue(ret is True) def test_reboot_node_response_INPROGRESS(self): OpsourceMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) try: node.reboot() self.assertTrue( False) # above command should have thrown OpsourceAPIException except OpsourceAPIException: self.assertTrue(True) def test_destroy_node_response(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = node.destroy() self.assertTrue(ret is True) def test_destroy_node_response_INPROGRESS(self): OpsourceMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) try: node.destroy() self.assertTrue( False) # above command should have thrown OpsourceAPIException except OpsourceAPIException: self.assertTrue(True) def test_create_node_response(self): rootPw = NodeAuthPassword('pass123') image = self.driver.list_images()[0] network = self.driver.ex_list_networks()[0] node = self.driver.create_node(name='test2', image=image, auth=rootPw, ex_description='test2 node', ex_network=network, ex_isStarted=False) self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87') self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER') def test_ex_shutdown_graceful(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = 
self.driver.ex_shutdown_graceful(node) self.assertTrue(ret is True) def test_ex_shutdown_graceful_INPROGRESS(self): OpsourceMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) try: self.driver.ex_shutdown_graceful(node) self.assertTrue( False) # above command should have thrown OpsourceAPIException except OpsourceAPIException: self.assertTrue(True) def test_ex_start_node(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_start_node(node) self.assertTrue(ret is True) def test_ex_start_node_INPROGRESS(self): OpsourceMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) try: self.driver.ex_start_node(node) self.assertTrue( False) # above command should have thrown OpsourceAPIException except OpsourceAPIException: self.assertTrue(True) def test_ex_power_off(self): node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) ret = self.driver.ex_power_off(node) self.assertTrue(ret is True) def test_ex_power_off_INPROGRESS(self): OpsourceMockHttp.type = 'INPROGRESS' node = Node(id='11', name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) try: self.driver.ex_power_off(node) self.assertTrue( False) # above command should have thrown OpsourceAPIException except OpsourceAPIException: self.assertTrue(True) def test_ex_list_networks(self): nets = self.driver.ex_list_networks() self.assertEqual(nets[0].name, 'test-net1') self.assertTrue(isinstance(nets[0].location, NodeLocation)) def test_node_public_ip(self): nodes = self.driver.list_nodes() node = [n for n in nodes if n.id == 'abadbc7e-9e10-46ca-9d4a-194bcc6b6c16'][0] self.assertEqual(node.public_ips[0], '200.16.132.7') class OpsourceMockHttp(MockHttp): fixtures = ComputeFileFixtures('opsource') def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED]) def _oec_0_9_myaccount(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_myaccount.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_base_image(self, method, url, body, headers): body = self.fixtures.load('oec_0_9_base_image.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers): body = None action = url.split('?')[-1] if action == 
'restart': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml') elif action == 'shutdown': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml') elif action == 'delete': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml') elif action == 'start': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml') elif action == 'poweroff': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers): body = None action = url.split('?')[-1] if action == 'restart': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml') elif action == 'shutdown': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml') elif action == 'delete': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml') elif action == 'start': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml') elif action == 'poweroff': body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml') return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers): body = self.fixtures.load( '_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers): body = self.fixtures.load( 'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main())
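# ---------------------------------------------------------------------------
# Standalone sketch (assumed names, not libcloud internals) of the dispatch
# convention the OpsourceMockHttp fixtures above rely on: each request path is
# routed to a handler method named after the path, and setting a class-level
# `type` (e.g. 'UNAUTHORIZED' or 'INPROGRESS') appends a suffix so a test can
# switch the same endpoint onto its error fixture and back again.
# ---------------------------------------------------------------------------
class TinyMockHttp(object):
    type = None   # e.g. 'UNAUTHORIZED' routes to the *_UNAUTHORIZED handler

    def dispatch(self, path):
        name = '_' + path.strip('/').replace('/', '_').replace('.', '_')
        if self.type:
            name = '%s_%s' % (name, self.type)
        return getattr(self, name)()

    def _oec_0_9_myaccount(self):
        return (httplib.OK, '<Account/>')

    def _oec_0_9_myaccount_UNAUTHORIZED(self):
        return (httplib.UNAUTHORIZED, '')

# TinyMockHttp().dispatch('/oec/0.9/myaccount') hits the OK handler; after
# setting TinyMockHttp.type = 'UNAUTHORIZED' the same call hits the 401
# handler, mirroring how test_invalid_creds() flips OpsourceMockHttp.type.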
#!/usr/bin/python2 # Copyright 2014 CloudFounders NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Basic test module """ import uuid import time from unittest import TestCase from ovs.dal.exceptions import * from ovs.dal.dataobjectlist import DataObjectList from ovs.extensions.generic import fakesleep from ovs.extensions.storage.persistent.dummystore import DummyPersistentStore from ovs.extensions.storage.volatile.dummystore import DummyVolatileStore from ovs.extensions.storage.persistentfactory import PersistentFactory from ovs.extensions.storage.volatilefactory import VolatileFactory from ovs.dal.hybrids.t_testmachine import TestMachine from ovs.dal.hybrids.t_testdisk import TestDisk from ovs.dal.hybrids.t_testemachine import TestEMachine from ovs.dal.datalist import DataList from ovs.dal.helpers import Descriptor from ovs.extensions.generic.volatilemutex import VolatileMutex class Basic(TestCase): """ The basic unittestsuite will test all basic functionality of the DAL framework It will also try accessing all dynamic properties of all hybrids making sure that code actually works. This however means that all loaded 3rd party libs need to be mocked """ @classmethod def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. 
This makes sure the unittests can be executed without those libraries installed """ PersistentFactory.store = DummyPersistentStore() PersistentFactory.store.clean() PersistentFactory.store.clean() VolatileFactory.store = DummyVolatileStore() VolatileFactory.store.clean() VolatileFactory.store.clean() fakesleep.monkey_patch() @classmethod def setUp(cls): """ (Re)Sets the stores on every test """ PersistentFactory.store = DummyPersistentStore() PersistentFactory.store.clean() VolatileFactory.store = DummyVolatileStore() VolatileFactory.store.clean() @classmethod def tearDownClass(cls): """ Clean up the unittest """ fakesleep.monkey_restore() def test_invalidobject(self): """ Validates the behavior when a non-existing object is loaded """ # Loading an non-existing object should raise self.assertRaises(ObjectNotFoundException, TestDisk, uuid.uuid4(), None) def test_newobject_delete(self): """ Validates the behavior on object deletions """ disk = TestDisk() disk.name = 'disk' disk.save() # An object should always have a guid guid = disk.guid self.assertIsNotNone(guid, 'Guid should not be None') # After deleting, the object should not be retreivable disk.delete() self.assertRaises(Exception, TestDisk, guid, None) def test_discard(self): """ Validates the behavior regarding pending changes discard """ disk = TestDisk() disk.name = 'one' disk.save() disk.name = 'two' # Discarding an object should rollback all changes disk.discard() self.assertEqual(disk.name, 'one', 'Data should be discarded') def test_updateproperty(self): """ Validates the behavior regarding updating properties """ disk = TestDisk() disk.name = 'test' disk.description = 'desc' # A property should be writable self.assertIs(disk.name, 'test', 'Property should be updated') self.assertIs(disk.description, 'desc', 'Property should be updated') def test_preinit(self): """ Validates whether initial data is loaded on object creation """ disk = TestDisk(data={'name': 'diskx'}) disk.save() self.assertEqual(disk.name, 'diskx', 'Disk name should be preloaded') def test_datapersistent(self): """ Validates whether data is persisted correctly """ disk = TestDisk() guid = disk.guid disk.name = 'test' disk.save() # Retreiving an object should return the data as when it was saved disk2 = TestDisk(guid) self.assertEqual(disk.name, disk2.name, 'Data should be persistent') def test_readonlyproperty(self): """ Validates whether all dynamic properties are actually read-only """ disk = TestDisk() # Readonly properties should return data self.assertIsNotNone(disk.used_size, 'RO property should return data') def test_datastorewins(self): """ Validates the "datastore_wins" behavior in the usecase where it wins """ disk = TestDisk() disk.name = 'initial' disk.save() disk2 = TestDisk(disk.guid, datastore_wins=True) disk.name = 'one' disk.save() disk2.name = 'two' disk2.save() # With datastore_wins set to True, the datastore wins concurrency conflicts self.assertEqual(disk2.name, 'one', 'Data should be overwritten') def test_datastoreloses(self): """ Validates the "datastore_wins" behavior in the usecase where it loses """ disk = TestDisk() disk.name = 'initial' disk.save() disk2 = TestDisk(disk.guid, datastore_wins=False) disk.name = 'one' disk.save() disk2.name = 'two' disk2.save() # With datastore_wins set to False, the datastore loses concurrency conflicts self.assertEqual(disk2.name, 'two', 'Data should not be overwritten') def test_silentdatarefresh(self): """ Validates whether the default scenario (datastore_wins=False) will execute silent data 
refresh """ disk = TestDisk() disk.name = 'initial' disk.save() disk2 = TestDisk(disk.guid, datastore_wins=False) disk.name = 'one' disk.save() disk2.name = 'two' disk2.save() disk.save() # This should not overwrite anything but instead refresh data # With datastore_wins set to False, the datastore loses concurrency conflicts self.assertEqual(disk2.name, 'two', 'Data should not be overwritten') self.assertEqual(disk.name, 'two', 'Data should be refreshed') def test_datastoreraises(self): """ Validates the "datastore_wins" behavior in the usecase where it's supposed to raise """ disk = TestDisk() disk.name = 'initial' disk.save() disk2 = TestDisk(disk.guid, datastore_wins=None) disk.name = 'one' disk.save() disk2.name = 'two' # with datastore_wins set to None, concurrency conflicts are raised self.assertRaises(ConcurrencyException, disk2.save) def test_volatileproperty(self): """ Validates the volatile behavior of dynamic properties """ disk = TestDisk() disk.size = 1000000 value = disk.used_size # Volatile properties should be stored for the correct amount of time time.sleep(2) self.assertEqual(disk.used_size, value, 'Value should still be from cache') time.sleep(2) self.assertEqual(disk.used_size, value, 'Value should still be from cache') time.sleep(2) # ... after which they should be reloaded from the backend self.assertNotEqual(disk.used_size, value, 'Value should be different') def test_primarykeyvalidation(self): """ Validates whether the passed in key (guid) of an object is validated """ self.assertRaises(ValueError, TestDisk, 'foo', None) disk = TestDisk() # Should not raise disk.name = 'disk' disk.save() _ = TestDisk(disk.guid) # Should not raise def test_persistency(self): """ Validates whether the object is fetches from the correct storage backend """ disk = TestDisk() disk.name = 'test' disk.save() # Right after a save, the cache is invalidated disk2 = TestDisk(disk.guid) self.assertFalse(disk2._metadata['cache'], 'Object should be retreived from persistent backend') # Subsequent calls will retreive the object from cache disk3 = TestDisk(disk.guid) self.assertTrue(disk3._metadata['cache'], 'Object should be retreived from cache') # After the object expiry passed, it will be retreived from backend again DummyVolatileStore().delete(disk._key) # We clear the entry disk4 = TestDisk(disk.guid) self.assertFalse(disk4._metadata['cache'], 'Object should be retreived from persistent backend') def test_queries(self): """ Validates whether executing queries returns the expected results """ machine = TestMachine() machine.name = 'machine' machine.save() for i in xrange(0, 20): disk = TestDisk() disk.name = 'test_{0}'.format(i) disk.size = i if i < 10: disk.machine = machine else: disk.storage = machine disk.save() self.assertEqual(len(machine.disks), 10, 'query should find added machines') # pylint: disable=line-too-long list_1 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('size', DataList.operator.EQUALS, 1)]}}).data # noqa self.assertEqual(list_1, 1, 'list should contain int 1') list_2 = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('size', DataList.operator.EQUALS, 1)]}}).data # noqa found_object = Descriptor(TestDisk, list_2[0]).get_object(True) self.assertEqual(found_object.name, 'test_1', 'list should contain correct machine') list_3 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': 
DataList.where_operator.AND, 'items': [('size', DataList.operator.GT, 3), ('size', DataList.operator.LT, 6)]}}).data # noqa self.assertEqual(list_3, 2, 'list should contain int 2') # disk 4 and 5 list_4 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.OR, 'items': [('size', DataList.operator.LT, 3), ('size', DataList.operator.GT, 6)]}}).data # noqa # at least disk 0, 1, 2, 7, 8, 9, 10-19 self.assertGreaterEqual(list_4, 16, 'list should contain >= 16') list_5 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.guid', DataList.operator.EQUALS, machine.guid), # noqa {'type': DataList.where_operator.OR, 'items': [('size', DataList.operator.LT, 3), ('size', DataList.operator.GT, 6)]}]}}).data # noqa self.assertEqual(list_5, 6, 'list should contain int 6') # disk 0, 1, 2, 7, 8, 9 list_6 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('size', DataList.operator.LT, 3), ('size', DataList.operator.GT, 6)]}}).data # noqa self.assertEqual(list_6, 0, 'list should contain int 0') # no disks list_7 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.OR, 'items': [('machine.guid', DataList.operator.EQUALS, '123'), # noqa ('used_size', DataList.operator.EQUALS, -1), {'type': DataList.where_operator.AND, 'items': [('size', DataList.operator.GT, 3), ('size', DataList.operator.LT, 6)]}]}}).data # noqa self.assertEqual(list_7, 2, 'list should contain int 2') # disk 4 and 5 list_8 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine'), # noqa ('name', DataList.operator.EQUALS, 'test_3')]}}).data # noqa self.assertEqual(list_8, 1, 'list should contain int 1') # disk 3 list_9 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('size', DataList.operator.GT, 3), {'type': DataList.where_operator.AND, 'items': [('size', DataList.operator.LT, 6)]}]}}).data # noqa self.assertEqual(list_9, 2, 'list should contain int 2') # disk 4 and 5 list_10 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.OR, 'items': [('size', DataList.operator.LT, 3), {'type': DataList.where_operator.OR, 'items': [('size', DataList.operator.GT, 6)]}]}}).data # noqa # at least disk 0, 1, 2, 7, 8, 9, 10-19 self.assertGreaterEqual(list_10, 16, 'list should contain >= 16') list_11 = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('storage.name', DataList.operator.EQUALS, 'machine')]}}).data # noqa self.assertEqual(list_11, 10, 'list should contain int 10') # disk 10-19 # pylint: enable=line-too-long def test_invalidpropertyassignment(self): """ Validates whether the correct exception is raised when properties are assigned with a wrong type """ disk = TestDisk() disk.size = 100 with self.assertRaises(TypeError): disk.machine = TestDisk() def test_recursive(self): """ Validates the recursive save """ machine = TestMachine() machine.name = 'original' machine.save() disks = [] for i in xrange(0, 10): disk = TestDisk() disk.name = 'test_{0}'.format(i) if i % 2: disk.machine = machine else: disk.machine = machine self.assertEqual(disk.machine.name, 'original', 'child should be set') 
disk.machine = None self.assertIsNone(disk.machine, 'child should be cleared') disks.append(disk) disk.save() counter = 1 for disk in machine.disks: disk.size = counter counter += 1 machine.save(recursive=True) disk = TestDisk(machine.disks[0].guid) self.assertEqual(disk.size, 1, 'lists should be saved recursively') disk.machine.name = 'mtest' disk.save(recursive=True) machine2 = TestMachine(machine.guid) self.assertEqual(machine2.disks[1].size, 2, 'lists should be saved recursively') self.assertEqual(machine2.name, 'mtest', 'properties should be saved recursively') def test_descriptors(self): """ Validates the correct behavior of the Descriptor """ with self.assertRaises(RuntimeError): _ = Descriptor().descriptor with self.assertRaises(RuntimeError): _ = Descriptor().get_object() def test_relationcache(self): """ Validates whether the relational properties are cached correctly, and whether they are invalidated when required """ machine = TestMachine() machine.name = 'machine' machine.save() disk1 = TestDisk() disk1.name = 'disk1' disk1.save() disk2 = TestDisk() disk2.name = 'disk2' disk2.save() disk3 = TestDisk() disk3.name = 'disk3' disk3.save() self.assertEqual(len(machine.disks), 0, 'There should be no disks on the machine') disk1.machine = machine disk1.save() self.assertEqual(len(machine.disks), 1, 'There should be 1 disks on the machine') disk2.machine = machine disk2.save() self.assertEqual(len(machine.disks), 2, 'There should be 2 disks on the machine') disk3.machine = machine disk3.save() self.assertEqual(len(machine.disks), 3, 'There should be 3 disks on the machine') machine.disks[0].name = 'disk1_' machine.disks[1].name = 'disk2_' machine.disks[2].name = 'disk3_' disk1.machine = None disk1.save() disk2.machine = None disk2.save() self.assertEqual(len(machine.disks), 1, 'There should be 1 disks on the machine') def test_datalistactions(self): """ Validates all actions that can be executed agains DataLists """ machine = TestMachine() machine.name = 'machine' machine.save() disk1 = TestDisk() disk1.name = 'disk1' disk1.machine = machine disk1.save() disk2 = TestDisk() disk2.name = 'disk2' disk2.machine = machine disk2.save() disk3 = TestDisk() disk3.name = 'disk3' disk3.machine = machine disk3.save() self.assertEqual(machine.disks.count(disk1), 1, 'Disk should be available only once') self.assertGreaterEqual(machine.disks.index(disk1), 0, 'We should retreive an index') machine.disks.sort() guid = machine.disks[0].guid machine.disks.reverse() self.assertEqual(machine.disks[-1].guid, guid, 'Reverse and sort should work') machine.disks.sort() self.assertEqual(machine.disks[0].guid, guid, 'And the guid should be first again') def test_listcache(self): """ Validates whether lists are cached and invalidated correctly """ keys = ['list_cache', None] for key in keys: disk0 = TestDisk() disk0.name = 'disk 0' disk0.save() list_cache = DataList(key=key, query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key)) self.assertEqual(list_cache.data, 0, 'List should find no entries (mode: {0})'.format(key)) machine = TestMachine() machine.name = 'machine' machine.save() disk1 = TestDisk() disk1.name = 'disk 1' disk1.machine = machine disk1.save() list_cache = DataList(key=key, query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': 
DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key)) self.assertEqual(list_cache.data, 1, 'List should find one entry (mode: {0})'.format(key)) list_cache = DataList(key=key, query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa self.assertTrue(list_cache.from_cache, 'List should be loaded from cache (mode: {0})'.format(key)) disk2 = TestDisk() disk2.machine = machine disk2.name = 'disk 2' disk2.save() list_cache = DataList(key=key, query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key)) self.assertEqual(list_cache.data, 2, 'List should find two entries (mode: {0})'.format(key)) machine.name = 'x' machine.save() list_cache = DataList(key=key, query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa self.assertFalse(list_cache.from_cache, 'List should not be loaded from cache (mode: {0})'.format(key)) self.assertEqual(list_cache.data, 0, 'List should have no matches (mode: {0})'.format(key)) def test_emptyquery(self): """ Validates whether an certain query returns an empty set """ amount = DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}).data # noqa self.assertEqual(amount, 0, 'There should be no data') def test_nofilterquery(self): """ Validates whether empty queries return the full resultset """ disk1 = TestDisk() disk1.name = 'disk 1' disk1.save() disk2 = TestDisk() disk2.name = 'disk 2' disk2.save() amount = DataList(key='some_list', query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': []}}).data self.assertEqual(amount, 2, 'There should be two disks ({0})'.format(amount)) disk3 = TestDisk() disk3.name = 'disk 3' disk3.save() amount = DataList(key='some_list', query={'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.AND, 'items': []}}).data self.assertEqual(amount, 3, 'There should be three disks ({0})'.format(amount)) def test_invalidqueries(self): """ Validates invalid queries """ machine = TestMachine() machine.name = 'machine' machine.save() disk = TestDisk() disk.name = 'disk' disk.machine = machine disk.save() setattr(DataList.select, 'SOMETHING', 'SOMETHING') with self.assertRaises(NotImplementedError): DataList({'object': TestDisk, 'data': DataList.select.SOMETHING, 'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa setattr(DataList.where_operator, 'SOMETHING', 'SOMETHING') with self.assertRaises(NotImplementedError): DataList({'object': TestDisk, 'data': DataList.select.COUNT, 'query': {'type': DataList.where_operator.SOMETHING, 'items': [('machine.name', DataList.operator.EQUALS, 'machine')]}}) # noqa setattr(DataList.operator, 'SOMETHING', 'SOMETHING') with self.assertRaises(NotImplementedError): DataList({'object': TestDisk, 'data': DataList.select.COUNT, 
'query': {'type': DataList.where_operator.AND, 'items': [('machine.name', DataList.operator.SOMETHING, 'machine')]}}) # noqa def test_clearedcache(self): """ Validates the correct behavior when the volatile cache is cleared """ disk = TestDisk() disk.name = 'somedisk' disk.save() VolatileFactory.store.delete(disk._key) disk2 = TestDisk(disk.guid) self.assertEqual(disk2.name, 'somedisk', 'Disk should be fetched from persistent store') def test_serialization(self): """ Validates whether serialization works as expected """ machine = TestMachine() machine.name = 'machine' machine.save() disk = TestDisk() disk.name = 'disk' disk.machine = machine disk.save() dictionary = disk.serialize() self.assertIn('name', dictionary, 'Serialized object should have correct properties') self.assertEqual(dictionary['name'], 'disk', 'Serialized object should have correct name') self.assertIn('machine_guid', dictionary, 'Serialized object should have correct depth') self.assertEqual(dictionary['machine_guid'], machine.guid, 'Serialized object should have correct properties') dictionary = disk.serialize(depth=1) self.assertIn('machine', dictionary, 'Serialized object should have correct depth') self.assertEqual(dictionary['machine']['name'], 'machine', 'Serialized object should have correct properties at all depths') def test_primarykeys(self): """ Validates whether the primary keys are kept in sync """ disk = TestDisk() disk.name = 'disk' keys = DataList.get_pks(disk._namespace, disk._name) self.assertEqual(len(keys), 0, 'There should be no primary keys ({0})'.format(len(keys))) disk.save() keys = DataList.get_pks(disk._namespace, disk._name) self.assertEqual(len(keys), 1, 'There should be one primary key ({0})'.format(len(keys))) disk.delete() keys = DataList.get_pks(disk._namespace, disk._name) self.assertEqual(len(keys), 0, 'There should be no primary keys ({0})'.format(len(keys))) def test_reduceddatalist(self): """ Validates the reduced list """ disk = TestDisk() disk.name = 'test' disk.save() data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': []}}).data datalist = DataObjectList(data, TestDisk) self.assertEqual(len(datalist), 1, 'There should be only one item ({0})'.format(len(datalist))) item = datalist.reduced[0] with self.assertRaises(AttributeError): print item.name self.assertEqual(item.guid, disk.guid, 'The guid should be available') def test_volatiemutex(self): """ Validates the volatile mutex """ mutex = VolatileMutex('test') mutex.acquire() mutex.acquire() # Should not raise errors mutex.release() mutex.release() # Should not raise errors mutex._volatile.add(mutex.key(), 1, 10) with self.assertRaises(RuntimeError): mutex.acquire(wait=1) mutex._volatile.delete(mutex.key()) mutex.acquire() time.sleep(0.5) mutex.release() def test_typesafety(self): """ Validates typesafety checking on object properties """ disk = TestDisk() disk.name = 'test' disk.name = u'test' disk.name = None disk.size = 100 disk.size = 100.5 disk.order = 100 with self.assertRaises(TypeError): disk.order = 100.5 with self.assertRaises(TypeError): disk.__dict__['wrong_type_data'] = None disk.wrong_type_data = 'string' _ = disk.wrong_type with self.assertRaises(TypeError): disk.type = 'THREE' disk.type = 'ONE' def test_ownrelations(self): """ Validates whether relations to the object itself are working """ pdisk = TestDisk() pdisk.name = 'parent' pdisk.save() cdisk1 = TestDisk() cdisk1.name = 'child 1' cdisk1.size = 100 cdisk1.parent = pdisk cdisk1.save() 
cdisk2 = TestDisk() cdisk2.name = 'child 2' cdisk2.size = 100 cdisk2.parent = pdisk cdisk2.save() self.assertEqual(len(pdisk.children), 2, 'There should be 2 children ({0})'.format(len(pdisk.children))) self.assertEqual(cdisk1.parent.name, 'parent', 'Parent should be loaded correctly') data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('parent.name', DataList.operator.EQUALS, 'parent')]}}).data datalist = DataObjectList(data, TestDisk) self.assertEqual(len(datalist), 2, 'There should be two items ({0})'.format(len(datalist))) cdisk2.parent = None cdisk2.save() data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('parent.name', DataList.operator.EQUALS, 'parent')]}}).data datalist = DataObjectList(data, TestDisk) self.assertEqual(len(datalist), 1, 'There should be one item ({0})'.format(len(datalist))) def test_copy(self): """ Validates whether the copy function works correct """ machine = TestMachine() machine.name = 'testmachine1' machine.save() disk1 = TestDisk() disk1.name = 'test1' disk1.size = 100 disk1.order = 1 disk1.type = 'ONE' disk1.machine = machine disk1.save() disk2 = TestDisk() disk2.copy(disk1) self.assertEqual(disk2.name, 'test1', 'Properties should be copied') self.assertEqual(disk2.size, 100, 'Properties should be copied') self.assertEqual(disk2.order, 1, 'Properties should be copied') self.assertEqual(disk2.type, 'ONE', 'Properties should be copied') self.assertEqual(disk2.machine, None, 'Relations should not be copied') disk3 = TestDisk() disk3.copy(disk1, include_relations=True) self.assertEqual(disk3.machine.name, 'testmachine1', 'Relations should be copied') disk4 = TestDisk() disk4.copy(disk1, include=['name']) self.assertEqual(disk4.name, 'test1', 'Name should be copied') self.assertEqual(disk4.size, 0, 'Size should not be copied') self.assertEqual(disk4.machine, None, 'Relations should not be copied') disk5 = TestDisk() disk5.copy(disk1, exclude=['name']) self.assertEqual(disk5.name, None, 'Name should not be copied') self.assertEqual(disk5.size, 100, 'Size should be copied') self.assertEqual(disk5.machine, None, 'Relations should not be copied') def test_querydynamic(self): """ Validates whether a query that queried dynamic properties is never cached """ def get_disks(): return DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('used_size', DataList.operator.NOT_EQUALS, -1)]}}) disk1 = TestDisk() disk1.name = 'disk 1' disk1.size = 100 disk1.save() disk2 = TestDisk() disk2.name = 'disk 2' disk2.size = 100 disk2.save() query_result = get_disks() self.assertEqual(len(query_result.data), 2, 'There should be 2 disks ({0})'.format(len(query_result.data))) self.assertFalse(query_result.from_cache, 'Disk should not be loaded from cache') query_result = get_disks() self.assertFalse(query_result.from_cache, 'Disk should not be loaded from cache') def test_delete_abandoning(self): """ Validates the abandoning behavior of the delete method """ machine = TestMachine() machine.name = 'machine' machine.save() disk_1 = TestDisk() disk_1.name = 'disk 1' disk_1.machine = machine disk_1.save() disk_2 = TestDisk() disk_2.name = 'disk 2' disk_2.machine = machine disk_2.save() self.assertRaises(LinkedObjectException, machine.delete) disk_3 = TestDisk(disk_1.guid) self.assertIsNotNone(disk_3.machine, 'The machine should still be linked') _ = machine.disks # Make sure we 
loaded the list disk_2.delete() machine.delete(abandon=True) # Should not raise due to disk_2 being deleted disk_4 = TestDisk(disk_1.guid) self.assertIsNone(disk_4.machine, 'The machine should be unlinked') def test_save_deleted(self): """ Validates whether saving a previously deleted object raises """ disk = TestDisk() disk.name = 'disk' disk.save() disk.delete() self.assertRaises(ObjectNotFoundException, disk.save, 'Cannot resave a deleted object') def test_dol_advanced(self): """ Validates the DataObjectList advanced functions (indexer, sort) """ sizes = [7, 2, 0, 4, 6, 1, 5, 9, 3, 8] guids = [] for i in xrange(0, 10): disk = TestDisk() disk.name = 'disk_{0}'.format(i) disk.size = sizes[i] disk.save() guids.append(disk.guid) data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': []}}).data disks = DataObjectList(data, TestDisk) disks.sort() guids.sort() self.assertEqual(disks[0].guid, guids[0], 'Disks should be sorted on guid') self.assertEqual(disks[4].guid, guids[4], 'Disks should be sorted on guid') disks.sort(cmp=lambda a, b: a.size - b.size) self.assertEqual(disks[0].size, 0, 'Disks should be sorted on size') self.assertEqual(disks[4].size, 4, 'Disks should be sorted on size') disks.sort(key=lambda a: a.name) self.assertEqual(disks[0].name, 'disk_0', 'Disks should be sorted on name') self.assertEqual(disks[4].name, 'disk_4', 'Disks should be sorted on name') filtered = disks[1:4] self.assertEqual(filtered[0].name, 'disk_1', 'Disks should be properly sliced') self.assertEqual(filtered[2].name, 'disk_3', 'Disks should be properly sliced') def test_itemchange_during_list_build(self): """ Validates whether changing, creating or deleting objects while running a depending list will cause the list to be invalidated """ def inject_new(datalist_object): """ Creates a new object """ _ = datalist_object disk_x = TestDisk() disk_x.name = 'test' disk_x.save() def inject_delete(datalist_object): """ Deletes an object """ _ = datalist_object disk_1.delete() def inject_update(datalist_object): """ Updates an object """ _ = datalist_object disk_2.name = 'x' disk_2.save() disk_z = None disk_1 = TestDisk() disk_1.name = 'test' disk_1.save() disk_2 = TestDisk() disk_2.name = 'test' disk_2.save() # Validates new object creation data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('name', DataList.operator.EQUALS, 'test')]}}, post_query_hook=inject_new).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 2, 'Two disks should be found ({0})'.format(len(disks))) data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('name', DataList.operator.EQUALS, 'test')]}}).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 3, 'Three disks should be found ({0})'.format(len(disks))) # Clear the list cache for the next test VolatileFactory.store.delete('ovs_list_6ea1af78996c9eb24a92c968ccc5f16b16686a8134212ea562135046ba146db4') # Validates object change data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('name', DataList.operator.EQUALS, 'test')]}}, post_query_hook=inject_update).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 3, 'Three disks should be found ({0})'.format(len(disks))) data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': 
DataList.where_operator.AND, 'items': [('name', DataList.operator.EQUALS, 'test')]}}).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 2, 'Two disk should be found ({0})'.format(len(disks))) # Clear the list cache for the next test VolatileFactory.store.delete('ovs_list_6ea1af78996c9eb24a92c968ccc5f16b16686a8134212ea562135046ba146db4') # Validates object deletion data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('name', DataList.operator.EQUALS, 'test')]}}, post_query_hook=inject_delete).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 2, 'Two disks should be found ({0})'.format(len(disks))) data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('name', DataList.operator.EQUALS, 'test')]}}).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 1, 'One disks should be found ({0})'.format(len(disks))) _ = disk_z # Ignore this object not being used def test_guid_query(self): """ Validates whether queries can use the _guid fields """ machine = TestMachine() machine.name = 'machine' machine.save() disk = TestDisk() disk.name = 'test' disk.machine = machine disk.save() data = DataList({'object': TestDisk, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': [('machine_guid', DataList.operator.EQUALS, machine.guid)]}}).data disks = DataObjectList(data, TestDisk) self.assertEqual(len(disks), 1, 'There should be one disk ({0})'.format(len(disks))) def test_1_to_1(self): """ Validates whether 1-to-1 relations work correct """ machine = TestMachine() machine.name = 'machine' machine.save() self.assertIsNone(machine.one, 'The machine should not have a reverse disk relation') self.assertIsNone(machine.one_guid, 'The machine should have an empty disk _guid property') disk = TestDisk() disk.name = 'test' disk.one = machine disk.save() self.assertIsNotNone(machine.one, 'The machine should have a reverse disk relation') self.assertEqual(machine.one.name, 'test', 'The reverse 1-to-1 relation should work') self.assertEqual(disk.one.name, 'machine', 'The normal 1-to-1 relation should work') self.assertEqual(machine.one_guid, disk.guid, 'The reverse disk should be the correct one') with self.assertRaises(RuntimeError): machine.one = disk def test_auto_inheritance(self): """ Validates whether fetching a base hybrid will result in the extended object """ machine = TestMachine() self.assertEqual(Descriptor(machine.__class__), Descriptor(TestEMachine), 'The fetched TestMachine should be a TestEMachine') def test_relation_inheritance(self): """ Validates whether relations on inherited hybrids behave OK """ machine = TestMachine() machine.name = 'machine' machine.save() disk = TestDisk() disk.name = 'disk' disk.machine = machine # Validates relation acceptance (accepts TestEMachine) disk.save() machine.the_disk = disk # Validates whether _relations is build correctly machine.save() disk2 = TestDisk(disk.guid) self.assertEqual(Descriptor(disk2.machine.__class__), Descriptor(TestEMachine), 'The machine should be a TestEMachine') def test_extended_property(self): """ Validates whether an inherited object has all properties """ machine = TestEMachine() machine.name = 'emachine' machine.extended = 'ext' machine.save() machine2 = TestEMachine(machine.guid) self.assertEqual(machine2.name, 'emachine', 'The name of the extended machine should be correct') 
self.assertEqual(machine2.extended, 'ext', 'The extended property of the extended machine should be correct') def test_extended_filter(self): """ Validates whether base and extended hybrids behave the same in lists """ machine1 = TestMachine() machine1.name = 'basic' machine1.save() machine2 = TestEMachine() machine2.name = 'extended' machine2.save() data = DataList({'object': TestMachine, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': []}}).data datalist = DataObjectList(data, TestMachine) self.assertEqual(len(datalist), 2, 'There should be two machines if searched for TestMachine ({0})'.format(len(datalist))) data = DataList({'object': TestEMachine, 'data': DataList.select.GUIDS, 'query': {'type': DataList.where_operator.AND, 'items': []}}).data datalist = DataObjectList(data, TestMachine) self.assertEqual(len(datalist), 2, 'There should be two machines if searched for TestEMachine ({0})'.format(len(datalist))) def test_mandatory_fields(self): """ Validates whether mandatory properties and relations work """ machine = TestMachine() machine.extended = 'extended' machine.name = 'machine' machine.save() disk = TestDisk() # Modify relation to mandatory [_ for _ in disk._relations if _.name == 'machine'][0].mandatory = True # Continue test disk.name = None with self.assertRaises(MissingMandatoryFieldsException) as exception: disk.save() self.assertIn('name', exception.exception.message, 'Field name should be in exception message: {0}'.format(exception.exception.message)) self.assertIn('machine', exception.exception.message, 'Field machine should be in exception message: {0}'.format(exception.exception.message)) disk.name = 'disk' disk.machine = machine disk.save() disk.description = 'test' disk.storage = machine disk.save() # Restore relation [_ for _ in disk._relations if _.name == 'machine'][0].mandatory = False def test_saveorder(self): """ Validates whether the order of saving related objects doesn't matter """ machine1 = TestMachine() machine1.name = 'machine' disk1_1 = TestDisk() disk1_1.name = 'disk1' disk1_1.machine = machine1 disk1_1.save() disk1_2 = TestDisk() disk1_2.name = 'disk2' disk1_2.machine = machine1 disk1_2.save() machine1.save() self.assertEqual(len(machine1.disks), 2, 'There should be two disks. {0}'.format(len(machine1.disks))) machine2 = TestMachine() machine2.name = 'machine' machine2.save() disk2_1 = TestDisk() disk2_1.name = 'disk1' disk2_1.machine = machine2 disk2_1.save() disk2_2 = TestDisk() disk2_2.name = 'disk2' disk2_2.machine = machine2 disk2_2.save() self.assertEqual(len(machine2.disks), 2, 'There should be two disks. {0}'.format(len(machine2.disks))) if __name__ == '__main__': import unittest suite = unittest.TestLoader().loadTestsFromTestCase(Basic) unittest.TextTestRunner(verbosity=2).run(suite)
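# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a condensed version
# of the create-then-query pattern the tests exercise, kept here purely for
# reference. `_example_datalist_usage` is a hypothetical helper introduced for
# documentation only; it is never invoked by the suite and relies on the same
# TestDisk / DataList / DataObjectList objects the tests use.
# ---------------------------------------------------------------------------
def _example_datalist_usage():
    """Create a disk, then fetch it back through a GUIDS DataList query."""
    disk = TestDisk()
    disk.name = 'example'
    disk.size = 10
    disk.save()
    # Query by property, exactly as done in test_queries above
    data = DataList({'object': TestDisk,
                     'data': DataList.select.GUIDS,
                     'query': {'type': DataList.where_operator.AND,
                               'items': [('size', DataList.operator.EQUALS, 10)]}}).data
    # Wrap the returned guids in a DataObjectList to work with hybrid objects
    return DataObjectList(data, TestDisk)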
import numpy as np

from .settings import MWA_FREQ_EOR_ALL_80KHZ

__all__ = ['bin_freqs', 'crop_arr', 'gen_radius_array', 'gen_radial_mask',
           'is_empty_list', 'radial_profile', 'LazyProperty',
           'get_channel_indexes_per_bin']


class LazyProperty(object):
    """Descriptor that evaluates the wrapped function once per instance and
    caches the result in the instance's ``__dict__``."""

    def __init__(self, func):
        self._func = func
        self.__name__ = func.__name__
        self.__doc__ = func.__doc__

    def __get__(self, obj, klass=None):
        if obj is None:
            return None
        result = obj.__dict__[self.__name__] = self._func(obj)
        return result


def radial_profile(data, center, scale=(1, 1), normalized=False,
                   max_r='circle'):
    """
    Compute radial sum and radial average of a 2D array.

    Parameters
    ----------
    data: 2D numpy ndarray
        Data to calculate the profile.
    center: (float, float)
        (x-center, y-center) of data.
    scale: (float, float), optional
        Scaling factors by which x and y are multiplied. Default is (1, 1).
    normalized: bool, optional
        Normalize the outputs by their maxima. Default is False.
    max_r: {float, 'circle', 'all'}
        Maximum radial distance. A float will limit the radial distance to
        that number. 'circle' will limit the radius to the largest full
        concentric circle. 'all' will include all radii, including those not
        in a full circle. Everything else falls back to 'all'.

    Returns
    -------
    radial_sum: numpy ndarray of floats
        Radial sum profile. length = numpy.amax(r) + 1
    radial_avg: numpy ndarray of floats
        Radial average profile. length = numpy.amax(r) + 1
    r: numpy ndarray of ints
        Radius corresponding to the profiles.

    Note
    ----
    Due to the use of numpy.bincount, the length of an output array is
    limited to numpy.amax(r) + 1, where:
    r = np.sqrt(((x - center[0]) * scale[0]) ** 2 +
                ((y - center[1]) * scale[1]) ** 2)

    """
    y, x = np.indices(data.shape)
    r = np.sqrt(((x - center[0]) * scale[0]) ** 2 +
                ((y - center[1]) * scale[1]) ** 2)
    r = r.astype('int')
    radial_sum = np.bincount(r.ravel(), data.ravel())
    nr = np.bincount(r.ravel())
    radial_avg = radial_sum / nr
    if normalized:
        radial_sum /= radial_sum.max()
        radial_avg /= radial_avg.max()
    if isinstance(max_r, (float, int)):
        cut = slice(0, max_r)
    elif max_r == 'circle':
        max_r_ = np.min(
            [np.array([center[0] - 0, data.shape[0] - center[0]]) * scale[0],
             np.array([center[1] - 0, data.shape[1] - center[1]]) * scale[1]]
        )
        cut = slice(0, max_r_)
    else:
        cut = slice(0, -1)
    return radial_sum[cut], radial_avg[cut], np.arange(np.amax(r) + 1)[cut]


def is_empty_list(inlist):
    """Return True if `inlist` is a (possibly nested) list that contains no
    non-list elements."""
    if isinstance(inlist, list):
        out = all(map(is_empty_list, inlist))
    else:
        out = False
    return out


def crop_arr(in_arr, mask):
    """Set pixels outside `mask` to NaN and crop `in_arr` around its center
    to roughly the masked extent."""
    crop_length = np.ceil(mask.sum(0).max() / 2.)
    xcen, ycen = np.array(in_arr.shape) / 2.
    crop_slice = np.s_[xcen-crop_length:xcen+crop_length,
                       ycen-crop_length:ycen+crop_length]
    in_arr[~mask] = np.nan
    out_arr = in_arr[crop_slice]
    return out_arr


def gen_radius_array(shape, center, xy_scale=None, r_scale=None):
    """
    Make a 2D array of radius values from a specific center.

    Parameters
    ----------
    shape: (int, int)
        Size for the x and y dimensions of the array in pixels.
    center: (float, float)
        Center for the x and y dimensions of the array in pixels.
    xy_scale: float or (float, float), optional
        Scale factor to apply to the x and y dimensions before generating the
        array. If a single number is given, this is a scale factor for both x
        and y. If a (float, float) is given, these are (x scale, y scale).
    r_scale: float, optional
        Scale factor to apply to the radius. Overrides `xy_scale`, i.e. the
        radius will be calculated in pixel units before applying this scaling
        factor.

    Returns
    -------
    r: array
        Radius array.

    """
    # Figure out all the scaling complexity
    if r_scale is not None:
        rscale = r_scale
        xscale = 1
        yscale = 1
    else:
        if isinstance(xy_scale, (tuple, list, np.ndarray)):
            rscale = 1
            xscale = xy_scale[0]
            yscale = xy_scale[1]
        elif isinstance(xy_scale, (float, int)):
            rscale = 1
            xscale = xy_scale
            yscale = xy_scale
        else:
            rscale = 1
            xscale = 1
            yscale = 1
    x = (np.arange(shape[0]) - center[0]) * xscale
    y = (np.arange(shape[1]) - center[1]) * yscale
    r = np.sqrt(x[:, np.newaxis] ** 2 + y ** 2) * rscale
    return r


def gen_radial_mask(shape, center, radius, mask=True, xy_scale=None,
                    r_scale=None):
    """
    Generate a 2D radial mask array.

    Pixels within radius=(rmin, rmax) from a specified center will be set to
    the value in `mask`.

    Parameters
    ----------
    shape: (int, int)
        Size for the x and y dimensions of an array in pixels.
    center: (float, float)
        Center for the x and y dimensions of an array in pixels.
    radius: (float, float)
        Minimum and maximum radius of the masking region from the center of
        an array. This region will be masked with the value specified in
        `mask`. `xy_scale` and/or `r_scale` will be applied before masking.
    xy_scale: float or (float, float), optional
        Scale factor to apply to the x and y dimensions before generating a
        mask. If a single number is given, it will be used for both x and y.
        If a (float, float) is given, these are (x scale, y scale).
    r_scale: float, optional
        Scale factor to apply to the radius. Overrides `xy_scale`, i.e. the
        radius will be calculated in pixel units before applying this scaling
        factor.
    mask: {True, False}, optional
        Whether to set the masked region to `True` or `False`. Default is
        True.

    Returns
    -------
    mask: bool array
        A boolean array of shape `shape` with pixels within `radius` from the
        center set to the value of `mask`, and the logical opposite elsewhere.

    """
    r = gen_radius_array(shape, center, xy_scale=xy_scale, r_scale=r_scale)
    out = (r >= radius[0]) & (r <= radius[1])
    return out if mask else np.logical_not(out)


def bin_freqs(bin_width, native_channel_width=0.08, freqs_list=None):
    """Group frequency channels into bins.

    Parameters
    ----------
    bin_width : float
        Frequency bandwidth of the bin.
    native_channel_width : float
        Native frequency bandwidth of the channel.
    freqs_list : array-like
        List of frequency channels. Must be in ascending order. Assumes
        contiguous frequency channels.

    Returns
    -------
    ch_list_per_bin: list
        List of ndarray containing frequency channels per bin.
    bin_centers: ndarray
        Center frequency of each bin.

    """
    if freqs_list is None:
        freqs_list = MWA_FREQ_EOR_ALL_80KHZ
    if bin_width == native_channel_width:
        ch_list_per_bin = [[f] for f in freqs_list]
    else:
        nchannel_per_bin = int(np.ceil(bin_width / native_channel_width))
        # nbins = int(np.ceil(len(freqs) / float(nchannel_per_bin)))
        ch_list_per_bin = np.array_split(
            freqs_list,
            np.arange(len(freqs_list), 0, -nchannel_per_bin)[-1:0:-1]
        )
    bin_centers = np.array([(f[0] + f[-1]) / 2.
                            for f in ch_list_per_bin]).ravel()
    out = (ch_list_per_bin, bin_centers)
    return out


def get_channel_indexes_per_bin(bin_width, native_channel_width, nchannels):
    """Like `bin_freqs`, but group channel indexes 0..nchannels-1 instead of
    frequency values."""
    nchannel_per_bin = int(np.ceil(bin_width / native_channel_width))
    channel_indexes_per_bin = np.array_split(
        np.arange(nchannels),
        np.arange(nchannels, 0, -nchannel_per_bin)[-1:0:-1]
    )
    return channel_indexes_per_bin
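# ---------------------------------------------------------------------------
# Illustrative usage sketch (not used anywhere in this module): it only chains
# the helpers defined above on a small synthetic image to show the expected
# call signatures. `_example_usage` is a hypothetical name added here for
# documentation purposes.
# ---------------------------------------------------------------------------
def _example_usage():
    """Minimal sketch of radial_profile, gen_radial_mask and bin_freqs."""
    image = np.ones((16, 16))
    center = (8, 8)
    # Radial sum/average out to a radius of 6 pixels (an integer max_r keeps
    # the output slicing simple).
    radial_sum, radial_avg, r = radial_profile(image, center, max_r=6)
    # Boolean annulus selecting pixels with 2 <= radius <= 5.
    annulus = gen_radial_mask(image.shape, center, radius=(2, 5))
    # Group channels of native width 0.08 into bins of width 0.32.
    ch_list_per_bin, bin_centers = bin_freqs(
        0.32, 0.08, freqs_list=np.arange(138.0, 140.0, 0.08))
    return radial_sum, radial_avg, r, annulus, ch_list_per_bin, bin_centers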
from functools import partial from django.test import TestCase from django.utils.safestring import SafeString from wagtail.admin import compare from wagtail.core.blocks import StreamValue from wagtail.images import get_image_model from wagtail.images.tests.utils import get_test_image_file from wagtail.tests.testapp.models import ( AdvertWithCustomPrimaryKey, EventCategory, EventPage, EventPageSpeaker, HeadCountRelatedModelUsingPK, SimplePage, SnippetChooserModelWithCustomPrimaryKey, StreamPage, TaggedPage) class TestFieldComparison(TestCase): comparison_class = compare.FieldComparison def test_hasnt_changed(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content="Content"), SimplePage(content="Content"), ) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Content") self.assertEqual(comparison.htmldiff(), 'Content') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertFalse(comparison.has_changed()) def test_has_changed(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content="Original content"), SimplePage(content="Modified content"), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Original content</span><span class="addition">Modified content</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_htmldiff_escapes_value(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content='Original content'), SimplePage(content='<script type="text/javascript">doSomethingBad();</script>'), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Original content</span><span class="addition">&lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) class TestTextFieldComparison(TestFieldComparison): comparison_class = compare.TextFieldComparison # Only change from FieldComparison is the HTML diff is performed on words # instead of the whole field value. 
def test_has_changed(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content="Original content"), SimplePage(content="Modified content"), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Original</span><span class="addition">Modified</span> content') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_from_none_to_value_only_shows_addition(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content=None), SimplePage(content="Added content") ) self.assertEqual(comparison.htmldiff(), '<span class="addition">Added content</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_from_value_to_none_only_shows_deletion(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content="Removed content"), SimplePage(content=None) ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Removed content</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) class TestRichTextFieldComparison(TestFieldComparison): comparison_class = compare.RichTextFieldComparison # Only change from FieldComparison is the HTML diff is performed on words # instead of the whole field value. def test_has_changed(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content="Original content"), SimplePage(content="Modified content"), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Original</span><span class="addition">Modified</span> content') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) # Only change from FieldComparison is that this comparison disregards HTML tags def test_has_changed_html(self): comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content="<b>Original</b> content"), SimplePage(content="Modified <i>content</i>"), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Original</span><span class="addition">Modified</span> content') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_htmldiff_escapes_value(self): # Need to override this one as the HTML tags are stripped by RichTextFieldComparison comparison = self.comparison_class( SimplePage._meta.get_field('content'), SimplePage(content='Original content'), SimplePage(content='<script type="text/javascript">doSomethingBad();</script>'), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Original content</span><span class="addition">doSomethingBad();</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) class TestStreamFieldComparison(TestCase): comparison_class = compare.StreamFieldComparison def test_hasnt_changed(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ])), ) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Body") self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Content</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertFalse(comparison.has_changed()) def 
test_has_changed(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Original content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Modified content", '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object"><span class="deletion">Original</span><span class="addition">Modified</span> content</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_add_block(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ('text', "New Content", '2'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Content</div>\n<div class="comparison__child-object addition">New Content</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_delete_block(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ('text', "Content Foo", '2'), ('text', "Content Bar", '3'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ('text', "Content Bar", '3'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Content</div>\n<div class="comparison__child-object deletion">Content Foo</div>\n<div class="comparison__child-object">Content Bar</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_edit_block(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ('text', "Content Foo", '2'), ('text', "Content Bar", '3'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Content", '1'), ('text', "Content Baz", '2'), ('text', "Content Bar", '3'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Content</div>\n<div class="comparison__child-object">Content <span class="deletion">Foo</span><span class="addition">Baz</span></div>\n<div class="comparison__child-object">Content Bar</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_has_changed_richtext(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "<b>Original</b> content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "Modified <i>content</i>", '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object"><span class="deletion">Original</span><span class="addition">Modified</span> content</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_htmldiff_escapes_value_on_change(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "I <b>really</b> like original<i>ish</i> content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', 'I <b>really</b> like evil code <script 
type="text/javascript">doSomethingBad();</script>', '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">I &lt;b&gt;really&lt;/b&gt; like <span class="deletion">original&lt;i&gt;ish&lt;/i&gt; content</span><span class="addition">evil code &lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</span></div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_escapes_value_on_addition(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Original <em>and unchanged</em> content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Original <em>and unchanged</em> content", '1'), ('text', '<script type="text/javascript">doSomethingBad();</script>', '2'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Original &lt;em&gt;and unchanged&lt;/em&gt; content</div>\n<div class="comparison__child-object addition">&lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_escapes_value_on_deletion(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('text', "Original <em>and unchanged</em> content", '1'), ('text', '<script type="text/javascript">doSomethingBad();</script>', '2'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('text', "Original <em>and unchanged</em> content", '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Original &lt;em&gt;and unchanged&lt;/em&gt; content</div>\n<div class="comparison__child-object deletion">&lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_richtext_strips_tags_on_change(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "I <b>really</b> like Wagtail &lt;3", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', 'I <b>really</b> like evil code &gt;_&lt; <script type="text/javascript">doSomethingBad();</script>', '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">I really like <span class="deletion">Wagtail &lt;3</span><span class="addition">evil code &gt;_&lt; doSomethingBad();</span></div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_richtext_strips_tags_on_addition(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "Original <em>and unchanged</em> content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "Original <em>and unchanged</em> content", '1'), ('rich_text', 'I <b>really</b> like evil code &gt;_&lt; <script type="text/javascript">doSomethingBad();</script>', '2'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Original and unchanged content</div>\n<div class="comparison__child-object addition">I really like evil code &gt;_&lt; doSomethingBad();</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_richtext_strips_tags_on_deletion(self): field = 
StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "Original <em>and unchanged</em> content", '1'), ('rich_text', 'I <b>really</b> like evil code &gt;_&lt; <script type="text/javascript">doSomethingBad();</script>', '2'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('rich_text', "Original <em>and unchanged</em> content", '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Original and unchanged content</div>\n<div class="comparison__child-object deletion">I really like evil code &gt;_&lt; doSomethingBad();</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_raw_html_escapes_value_on_change(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('raw_html', "Original<i>ish</i> content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('raw_html', '<script type="text/javascript">doSomethingBad();</script>', '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object"><span class="deletion">Original&lt;i&gt;ish&lt;/i&gt; content</span><span class="addition">&lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</span></div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_raw_html_escapes_value_on_addition(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('raw_html', "Original <em>and unchanged</em> content", '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('raw_html', "Original <em>and unchanged</em> content", '1'), ('raw_html', '<script type="text/javascript">doSomethingBad();</script>', '2'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Original &lt;em&gt;and unchanged&lt;/em&gt; content</div>\n<div class="comparison__child-object addition">&lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_htmldiff_raw_html_escapes_value_on_deletion(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('raw_html', "Original <em>and unchanged</em> content", '1'), ('raw_html', '<script type="text/javascript">doSomethingBad();</script>', '2'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('raw_html', "Original <em>and unchanged</em> content", '1'), ])), ) self.assertEqual(comparison.htmldiff(), '<div class="comparison__child-object">Original &lt;em&gt;and unchanged&lt;/em&gt; content</div>\n<div class="comparison__child-object deletion">&lt;script type=&quot;text/javascript&quot;&gt;doSomethingBad();&lt;/script&gt;</div>') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_compare_structblock(self): field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('product', {'name': 'a packet of rolos', 'price': '75p'}, '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('product', {'name': 'a packet of rolos', 'price': '85p'}, '1'), ])), ) expected = """ <div class="comparison__child-object"><dl> <dt>Name</dt> <dd>a packet of rolos</dd> <dt>Price</dt> <dd><span class="deletion">75p</span><span 
class="addition">85p</span></dd> </dl></div> """ self.assertHTMLEqual(comparison.htmldiff(), expected) self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_compare_imagechooserblock(self): image_model = get_image_model() test_image_1 = image_model.objects.create( title="Test image 1", file=get_test_image_file(), ) test_image_2 = image_model.objects.create( title="Test image 2", file=get_test_image_file(), ) field = StreamPage._meta.get_field('body') comparison = self.comparison_class( field, StreamPage(body=StreamValue(field.stream_block, [ ('image', test_image_1, '1'), ])), StreamPage(body=StreamValue(field.stream_block, [ ('image', test_image_2, '1'), ])), ) result = comparison.htmldiff() self.assertIn('<div class="preview-image deletion">', result) self.assertIn('alt="Test image 1"', result) self.assertIn('<div class="preview-image addition">', result) self.assertIn('alt="Test image 2"', result) self.assertIsInstance(result, SafeString) self.assertTrue(comparison.has_changed()) class TestChoiceFieldComparison(TestCase): comparison_class = compare.ChoiceFieldComparison def test_hasnt_changed(self): comparison = self.comparison_class( EventPage._meta.get_field('audience'), EventPage(audience="public"), EventPage(audience="public"), ) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Audience") self.assertEqual(comparison.htmldiff(), 'Public') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertFalse(comparison.has_changed()) def test_has_changed(self): comparison = self.comparison_class( EventPage._meta.get_field('audience'), EventPage(audience="public"), EventPage(audience="private"), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Public</span><span class="addition">Private</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_from_none_to_value_only_shows_addition(self): comparison = self.comparison_class( EventPage._meta.get_field('audience'), EventPage(audience=None), EventPage(audience="private"), ) self.assertEqual(comparison.htmldiff(), '<span class="addition">Private</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) def test_from_value_to_none_only_shows_deletion(self): comparison = self.comparison_class( EventPage._meta.get_field('audience'), EventPage(audience="public"), EventPage(audience=None), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Public</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) class TestTagsFieldComparison(TestCase): comparison_class = compare.TagsFieldComparison def test_hasnt_changed(self): a = TaggedPage() a.tags.add('wagtail') a.tags.add('bird') b = TaggedPage() b.tags.add('wagtail') b.tags.add('bird') comparison = self.comparison_class(TaggedPage._meta.get_field('tags'), a, b) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Tags") self.assertEqual(comparison.htmldiff(), 'wagtail, bird') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertFalse(comparison.has_changed()) def test_has_changed(self): a = TaggedPage() a.tags.add('wagtail') a.tags.add('bird') b = TaggedPage() b.tags.add('wagtail') b.tags.add('motacilla') comparison = self.comparison_class(TaggedPage._meta.get_field('tags'), a, b) 
self.assertEqual(comparison.htmldiff(), 'wagtail, <span class="deletion">bird</span>, <span class="addition">motacilla</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) class TestM2MFieldComparison(TestCase): fixtures = ['test.json'] comparison_class = compare.M2MFieldComparison def setUp(self): self.meetings_category = EventCategory.objects.create(name='Meetings') self.parties_category = EventCategory.objects.create(name='Parties') self.holidays_category = EventCategory.objects.create(name='Holidays') def test_hasnt_changed(self): christmas_event = EventPage.objects.get(url_path='/home/events/christmas/') saint_patrick_event = EventPage.objects.get(url_path='/home/events/saint-patrick/') christmas_event.categories = [self.meetings_category, self.parties_category] saint_patrick_event.categories = [self.meetings_category, self.parties_category] comparison = self.comparison_class( EventPage._meta.get_field('categories'), christmas_event, saint_patrick_event ) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Categories") self.assertFalse(comparison.has_changed()) self.assertEqual(comparison.htmldiff(), 'Meetings, Parties') self.assertIsInstance(comparison.htmldiff(), SafeString) def test_has_changed(self): christmas_event = EventPage.objects.get(url_path='/home/events/christmas/') saint_patrick_event = EventPage.objects.get(url_path='/home/events/saint-patrick/') christmas_event.categories = [self.meetings_category, self.parties_category] saint_patrick_event.categories = [self.meetings_category, self.holidays_category] comparison = self.comparison_class( EventPage._meta.get_field('categories'), christmas_event, saint_patrick_event ) self.assertTrue(comparison.has_changed()) self.assertEqual(comparison.htmldiff(), 'Meetings, <span class="deletion">Parties</span>, <span class="addition">Holidays</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) class TestForeignObjectComparison(TestCase): comparison_class = compare.ForeignObjectComparison @classmethod def setUpTestData(cls): image_model = get_image_model() cls.test_image_1 = image_model.objects.create( title="Test image 1", file=get_test_image_file(), ) cls.test_image_2 = image_model.objects.create( title="Test image 2", file=get_test_image_file(), ) def test_hasnt_changed(self): comparison = self.comparison_class( EventPage._meta.get_field('feed_image'), EventPage(feed_image=self.test_image_1), EventPage(feed_image=self.test_image_1), ) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Feed image") self.assertEqual(comparison.htmldiff(), 'Test image 1') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertFalse(comparison.has_changed()) def test_has_changed(self): comparison = self.comparison_class( EventPage._meta.get_field('feed_image'), EventPage(feed_image=self.test_image_1), EventPage(feed_image=self.test_image_2), ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Test image 1</span><span class="addition">Test image 2</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) class TestForeignObjectComparisonWithCustomPK(TestCase): """ForeignObjectComparison works with models declaring a custom primary key field""" comparison_class = compare.ForeignObjectComparison @classmethod def setUpTestData(cls): ad1 = 
AdvertWithCustomPrimaryKey.objects.create( advert_id='ad1', text='Advert 1' ) ad2 = AdvertWithCustomPrimaryKey.objects.create( advert_id='ad2', text='Advert 2' ) cls.test_obj_1 = SnippetChooserModelWithCustomPrimaryKey.objects.create( advertwithcustomprimarykey=ad1 ) cls.test_obj_2 = SnippetChooserModelWithCustomPrimaryKey.objects.create( advertwithcustomprimarykey=ad2 ) def test_hasnt_changed(self): comparison = self.comparison_class( SnippetChooserModelWithCustomPrimaryKey._meta.get_field('advertwithcustomprimarykey'), self.test_obj_1, self.test_obj_1, ) self.assertTrue(comparison.is_field) self.assertFalse(comparison.is_child_relation) self.assertEqual(comparison.field_label(), 'Advertwithcustomprimarykey') self.assertEqual(comparison.htmldiff(), 'Advert 1') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertFalse(comparison.has_changed()) def test_has_changed(self): comparison = self.comparison_class( SnippetChooserModelWithCustomPrimaryKey._meta.get_field('advertwithcustomprimarykey'), self.test_obj_1, self.test_obj_2, ) self.assertEqual(comparison.htmldiff(), '<span class="deletion">Advert 1</span><span class="addition">Advert 2</span>') self.assertIsInstance(comparison.htmldiff(), SafeString) self.assertTrue(comparison.has_changed()) class TestChildRelationComparison(TestCase): field_comparison_class = compare.FieldComparison comparison_class = compare.ChildRelationComparison def test_hasnt_changed(self): # Two event pages with speaker called "Father Christmas". Neither of # the speaker objects have an ID so this tests that the code can match # the two together by field content. event_page = EventPage(title="Event page", slug="event") event_page.speakers.add(EventPageSpeaker( first_name="Father", last_name="Christmas", )) modified_event_page = EventPage(title="Event page", slug="event") modified_event_page.speakers.add(EventPageSpeaker( first_name="Father", last_name="Christmas", )) comparison = self.comparison_class( EventPage._meta.get_field('speaker'), [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], event_page, modified_event_page, ) self.assertFalse(comparison.is_field) self.assertTrue(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Speaker") self.assertFalse(comparison.has_changed()) # Check mapping objs_a = list(comparison.val_a.all()) objs_b = list(comparison.val_b.all()) map_forwards, map_backwards, added, deleted = comparison.get_mapping(objs_a, objs_b) self.assertEqual(map_forwards, {0: 0}) self.assertEqual(map_backwards, {0: 0}) self.assertEqual(added, []) self.assertEqual(deleted, []) def test_has_changed(self): # Father Christmas renamed to Santa Claus. And Father Ted added. # Father Christmas should be mapped to Father Ted because they # are most alike. 
Santa claus should be displayed as "new" event_page = EventPage(title="Event page", slug="event") event_page.speakers.add(EventPageSpeaker( first_name="Father", last_name="Christmas", sort_order=0, )) modified_event_page = EventPage(title="Event page", slug="event") modified_event_page.speakers.add(EventPageSpeaker( first_name="Santa", last_name="Claus", sort_order=0, )) modified_event_page.speakers.add(EventPageSpeaker( first_name="Father", last_name="Ted", sort_order=1, )) comparison = self.comparison_class( EventPage._meta.get_field('speaker'), [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], event_page, modified_event_page, ) self.assertFalse(comparison.is_field) self.assertTrue(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Speaker") self.assertTrue(comparison.has_changed()) # Check mapping objs_a = list(comparison.val_a.all()) objs_b = list(comparison.val_b.all()) map_forwards, map_backwards, added, deleted = comparison.get_mapping(objs_a, objs_b) self.assertEqual(map_forwards, {0: 1}) # Map Father Christmas to Father Ted self.assertEqual(map_backwards, {1: 0}) # Map Father Ted ot Father Christmas self.assertEqual(added, [0]) # Add Santa Claus self.assertEqual(deleted, []) def test_has_changed_with_same_id(self): # Father Christmas renamed to Santa Claus, but this time the ID of the # child object remained the same. It should now be detected as the same # object event_page = EventPage(title="Event page", slug="event") event_page.speakers.add(EventPageSpeaker( id=1, first_name="Father", last_name="Christmas", sort_order=0, )) modified_event_page = EventPage(title="Event page", slug="event") modified_event_page.speakers.add(EventPageSpeaker( id=1, first_name="Santa", last_name="Claus", sort_order=0, )) modified_event_page.speakers.add(EventPageSpeaker( first_name="Father", last_name="Ted", sort_order=1, )) comparison = self.comparison_class( EventPage._meta.get_field('speaker'), [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], event_page, modified_event_page, ) self.assertFalse(comparison.is_field) self.assertTrue(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Speaker") self.assertTrue(comparison.has_changed()) # Check mapping objs_a = list(comparison.val_a.all()) objs_b = list(comparison.val_b.all()) map_forwards, map_backwards, added, deleted = comparison.get_mapping(objs_a, objs_b) self.assertEqual(map_forwards, {0: 0}) # Map Father Christmas to Santa Claus self.assertEqual(map_backwards, {0: 0}) # Map Santa Claus to Father Christmas self.assertEqual(added, [1]) # Add Father Ted self.assertEqual(deleted, []) def test_hasnt_changed_with_different_id(self): # Both of the child objects have the same field content but have a # different ID so they should be detected as separate objects event_page = EventPage(title="Event page", slug="event") event_page.speakers.add(EventPageSpeaker( id=1, first_name="Father", last_name="Christmas", )) modified_event_page = EventPage(title="Event page", slug="event") modified_event_page.speakers.add(EventPageSpeaker( id=2, first_name="Father", last_name="Christmas", )) comparison = self.comparison_class( EventPage._meta.get_field('speaker'), [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), 
partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], event_page, modified_event_page, ) self.assertFalse(comparison.is_field) self.assertTrue(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Speaker") self.assertTrue(comparison.has_changed()) # Check mapping objs_a = list(comparison.val_a.all()) objs_b = list(comparison.val_b.all()) map_forwards, map_backwards, added, deleted = comparison.get_mapping(objs_a, objs_b) self.assertEqual(map_forwards, {}) self.assertEqual(map_backwards, {}) self.assertEqual(added, [0]) # Add new Father Christmas self.assertEqual(deleted, [0]) # Delete old Father Christmas class TestChildObjectComparison(TestCase): field_comparison_class = compare.FieldComparison comparison_class = compare.ChildObjectComparison def test_same_object(self): obj_a = EventPageSpeaker( first_name="Father", last_name="Christmas", ) obj_b = EventPageSpeaker( first_name="Father", last_name="Christmas", ) comparison = self.comparison_class( EventPageSpeaker, [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], obj_a, obj_b, ) self.assertFalse(comparison.is_addition()) self.assertFalse(comparison.is_deletion()) self.assertFalse(comparison.has_changed()) self.assertEqual(comparison.get_position_change(), 0) self.assertEqual(comparison.get_num_differences(), 0) def test_different_object(self): obj_a = EventPageSpeaker( first_name="Father", last_name="Christmas", ) obj_b = EventPageSpeaker( first_name="Santa", last_name="Claus", ) comparison = self.comparison_class( EventPageSpeaker, [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], obj_a, obj_b, ) self.assertFalse(comparison.is_addition()) self.assertFalse(comparison.is_deletion()) self.assertTrue(comparison.has_changed()) self.assertEqual(comparison.get_position_change(), 0) self.assertEqual(comparison.get_num_differences(), 2) def test_moved_object(self): obj_a = EventPageSpeaker( first_name="Father", last_name="Christmas", sort_order=1, ) obj_b = EventPageSpeaker( first_name="Father", last_name="Christmas", sort_order=5, ) comparison = self.comparison_class( EventPageSpeaker, [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], obj_a, obj_b, ) self.assertFalse(comparison.is_addition()) self.assertFalse(comparison.is_deletion()) self.assertFalse(comparison.has_changed()) self.assertEqual(comparison.get_position_change(), 4) self.assertEqual(comparison.get_num_differences(), 0) def test_addition(self): obj = EventPageSpeaker( first_name="Father", last_name="Christmas", ) comparison = self.comparison_class( EventPageSpeaker, [ partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], None, obj, ) self.assertTrue(comparison.is_addition()) self.assertFalse(comparison.is_deletion()) self.assertFalse(comparison.has_changed()) self.assertIsNone(comparison.get_position_change(), 0) self.assertEqual(comparison.get_num_differences(), 0) def test_deletion(self): obj = EventPageSpeaker( first_name="Father", last_name="Christmas", ) comparison = self.comparison_class( EventPageSpeaker, [ 
partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('first_name')), partial(self.field_comparison_class, EventPageSpeaker._meta.get_field('last_name')), ], obj, None, ) self.assertFalse(comparison.is_addition()) self.assertTrue(comparison.is_deletion()) self.assertFalse(comparison.has_changed()) self.assertIsNone(comparison.get_position_change()) self.assertEqual(comparison.get_num_differences(), 0) class TestChildRelationComparisonUsingPK(TestCase): """Test related objects can be compred if they do not use id for primary key""" field_comparison_class = compare.FieldComparison comparison_class = compare.ChildRelationComparison def test_has_changed_with_same_id(self): # Head Count was changed but the PK of the child object remained the same. # It should be detected as the same object event_page = EventPage(title="Semi Finals", slug="semi-finals-2018") event_page.head_counts.add(HeadCountRelatedModelUsingPK( custom_id=1, head_count=22, )) modified_event_page = EventPage(title="Semi Finals", slug="semi-finals-2018") modified_event_page.head_counts.add(HeadCountRelatedModelUsingPK( custom_id=1, head_count=23, )) modified_event_page.head_counts.add(HeadCountRelatedModelUsingPK( head_count=25, )) comparison = self.comparison_class( EventPage._meta.get_field('head_counts'), [partial(self.field_comparison_class, HeadCountRelatedModelUsingPK._meta.get_field('head_count'))], event_page, modified_event_page, ) self.assertFalse(comparison.is_field) self.assertTrue(comparison.is_child_relation) self.assertEqual(comparison.field_label(), 'Head counts') self.assertTrue(comparison.has_changed()) # Check mapping objs_a = list(comparison.val_a.all()) objs_b = list(comparison.val_b.all()) map_forwards, map_backwards, added, deleted = comparison.get_mapping(objs_a, objs_b) self.assertEqual(map_forwards, {0: 0}) # map head count 22 to 23 self.assertEqual(map_backwards, {0: 0}) # map head count 23 to 22 self.assertEqual(added, [1]) # add second head count self.assertEqual(deleted, []) def test_hasnt_changed_with_different_id(self): # Both of the child objects have the same field content but have a # different PK (ID) so they should be detected as separate objects event_page = EventPage(title="Finals", slug="finals-event-abc") event_page.head_counts.add(HeadCountRelatedModelUsingPK( custom_id=1, head_count=220 )) modified_event_page = EventPage(title="Finals", slug="finals-event-abc") modified_event_page.head_counts.add(HeadCountRelatedModelUsingPK( custom_id=2, head_count=220 )) comparison = self.comparison_class( EventPage._meta.get_field('head_counts'), [partial(self.field_comparison_class, HeadCountRelatedModelUsingPK._meta.get_field('head_count'))], event_page, modified_event_page, ) self.assertFalse(comparison.is_field) self.assertTrue(comparison.is_child_relation) self.assertEqual(comparison.field_label(), "Head counts") self.assertTrue(comparison.has_changed()) # Check mapping objs_a = list(comparison.val_a.all()) objs_b = list(comparison.val_b.all()) map_forwards, map_backwards, added, deleted = comparison.get_mapping(objs_a, objs_b) self.assertEqual(map_forwards, {}) self.assertEqual(map_backwards, {}) self.assertEqual(added, [0]) # Add new head count self.assertEqual(deleted, [0]) # Delete old head count
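# Illustrative sketch (not Wagtail's actual implementation): the tests above
# exercise a get_mapping(objs_a, objs_b) helper that pairs old and new child
# objects and returns (map_forwards, map_backwards, added, deleted).  A minimal
# version that matches on primary key only could look like the function below;
# the real comparison code additionally falls back to per-field similarity for
# unsaved objects, which this sketch deliberately omits.
def _map_child_objects_by_pk(objs_a, objs_b):
    map_forwards = {}
    map_backwards = {}
    # Index the old objects by primary key so new objects can be matched to them.
    pks_a = {obj.pk: i for i, obj in enumerate(objs_a) if obj.pk is not None}
    for j, obj in enumerate(objs_b):
        if obj.pk is not None and obj.pk in pks_a:
            i = pks_a[obj.pk]
            map_forwards[i] = j
            map_backwards[j] = i
    # Anything unmatched on either side counts as an addition or a deletion.
    added = [j for j in range(len(objs_b)) if j not in map_backwards]
    deleted = [i for i in range(len(objs_a)) if i not in map_forwards]
    return map_forwards, map_backwards, added, deleted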
""" Django settings for bbp_oa project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import sys BASE_DIR = os.path.dirname(os.path.dirname(__file__)) APPS_DIR = os.path.join(BASE_DIR, 'bbp/apps') sys.path.insert(0, APPS_DIR) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True # Use this to review Settings at run time DEBUG_SETTINGS = True APPLICATION_TITLE="MedYear:Device" if DEBUG_SETTINGS: print "Application: %s" % APPLICATION_TITLE print "" print "BASE_DIR:%s " % BASE_DIR print "APPS_DIR:%s " % APPS_DIR ALLOWED_HOSTS = [] ADMINS = ( ('Mark Scrimshire', 'mark@ekivemark.com'), ) MANAGERS = ADMINS # Application definition INSTALLED_APPS = ( # add admin_bootstrapped items before django.contrib.admin 'django_admin_bootstrapped.bootstrap3', 'django_admin_bootstrapped', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'bootstrap3', 'bootstrap_themes', # https://django-oauth2-provider.readthedocs.org/en/latest/getting_started.html #'provider', #'provider.oauth2', # http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html 'oauth2_provider', 'corsheaders', 'rest_framework', 'device', 'bbp.member', 'bbp.member.vutils', ) AUTHENTICATION_BACKENDS = ( 'oauth2_provider.backends.OAuth2Backend', # Uncomment following if you want to access the admin 'django.contrib.auth.backends.ModelBackend', #'...', ) # https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#a-full-example #AUTH_USER_MODEL = 'member.MyUser' MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'corsheaders.middleware.CorsMiddleware', 'oauth2_provider.middleware.OAuth2TokenMiddleware', ) # http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html # Allow CORS requests from all domains (just for the scope of this tutorial): CORS_ORIGIN_ALLOW_ALL = True ROOT_URLCONF = 'bbp.urls' WSGI_APPLICATION = 'bbp.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DBPATH = os.path.join(BASE_DIR, 'db/db.db') if DEBUG_SETTINGS: print "DBPATH:",DBPATH DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': DBPATH, # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ # Local time zone for this installation. 
Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" # STATIC_ROOT = '' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' if DEBUG_SETTINGS: print "STATIC_ROOT:%s" % STATIC_ROOT ADMIN_MEDIA_PREFIX = '/static/admin' MAIN_STATIC_ROOT = os.path.join(BASE_DIR, 'mainstatic') if DEBUG_SETTINGS: print "MAIN_STATIC_ROOT:%s" % MAIN_STATIC_ROOT # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. MAIN_STATIC_ROOT, # '/Users/mark/PycharmProjects/virtualenv/rb/rainbowbutton/static', ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
os.path.join(BASE_DIR, 'bbp/templates'), ) TEMPLATE_VISIBLE_SETTINGS = { # Put Strings here that you want to be visible in the templates # then add settings_context_processor 'DEBUG', 'TEMPLATE_DEBUG', 'APPLICATION_TITLE', } TEMPLATE_MODULES = { # Put the names of custom modules in this section # This will be used by home.index to display a list of modules # that can be called 'privacy', 'about', 'contact', 'terms', 'faq', 'admin', 'member/view', 'member/get_id', 'accounts/logout', 'accounts/login', } TEMPLATE_CONTEXT_PROCESSORS = ( # Use a context processor to enable frequently used settings variables # to be used in templates 'django.contrib.auth.context_processors.auth', 'bbp.settings_context_processor.settings', ) # Default settings for bootstrap 3 BOOTSTRAP3 = { # The URL to the jQuery JavaScript file 'jquery_url': '//code.jquery.com/jquery.min.js', # The Bootstrap base URL 'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.2.0/', # The complete URL to the Bootstrap CSS file (None means derive it from base_url) 'css_url': None, # The complete URL to the Bootstrap CSS file (None means no theme) 'theme_url': None, # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url) 'javascript_url': None, # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html) 'javascript_in_head': False, # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags) 'include_jquery': False, # Label class to use in horizontal forms 'horizontal_label_class': 'col-md-2', # Field class to use in horizontal forms 'horizontal_field_class': 'col-md-4', # Set HTML required attribute on required fields 'set_required': True, # Set placeholder attributes to label if no placeholder is provided 'set_placeholder': True, # Class to indicate required (better to set this in your Django form) 'required_css_class': '', # Class to indicate error (better to set this in your Django form) 'error_css_class': 'has-error', # Class to indicate success, meaning the field has valid input (better to set this in your Django form) 'success_css_class': 'has-success', # Renderers (only set these if you have studied the source and understand the inner workings) 'formset_renderers':{ 'default': 'bootstrap3.renderers.FormsetRenderer', }, 'form_renderers': { 'default': 'bootstrap3.renderers.FormRenderer', }, 'field_renderers': { 'default': 'bootstrap3.renderers.FieldRenderer', 'inline': 'bootstrap3.renderers.InlineFieldRenderer', }, } # http://django-oauth-toolkit.readthedocs.org/en/latest/rest-framework/getting_started.html OAUTH2_PROVIDER = { # this is the list of available scopes 'SCOPES': {'read': 'Read scope', 'write': 'Write scope', 'groups': 'Access to your groups'} } REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'oauth2_provider.ext.rest_framework.OAuth2Authentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), } # @login_required defaults to using settings.LOGIN_URL # if login_url= is not defined #LOGIN_URL='/member/login' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # Validic Device Integration # Organization ID and Token are set in local_settings.py # V_Secure = True of False. Used to determine http: or https: prefix V_SECURE = True V_ORG_ID = 'fake_value' V_ACCESS_TOKEN = 'fake_token' V_SERVER = "api.validic.com" # Optional port number # V_PORT = 443 VALIDIC_API = "https://api.validic.com/v1/" # Make this unique, and don't share it with anybody. # Setting a false value here and will overwrite using value in local_settings.py # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'fake_value' # Get Local Settings that you want to keep private. # Make sure Local_settings.py is excluded from Git try: from local_settings import * except Exception as e: pass if DEBUG_SETTINGS: print "SECRET_KEY:%s" % SECRET_KEY print "Validic_Org_ID:%s" % V_ORG_ID print "================================================================" # SECURITY WARNING: keep the secret key used in production secret!
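# Hedged sketch of the settings context processor wired up above via
# 'bbp.settings_context_processor.settings'.  The project's real module is not
# included here, so the body below is only the conventional pattern for
# exposing the names listed in TEMPLATE_VISIBLE_SETTINGS to templates, not the
# project's actual code.
from django.conf import settings as django_settings


def settings(request):
    # Expose only the whitelisted setting names to the template context.
    visible = getattr(django_settings, 'TEMPLATE_VISIBLE_SETTINGS', ())
    return dict((name, getattr(django_settings, name, None)) for name in visible)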
from textwrap import dedent from numpy import ( bool_, dtype, float32, float64, int32, int64, int16, uint16, ndarray, uint32, uint8, ) from catalyst.errors import ( WindowLengthNotPositive, WindowLengthTooLong, ) from catalyst.lib.labelarray import LabelArray from catalyst.utils.numpy_utils import ( datetime64ns_dtype, float64_dtype, int64_dtype, uint8_dtype, ) from catalyst.utils.memoize import lazyval # These class names are all the same because of our bootleg templating system. from ._float64window import AdjustedArrayWindow as Float64Window from ._int64window import AdjustedArrayWindow as Int64Window from ._labelwindow import AdjustedArrayWindow as LabelWindow from ._uint8window import AdjustedArrayWindow as UInt8Window NOMASK = None BOOL_DTYPES = frozenset( map(dtype, [bool_]), ) FLOAT_DTYPES = frozenset( map(dtype, [float32, float64]), ) INT_DTYPES = frozenset( # NOTE: uint64 not supported because it can't be safely cast to int64. map(dtype, [int16, uint16, int32, int64, uint32]), ) DATETIME_DTYPES = frozenset( map(dtype, ['datetime64[ns]', 'datetime64[D]']), ) # We use object arrays for strings. OBJECT_DTYPES = frozenset(map(dtype, ['O'])) STRING_KINDS = frozenset(['S', 'U']) REPRESENTABLE_DTYPES = BOOL_DTYPES.union( FLOAT_DTYPES, INT_DTYPES, DATETIME_DTYPES, OBJECT_DTYPES, ) def can_represent_dtype(dtype): """ Can we build an AdjustedArray for a baseline of `dtype``? """ return dtype in REPRESENTABLE_DTYPES or dtype.kind in STRING_KINDS def is_categorical(dtype): """ Do we represent this dtype with LabelArrays rather than ndarrays? """ return dtype in OBJECT_DTYPES or dtype.kind in STRING_KINDS CONCRETE_WINDOW_TYPES = { float64_dtype: Float64Window, int64_dtype: Int64Window, uint8_dtype: UInt8Window, } def _normalize_array(data, missing_value): """ Coerce buffer data for an AdjustedArray into a standard scalar representation, returning the coerced array and a dict of argument to pass to np.view to use when providing a user-facing view of the underlying data. - float* data is coerced to float64 with viewtype float64. - int32, int64, and uint32 are converted to int64 with viewtype int64. - datetime[*] data is coerced to int64 with a viewtype of datetime64[ns]. - bool_ data is coerced to uint8 with a viewtype of bool_. Parameters ---------- data : np.ndarray Returns ------- coerced, view_kwargs : (np.ndarray, np.dtype) """ if isinstance(data, LabelArray): return data, {} data_dtype = data.dtype if data_dtype == bool_: return data.astype(uint8), {'dtype': dtype(bool_)} elif data_dtype in FLOAT_DTYPES: return data.astype(float64), {'dtype': dtype(float64)} elif data_dtype in INT_DTYPES: return data.astype(int64), {'dtype': dtype(int64)} elif is_categorical(data_dtype): if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES): raise TypeError( "Invalid missing_value for categorical array.\n" "Expected None, bytes or unicode. Got %r." % missing_value, ) return LabelArray(data, missing_value), {} elif data_dtype.kind == 'M': try: outarray = data.astype('datetime64[ns]').view('int64') return outarray, {'dtype': datetime64ns_dtype} except OverflowError: raise ValueError( "AdjustedArray received a datetime array " "not representable as datetime64[ns].\n" "Min Date: %s\n" "Max Date: %s\n" % (data.min(), data.max()) ) else: raise TypeError( "Don't know how to construct AdjustedArray " "on data of type %s." 
% data_dtype ) class AdjustedArray(object): """ An array that can be iterated with a variable-length window, and which can provide different views on data from different perspectives. Parameters ---------- data : np.ndarray The baseline data values. mask : np.ndarray[bool] A mask indicating the locations of missing data. adjustments : dict[int -> list[Adjustment]] A dict mapping row indices to lists of adjustments to apply when we reach that row. missing_value : object A value to use to fill missing data in yielded windows. Should be a value coercible to `data.dtype`. """ __slots__ = ( '_data', '_view_kwargs', 'adjustments', 'missing_value', '__weakref__', ) def __init__(self, data, mask, adjustments, missing_value): self._data, self._view_kwargs = _normalize_array(data, missing_value) self.adjustments = adjustments self.missing_value = missing_value if mask is not NOMASK: if mask.dtype != bool_: raise ValueError("Mask must be a bool array.") if data.shape != mask.shape: raise ValueError( "Mask shape %s != data shape %s." % (mask.shape, data.shape), ) self._data[~mask] = self.missing_value @lazyval def data(self): """ The data stored in this array. """ return self._data.view(**self._view_kwargs) @lazyval def dtype(self): """ The dtype of the data stored in this array. """ return self._view_kwargs.get('dtype') or self._data.dtype @lazyval def _iterator_type(self): """ The iterator produced when `traverse` is called on this Array. """ if isinstance(self._data, LabelArray): return LabelWindow return CONCRETE_WINDOW_TYPES[self._data.dtype] def traverse(self, window_length, offset=0, perspective_offset=0): """ Produce an iterator rolling windows rows over our data. Each emitted window will have `window_length` rows. Parameters ---------- window_length : int The number of rows in each emitted window. offset : int, optional Number of rows to skip before the first window. Default is 0. perspective_offset : int, optional Number of rows past the end of the current window from which to "view" the underlying data. """ data = self._data.copy() _check_window_params(data, window_length) return self._iterator_type( data, self._view_kwargs, self.adjustments, offset, window_length, perspective_offset, rounding_places=None, ) def inspect(self): """ Return a string representation of the data stored in this array. """ return dedent( """\ Adjusted Array ({dtype}): Data: {data!r} Adjustments: {adjustments} """ ).format( dtype=self.dtype.name, data=self.data, adjustments=self.adjustments, ) def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value): if isinstance(ndarray_or_adjusted_array, AdjustedArray): return ndarray_or_adjusted_array elif isinstance(ndarray_or_adjusted_array, ndarray): return AdjustedArray( ndarray_or_adjusted_array, NOMASK, {}, missing_value, ) else: raise TypeError( "Can't convert %s to AdjustedArray" % type(ndarray_or_adjusted_array).__name__ ) def ensure_ndarray(ndarray_or_adjusted_array): """ Return the input as a numpy ndarray. This is a no-op if the input is already an ndarray. If the input is an adjusted_array, this extracts a read-only view of its internal data buffer. Parameters ---------- ndarray_or_adjusted_array : numpy.ndarray | catalyst.data.adjusted_array Returns ------- out : The input, converted to an ndarray. 
""" if isinstance(ndarray_or_adjusted_array, ndarray): return ndarray_or_adjusted_array elif isinstance(ndarray_or_adjusted_array, AdjustedArray): return ndarray_or_adjusted_array.data else: raise TypeError( "Can't convert %s to ndarray" % type(ndarray_or_adjusted_array).__name__ ) def _check_window_params(data, window_length): """ Check that a window of length `window_length` is well-defined on `data`. Parameters ---------- data : np.ndarray[ndim=2] The array of data to check. window_length : int Length of the desired window. Returns ------- None Raises ------ WindowLengthNotPositive If window_length < 1. WindowLengthTooLong If window_length is greater than the number of rows in `data`. """ if window_length < 1: raise WindowLengthNotPositive(window_length=window_length) if window_length > data.shape[0]: raise WindowLengthTooLong( nrows=data.shape[0], window_length=window_length, )
#!/usr/bin/env python # Copyright 2017, Google, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Using the classify_text method to find content categories of text files, Then use the content category labels to compare text similarity. For more information, see the tutorial page at https://cloud.google.com/natural-language/docs/classify-text-tutorial. """ # [START language_classify_text_tutorial_imports] import argparse import io import json import os from google.cloud import language_v1 import numpy import six # [END language_classify_text_tutorial_imports] # [START language_classify_text_tutorial_classify] def classify(text, verbose=True): """Classify the input text into categories. """ language_client = language_v1.LanguageServiceClient() document = language_v1.Document( content=text, type_=language_v1.Document.Type.PLAIN_TEXT ) response = language_client.classify_text(request={"document": document}) categories = response.categories result = {} for category in categories: # Turn the categories into a dictionary of the form: # {category.name: category.confidence}, so that they can # be treated as a sparse vector. result[category.name] = category.confidence if verbose: print(text) for category in categories: print(u"=" * 20) print(u"{:<16}: {}".format("category", category.name)) print(u"{:<16}: {}".format("confidence", category.confidence)) return result # [END language_classify_text_tutorial_classify] # [START language_classify_text_tutorial_index] def index(path, index_file): """Classify each text file in a directory and write the results to the index_file. """ result = {} for filename in os.listdir(path): file_path = os.path.join(path, filename) if not os.path.isfile(file_path): continue try: with io.open(file_path, "r") as f: text = f.read() categories = classify(text, verbose=False) result[filename] = categories except Exception: print("Failed to process {}".format(file_path)) with io.open(index_file, "w", encoding="utf-8") as f: f.write(json.dumps(result, ensure_ascii=False)) print("Texts indexed in file: {}".format(index_file)) return result # [END language_classify_text_tutorial_index] def split_labels(categories): """The category labels are of the form "/a/b/c" up to three levels, for example "/Computers & Electronics/Software", and these labels are used as keys in the categories dictionary, whose values are confidence scores. The split_labels function splits the keys into individual levels while duplicating the confidence score, which allows a natural boost in how we calculate similarity when more levels are in common. Example: If we have x = {"/a/b/c": 0.5} y = {"/a/b": 0.5} z = {"/a": 0.5} Then x and y are considered more similar than y and z. 
""" _categories = {} for name, confidence in six.iteritems(categories): labels = [label for label in name.split("/") if label] for label in labels: _categories[label] = confidence return _categories def similarity(categories1, categories2): """Cosine similarity of the categories treated as sparse vectors.""" categories1 = split_labels(categories1) categories2 = split_labels(categories2) norm1 = numpy.linalg.norm(list(categories1.values())) norm2 = numpy.linalg.norm(list(categories2.values())) # Return the smallest possible similarity if either categories is empty. if norm1 == 0 or norm2 == 0: return 0.0 # Compute the cosine similarity. dot = 0.0 for label, confidence in six.iteritems(categories1): dot += confidence * categories2.get(label, 0.0) return dot / (norm1 * norm2) # [START language_classify_text_tutorial_query] def query(index_file, text, n_top=3): """Find the indexed files that are the most similar to the query text. """ with io.open(index_file, "r") as f: index = json.load(f) # Get the categories of the query text. query_categories = classify(text, verbose=False) similarities = [] for filename, categories in six.iteritems(index): similarities.append((filename, similarity(query_categories, categories))) similarities = sorted(similarities, key=lambda p: p[1], reverse=True) print("=" * 20) print("Query: {}\n".format(text)) for category, confidence in six.iteritems(query_categories): print("\tCategory: {}, confidence: {}".format(category, confidence)) print("\nMost similar {} indexed texts:".format(n_top)) for filename, sim in similarities[:n_top]: print("\tFilename: {}".format(filename)) print("\tSimilarity: {}".format(sim)) print("\n") return similarities # [END language_classify_text_tutorial_query] # [START language_classify_text_tutorial_query_category] def query_category(index_file, category_string, n_top=3): """Find the indexed files that are the most similar to the query label. The list of all available labels: https://cloud.google.com/natural-language/docs/categories """ with io.open(index_file, "r") as f: index = json.load(f) # Make the category_string into a dictionary so that it is # of the same format as what we get by calling classify. query_categories = {category_string: 1.0} similarities = [] for filename, categories in six.iteritems(index): similarities.append((filename, similarity(query_categories, categories))) similarities = sorted(similarities, key=lambda p: p[1], reverse=True) print("=" * 20) print("Query: {}\n".format(category_string)) print("\nMost similar {} indexed texts:".format(n_top)) for filename, sim in similarities[:n_top]: print("\tFilename: {}".format(filename)) print("\tSimilarity: {}".format(sim)) print("\n") return similarities # [END language_classify_text_tutorial_query_category] if __name__ == "__main__": parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) subparsers = parser.add_subparsers(dest="command") classify_parser = subparsers.add_parser("classify", help=classify.__doc__) classify_parser.add_argument( "text", help="The text to be classified. " "The text needs to have at least 20 tokens.", ) index_parser = subparsers.add_parser("index", help=index.__doc__) index_parser.add_argument( "path", help="The directory that contains " "text files to be indexed." 
) index_parser.add_argument( "--index_file", help="Filename for the output JSON.", default="index.json" ) query_parser = subparsers.add_parser("query", help=query.__doc__) query_parser.add_argument("index_file", help="Path to the index JSON file.") query_parser.add_argument("text", help="Query text.") query_category_parser = subparsers.add_parser( "query-category", help=query_category.__doc__ ) query_category_parser.add_argument( "index_file", help="Path to the index JSON file." ) query_category_parser.add_argument("category", help="Query category.") args = parser.parse_args() if args.command == "classify": classify(args.text) if args.command == "index": index(args.path, args.index_file) if args.command == "query": query(args.index_file, args.text) if args.command == "query-category": query_category(args.index_file, args.category)
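# Hedged worked example (illustrative only): shows how split_labels() and
# similarity() above behave on hand-written category dicts, with no call to the
# Cloud Natural Language API.  The labels and confidences are made up for the
# demo and are not real classify_text() output.
def _similarity_demo():
    x = {"/Computers & Electronics/Software": 0.5}
    y = {"/Computers & Electronics": 0.5}
    z = {"/Science": 0.5}
    # x and y share the "Computers & Electronics" level once the labels are
    # split, so their cosine similarity is positive; x and z share no levels,
    # so their similarity is 0.0.
    print("similarity(x, y) = {}".format(similarity(x, y)))
    print("similarity(x, z) = {}".format(similarity(x, z)))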
''' Frontend module ''' __all__ = ('frontend', ) from flask import Blueprint, render_template, url_for, redirect, abort, \ send_file, request, make_response, jsonify from web.job import read_job, read_logs, JobObj from werkzeug import secure_filename from web.config import qjob, r from os.path import exists from uuid import uuid4 import subprocess import re from time import time from os import mkdir from os.path import join, splitext from flaskext.wtf import Form, TextField, SubmitField, validators, \ FileField, SelectField, BooleanField frontend = Blueprint('frontend', __name__, static_folder='static', static_url_path='/frontend/static', template_folder='templates') class JobForm(Form): package_name = TextField('Package', [ validators.Required(), validators.Length(min=3, max=128)], description='org.kivy.touchtracer') package_version = TextField('Version', [ validators.Required(), validators.Length(min=1, max=64)], description='1.0') package_title = TextField('Name', [ validators.Required(), validators.Length(min=1, max=128)], description='Touchtracer') modules = TextField('Modules', description='pil kivy') directory = FileField('Application zipped with main.py', description='''You must create a zip of your application directory containing main.py in the zip root.''') package_permissions = TextField('Permissions', [ validators.Length(max=2048)], description='INTERNET') package_orientation = SelectField('Orientation', choices=[ ('landscape', 'Landscape'), ('portrait', 'Portrait')], default='landscape') package_icon = FileField('Icon') package_presplash = FileField('Presplash') emails = TextField('Send notification to', [ validators.Length(max=2048)], description='your@email.com') release = BooleanField('Release mode') submit = SubmitField('Submit') @frontend.route('/job/<uid>') def job(uid): job = read_job(uid) if job is None: return redirect(url_for('frontend.index')) # get the log associated to the job joblog = list(reversed(read_logs(uid))) progress = job.build_status if not progress: pprogress = 0 ptotal = 1 pcurrent = 0 status = "Waiting a builder" else: try: progress, status = progress[1:].split(']', 1) pcurrent, ptotal = progress.split('/') except ValueError: status = job.build_status pcurrent = 1 ptotal = 1 pprogress = int((int(pcurrent) / float(ptotal)) * 100.) 
status = status.strip().rstrip('.').capitalize() return render_template('frontend/job.html', job=job, joblog=joblog, pcurrent=pcurrent, ptotal=ptotal, pprogress=pprogress, status=status) @frontend.route('/job/<uid>/delete') def delete(uid): job = read_job(uid, 'package_name') if not job: abort(404) # delte job directory d = job.directory if d and len(d) > 10: subprocess.Popen(['rm', '-rf', d], shell=False).communicate() keys = r.keys('job:%s*' % uid) if keys: r.delete(*keys) keys = r.keys('log:%s*' % uid) if keys: r.delete(*keys) return redirect(url_for('frontend.index')) @frontend.route('/api/data/<uid>') def jobdata(uid): job = read_job(uid, 'directory', 'data_ext') if not job: return abort(404) print job r.set('job:%s:dt_started' % uid, time()) r.set('job:%s:is_started' % uid, 1) return send_file(job.data_fn) @frontend.route('/api/icon/<uid>') def jobicon(uid): job = read_job(uid, 'directory', 'have_icon') if not job or not job.have_icon or not exists(job.icon_fn): return abort(404) return send_file(job.icon_fn) @frontend.route('/api/presplash/<uid>') def jobpresplash(uid): job = read_job(uid, 'directory') if not job or not job.have_presplash or not exists(job.presplash_fn): return abort(404) return send_file(job.presplash_fn) @frontend.route('/api/push/<uid>', methods=['POST']) def jobpush(uid): job = read_job(uid) if not job: return abort(404) file = request.files['file'] if file and file.filename.rsplit('.', 1)[-1] == 'apk': filename = secure_filename(file.filename) file.save(join(job.directory, filename)) r.set('job:%s:apk' % uid, filename) r.set('job:%s:dt_done' % uid, time()) r.set('job:%s:is_done' % uid, 1) try: job.notify() except: pass return make_response('done') else: return abort(403) @frontend.route('/download/<uid>/<apk>') def download(uid, apk): job = read_job(uid, 'apk', 'directory', 'dt_done') if not job or not job.apk or not job.dt_done: return abort(404) return send_file(job.apk_fn) @frontend.route('/') def index(): form = JobForm() return render_template('frontend/index.html', form=form) @frontend.route('/faq') def faq(): return render_template('frontend/faq.html') @frontend.route('/about') def about(): return render_template('frontend/about.html') @frontend.route('/status') def status(): key = qjob.key queue_len = qjob._HotQueue__redis.llen(key) hosts_last_alive = r.keys('host:*:last_alive') hosts = [x.split(':')[1] for x in hosts_last_alive] stats = {} for host in hosts: stats[host] = { 'last_seen': int(time() - float(r.get('host:{}:last_alive'.format(host)))), 'status': r.get('host:{}:status'.format(host))} return render_template('frontend/status.html', queue_len=queue_len, stats=stats) def csplit(s): return ' '.join([x for x in re.split(r'[.; ]', s) if len(x)]) @frontend.route('/submit', methods=['POST']) def submit(): form = JobForm() if form.validate_on_submit(): fn = secure_filename(form.directory.file.filename) ext = splitext(fn)[-1] if splitext(fn)[-1] not in ( '.zip'):#, '.tbz', '.tar.gz', '.tbz2', '.tar.bz2'): return render_template('frontend/index.html', form=form, error='Invalid application directory package') # create a job uid = str(uuid4()) # fake job obj for getting path job = JobObj({'uid': uid}) jobkey = 'job:%s' % uid basekey = jobkey + ':' r.set(basekey + 'dt_added', time()) # create the job directory d = job.directory mkdir(d) form.directory.file.save(join(d, 'data%s' % ext)) if form.package_presplash.file: form.package_presplash.file.save(job.presplash_fn) r.set(basekey + 'have_presplash', 1) else: r.set(basekey + 'have_presplash', 0) if 
form.package_icon.file: form.package_icon.file.save(job.icon_fn) r.set(basekey + 'have_icon', 1) else: r.set(basekey + 'have_icon', 0) # add in the database r.set(basekey + 'package_name', form.package_name.data) r.set(basekey + 'package_version', form.package_version.data) r.set(basekey + 'package_title', form.package_title.data) r.set(basekey + 'package_orientation', form.package_orientation.data) r.set(basekey + 'package_permissions', form.package_permissions.data) r.set(basekey + 'modules', form.modules.data) r.set(basekey + 'emails', form.emails.data) r.set(basekey + 'data_ext', ext) r.set(basekey + 'is_release', 1 if form.release.data else 0) r.set(basekey + 'build_status', '') r.set(basekey + 'is_failed', 0) r.set(basekey + 'is_started', 0) r.set(basekey + 'is_done', 0) r.set(basekey + 'apk', '') # creation finished r.set(jobkey, uid) # not optimized, but reread it. job = read_job(uid) # submit a job in reddis qjob.put({ 'uid': job.uid, 'package_name': job.package_name, 'package_title': job.package_title, 'package_version': job.package_version, 'package_orientation': job.package_orientation, 'package_permissions': csplit(job.package_permissions), 'emails': csplit(job.emails), 'have_icon': job.have_icon, 'have_presplash': job.have_presplash, 'mode': 'release' if job.is_release == '1' else 'debug', 'modules': csplit(job.modules) }) if 'batch' in request.form: d = {'status': 'ok', 'uid': job.uid, 'url': url_for('frontend.job', uid=job.uid, _external=True)} return jsonify(**d) else: # redirect to the view job return redirect(url_for('frontend.job', uid=job.uid)) return render_template('frontend/index.html', form=form)
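# Hedged sketch of the build-status parsing done inline in job() above, pulled
# out as a standalone helper for clarity.  The "[current/total] message" format
# is inferred from that view code; this helper is illustrative and is not used
# by the blueprint itself.
def parse_build_status(build_status):
    # Returns (pcurrent, ptotal, pprogress, status) for strings such as
    # "[3/10] Building apk.".
    if not build_status:
        return 0, 1, 0, 'Waiting a builder'
    try:
        progress, status = build_status[1:].split(']', 1)
        pcurrent, ptotal = progress.split('/')
    except ValueError:
        pcurrent, ptotal, status = 1, 1, build_status
    pprogress = int((int(pcurrent) / float(ptotal)) * 100.)
    return pcurrent, ptotal, pprogress, status.strip().rstrip('.').capitalize()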
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''DDS texture loader. Reference: http://msdn2.microsoft.com/en-us/library/bb172993.aspx ''' __docformat__ = 'restructuredtext' __version__ = '$Id$' from ctypes import * import struct from pyglet.gl import * from pyglet.image import CompressedImageData from pyglet.image import codecs from pyglet.image.codecs import s3tc from pyglet.compat import izip_longest as compat_izip_longest class DDSException(codecs.ImageDecodeException): exception_priority = 0 # dwFlags of DDSURFACEDESC2 DDSD_CAPS = 0x00000001 DDSD_HEIGHT = 0x00000002 DDSD_WIDTH = 0x00000004 DDSD_PITCH = 0x00000008 DDSD_PIXELFORMAT = 0x00001000 DDSD_MIPMAPCOUNT = 0x00020000 DDSD_LINEARSIZE = 0x00080000 DDSD_DEPTH = 0x00800000 # ddpfPixelFormat of DDSURFACEDESC2 DDPF_ALPHAPIXELS = 0x00000001 DDPF_FOURCC = 0x00000004 DDPF_RGB = 0x00000040 # dwCaps1 of DDSCAPS2 DDSCAPS_COMPLEX = 0x00000008 DDSCAPS_TEXTURE = 0x00001000 DDSCAPS_MIPMAP = 0x00400000 # dwCaps2 of DDSCAPS2 DDSCAPS2_CUBEMAP = 0x00000200 DDSCAPS2_CUBEMAP_POSITIVEX = 0x00000400 DDSCAPS2_CUBEMAP_NEGATIVEX = 0x00000800 DDSCAPS2_CUBEMAP_POSITIVEY = 0x00001000 DDSCAPS2_CUBEMAP_NEGATIVEY = 0x00002000 DDSCAPS2_CUBEMAP_POSITIVEZ = 0x00004000 DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x00008000 DDSCAPS2_VOLUME = 0x00200000 class _filestruct(object): def __init__(self, data): if len(data) < self.get_size(): raise DDSException('Not a DDS file') items = struct.unpack(self.get_format(), data) for field, value in compat_izip_longest(self._fields, items, fillvalue=None): setattr(self, field[0], value) def __repr__(self): name = self.__class__.__name__ return '%s(%s)' % \ (name, (', \n%s' % (' ' * (len(name) + 1))).join( \ ['%s = %s' % (field[0], repr(getattr(self, field[0]))) \ for field in self._fields])) @classmethod def get_format(cls): return '<' + ''.join([f[1] for f in cls._fields]) @classmethod def get_size(cls): return 
struct.calcsize(cls.get_format()) class DDSURFACEDESC2(_filestruct): _fields = [ ('dwMagic', '4s'), ('dwSize', 'I'), ('dwFlags', 'I'), ('dwHeight', 'I'), ('dwWidth', 'I'), ('dwPitchOrLinearSize', 'I'), ('dwDepth', 'I'), ('dwMipMapCount', 'I'), ('dwReserved1', '44s'), ('ddpfPixelFormat', '32s'), ('dwCaps1', 'I'), ('dwCaps2', 'I'), ('dwCapsReserved', '8s'), ('dwReserved2', 'I') ] def __init__(self, data): super(DDSURFACEDESC2, self).__init__(data) self.ddpfPixelFormat = DDPIXELFORMAT(self.ddpfPixelFormat) class DDPIXELFORMAT(_filestruct): _fields = [ ('dwSize', 'I'), ('dwFlags', 'I'), ('dwFourCC', '4s'), ('dwRGBBitCount', 'I'), ('dwRBitMask', 'I'), ('dwGBitMask', 'I'), ('dwBBitMask', 'I'), ('dwRGBAlphaBitMask', 'I') ] _compression_formats = { (b'DXT1', False): (GL_COMPRESSED_RGB_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgb), (b'DXT1', True): (GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgba), (b'DXT3', False): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3), (b'DXT3', True): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3), (b'DXT5', False): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5), (b'DXT5', True): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5), } def _check_error(): e = glGetError() if e != 0: print('GL error %d' % e) class DDSImageDecoder(codecs.ImageDecoder): def get_file_extensions(self): return ['.dds'] def decode(self, file, filename): header = file.read(DDSURFACEDESC2.get_size()) desc = DDSURFACEDESC2(header) if desc.dwMagic != b'DDS ' or desc.dwSize != 124: raise DDSException('Invalid DDS file (incorrect header).') width = desc.dwWidth height = desc.dwHeight mipmaps = 1 if desc.dwFlags & DDSD_DEPTH: raise DDSException('Volume DDS files unsupported') if desc.dwFlags & DDSD_MIPMAPCOUNT: mipmaps = desc.dwMipMapCount if desc.ddpfPixelFormat.dwSize != 32: raise DDSException('Invalid DDS file (incorrect pixel format).') if desc.dwCaps2 & DDSCAPS2_CUBEMAP: raise DDSException('Cubemap DDS files unsupported') if not desc.ddpfPixelFormat.dwFlags & DDPF_FOURCC: raise DDSException('Uncompressed DDS textures not supported.') has_alpha = desc.ddpfPixelFormat.dwRGBAlphaBitMask != 0 selector = (desc.ddpfPixelFormat.dwFourCC, has_alpha) if selector not in _compression_formats: raise DDSException('Unsupported texture compression %s' % \ desc.ddpfPixelFormat.dwFourCC) dformat, decoder = _compression_formats[selector] if dformat == GL_COMPRESSED_RGB_S3TC_DXT1_EXT: block_size = 8 else: block_size = 16 datas = [] w, h = width, height for i in range(mipmaps): if not w and not h: break if not w: w = 1 if not h: h = 1 size = ((w + 3) // 4) * ((h + 3) // 4) * block_size data = file.read(size) datas.append(data) w >>= 1 h >>= 1 image = CompressedImageData(width, height, dformat, datas[0], 'GL_EXT_texture_compression_s3tc', decoder) level = 0 for data in datas[1:]: level += 1 image.set_mipmap_data(level, data) return image def get_decoders(): return [DDSImageDecoder()] def get_encoders(): return []
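# Hedged illustration (not part of the DDS format): the _filestruct base class
# above turns a list of (name, struct code) pairs into a single struct.unpack()
# call plus named attributes.  The tiny header below is made up purely to show
# that round trip.
class _ExampleHeader(_filestruct):
    _fields = [
        ('dwMagic', '4s'),
        ('dwWidth', 'I'),
        ('dwHeight', 'I'),
    ]

# get_format() yields '<4sII' for the fields above, so packing and re-parsing
# recovers the named values:
#     hdr = _ExampleHeader(struct.pack(_ExampleHeader.get_format(), b'DEMO', 64, 32))
#     assert hdr.dwWidth == 64 and hdr.dwHeight == 32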
import os import weakref import shutil import logging from watchdog.events import FileSystemEventHandler from .utilities import is_jsp_hook from .deploy import Deploy from . import sassc LOG = logging.getLogger(__name__) def normalize_path(path): return path.replace('/', os.sep) def contains_path(path, sub_path): return path.find(normalize_path(sub_path)) != -1 class OnDeployHandler(FileSystemEventHandler): def __init__(self, hotterDeployer): super(OnDeployHandler, self).__init__() self.hotterDeployer = weakref.proxy(hotterDeployer) def on_created(self, event): d = Deploy(self.hotterDeployer, event.src_path, self.hotterDeployer.tomcat_directory ) d.start() class OnTempDeployHandler(FileSystemEventHandler): def __init__(self, hotterDeployer): super(OnTempDeployHandler, self).__init__() self.hotterDeployer = weakref.proxy(hotterDeployer) def process_default(self, event): self.hotterDeployer._scan_temp() def on_created(self, event): self.process_default(event) def on_delete(self, event): self.process_default(event) class OnWebappsDeployHandler(FileSystemEventHandler): def __init__(self, hotterDeployer): super(OnWebappsDeployHandler, self).__init__() self.hotterDeployer = weakref.proxy(hotterDeployer) def process_default(self, event): self.hotterDeployer._scan_webapps() def on_created(self, event): self.process_default(event) def on_delete(self, event): self.process_default(event) class WorkSpaceHandler(FileSystemEventHandler): def __init__(self, hotterDeployer): super(WorkSpaceHandler, self).__init__() self.hotterDeployer = weakref.proxy(hotterDeployer) def dispatch(self, event): LOG.debug('WorkSpaceHandler::dispatch {0} {1}'.format(event.src_path, event)) path = event.src_path if (path.find('.svn') == -1 and contains_path(path, 'src/main/webapp/WEB-INF')): super(WorkSpaceHandler, self).dispatch(event) else: LOG.debug('WorkSpaceHandler::dispatch ignored {0}'.format(event.src_path)) def on_created(self, event): self.process_default(event) def on_delete(self, event): self.process_default(event) def on_modified(self, event): self.process_default(event) def process_default(self, event): if event.src_path.endswith('.xml'): LOG.debug('WorkSpaceHandler::process_default {0} {1}'.format(event.src_path, event)) self.hotterDeployer._scan_wd(self.hotterDeployer.workspace_directory) class OnFileChangedHandler(FileSystemEventHandler): def __init__(self, hotterDeployer): super(OnFileChangedHandler, self).__init__() self.hotterDeployer = weakref.proxy(hotterDeployer) extension = '.jsp,.js,.css,.tag,.vm,.jspf' self.extensions = extension.split(',') def dispatch(self, event): path = event.src_path print 'FF', os.sep LOG.debug('OnFileChangedHandler::dispatch {0} {1}'.format(event.src_path, event)) if path.find('.svn') == -1 and contains_path(path, 'src/main/webapp'): super(OnFileChangedHandler, self).dispatch(event) else: LOG.debug('OnFileChangedHandler::dispatch ignored {0}'.format(event.src_path)) def on_modified(self, event): cwd = event.src_path.split(normalize_path('/src/main/webapp'))[0] # Handle portlets portlet_name = self.hotterDeployer.portlets.get(cwd, None) if portlet_name: if all(not event.src_path.endswith(ext) for ext in self.extensions): return rel_path = event.src_path.split(cwd+normalize_path('/src/main/webapp'))[1][1:] jsp_hook = is_jsp_hook(cwd, rel_path) if jsp_hook: print 'JSP HOOK', rel_path rel_path = jsp_hook latest_subdir = self.hotterDeployer.liferay_dir dest_path = os.path.join(latest_subdir, rel_path) print dest_path if not os.path.exists(dest_path+'.hotterdeploy'): 
shutil.copy2(dest_path, dest_path+'.hotterdeploy') else: # Find latest dir latest_subdir = self.hotterDeployer.find_latest_temp_dir(portlet_name) if not latest_subdir: LOG.debug('- Skipped {0} ({1} not deployed)'.format(rel_path, portlet_name)) else: dest_path = os.path.join(latest_subdir, rel_path) LOG.info('- Copying {0} ({1}) [{2}]'.format(rel_path, portlet_name, os.path.basename(latest_subdir))) if not os.path.exists(os.path.dirname(dest_path)): os.makedirs(os.path.dirname(dest_path)) print 'dest_path', dest_path if rel_path.endswith('.js'): shutil.copy2(event.src_path, dest_path) if self.hotterDeployer.statics_directory: dest_path = os.path.join(self.hotterDeployer.statics_directory, portlet_name, rel_path) if not os.path.exists(os.path.dirname(dest_path)): os.makedirs(os.path.dirname(dest_path)) shutil.copy2(event.src_path, dest_path) self.hotterDeployer.trigger_browser_reload() elif rel_path.endswith('.css'): #shutil.copy2(event.src_path, dest_path) try: print 'compiling scss' data = sassc.compile(event.src_path) with open(dest_path, 'wb') as f: f.write(data) # TODO: copy output # /home/sueastside/Projects/CreDoc/static - credoc-theme print 'output for portlet ', portlet_name, rel_path if self.hotterDeployer.statics_directory: dest_path = os.path.join(self.hotterDeployer.statics_directory, portlet_name, rel_path) if not os.path.exists(os.path.dirname(dest_path)): os.makedirs(os.path.dirname(dest_path)) with open(dest_path, 'wb') as f: f.write(data) self.hotterDeployer.trigger_browser_reload(portlet_name+'/'+rel_path) except sassc.SassException as e: log.warn(e) else: self.hotterDeployer.trigger_browser_reload() shutil.copy2(event.src_path, dest_path) class OnClassChangedHandler(FileSystemEventHandler): def __init__(self, hotterDeployer): super(OnClassChangedHandler, self).__init__() self.hotterDeployer = weakref.proxy(hotterDeployer) def dispatch(self, event): path = event.src_path if (path.find('.svn') == -1 and path.find('target/classes') != -1 and os.path.isfile(path)): super(OnClassChangedHandler, self).dispatch(event) def on_modified(self, event): cwd = event.src_path.split('/target/classes')[0] # Handle portlets portlet_name = self.hotterDeployer.portlets.get(cwd, None) print 'portlet_name', portlet_name if portlet_name: rel_path = event.src_path.split(cwd+'/target/classes')[1][1:] print rel_path latest_subdir = self.hotterDeployer.find_latest_temp_dir(portlet_name) print 'rel_path', rel_path if not latest_subdir: print '- Skipped {0} ({1} not deployed)'.format(rel_path, portlet_name) else: dest_path = os.path.join(latest_subdir, 'WEB-INF', 'classes', rel_path) print '- Copying {0} ({1}) [{2}]'.format(rel_path, portlet_name, os.path.basename(latest_subdir)) if not os.path.exists(os.path.dirname(dest_path)): os.makedirs(os.path.dirname(dest_path)) print 'OnClassChangedHandler::on_modified', dest_path shutil.copy2(event.src_path, dest_path) # self.hotterDeployer.trigger_browser_reload() from threading import Timer t = Timer(1.2, self.hotterDeployer.trigger_browser_reload) t.start()
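# Hedged wiring sketch (illustrative only): how handlers like the ones above
# are typically attached to a watchdog Observer.  The real HotterDeployer sets
# up its own observers elsewhere in this project, and the directory arguments
# here are placeholders.
def watch_directories(hotterDeployer, deploy_directory, workspace_directory):
    from watchdog.observers import Observer

    observer = Observer()
    # Files created in the hot-deploy directory kick off a Deploy.
    observer.schedule(OnDeployHandler(hotterDeployer), deploy_directory, recursive=False)
    # Edits under src/main/webapp are copied into the exploded webapp.
    observer.schedule(OnFileChangedHandler(hotterDeployer), workspace_directory, recursive=True)
    # Recompiled classes under target/classes land in WEB-INF/classes.
    observer.schedule(OnClassChangedHandler(hotterDeployer), workspace_directory, recursive=True)
    observer.start()
    return observer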
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ce_netstream_export version_added: "2.4" short_description: Manages netstream export on HUAWEI CloudEngine switches. description: - Configure NetStream flow statistics exporting and versions for exported packets on HUAWEI CloudEngine switches. author: Zhijin Zhou (@QijunPan) notes: options: type: description: - Specifies NetStream feature. required: true choices: ['ip', 'vxlan'] source_ip: description: - Specifies source address which can be IPv6 or IPv4 of the exported NetStream packet. host_ip: description: - Specifies destination address which can be IPv6 or IPv4 of the exported NetStream packet. host_port: description: - Specifies the destination UDP port number of the exported packets. The value is an integer that ranges from 1 to 65535. host_vpn: description: - Specifies the VPN instance of the exported packets carrying flow statistics. Ensure the VPN instance has been created on the device. version: description: - Sets the version of exported packets. choices: ['5', '9'] as_option: description: - Specifies the AS number recorded in the statistics as the original or the peer AS number. choices: ['origin', 'peer'] bgp_nexthop: description: - Configures the statistics to carry BGP next hop information. Currently, only V9 supports the exported packets carrying BGP next hop information. choices: ['enable','disable'] default: 'disable' state: description: - Manage the state of the resource. choices: ['present','absent'] default: present ''' EXAMPLES = ''' - name: netstream export module test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: Configures the source address for the exported packets carrying IPv4 flow statistics. ce_netstream_export: type: ip source_ip: 192.8.2.2 provider: "{{ cli }}" - name: Configures the source IP address for the exported packets carrying VXLAN flexible flow statistics. ce_netstream_export: type: vxlan source_ip: 192.8.2.3 provider: "{{ cli }}" - name: Configures the destination IP address and destination UDP port number for the exported packets carrying IPv4 flow statistics. ce_netstream_export: type: ip host_ip: 192.8.2.4 host_port: 25 host_vpn: test provider: "{{ cli }}" - name: Configures the destination IP address and destination UDP port number for the exported packets carrying VXLAN flexible flow statistics. ce_netstream_export: type: vxlan host_ip: 192.8.2.5 host_port: 26 host_vpn: test provider: "{{ cli }}" - name: Configures the version number of the exported packets carrying IPv4 flow statistics. 
ce_netstream_export: type: ip version: 9 as_option: origin bgp_nexthop: enable provider: "{{ cli }}" - name: Configures the version for the exported packets carrying VXLAN flexible flow statistics. ce_netstream_export: type: vxlan version: 9 provider: "{{ cli }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: { "as_option": "origin", "bgp_nexthop": "enable", "host_ip": "192.8.5.6", "host_port": "26", "host_vpn": "test", "source_ip": "192.8.2.5", "state": "present", "type": "ip", "version": "9" } existing: description: k/v pairs of existing attributes on the device returned: always type: dict sample: { "as_option": null, "bgp_nexthop": "disable", "host_ip": null, "host_port": null, "host_vpn": null, "source_ip": null, "type": "ip", "version": null } end_state: description: k/v pairs of end attributes on the device returned: always type: dict sample: { "as_option": "origin", "bgp_nexthop": "enable", "host_ip": "192.8.5.6", "host_port": "26", "host_vpn": "test", "source_ip": "192.8.2.5", "type": "ip", "version": "9" } updates: description: command list sent to the device returned: always type: list sample: [ "netstream export ip source 192.8.2.5", "netstream export ip host 192.8.5.6 26 vpn-instance test", "netstream export ip version 9 origin-as bgp-nexthop" ] changed: description: check to see if a change was made on the device returned: always type: bool sample: true ''' import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.cloudengine.ce import exec_command, load_config from ansible.module_utils.network.cloudengine.ce import ce_argument_spec def is_ipv4_addr(ip_addr): """check ipaddress validate""" rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.' 
rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])' ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$') return bool(re.match(ipv4_regex, ip_addr)) def is_config_exist(cmp_cfg, test_cfg): """is configuration exist""" test_cfg_tmp = test_cfg + ' *$' + '|' + test_cfg + ' *\n' obj = re.compile(test_cfg_tmp) result = re.findall(obj, cmp_cfg) if not result: return False return True class NetstreamExport(object): """Manage NetStream export""" def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.__init_module__() # NetStream export configuration parameters self.type = self.module.params['type'] self.source_ip = self.module.params['source_ip'] self.host_ip = self.module.params['host_ip'] self.host_port = self.module.params['host_port'] self.host_vpn = self.module.params['host_vpn'] self.version = self.module.params['version'] self.as_option = self.module.params['as_option'] self.bgp_netxhop = self.module.params['bgp_nexthop'] self.state = self.module.params['state'] self.commands = list() self.config = None self.exist_conf = dict() # state self.changed = False self.updates_cmd = list() self.results = dict() self.proposed = dict() self.existing = dict() self.end_state = dict() def __init_module__(self): """init module""" self.module = AnsibleModule( argument_spec=self.spec, supports_check_mode=True) def cli_load_config(self, commands): """load config by cli""" if not self.module.check_mode: load_config(self.module, commands) def get_netstream_config(self): """get current netstream configuration""" cmd = "display current-configuration | include ^netstream export" rc, out, err = exec_command(self.module, cmd) if rc != 0: self.module.fail_json(msg=err) config = str(out).strip() return config def get_existing(self): """get existing config""" self.existing = dict(type=self.type, source_ip=self.exist_conf['source_ip'], host_ip=self.exist_conf['host_ip'], host_port=self.exist_conf['host_port'], host_vpn=self.exist_conf['host_vpn'], version=self.exist_conf['version'], as_option=self.exist_conf['as_option'], bgp_nexthop=self.exist_conf['bgp_netxhop']) def get_proposed(self): """get proposed config""" self.proposed = dict(type=self.type, source_ip=self.source_ip, host_ip=self.host_ip, host_port=self.host_port, host_vpn=self.host_vpn, version=self.version, as_option=self.as_option, bgp_nexthop=self.bgp_netxhop, state=self.state) def get_end_state(self): """get end config""" self.get_config_data() self.end_state = dict(type=self.type, source_ip=self.exist_conf['source_ip'], host_ip=self.exist_conf['host_ip'], host_port=self.exist_conf['host_port'], host_vpn=self.exist_conf['host_vpn'], version=self.exist_conf['version'], as_option=self.exist_conf['as_option'], bgp_nexthop=self.exist_conf['bgp_netxhop']) def show_result(self): """show result""" self.results['changed'] = self.changed self.results['proposed'] = self.proposed self.results['existing'] = self.existing self.results['end_state'] = self.end_state if self.changed: self.results['updates'] = self.updates_cmd else: self.results['updates'] = list() self.module.exit_json(**self.results) def cli_add_command(self, command, undo=False): """add command to self.update_cmd and self.commands""" if undo and command.lower() not in ["quit", "return"]: cmd = "undo " + command else: cmd = command self.commands.append(cmd) # set to device if command.lower() not in ["quit", "return"]: if cmd not in self.updates_cmd: self.updates_cmd.append(cmd) # show updates result def config_nets_export_src_addr(self): """Configures 
the source address for the exported packets""" if is_ipv4_addr(self.source_ip): if self.type == 'ip': cmd = "netstream export ip source %s" % self.source_ip else: cmd = "netstream export vxlan inner-ip source %s" % self.source_ip else: if self.type == 'ip': cmd = "netstream export ip source ipv6 %s" % self.source_ip else: cmd = "netstream export vxlan inner-ip source ipv6 %s" % self.source_ip if is_config_exist(self.config, cmd): self.exist_conf['source_ip'] = self.source_ip if self.state == 'present': return else: undo = True else: if self.state == 'absent': return else: undo = False self.cli_add_command(cmd, undo) def config_nets_export_host_addr(self): """Configures the destination IP address and destination UDP port number""" if is_ipv4_addr(self.host_ip): if self.type == 'ip': cmd = 'netstream export ip host %s %s' % (self.host_ip, self.host_port) else: cmd = 'netstream export vxlan inner-ip host %s %s' % (self.host_ip, self.host_port) else: if self.type == 'ip': cmd = 'netstream export ip host ipv6 %s %s' % (self.host_ip, self.host_port) else: cmd = 'netstream export vxlan inner-ip host ipv6 %s %s' % (self.host_ip, self.host_port) if self.host_vpn: cmd += " vpn-instance %s" % self.host_vpn if is_config_exist(self.config, cmd): self.exist_conf['host_ip'] = self.host_ip self.exist_conf['host_port'] = self.host_port if self.host_vpn: self.exist_conf['host_vpn'] = self.host_vpn if self.state == 'present': return else: undo = True else: if self.state == 'absent': return else: undo = False self.cli_add_command(cmd, undo) def config_nets_export_vxlan_ver(self): """Configures the version for the exported packets carrying VXLAN flexible flow statistics""" cmd = 'netstream export vxlan inner-ip version 9' if is_config_exist(self.config, cmd): self.exist_conf['version'] = self.version if self.state == 'present': return else: undo = True else: if self.state == 'absent': return else: undo = False self.cli_add_command(cmd, undo) def config_nets_export_ip_ver(self): """Configures the version number of the exported packets carrying IPv4 flow statistics""" cmd = 'netstream export ip version %s' % self.version if self.version == '5': if self.as_option == 'origin': cmd += ' origin-as' elif self.as_option == 'peer': cmd += ' peer-as' else: if self.as_option == 'origin': cmd += ' origin-as' elif self.as_option == 'peer': cmd += ' peer-as' if self.bgp_netxhop == 'enable': cmd += ' bgp-nexthop' if cmd == 'netstream export ip version 5': cmd_tmp = "netstream export ip version" if cmd_tmp in self.config: if self.state == 'present': self.cli_add_command(cmd, False) else: self.exist_conf['version'] = self.version return if is_config_exist(self.config, cmd): self.exist_conf['version'] = self.version self.exist_conf['as_option'] = self.as_option self.exist_conf['bgp_netxhop'] = self.bgp_netxhop if self.state == 'present': return else: undo = True else: if self.state == 'absent': return else: undo = False self.cli_add_command(cmd, undo) def config_netstream_export(self): """configure netstream export""" if self.commands: self.cli_load_config(self.commands) self.changed = True def check_params(self): """Check all input params""" if not self.type: self.module.fail_json(msg='Error: The value of type cannot be empty.') if self.host_port: if not self.host_port.isdigit(): self.module.fail_json(msg='Error: Host port is invalid.') if int(self.host_port) < 1 or int(self.host_port) > 65535: self.module.fail_json(msg='Error: Host port is not in the range from 1 to 65535.') if self.host_vpn: if self.host_vpn == '_public_': 
self.module.fail_json( msg='Error: The host vpn name _public_ is reserved.') if len(self.host_vpn) < 1 or len(self.host_vpn) > 31: self.module.fail_json(msg='Error: The host vpn name length is not in the range from 1 to 31.') if self.type == 'vxlan' and self.version == '5': self.module.fail_json(msg="Error: When type is vxlan, version must be 9.") if self.type == 'ip' and self.version == '5' and self.bgp_netxhop == 'enable': self.module.fail_json(msg="Error: When type=ip and version=5, bgp_netxhop is not supported.") if (self.host_ip and not self.host_port) or (self.host_port and not self.host_ip): self.module.fail_json(msg="Error: host_ip and host_port must both exist or not exist.") def get_config_data(self): """get configuration commands and current configuration""" self.exist_conf['type'] = self.type self.exist_conf['source_ip'] = None self.exist_conf['host_ip'] = None self.exist_conf['host_port'] = None self.exist_conf['host_vpn'] = None self.exist_conf['version'] = None self.exist_conf['as_option'] = None self.exist_conf['bgp_netxhop'] = 'disable' self.config = self.get_netstream_config() if self.type and self.source_ip: self.config_nets_export_src_addr() if self.type and self.host_ip and self.host_port: self.config_nets_export_host_addr() if self.type == 'vxlan' and self.version == '9': self.config_nets_export_vxlan_ver() if self.type == 'ip' and self.version: self.config_nets_export_ip_ver() def work(self): """execute task""" self.check_params() self.get_proposed() self.get_config_data() self.get_existing() self.config_netstream_export() self.get_end_state() self.show_result() def main(): """main function entry""" argument_spec = dict( type=dict(required=True, type='str', choices=['ip', 'vxlan']), source_ip=dict(required=False, type='str'), host_ip=dict(required=False, type='str'), host_port=dict(required=False, type='str'), host_vpn=dict(required=False, type='str'), version=dict(required=False, type='str', choices=['5', '9']), as_option=dict(required=False, type='str', choices=['origin', 'peer']), bgp_nexthop=dict(required=False, type='str', choices=['enable', 'disable'], default='disable'), state=dict(choices=['absent', 'present'], default='present', required=False) ) argument_spec.update(ce_argument_spec) netstream_export = NetstreamExport(argument_spec) netstream_export.work() if __name__ == '__main__': main()
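

# --- Illustrative note on is_config_exist() (assumed data, not from a device).
# The helper treats a candidate command as present only when it is followed by
# nothing but optional spaces up to a line break or the end of the output
# gathered by "display current-configuration | include ^netstream export".
# A quick sanity check with made-up configuration text:
#
#     cfg = ("netstream export ip source 192.8.2.5\n"
#            "netstream export ip version 9 origin-as bgp-nexthop\n")
#     assert is_config_exist(cfg, "netstream export ip source 192.8.2.5")
#     assert not is_config_exist(cfg, "netstream export ip source 192.8.2")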
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin from django.contrib.auth.models import User from django.contrib.messages import add_message from django.contrib.messages import constants as messages from django.contrib.messages.views import SuccessMessageMixin from django.db.models import F, Count, Q, Case, When, IntegerField, ExpressionWrapper # testing from django.http import HttpResponseRedirect, JsonResponse from django.shortcuts import get_object_or_404 from django.urls import reverse from django.utils import timezone from django.views import generic from messaging.views import find_group from notifications.models import NotifType, notify_everybody, notify from .forms import RpgForm, RpgCreateForm from .models import Rpg, Tag from .templatetags.rpg_tags import can_manage from users.models import Member class Index(generic.ListView): template_name = 'rpgs/index.html' model = Rpg context_object_name = 'rpgs' paginate_by = 10 def get_queryset(self): Rpg.objects.filter(is_in_the_past=False, finishes__lt=timezone.now()).update(is_in_the_past=True) queryset = Rpg.objects.filter(unlisted=False) if self.request.GET.get('tag', False): queryset = queryset.filter(tags__name__iexact=self.request.GET['tag']) if self.request.GET.get('user', False): try: user = Member.objects.get(equiv_user__username__iexact=self.request.GET.get('user')) except Member.DoesNotExist: pass else: queryset = queryset.filter(Q(members=user) | Q(creator=user) | Q(game_masters=user)).distinct() if not self.request.GET.get('showfinished', False): queryset = queryset.filter(is_in_the_past=False) queryset = queryset.annotate( n_remain=ExpressionWrapper(F('players_wanted') - Count('members'), output_field=IntegerField())).annotate( full=Case(When(n_remain=0, then=1), default=0, output_field=IntegerField())) if self.request.GET.get('showfull', False) or not self.request.GET.get('isfilter', False): # second filter needed to detect if the filtered form has been submitted # as checkbox False is transmitted by omitting the attribute (stupid!) 
pass else: queryset = queryset.filter(full__exact=0) return queryset.order_by('-pinned', 'full', '-created_at') class Detail(generic.DetailView): template_name = 'rpgs/detail.html' model = Rpg context_object_name = 'rpg' class Create(LoginRequiredMixin, generic.CreateView): template_name = 'rpgs/create.html' model = Rpg form_class = RpgCreateForm def form_valid(self, form): form.instance.creator = self.request.user.member response = super().form_valid(form) for i in form.cleaned_data['tag_list']: tag, new = Tag.objects.get_or_create(name=i) self.object.tags.add(tag) self.object.game_masters.add(self.request.user.member) self.object.save() add_message(self.request, messages.SUCCESS, "Event successfully created") notify_everybody(NotifType.RPG_CREATE, "New Events are available for signup.", reverse('rpgs:detail', kwargs={'pk': self.object.id}), merge_key=self.object.id) return response class Update(LoginRequiredMixin, UserPassesTestMixin, generic.UpdateView): template_name = 'rpgs/edit.html' model = Rpg form_class = RpgForm def test_func(self): rpg = get_object_or_404(Rpg, id=self.kwargs['pk']) return can_manage(self.request.user.member, rpg) def form_valid(self, form): response = super().form_valid(form) for i in self.object.tags.all(): if i.name.lower() not in form.cleaned_data['tag_list']: self.object.tags.remove(i) for i in form.cleaned_data['tag_list']: tag, new = Tag.objects.get_or_create(name=i) if tag not in self.object.tags.all(): self.object.tags.add(tag) self.object.save() add_message(self.request, messages.SUCCESS, "Event updated") return response class Delete(LoginRequiredMixin, UserPassesTestMixin, SuccessMessageMixin, generic.DeleteView): template_name = 'rpgs/delete.html' model = Rpg success_message = "Event Deleted" def get_success_url(self): return reverse('rpgs:index') def test_func(self): rpg = get_object_or_404(Rpg, pk=self.kwargs['pk']) return can_manage(self.request.user.member, rpg) class Join(LoginRequiredMixin, generic.View): def __init__(self, **kwargs): super().__init__(**kwargs) def post(self, request, *args, **kwargs): rpg = get_object_or_404(Rpg, pk=self.kwargs['pk']) if self.request.user.member in rpg.members.all(): add_message(self.request, messages.WARNING, "You are already in that event!") elif self.request.user.member in rpg.game_masters.all(): add_message(self.request, messages.WARNING, "You are running that event!") elif rpg.members.count() >= rpg.players_wanted: add_message(self.request, messages.WARNING, "Sorry, the event is already full") elif len(self.request.user.member.discord.strip()) == 0 and rpg.discord: add_message(self.request, messages.WARNING, "This event is being held on discord. 
" "Please add a discord account to your profile and try again.") else: rpg.members.add(self.request.user.member) notify(rpg.creator, NotifType.RPG_JOIN, 'User {} joined your game "{}"!'.format(self.request.user.username, rpg.title), reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) add_message(self.request, messages.SUCCESS, "You have successfully joined that event") return HttpResponseRedirect(reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) class Leave(LoginRequiredMixin, generic.View): def __init__(self, **kwargs): super().__init__(**kwargs) def post(self, *args, **kwargs): rpg = get_object_or_404(Rpg, pk=self.kwargs['pk']) if self.request.user.member not in rpg.members.all(): add_message(self.request, messages.WARNING, "You are not currently in that event!") else: rpg.members.remove(self.request.user.member) notify(rpg.creator, NotifType.RPG_JOIN, 'User {} left your game "{}"!'.format(self.request.user.username, rpg.title), reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) add_message(self.request, messages.SUCCESS, "You have successfully left that event") return HttpResponseRedirect(reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) class Kick(LoginRequiredMixin, UserPassesTestMixin, generic.View): def __init__(self, **kwargs): self.rpg = None super().__init__(**kwargs) def test_func(self): self.rpg = get_object_or_404(Rpg, id=self.kwargs.get('pk')) return can_manage(self.request.user.member, self.rpg) def post(self, *args, **kwargs): kicked = User.objects.get(member__id=self.request.POST.get('user-to-remove')).member self.rpg.members.remove(kicked) notify(kicked, NotifType.RPG_KICK, 'You were kicked from the game "{}".'.format(self.rpg.title), reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) add_message(self.request, messages.SUCCESS, "{} Removed from Event".format(kicked.equiv_user.username)) return HttpResponseRedirect(reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) class AddMember(LoginRequiredMixin, UserPassesTestMixin, generic.View): def __init__(self, **kwargs): self.rpg = None super().__init__(**kwargs) def test_func(self): self.rpg = get_object_or_404(Rpg, id=self.kwargs['pk']) return can_manage(self.request.user.member, self.rpg) def post(self, *args, **kwargs): try: added = User.objects.get(username__iexact=self.request.POST.get('username')).member except User.DoesNotExist: add_message(self.request, messages.WARNING, "Username not found") else: if self.rpg.members.count() >= self.rpg.players_wanted: add_message(self.request, messages.WARNING, "Game is full") else: self.rpg.members.add(added) notify(added, NotifType.RPG_KICK, 'You were added to the game "{}".'.format(self.rpg.title), reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) add_message(self.request, messages.SUCCESS, "{} Added to Event".format(added.equiv_user.username)) return HttpResponseRedirect(reverse('rpgs:detail', kwargs={'pk': self.kwargs['pk']})) class MessageGroup(LoginRequiredMixin, UserPassesTestMixin, generic.RedirectView): def __init__(self, **kwargs): super().__init__(**kwargs) self.rpg = None def test_func(self): self.rpg = get_object_or_404(Rpg, id=self.kwargs['pk']) return self.request.user.member in self.rpg.members.all() or self.request.user.member in self.rpg.game_masters.all() def get_redirect_url(self, *args, **kwargs): members = {*self.rpg.members.all(), *self.rpg.game_masters.all()} group = find_group(*members, name=self.rpg.title) add_message(self.request, messages.WARNING, "Please note, if the people in the event change you will 
need to " "create a new messaging group.") return reverse("message:message_thread", kwargs={'pk': group.pk}) def alltags(request): tags = [x.name for x in Tag.objects.all().order_by('name')] return JsonResponse(tags, safe=False)
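

# --- Illustrative sketch: the URLconf implied by the views above.
# The reverse() calls in this file reference names such as 'rpgs:index' and
# 'rpgs:detail', which suggests an app URLconf roughly like the one below.
# The concrete paths and the entry for alltags() are inferred assumptions,
# not copied from the project's actual rpgs/urls.py:
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'rpgs'
#     urlpatterns = [
#         path('', views.Index.as_view(), name='index'),
#         path('<int:pk>/', views.Detail.as_view(), name='detail'),
#         path('<int:pk>/join/', views.Join.as_view(), name='join'),
#         path('<int:pk>/leave/', views.Leave.as_view(), name='leave'),
#         path('tags/', views.alltags, name='alltags'),
#     ]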
from django.shortcuts import get_object_or_404 from django.views.generic import (TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView, FormView, RedirectView) from django.core.urlresolvers import reverse, reverse_lazy from django.core.exceptions import ObjectDoesNotExist from django.http import HttpResponseRedirect, Http404 from django.contrib import messages from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import logout as auth_logout, login as auth_login from django.contrib.sites.models import get_current_site from django.conf import settings from oscar.core.loading import get_model from oscar.views.generic import PostActionMixin from oscar.apps.customer.utils import get_password_reset_url from oscar.core.loading import get_class, get_profile_class, get_classes from oscar.core.compat import get_user_model from . import signals PageTitleMixin, RegisterUserMixin = get_classes( 'customer.mixins', ['PageTitleMixin', 'RegisterUserMixin']) Dispatcher = get_class('customer.utils', 'Dispatcher') EmailAuthenticationForm, EmailUserCreationForm, OrderSearchForm = get_classes( 'customer.forms', ['EmailAuthenticationForm', 'EmailUserCreationForm', 'OrderSearchForm']) PasswordChangeForm = get_class('customer.forms', 'PasswordChangeForm') ProfileForm, ConfirmPasswordForm = get_classes( 'customer.forms', ['ProfileForm', 'ConfirmPasswordForm']) UserAddressForm = get_class('address.forms', 'UserAddressForm') Order = get_model('order', 'Order') Line = get_model('basket', 'Line') Basket = get_model('basket', 'Basket') UserAddress = get_model('address', 'UserAddress') Email = get_model('customer', 'Email') UserAddress = get_model('address', 'UserAddress') ProductAlert = get_model('customer', 'ProductAlert') CommunicationEventType = get_model('customer', 'CommunicationEventType') User = get_user_model() # ======= # Account # ======= class AccountSummaryView(RedirectView): """ View that exists for legacy reasons and customisability. It commonly gets called when the user clicks on "Account" in the navbar, and can be overriden to determine to what sub-page the user is directed without having to change a lot of templates. 
""" url = reverse_lazy(settings.OSCAR_ACCOUNTS_REDIRECT_URL) class AccountRegistrationView(RegisterUserMixin, FormView): form_class = EmailUserCreationForm template_name = 'customer/registration.html' redirect_field_name = 'next' def get(self, request, *args, **kwargs): if request.user.is_authenticated(): return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) return super(AccountRegistrationView, self).get( request, *args, **kwargs) def get_logged_in_redirect(self): return reverse('customer:summary') def get_form_kwargs(self): kwargs = super(AccountRegistrationView, self).get_form_kwargs() kwargs['initial'] = { 'email': self.request.GET.get('email', ''), 'redirect_url': self.request.GET.get(self.redirect_field_name, '') } kwargs['host'] = self.request.get_host() return kwargs def get_context_data(self, *args, **kwargs): ctx = super(AccountRegistrationView, self).get_context_data( *args, **kwargs) ctx['cancel_url'] = self.request.META.get('HTTP_REFERER', None) return ctx def form_valid(self, form): self.register_user(form) return HttpResponseRedirect( form.cleaned_data['redirect_url']) class AccountAuthView(RegisterUserMixin, TemplateView): """ This is actually a slightly odd double form view """ template_name = 'customer/login_registration.html' login_prefix, registration_prefix = 'login', 'registration' login_form_class = EmailAuthenticationForm registration_form_class = EmailUserCreationForm redirect_field_name = 'next' def get(self, request, *args, **kwargs): if request.user.is_authenticated(): return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) return super(AccountAuthView, self).get( request, *args, **kwargs) def get_context_data(self, *args, **kwargs): ctx = super(AccountAuthView, self).get_context_data(*args, **kwargs) ctx.update(kwargs) # Don't pass request as we don't want to trigger validation of BOTH # forms. 
if 'login_form' not in kwargs: ctx['login_form'] = self.get_login_form() if 'registration_form' not in kwargs: ctx['registration_form'] = self.get_registration_form() return ctx def get_login_form(self, request=None): return self.login_form_class(**self.get_login_form_kwargs(request)) def get_login_form_kwargs(self, request=None): kwargs = {} kwargs['host'] = self.request.get_host() kwargs['prefix'] = self.login_prefix kwargs['initial'] = { 'redirect_url': self.request.GET.get(self.redirect_field_name, ''), } if request and request.method in ('POST', 'PUT'): kwargs.update({ 'data': request.POST, 'files': request.FILES, }) return kwargs def get_registration_form(self, request=None): return self.registration_form_class( **self.get_registration_form_kwargs(request)) def get_registration_form_kwargs(self, request=None): kwargs = {} kwargs['host'] = self.request.get_host() kwargs['prefix'] = self.registration_prefix kwargs['initial'] = { 'redirect_url': self.request.GET.get(self.redirect_field_name, ''), } if request and request.method in ('POST', 'PUT'): kwargs.update({ 'data': request.POST, 'files': request.FILES, }) return kwargs def post(self, request, *args, **kwargs): # Use the name of the submit button to determine which form to validate if u'login_submit' in request.POST: return self.validate_login_form() elif u'registration_submit' in request.POST: return self.validate_registration_form() return self.get(request) def validate_login_form(self): form = self.get_login_form(self.request) if form.is_valid(): user = form.get_user() # Grab a reference to the session ID before logging in old_session_key = self.request.session.session_key auth_login(self.request, form.get_user()) # Raise signal robustly (we don't want exceptions to crash the # request handling). We use a custom signal as we want to track the # session key before calling login (which cycles the session ID). 
signals.user_logged_in.send_robust( sender=self, request=self.request, user=user, old_session_key=old_session_key) return HttpResponseRedirect(form.cleaned_data['redirect_url']) ctx = self.get_context_data(login_form=form) return self.render_to_response(ctx) def validate_registration_form(self): form = self.get_registration_form(self.request) if form.is_valid(): self.register_user(form) return HttpResponseRedirect(form.cleaned_data['redirect_url']) ctx = self.get_context_data(registration_form=form) return self.render_to_response(ctx) class LogoutView(RedirectView): url = settings.OSCAR_HOMEPAGE permanent = False def get(self, request, *args, **kwargs): auth_logout(request) response = super(LogoutView, self).get(request, *args, **kwargs) for cookie in settings.OSCAR_COOKIES_DELETE_ON_LOGOUT: response.delete_cookie(cookie) return response # ============= # Profile # ============= class ProfileView(PageTitleMixin, TemplateView): template_name = 'customer/profile/profile.html' page_title = _('Profile') active_tab = 'profile' def get_context_data(self, **kwargs): ctx = super(ProfileView, self).get_context_data(**kwargs) ctx['profile_fields'] = self.get_profile_fields(self.request.user) return ctx def get_profile_fields(self, user): field_data = [] # Check for custom user model for field_name in User._meta.additional_fields: field_data.append( self.get_model_field_data(user, field_name)) # Check for profile class profile_class = get_profile_class() if profile_class: try: profile = profile_class.objects.get(user=user) except ObjectDoesNotExist: profile = profile_class(user=user) field_names = [f.name for f in profile._meta.local_fields] for field_name in field_names: if field_name in ('user', 'id'): continue field_data.append( self.get_model_field_data(profile, field_name)) return field_data def get_model_field_data(self, model_class, field_name): """ Extract the verbose name and value for a model's field value """ field = model_class._meta.get_field(field_name) if field.choices: value = getattr(model_class, 'get_%s_display' % field_name)() else: value = getattr(model_class, field_name) return { 'name': getattr(field, 'verbose_name'), 'value': value, } class ProfileUpdateView(PageTitleMixin, FormView): form_class = ProfileForm template_name = 'customer/profile/profile_form.html' communication_type_code = 'EMAIL_CHANGED' page_title = _('Edit Profile') active_tab = 'profile' def get_form_kwargs(self): kwargs = super(ProfileUpdateView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def form_valid(self, form): # Grab current user instance before we save form. We may need this to # send a warning email if the email address is changed. try: old_user = User.objects.get(id=self.request.user.id) except User.DoesNotExist: old_user = None form.save() # We have to look up the email address from the form's # cleaned data because the object created by form.save() can # either be a user or profile instance depending whether a profile # class has been specified by the AUTH_PROFILE_MODULE setting. new_email = form.cleaned_data['email'] if old_user and new_email != old_user.email: # Email address has changed - send a confirmation email to the old # address including a password reset link in case this is a # suspicious change. 
ctx = { 'user': self.request.user, 'site': get_current_site(self.request), 'reset_url': get_password_reset_url(old_user), 'new_email': new_email, } msgs = CommunicationEventType.objects.get_and_render( code=self.communication_type_code, context=ctx) Dispatcher().dispatch_user_messages(old_user, msgs) messages.success(self.request, _("Profile updated")) return HttpResponseRedirect(self.get_success_url()) def get_success_url(self): return reverse('customer:profile-view') class ProfileDeleteView(PageTitleMixin, FormView): form_class = ConfirmPasswordForm template_name = 'customer/profile/profile_delete.html' page_title = _('Delete profile') active_tab = 'profile' success_url = settings.OSCAR_HOMEPAGE def get_form_kwargs(self): kwargs = super(ProfileDeleteView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def form_valid(self, form): self.request.user.delete() messages.success( self.request, _("Your profile has now been deleted. Thanks for using the site.")) return HttpResponseRedirect(self.get_success_url()) class ChangePasswordView(PageTitleMixin, FormView): form_class = PasswordChangeForm template_name = 'customer/profile/change_password_form.html' communication_type_code = 'PASSWORD_CHANGED' page_title = _('Change Password') active_tab = 'profile' def get_form_kwargs(self): kwargs = super(ChangePasswordView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def form_valid(self, form): form.save() messages.success(self.request, _("Password updated")) ctx = { 'user': self.request.user, 'site': get_current_site(self.request), 'reset_url': get_password_reset_url(self.request.user), } msgs = CommunicationEventType.objects.get_and_render( code=self.communication_type_code, context=ctx) Dispatcher().dispatch_user_messages(self.request.user, msgs) return HttpResponseRedirect(self.get_success_url()) def get_success_url(self): return reverse('customer:profile-view') # ============= # Email history # ============= class EmailHistoryView(PageTitleMixin, ListView): context_object_name = "emails" template_name = 'customer/email/email_list.html' paginate_by = 20 page_title = _('Email History') active_tab = 'emails' def get_queryset(self): return Email._default_manager.filter(user=self.request.user) class EmailDetailView(PageTitleMixin, DetailView): """Customer email""" template_name = "customer/email/email_detail.html" context_object_name = 'email' active_tab = 'emails' def get_object(self, queryset=None): return get_object_or_404(Email, user=self.request.user, id=self.kwargs['email_id']) def get_page_title(self): """Append email subject to page title""" return u'%s: %s' % (_('Email'), self.object.subject) # ============= # Order history # ============= class OrderHistoryView(PageTitleMixin, ListView): """ Customer order history """ context_object_name = "orders" template_name = 'customer/order/order_list.html' paginate_by = 20 model = Order form_class = OrderSearchForm page_title = _('Order History') active_tab = 'orders' def get(self, request, *args, **kwargs): if 'date_from' in request.GET: self.form = self.form_class(self.request.GET) if not self.form.is_valid(): self.object_list = self.get_queryset() ctx = self.get_context_data(object_list=self.object_list) return self.render_to_response(ctx) data = self.form.cleaned_data # If the user has just entered an order number, try and look it up # and redirect immediately to the order detail page. 
if data['order_number'] and not (data['date_to'] or data['date_from']): try: order = Order.objects.get( number=data['order_number'], user=self.request.user) except Order.DoesNotExist: pass else: return HttpResponseRedirect( reverse('customer:order', kwargs={'order_number': order.number})) else: self.form = self.form_class() return super(OrderHistoryView, self).get(request, *args, **kwargs) def get_queryset(self): qs = self.model._default_manager.filter(user=self.request.user) if self.form.is_bound and self.form.is_valid(): qs = qs.filter(**self.form.get_filters()) return qs def get_context_data(self, *args, **kwargs): ctx = super(OrderHistoryView, self).get_context_data(*args, **kwargs) ctx['form'] = self.form return ctx class OrderDetailView(PageTitleMixin, PostActionMixin, DetailView): model = Order active_tab = 'orders' def get_template_names(self): return ["customer/order/order_detail.html"] def get_page_title(self): """ Order number as page title """ return u'%s #%s' % (_('Order'), self.object.number) def get_object(self, queryset=None): return get_object_or_404(self.model, user=self.request.user, number=self.kwargs['order_number']) def do_reorder(self, order): # noqa (too complex (10)) """ 'Re-order' a previous order. This puts the contents of the previous order into your basket """ # Collect lines to be added to the basket and any warnings for lines # that are no longer available. basket = self.request.basket lines_to_add = [] warnings = [] for line in order.lines.all(): is_available, reason = line.is_available_to_reorder( basket, self.request.strategy) if is_available: lines_to_add.append(line) else: warnings.append(reason) # Check whether the number of items in the basket won't exceed the # maximum. total_quantity = sum([line.quantity for line in lines_to_add]) is_quantity_allowed, reason = basket.is_quantity_allowed( total_quantity) if not is_quantity_allowed: messages.warning(self.request, reason) self.response = HttpResponseRedirect( reverse('customer:order-list')) return # Add any warnings for warning in warnings: messages.warning(self.request, warning) for line in lines_to_add: options = [] for attribute in line.attributes.all(): if attribute.option: options.append({ 'option': attribute.option, 'value': attribute.value}) basket.add_product(line.product, line.quantity, options) if len(lines_to_add) > 0: self.response = HttpResponseRedirect(reverse('basket:summary')) messages.info( self.request, _("All available lines from order %(number)s " "have been added to your basket") % {'number': order.number}) else: self.response = HttpResponseRedirect( reverse('customer:order-list')) messages.warning( self.request, _("It is not possible to re-order order %(number)s " "as none of its lines are available to purchase") % {'number': order.number}) class OrderLineView(PostActionMixin, DetailView): """Customer order line""" def get_object(self, queryset=None): order = get_object_or_404(Order, user=self.request.user, number=self.kwargs['order_number']) return order.lines.get(id=self.kwargs['line_id']) def do_reorder(self, line): self.response = HttpResponseRedirect( reverse('customer:order', args=(int(self.kwargs['order_number']),))) basket = self.request.basket line_available_to_reorder, reason = line.is_available_to_reorder( basket, self.request.strategy) if not line_available_to_reorder: messages.warning(self.request, reason) return # We need to pass response to the get_or_create... 
method # as a new basket might need to be created self.response = HttpResponseRedirect(reverse('basket:summary')) # Convert line attributes into basket options options = [] for attribute in line.attributes.all(): if attribute.option: options.append({'option': attribute.option, 'value': attribute.value}) basket.add_product(line.product, line.quantity, options) if line.quantity > 1: msg = _("%(qty)d copies of '%(product)s' have been added to your" " basket") % { 'qty': line.quantity, 'product': line.product} else: msg = _("'%s' has been added to your basket") % line.product messages.info(self.request, msg) class AnonymousOrderDetailView(DetailView): model = Order template_name = "customer/anon_order.html" def get_object(self, queryset=None): # Check URL hash matches that for order to prevent spoof attacks order = get_object_or_404(self.model, user=None, number=self.kwargs['order_number']) if self.kwargs['hash'] != order.verification_hash(): raise Http404() return order # ------------ # Address book # ------------ class AddressListView(PageTitleMixin, ListView): """Customer address book""" context_object_name = "addresses" template_name = 'customer/address/address_list.html' paginate_by = 40 active_tab = 'addresses' page_title = _('Address Book') def get_queryset(self): """Return customer's addresses""" return UserAddress._default_manager.filter(user=self.request.user) class AddressCreateView(PageTitleMixin, CreateView): form_class = UserAddressForm model = UserAddress template_name = 'customer/address/address_form.html' active_tab = 'addresses' page_title = _('Add a new address') def get_form_kwargs(self): kwargs = super(AddressCreateView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def get_context_data(self, **kwargs): ctx = super(AddressCreateView, self).get_context_data(**kwargs) ctx['title'] = _('Add a new address') return ctx def get_success_url(self): messages.success(self.request, _("Address '%s' created") % self.object.summary) return reverse('customer:address-list') class AddressUpdateView(PageTitleMixin, UpdateView): form_class = UserAddressForm model = UserAddress template_name = 'customer/address/address_form.html' active_tab = 'addresses' page_title = _('Edit address') def get_form_kwargs(self): kwargs = super(AddressUpdateView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def get_context_data(self, **kwargs): ctx = super(AddressUpdateView, self).get_context_data(**kwargs) ctx['title'] = _('Edit address') return ctx def get_queryset(self): return self.request.user.addresses.all() def get_success_url(self): messages.success(self.request, _("Address '%s' updated") % self.object.summary) return reverse('customer:address-list') class AddressDeleteView(PageTitleMixin, DeleteView): model = UserAddress template_name = "customer/address/address_delete.html" page_title = _('Delete address?') active_tab = 'addresses' context_object_name = 'address' def get_queryset(self): return UserAddress._default_manager.filter(user=self.request.user) def get_success_url(self): messages.success(self.request, _("Address '%s' deleted") % self.object.summary) return reverse('customer:address-list') class AddressChangeStatusView(RedirectView): """ Sets an address as default_for_(billing|shipping) """ url = reverse_lazy('customer:address-list') def get(self, request, pk=None, action=None, *args, **kwargs): address = get_object_or_404(UserAddress, user=self.request.user, pk=pk) # We don't want the user to set an address as the default shipping # 
address when it belongs to a country we do not ship to, though
        # they should still be able to set it as their default billing
        # address.
        if address.country.is_shipping_country:
            setattr(address, 'is_%s' % action, True)
        elif action == 'default_for_billing':
            setattr(address, 'is_default_for_billing', True)
        else:
            messages.error(request, _('We do not ship to this country'))
        address.save()
        return super(AddressChangeStatusView, self).get(
            request, *args, **kwargs)
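

# --- Illustrative note: customising the "Account" landing page.
# AccountSummaryView above only redirects to the URL named by
# settings.OSCAR_ACCOUNTS_REDIRECT_URL, so a project can change where the
# navbar's "Account" link lands without overriding templates. The value
# below is an example taken from a URL name reversed in this file, not the
# package default:
#
#     # settings.py
#     OSCAR_ACCOUNTS_REDIRECT_URL = 'customer:order-list'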
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'mainGui2.ui' # # Created: Thu May 29 11:56:44 2014 # by: pyside-uic 0.2.14 running on PySide 1.2.0 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_mainDialog(object): def setupUi(self, mainDialog): mainDialog.setObjectName("mainDialog") mainDialog.resize(1061, 639) mainDialog.setStyleSheet("") self.frame = QtGui.QFrame(mainDialog) self.frame.setGeometry(QtCore.QRect(0, 0, 1061, 641)) self.frame.setStyleSheet("\n" "QFrame{\n" "font: 12pt \"Calibri\";\n" "background-color: rgb(208, 208, 208);\n" "}\n" "\n" "\n" "QLabel{\n" "\n" "}\n" "QToolButton{\n" "font: 12pt \"Calibri\";\n" "}") self.frame.setFrameShape(QtGui.QFrame.StyledPanel) self.frame.setFrameShadow(QtGui.QFrame.Raised) self.frame.setObjectName("frame") self.groupBoxInfo = QtGui.QGroupBox(self.frame) self.groupBoxInfo.setGeometry(QtCore.QRect(20, 160, 211, 271)) self.groupBoxInfo.setStyleSheet("font: 12pt \"Calibri\";") self.groupBoxInfo.setFlat(False) self.groupBoxInfo.setObjectName("groupBoxInfo") self.qlTech = QtGui.QLabel(self.groupBoxInfo) self.qlTech.setGeometry(QtCore.QRect(10, 30, 81, 21)) self.qlTech.setObjectName("qlTech") self.comboBoxTech = QtGui.QComboBox(self.groupBoxInfo) self.comboBoxTech.setGeometry(QtCore.QRect(100, 30, 101, 22)) self.comboBoxTech.setObjectName("comboBoxTech") self.qlBand = QtGui.QLabel(self.groupBoxInfo) self.qlBand.setGeometry(QtCore.QRect(10, 60, 81, 21)) self.qlBand.setObjectName("qlBand") self.comboBoxBand = QtGui.QComboBox(self.groupBoxInfo) self.comboBoxBand.setGeometry(QtCore.QRect(100, 60, 101, 22)) self.comboBoxBand.setObjectName("comboBoxBand") self.qlDLchTitle = QtGui.QLabel(self.groupBoxInfo) self.qlDLchTitle.setGeometry(QtCore.QRect(10, 120, 81, 21)) self.qlDLchTitle.setObjectName("qlDLchTitle") self.qlULchTitle = QtGui.QLabel(self.groupBoxInfo) self.qlULchTitle.setGeometry(QtCore.QRect(10, 150, 81, 21)) self.qlULchTitle.setObjectName("qlULchTitle") self.qlPDMTitle = QtGui.QLabel(self.groupBoxInfo) self.qlPDMTitle.setGeometry(QtCore.QRect(10, 180, 81, 21)) self.qlPDMTitle.setObjectName("qlPDMTitle") self.qlSMPSTitle = QtGui.QLabel(self.groupBoxInfo) self.qlSMPSTitle.setGeometry(QtCore.QRect(10, 210, 81, 21)) self.qlSMPSTitle.setObjectName("qlSMPSTitle") self.qlBWTitle = QtGui.QLabel(self.groupBoxInfo) self.qlBWTitle.setGeometry(QtCore.QRect(10, 90, 81, 21)) self.qlBWTitle.setObjectName("qlBWTitle") self.qlBW = QtGui.QLabel(self.groupBoxInfo) self.qlBW.setGeometry(QtCore.QRect(120, 90, 81, 21)) self.qlBW.setStyleSheet("") self.qlBW.setObjectName("qlBW") self.qlDLch = QtGui.QLabel(self.groupBoxInfo) self.qlDLch.setGeometry(QtCore.QRect(120, 120, 81, 21)) self.qlDLch.setObjectName("qlDLch") self.qlULch = QtGui.QLabel(self.groupBoxInfo) self.qlULch.setGeometry(QtCore.QRect(120, 150, 81, 21)) self.qlULch.setObjectName("qlULch") self.qlICQTitle = QtGui.QLabel(self.groupBoxInfo) self.qlICQTitle.setGeometry(QtCore.QRect(10, 240, 81, 21)) self.qlICQTitle.setObjectName("qlICQTitle") self.qlePDM = QtGui.QLineEdit(self.groupBoxInfo) self.qlePDM.setGeometry(QtCore.QRect(100, 180, 51, 20)) self.qlePDM.setInputMethodHints(QtCore.Qt.ImhNone) self.qlePDM.setMaxLength(3) self.qlePDM.setAlignment(QtCore.Qt.AlignCenter) self.qlePDM.setObjectName("qlePDM") self.btnSetPDM = QtGui.QToolButton(self.groupBoxInfo) self.btnSetPDM.setGeometry(QtCore.QRect(160, 180, 41, 21)) self.btnSetPDM.setObjectName("btnSetPDM") self.qleSMPS = QtGui.QLineEdit(self.groupBoxInfo) 
self.qleSMPS.setGeometry(QtCore.QRect(100, 210, 51, 20)) self.qleSMPS.setInputMethodHints(QtCore.Qt.ImhNone) self.qleSMPS.setMaxLength(4) self.qleSMPS.setAlignment(QtCore.Qt.AlignCenter) self.qleSMPS.setObjectName("qleSMPS") self.btnSetSMPS = QtGui.QToolButton(self.groupBoxInfo) self.btnSetSMPS.setGeometry(QtCore.QRect(160, 210, 41, 21)) self.btnSetSMPS.setObjectName("btnSetSMPS") self.qleICQ = QtGui.QLineEdit(self.groupBoxInfo) self.qleICQ.setGeometry(QtCore.QRect(100, 240, 51, 20)) self.qleICQ.setInputMethodHints(QtCore.Qt.ImhNone) self.qleICQ.setMaxLength(2) self.qleICQ.setAlignment(QtCore.Qt.AlignCenter) self.qleICQ.setObjectName("qleICQ") self.btnSetICQ = QtGui.QToolButton(self.groupBoxInfo) self.btnSetICQ.setGeometry(QtCore.QRect(160, 240, 41, 21)) self.btnSetICQ.setObjectName("btnSetICQ") self.groupBoxSweep = QtGui.QGroupBox(self.frame) self.groupBoxSweep.setGeometry(QtCore.QRect(20, 450, 211, 151)) self.groupBoxSweep.setStyleSheet("font: 12pt \"Calibri\";") self.groupBoxSweep.setObjectName("groupBoxSweep") self.qlSweepPDMStart = QtGui.QLabel(self.groupBoxSweep) self.qlSweepPDMStart.setGeometry(QtCore.QRect(10, 30, 81, 21)) self.qlSweepPDMStart.setObjectName("qlSweepPDMStart") self.qlSweepPDMEnd = QtGui.QLabel(self.groupBoxSweep) self.qlSweepPDMEnd.setGeometry(QtCore.QRect(10, 60, 81, 21)) self.qlSweepPDMEnd.setObjectName("qlSweepPDMEnd") self.spinBoxPDMStart = QtGui.QSpinBox(self.groupBoxSweep) self.spinBoxPDMStart.setGeometry(QtCore.QRect(110, 30, 71, 22)) self.spinBoxPDMStart.setObjectName("spinBoxPDMStart") self.spinBoxPDMEnd = QtGui.QSpinBox(self.groupBoxSweep) self.spinBoxPDMEnd.setGeometry(QtCore.QRect(110, 60, 71, 22)) self.spinBoxPDMEnd.setObjectName("spinBoxPDMEnd") self.btnStartSweep = QtGui.QToolButton(self.groupBoxSweep) self.btnStartSweep.setGeometry(QtCore.QRect(50, 100, 91, 31)) self.btnStartSweep.setObjectName("btnStartSweep") self.btnChMinus = QtGui.QToolButton(self.frame) self.btnChMinus.setGeometry(QtCore.QRect(590, 500, 61, 61)) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/icon/left.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.btnChMinus.setIcon(icon) self.btnChMinus.setIconSize(QtCore.QSize(32, 32)) self.btnChMinus.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.btnChMinus.setObjectName("btnChMinus") self.btnPDMPlus = QtGui.QToolButton(self.frame) self.btnPDMPlus.setGeometry(QtCore.QRect(660, 460, 61, 61)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(":/icon/up.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.btnPDMPlus.setIcon(icon1) self.btnPDMPlus.setIconSize(QtCore.QSize(32, 32)) self.btnPDMPlus.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.btnPDMPlus.setObjectName("btnPDMPlus") self.btnChPlus = QtGui.QToolButton(self.frame) self.btnChPlus.setGeometry(QtCore.QRect(730, 500, 61, 61)) icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap(":/icon/right.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.btnChPlus.setIcon(icon2) self.btnChPlus.setIconSize(QtCore.QSize(32, 32)) self.btnChPlus.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.btnChPlus.setObjectName("btnChPlus") self.btnPDMMinus = QtGui.QToolButton(self.frame) self.btnPDMMinus.setGeometry(QtCore.QRect(660, 540, 61, 61)) icon3 = QtGui.QIcon() icon3.addPixmap(QtGui.QPixmap(":/icon/down.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.btnPDMMinus.setIcon(icon3) self.btnPDMMinus.setIconSize(QtCore.QSize(32, 32)) self.btnPDMMinus.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.btnPDMMinus.setObjectName("btnPDMMinus") self.groupBoxMisc = 
QtGui.QGroupBox(self.frame) self.groupBoxMisc.setGeometry(QtCore.QRect(20, 20, 211, 131)) self.groupBoxMisc.setStyleSheet("font: 12pt \"Calibri\";") self.groupBoxMisc.setObjectName("groupBoxMisc") self.qlGPIBTitle = QtGui.QLabel(self.groupBoxMisc) self.qlGPIBTitle.setGeometry(QtCore.QRect(10, 30, 91, 21)) self.qlGPIBTitle.setObjectName("qlGPIBTitle") self.qlGPIBTitle_2 = QtGui.QLabel(self.groupBoxMisc) self.qlGPIBTitle_2.setGeometry(QtCore.QRect(10, 60, 91, 21)) self.qlGPIBTitle_2.setObjectName("qlGPIBTitle_2") self.qleGPIB = QtGui.QLineEdit(self.groupBoxMisc) self.qleGPIB.setGeometry(QtCore.QRect(110, 30, 41, 20)) self.qleGPIB.setInputMethodHints(QtCore.Qt.ImhNone) self.qleGPIB.setMaxLength(3) self.qleGPIB.setAlignment(QtCore.Qt.AlignCenter) self.qleGPIB.setObjectName("qleGPIB") self.btnSetGPIB = QtGui.QToolButton(self.groupBoxMisc) self.btnSetGPIB.setGeometry(QtCore.QRect(160, 30, 41, 21)) self.btnSetGPIB.setObjectName("btnSetGPIB") self.qleGPIB_2 = QtGui.QLineEdit(self.groupBoxMisc) self.qleGPIB_2.setGeometry(QtCore.QRect(110, 60, 41, 20)) self.qleGPIB_2.setInputMethodHints(QtCore.Qt.ImhNone) self.qleGPIB_2.setMaxLength(3) self.qleGPIB_2.setAlignment(QtCore.Qt.AlignCenter) self.qleGPIB_2.setObjectName("qleGPIB_2") self.btnSetGPIB_2 = QtGui.QToolButton(self.groupBoxMisc) self.btnSetGPIB_2.setGeometry(QtCore.QRect(160, 60, 41, 21)) self.btnSetGPIB_2.setObjectName("btnSetGPIB_2") self.btnSetCOM = QtGui.QToolButton(self.groupBoxMisc) self.btnSetCOM.setGeometry(QtCore.QRect(160, 90, 41, 21)) self.btnSetCOM.setObjectName("btnSetCOM") self.qleCOM = QtGui.QLineEdit(self.groupBoxMisc) self.qleCOM.setGeometry(QtCore.QRect(110, 90, 41, 20)) self.qleCOM.setInputMethodHints(QtCore.Qt.ImhNone) self.qleCOM.setMaxLength(3) self.qleCOM.setAlignment(QtCore.Qt.AlignCenter) self.qleCOM.setObjectName("qleCOM") self.qlCOMTitle = QtGui.QLabel(self.groupBoxMisc) self.qlCOMTitle.setGeometry(QtCore.QRect(10, 90, 91, 21)) self.qlCOMTitle.setObjectName("qlCOMTitle") self.frame_3 = QtGui.QFrame(self.frame) self.frame_3.setGeometry(QtCore.QRect(250, 460, 131, 31)) self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel) self.frame_3.setFrameShadow(QtGui.QFrame.Raised) self.frame_3.setObjectName("frame_3") self.btnTxOff = QtGui.QToolButton(self.frame_3) self.btnTxOff.setGeometry(QtCore.QRect(70, 0, 61, 31)) self.btnTxOff.setCheckable(True) self.btnTxOff.setAutoExclusive(True) self.btnTxOff.setObjectName("btnTxOff") self.btnTxOn = QtGui.QToolButton(self.frame_3) self.btnTxOn.setGeometry(QtCore.QRect(0, 0, 61, 31)) self.btnTxOn.setCheckable(True) self.btnTxOn.setAutoExclusive(True) self.btnTxOn.setObjectName("btnTxOn") self.tableWidget = QtGui.QTableWidget(self.frame) self.tableWidget.setGeometry(QtCore.QRect(250, 50, 761, 391)) self.tableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.tableWidget.setStyleSheet("font: 10pt \"Calibri\";\n" "background-color: rgb(225, 225, 225);") self.tableWidget.setRowCount(15) self.tableWidget.setColumnCount(12) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setColumnCount(12) self.tableWidget.setRowCount(15) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(3, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(4, item) item = QtGui.QTableWidgetItem() 
self.tableWidget.setHorizontalHeaderItem(5, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(6, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(7, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(8, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(9, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(10, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(11, item) item = QtGui.QTableWidgetItem() self.tableWidget.setItem(0, 0, item) self.tableWidget.horizontalHeader().setCascadingSectionResizes(False) self.tableWidget.horizontalHeader().setDefaultSectionSize(60) self.tableWidget.verticalHeader().setVisible(True) self.tableWidget.verticalHeader().setDefaultSectionSize(25) self.qlMessage = QtGui.QLabel(self.frame) self.qlMessage.setGeometry(QtCore.QRect(260, 20, 591, 16)) self.qlMessage.setObjectName("qlMessage") self.groupBoxSMPSStep = QtGui.QGroupBox(self.frame) self.groupBoxSMPSStep.setGeometry(QtCore.QRect(800, 450, 101, 151)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.groupBoxSMPSStep.setFont(font) self.groupBoxSMPSStep.setObjectName("groupBoxSMPSStep") self.qleSMPSStep = QtGui.QLineEdit(self.groupBoxSMPSStep) self.qleSMPSStep.setGeometry(QtCore.QRect(10, 30, 81, 21)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.qleSMPSStep.setFont(font) self.qleSMPSStep.setInputMethodHints(QtCore.Qt.ImhNone) self.qleSMPSStep.setMaxLength(4) self.qleSMPSStep.setAlignment(QtCore.Qt.AlignCenter) self.qleSMPSStep.setObjectName("qleSMPSStep") self.btnSMPSPlus = QtGui.QToolButton(self.groupBoxSMPSStep) self.btnSMPSPlus.setGeometry(QtCore.QRect(10, 60, 81, 31)) self.btnSMPSPlus.setObjectName("btnSMPSPlus") self.btnSMPSMinus = QtGui.QToolButton(self.groupBoxSMPSStep) self.btnSMPSMinus.setGeometry(QtCore.QRect(10, 100, 81, 31)) self.btnSMPSMinus.setObjectName("btnSMPSMinus") self.groupBoxICQStep = QtGui.QGroupBox(self.frame) self.groupBoxICQStep.setGeometry(QtCore.QRect(910, 450, 101, 151)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.groupBoxICQStep.setFont(font) self.groupBoxICQStep.setObjectName("groupBoxICQStep") self.qleICQStep = QtGui.QLineEdit(self.groupBoxICQStep) self.qleICQStep.setGeometry(QtCore.QRect(10, 30, 81, 21)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.qleICQStep.setFont(font) self.qleICQStep.setInputMethodHints(QtCore.Qt.ImhNone) self.qleICQStep.setMaxLength(2) self.qleICQStep.setAlignment(QtCore.Qt.AlignCenter) self.qleICQStep.setObjectName("qleICQStep") self.btnICQPlus = QtGui.QToolButton(self.groupBoxICQStep) self.btnICQPlus.setGeometry(QtCore.QRect(10, 60, 81, 31)) self.btnICQPlus.setObjectName("btnICQPlus") self.btnICQMinus = QtGui.QToolButton(self.groupBoxICQStep) self.btnICQMinus.setGeometry(QtCore.QRect(10, 100, 81, 31)) self.btnICQMinus.setObjectName("btnICQMinus") self.groupBoxPA = QtGui.QGroupBox(self.frame) self.groupBoxPA.setGeometry(QtCore.QRect(250, 540, 131, 61)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.groupBoxPA.setFont(font) self.groupBoxPA.setObjectName("groupBoxPA") self.btnSetPARange = QtGui.QToolButton(self.groupBoxPA) self.btnSetPARange.setGeometry(QtCore.QRect(70, 30, 51, 21)) self.btnSetPARange.setObjectName("btnSetPARange") self.qlePARange = QtGui.QLineEdit(self.groupBoxPA) self.qlePARange.setGeometry(QtCore.QRect(10, 30, 51, 20)) font = 
QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.qlePARange.setFont(font) self.qlePARange.setInputMethodHints(QtCore.Qt.ImhNone) self.qlePARange.setMaxLength(1) self.qlePARange.setAlignment(QtCore.Qt.AlignCenter) self.qlePARange.setObjectName("qlePARange") self.frame_2 = QtGui.QFrame(self.frame) self.frame_2.setGeometry(QtCore.QRect(250, 500, 131, 31)) self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel) self.frame_2.setFrameShadow(QtGui.QFrame.Raised) self.frame_2.setObjectName("frame_2") self.btnHPM = QtGui.QToolButton(self.frame_2) self.btnHPM.setGeometry(QtCore.QRect(0, 0, 61, 31)) self.btnHPM.setCheckable(True) self.btnHPM.setAutoExclusive(True) self.btnHPM.setObjectName("btnHPM") self.btnLPM = QtGui.QToolButton(self.frame_2) self.btnLPM.setGeometry(QtCore.QRect(70, 0, 61, 31)) self.btnLPM.setCheckable(True) self.btnLPM.setAutoExclusive(True) self.btnLPM.setObjectName("btnLPM") self.groupBoxMIPI = QtGui.QGroupBox(self.frame) self.groupBoxMIPI.setGeometry(QtCore.QRect(400, 450, 171, 151)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.groupBoxMIPI.setFont(font) self.groupBoxMIPI.setObjectName("groupBoxMIPI") self.qlMIPISlaveIDTitle = QtGui.QLabel(self.groupBoxMIPI) self.qlMIPISlaveIDTitle.setGeometry(QtCore.QRect(90, 40, 71, 21)) self.qlMIPISlaveIDTitle.setObjectName("qlMIPISlaveIDTitle") self.qleMIPISlaveID = QtGui.QLineEdit(self.groupBoxMIPI) self.qleMIPISlaveID.setGeometry(QtCore.QRect(90, 70, 61, 20)) font = QtGui.QFont() font.setFamily("Calibri") font.setPointSize(12) self.qleMIPISlaveID.setFont(font) self.qleMIPISlaveID.setInputMethodHints(QtCore.Qt.ImhNone) self.qleMIPISlaveID.setMaxLength(1) self.qleMIPISlaveID.setAlignment(QtCore.Qt.AlignCenter) self.qleMIPISlaveID.setObjectName("qleMIPISlaveID") self.btnSetMIPISlaveID = QtGui.QToolButton(self.groupBoxMIPI) self.btnSetMIPISlaveID.setGeometry(QtCore.QRect(90, 100, 61, 31)) self.btnSetMIPISlaveID.setObjectName("btnSetMIPISlaveID") self.qlMIPIChTitle = QtGui.QLabel(self.groupBoxMIPI) self.qlMIPIChTitle.setGeometry(QtCore.QRect(10, 40, 71, 21)) self.qlMIPIChTitle.setObjectName("qlMIPIChTitle") self.qcbMIPICh = QtGui.QComboBox(self.groupBoxMIPI) self.qcbMIPICh.setGeometry(QtCore.QRect(10, 70, 61, 22)) self.qcbMIPICh.setObjectName("qcbMIPICh") self.qlAuthor = QtGui.QLabel(self.frame) self.qlAuthor.setGeometry(QtCore.QRect(890, 620, 171, 16)) self.qlAuthor.setStyleSheet("color: red") self.qlAuthor.setObjectName("qlAuthor") self.actionCopy = QtGui.QAction(mainDialog) self.actionCopy.setObjectName("actionCopy") self.retranslateUi(mainDialog) QtCore.QMetaObject.connectSlotsByName(mainDialog) mainDialog.setTabOrder(self.comboBoxTech, self.comboBoxBand) mainDialog.setTabOrder(self.comboBoxBand, self.spinBoxPDMStart) mainDialog.setTabOrder(self.spinBoxPDMStart, self.spinBoxPDMEnd) mainDialog.setTabOrder(self.spinBoxPDMEnd, self.btnStartSweep) mainDialog.setTabOrder(self.btnStartSweep, self.btnHPM) mainDialog.setTabOrder(self.btnHPM, self.btnLPM) mainDialog.setTabOrder(self.btnLPM, self.btnTxOn) mainDialog.setTabOrder(self.btnTxOn, self.btnTxOff) mainDialog.setTabOrder(self.btnTxOff, self.btnChMinus) mainDialog.setTabOrder(self.btnChMinus, self.btnChPlus) mainDialog.setTabOrder(self.btnChPlus, self.btnPDMPlus) mainDialog.setTabOrder(self.btnPDMPlus, self.btnPDMMinus) def retranslateUi(self, mainDialog): mainDialog.setWindowTitle(QtGui.QApplication.translate("mainDialog", "RF Tuning Tool", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxInfo.setTitle(QtGui.QApplication.translate("mainDialog", 
"Info", None, QtGui.QApplication.UnicodeUTF8)) self.qlTech.setText(QtGui.QApplication.translate("mainDialog", "Technology", None, QtGui.QApplication.UnicodeUTF8)) self.qlBand.setText(QtGui.QApplication.translate("mainDialog", "Band", None, QtGui.QApplication.UnicodeUTF8)) self.qlDLchTitle.setText(QtGui.QApplication.translate("mainDialog", "DL channel", None, QtGui.QApplication.UnicodeUTF8)) self.qlULchTitle.setText(QtGui.QApplication.translate("mainDialog", "UL channel", None, QtGui.QApplication.UnicodeUTF8)) self.qlPDMTitle.setText(QtGui.QApplication.translate("mainDialog", "PDM", None, QtGui.QApplication.UnicodeUTF8)) self.qlSMPSTitle.setText(QtGui.QApplication.translate("mainDialog", "SMPS", None, QtGui.QApplication.UnicodeUTF8)) self.qlBWTitle.setText(QtGui.QApplication.translate("mainDialog", "Bandwidth", None, QtGui.QApplication.UnicodeUTF8)) self.qlBW.setText(QtGui.QApplication.translate("mainDialog", "5MHz", None, QtGui.QApplication.UnicodeUTF8)) self.qlDLch.setText(QtGui.QApplication.translate("mainDialog", "10700", None, QtGui.QApplication.UnicodeUTF8)) self.qlULch.setText(QtGui.QApplication.translate("mainDialog", "9750", None, QtGui.QApplication.UnicodeUTF8)) self.qlICQTitle.setText(QtGui.QApplication.translate("mainDialog", "ICQ", None, QtGui.QApplication.UnicodeUTF8)) self.qlePDM.setInputMask(QtGui.QApplication.translate("mainDialog", "000; ", None, QtGui.QApplication.UnicodeUTF8)) self.qlePDM.setText(QtGui.QApplication.translate("mainDialog", "80", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetPDM.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.qleSMPS.setInputMask(QtGui.QApplication.translate("mainDialog", "0000; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleSMPS.setText(QtGui.QApplication.translate("mainDialog", "3400", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetSMPS.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.qleICQ.setInputMask(QtGui.QApplication.translate("mainDialog", "hh; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleICQ.setText(QtGui.QApplication.translate("mainDialog", "0", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetICQ.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxSweep.setTitle(QtGui.QApplication.translate("mainDialog", "Sweep", None, QtGui.QApplication.UnicodeUTF8)) self.qlSweepPDMStart.setText(QtGui.QApplication.translate("mainDialog", "Start PDM", None, QtGui.QApplication.UnicodeUTF8)) self.qlSweepPDMEnd.setText(QtGui.QApplication.translate("mainDialog", "End PDM", None, QtGui.QApplication.UnicodeUTF8)) self.btnStartSweep.setText(QtGui.QApplication.translate("mainDialog", "Start Sweep", None, QtGui.QApplication.UnicodeUTF8)) self.btnChMinus.setText(QtGui.QApplication.translate("mainDialog", "-ch", None, QtGui.QApplication.UnicodeUTF8)) self.btnChMinus.setShortcut(QtGui.QApplication.translate("mainDialog", "Left", None, QtGui.QApplication.UnicodeUTF8)) self.btnPDMPlus.setText(QtGui.QApplication.translate("mainDialog", "+PDM", None, QtGui.QApplication.UnicodeUTF8)) self.btnPDMPlus.setShortcut(QtGui.QApplication.translate("mainDialog", "Up", None, QtGui.QApplication.UnicodeUTF8)) self.btnChPlus.setText(QtGui.QApplication.translate("mainDialog", "+ch", None, QtGui.QApplication.UnicodeUTF8)) self.btnChPlus.setShortcut(QtGui.QApplication.translate("mainDialog", "Right", None, QtGui.QApplication.UnicodeUTF8)) 
self.btnPDMMinus.setText(QtGui.QApplication.translate("mainDialog", "-PDM", None, QtGui.QApplication.UnicodeUTF8)) self.btnPDMMinus.setShortcut(QtGui.QApplication.translate("mainDialog", "Down", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxMisc.setTitle(QtGui.QApplication.translate("mainDialog", "Misc", None, QtGui.QApplication.UnicodeUTF8)) self.qlGPIBTitle.setText(QtGui.QApplication.translate("mainDialog", "Callbox GPIB", None, QtGui.QApplication.UnicodeUTF8)) self.qlGPIBTitle_2.setText(QtGui.QApplication.translate("mainDialog", "Power Supply", None, QtGui.QApplication.UnicodeUTF8)) self.qleGPIB.setInputMask(QtGui.QApplication.translate("mainDialog", "000; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleGPIB.setText(QtGui.QApplication.translate("mainDialog", "14", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetGPIB.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.qleGPIB_2.setInputMask(QtGui.QApplication.translate("mainDialog", "000; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleGPIB_2.setText(QtGui.QApplication.translate("mainDialog", "8", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetGPIB_2.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetCOM.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.qleCOM.setInputMask(QtGui.QApplication.translate("mainDialog", "000; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleCOM.setText(QtGui.QApplication.translate("mainDialog", "18", None, QtGui.QApplication.UnicodeUTF8)) self.qlCOMTitle.setText(QtGui.QApplication.translate("mainDialog", "Phone COM", None, QtGui.QApplication.UnicodeUTF8)) self.btnTxOff.setText(QtGui.QApplication.translate("mainDialog", "Tx OFF", None, QtGui.QApplication.UnicodeUTF8)) self.btnTxOn.setText(QtGui.QApplication.translate("mainDialog", "Tx ON", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("mainDialog", "Channel", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("mainDialog", "Tx power", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(2).setText(QtGui.QApplication.translate("mainDialog", "PDM", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(3).setText(QtGui.QApplication.translate("mainDialog", "Max curr", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(4).setText(QtGui.QApplication.translate("mainDialog", "min curr", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(5).setText(QtGui.QApplication.translate("mainDialog", "Current", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(6).setText(QtGui.QApplication.translate("mainDialog", "-5MHz", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(7).setText(QtGui.QApplication.translate("mainDialog", "+5MHz", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(8).setText(QtGui.QApplication.translate("mainDialog", "EURTA-1", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(9).setText(QtGui.QApplication.translate("mainDialog", "EUTRA+1", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(10).setText(QtGui.QApplication.translate("mainDialog", "SMPS", None, QtGui.QApplication.UnicodeUTF8)) 
self.tableWidget.horizontalHeaderItem(11).setText(QtGui.QApplication.translate("mainDialog", "ICQ", None, QtGui.QApplication.UnicodeUTF8)) __sortingEnabled = self.tableWidget.isSortingEnabled() self.tableWidget.setSortingEnabled(False) self.tableWidget.item(0, 0).setText(QtGui.QApplication.translate("mainDialog", "9750", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.setSortingEnabled(__sortingEnabled) self.qlMessage.setText(QtGui.QApplication.translate("mainDialog", "TextLabel", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxSMPSStep.setTitle(QtGui.QApplication.translate("mainDialog", "SMPS Step", None, QtGui.QApplication.UnicodeUTF8)) self.qleSMPSStep.setInputMask(QtGui.QApplication.translate("mainDialog", "0000; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleSMPSStep.setText(QtGui.QApplication.translate("mainDialog", "100", None, QtGui.QApplication.UnicodeUTF8)) self.btnSMPSPlus.setText(QtGui.QApplication.translate("mainDialog", "+SMPS", None, QtGui.QApplication.UnicodeUTF8)) self.btnSMPSMinus.setText(QtGui.QApplication.translate("mainDialog", "-SMPS", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxICQStep.setTitle(QtGui.QApplication.translate("mainDialog", "ICQ Step", None, QtGui.QApplication.UnicodeUTF8)) self.qleICQStep.setInputMask(QtGui.QApplication.translate("mainDialog", "hh; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleICQStep.setText(QtGui.QApplication.translate("mainDialog", "5", None, QtGui.QApplication.UnicodeUTF8)) self.btnICQPlus.setText(QtGui.QApplication.translate("mainDialog", "+ICQ", None, QtGui.QApplication.UnicodeUTF8)) self.btnICQMinus.setText(QtGui.QApplication.translate("mainDialog", "-ICQ", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxPA.setTitle(QtGui.QApplication.translate("mainDialog", "PA Range", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetPARange.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.qlePARange.setInputMask(QtGui.QApplication.translate("mainDialog", "9; ", None, QtGui.QApplication.UnicodeUTF8)) self.qlePARange.setText(QtGui.QApplication.translate("mainDialog", "3", None, QtGui.QApplication.UnicodeUTF8)) self.btnHPM.setText(QtGui.QApplication.translate("mainDialog", "HPM", None, QtGui.QApplication.UnicodeUTF8)) self.btnLPM.setText(QtGui.QApplication.translate("mainDialog", "LPM", None, QtGui.QApplication.UnicodeUTF8)) self.groupBoxMIPI.setTitle(QtGui.QApplication.translate("mainDialog", "MIPI", None, QtGui.QApplication.UnicodeUTF8)) self.qlMIPISlaveIDTitle.setText(QtGui.QApplication.translate("mainDialog", "Slave ID", None, QtGui.QApplication.UnicodeUTF8)) self.qleMIPISlaveID.setInputMask(QtGui.QApplication.translate("mainDialog", "H; ", None, QtGui.QApplication.UnicodeUTF8)) self.qleMIPISlaveID.setText(QtGui.QApplication.translate("mainDialog", "C", None, QtGui.QApplication.UnicodeUTF8)) self.btnSetMIPISlaveID.setText(QtGui.QApplication.translate("mainDialog", "Set", None, QtGui.QApplication.UnicodeUTF8)) self.qlMIPIChTitle.setText(QtGui.QApplication.translate("mainDialog", "Channel", None, QtGui.QApplication.UnicodeUTF8)) self.qlAuthor.setText(QtGui.QApplication.translate("mainDialog", "William CHANG @2014", None, QtGui.QApplication.UnicodeUTF8)) self.actionCopy.setText(QtGui.QApplication.translate("mainDialog", "Copy", None, QtGui.QApplication.UnicodeUTF8)) self.actionCopy.setShortcut(QtGui.QApplication.translate("mainDialog", "Ctrl+C", None, QtGui.QApplication.UnicodeUTF8)) import icon_rc
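# --- Usage sketch (not part of the generated module) --------------------------
# Qt Designer output like the setup code above is normally consumed by a small
# launcher script. The module and class names below are assumptions (pyuic4
# conventionally emits a class named Ui_<objectName> exposing setupUi() and the
# retranslateUi() shown above); adjust them to the names actually generated.
#
#   import sys
#   from PyQt4 import QtGui
#   from rf_tuning_ui import Ui_mainDialog   # hypothetical module/class names
#
#   app = QtGui.QApplication(sys.argv)
#   dialog = QtGui.QDialog()
#   ui = Ui_mainDialog()
#   ui.setupUi(dialog)   # builds and wires all the widgets configured above
#   dialog.show()
#   sys.exit(app.exec_())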
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock def _make_credentials(): import google.auth.credentials return mock.Mock(spec=google.auth.credentials.Credentials) class TestClient(unittest.TestCase): PROJECT = "PROJECT" ZONE_NAME = "zone-name" @staticmethod def _get_target_class(): from google.cloud.dns.client import Client return Client def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor(self): from google.cloud.dns._http import Connection creds = _make_credentials() http = object() client = self._make_one(project=self.PROJECT, credentials=creds, _http=http) self.assertIsInstance(client._connection, Connection) self.assertIs(client._connection.credentials, creds) self.assertIs(client._connection.http, http) def test_quotas_defaults(self): PATH = "projects/%s" % (self.PROJECT,) MANAGED_ZONES = 1234 RRS_PER_RRSET = 23 RRSETS_PER_ZONE = 345 RRSET_ADDITIONS = 456 RRSET_DELETIONS = 567 TOTAL_SIZE = 67890 DATA = { "quota": { "managedZones": str(MANAGED_ZONES), "resourceRecordsPerRrset": str(RRS_PER_RRSET), "rrsetsPerManagedZone": str(RRSETS_PER_ZONE), "rrsetAdditionsPerChange": str(RRSET_ADDITIONS), "rrsetDeletionsPerChange": str(RRSET_DELETIONS), "totalRrdataSizePerChange": str(TOTAL_SIZE), } } CONVERTED = {key: int(value) for key, value in DATA["quota"].items()} creds = _make_credentials() client = self._make_one(self.PROJECT, creds) conn = client._connection = _Connection(DATA) quotas = client.quotas() self.assertEqual(quotas, CONVERTED) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req["method"], "GET") self.assertEqual(req["path"], "/%s" % PATH) def test_quotas_w_kind_key(self): PATH = "projects/%s" % (self.PROJECT,) MANAGED_ZONES = 1234 RRS_PER_RRSET = 23 RRSETS_PER_ZONE = 345 RRSET_ADDITIONS = 456 RRSET_DELETIONS = 567 TOTAL_SIZE = 67890 DATA = { "quota": { "managedZones": str(MANAGED_ZONES), "resourceRecordsPerRrset": str(RRS_PER_RRSET), "rrsetsPerManagedZone": str(RRSETS_PER_ZONE), "rrsetAdditionsPerChange": str(RRSET_ADDITIONS), "rrsetDeletionsPerChange": str(RRSET_DELETIONS), "totalRrdataSizePerChange": str(TOTAL_SIZE), } } CONVERTED = {key: int(value) for key, value in DATA["quota"].items()} WITH_KIND = {"quota": DATA["quota"].copy()} WITH_KIND["quota"]["kind"] = "dns#quota" creds = _make_credentials() client = self._make_one(self.PROJECT, creds) conn = client._connection = _Connection(WITH_KIND) quotas = client.quotas() self.assertEqual(quotas, CONVERTED) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req["method"], "GET") self.assertEqual(req["path"], "/%s" % PATH) def test_list_zones_defaults(self): import six from google.cloud.dns.zone import ManagedZone ID_1 = "123" ZONE_1 = "zone_one" DNS_1 = "one.example.com" ID_2 = "234" ZONE_2 = "zone_two" DNS_2 = "two.example.com" PATH = "projects/%s/managedZones" % (self.PROJECT,) TOKEN = "TOKEN" DATA = { "nextPageToken": TOKEN, "managedZones": [ { "kind": "dns#managedZone", "id": 
ID_1, "name": ZONE_1, "dnsName": DNS_1, }, { "kind": "dns#managedZone", "id": ID_2, "name": ZONE_2, "dnsName": DNS_2, }, ], } creds = _make_credentials() client = self._make_one(self.PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_zones() page = six.next(iterator.pages) zones = list(page) token = iterator.next_page_token self.assertEqual(len(zones), len(DATA["managedZones"])) for found, expected in zip(zones, DATA["managedZones"]): self.assertIsInstance(found, ManagedZone) self.assertEqual(found.zone_id, expected["id"]) self.assertEqual(found.name, expected["name"]) self.assertEqual(found.dns_name, expected["dnsName"]) self.assertEqual(token, TOKEN) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req["method"], "GET") self.assertEqual(req["path"], "/%s" % PATH) def test_list_zones_explicit(self): import six from google.cloud.dns.zone import ManagedZone ID_1 = "123" ZONE_1 = "zone_one" DNS_1 = "one.example.com" ID_2 = "234" ZONE_2 = "zone_two" DNS_2 = "two.example.com" PATH = "projects/%s/managedZones" % (self.PROJECT,) TOKEN = "TOKEN" DATA = { "managedZones": [ { "kind": "dns#managedZone", "id": ID_1, "name": ZONE_1, "dnsName": DNS_1, }, { "kind": "dns#managedZone", "id": ID_2, "name": ZONE_2, "dnsName": DNS_2, }, ] } creds = _make_credentials() client = self._make_one(self.PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_zones(max_results=3, page_token=TOKEN) page = six.next(iterator.pages) zones = list(page) token = iterator.next_page_token self.assertEqual(len(zones), len(DATA["managedZones"])) for found, expected in zip(zones, DATA["managedZones"]): self.assertIsInstance(found, ManagedZone) self.assertEqual(found.zone_id, expected["id"]) self.assertEqual(found.name, expected["name"]) self.assertEqual(found.dns_name, expected["dnsName"]) self.assertIsNone(token) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req["method"], "GET") self.assertEqual(req["path"], "/%s" % PATH) self.assertEqual(req["query_params"], {"maxResults": 3, "pageToken": TOKEN}) def test_zone_explicit(self): from google.cloud.dns.zone import ManagedZone DESCRIPTION = "DESCRIPTION" DNS_NAME = "test.example.com" creds = _make_credentials() client = self._make_one(self.PROJECT, creds) zone = client.zone(self.ZONE_NAME, DNS_NAME, DESCRIPTION) self.assertIsInstance(zone, ManagedZone) self.assertEqual(zone.name, self.ZONE_NAME) self.assertEqual(zone.dns_name, DNS_NAME) self.assertEqual(zone.description, DESCRIPTION) self.assertIs(zone._client, client) def test_zone_w_dns_name_wo_description(self): from google.cloud.dns.zone import ManagedZone DNS_NAME = "test.example.com" creds = _make_credentials() client = self._make_one(self.PROJECT, creds) zone = client.zone(self.ZONE_NAME, DNS_NAME) self.assertIsInstance(zone, ManagedZone) self.assertEqual(zone.name, self.ZONE_NAME) self.assertEqual(zone.dns_name, DNS_NAME) self.assertEqual(zone.description, DNS_NAME) self.assertIs(zone._client, client) def test_zone_wo_dns_name(self): from google.cloud.dns.zone import ManagedZone creds = _make_credentials() client = self._make_one(self.PROJECT, creds) zone = client.zone(self.ZONE_NAME) self.assertIsInstance(zone, ManagedZone) self.assertEqual(zone.name, self.ZONE_NAME) self.assertIsNone(zone.dns_name) self.assertIsNone(zone.description) self.assertIs(zone._client, client) class _Connection(object): def __init__(self, *responses): self._responses = responses self._requested = [] def 
api_request(self, **kw):
        self._requested.append(kw)
        response, self._responses = self._responses[0], self._responses[1:]
        return response
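# --- Usage sketch (illustrative only) -----------------------------------------
# _Connection is a minimal stand-in for the real HTTP connection: it records
# every api_request() call in self._requested and replays the canned responses
# it was constructed with, one per call, so tests can assert on both the
# request that was issued and the value the client received. For example:
#
#   conn = _Connection({"quota": {"managedZones": "1"}})
#   payload = conn.api_request(method="GET", path="/projects/PROJECT")
#   assert conn._requested == [{"method": "GET", "path": "/projects/PROJECT"}]
#   assert payload == {"quota": {"managedZones": "1"}}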
'''
This is the main program which executes an experiment description by
communicating with the Node_Managers on the configured nodes.

It runs several threads:
* The main thread executes the experiment description.
* One thread provides an XMLRPC server through which the nodes send
  asynchronous feedback (mainly events) to the master program. These events
  are logged and can be used within the experiment description to build
  reactive experiments.

The command line parameters of the master program are defined by the option
parser in the __main__ block below.
'''
from Master_EventHandler import EventHandler
import optparse
import os
import sys
import time
import xml.dom.minidom as dom
from xml.dom import Node
import xmlrpclib
import subprocess
import datetime
import threading
from exp_description import *
import random
import Queue
import string
import socket
import shutil
import shlex
import re
import xml.etree.ElementTree as ET


############################################################
# pretty print function
############################################################
def indent(elem, level=0):
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indent(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i


def sendmail(subject, msg):
    # p = os.popen('mail andreas.dittrich@usi.ch -s "%s"' % subject, "w")
    p = os.popen('mail solished@usi.ch -s "%s"' % subject, "w")
    # p = os.popen('mail andreas.dittrich@usi.ch -s "%s" -c manu.john.panicker@usi.ch' % subject, "w")
    p.write("%s" % msg)
    p.close()


def error_log(msg):
    global error_log_lock
    global experiment_data
    '''
    This function shall be used to alert the experimenter that
    something important has gone wrong. An email can be sent in this case.
Thread safe ''' error_log_lock.acquire() # logfile f = "%s/ExperiMasterErrors.log" %experiment_data.get_current_experiment_dir() FILE = open(f, "a") FILE.write("%s:ERROR " % time.strftime("%y%m%d-%H%M%S",time.localtime())) FILE.write("%s " % msg) FILE.close() error_log_lock.release() # email sendmail("Error with experiment",msg) def wait_for_threads_or_timeout(thread_list, run_timeout): global kill_threads kill_threads = 0 done = False while done!=True: time.sleep(1) run_timeout = run_timeout -1 if run_timeout % 10==0: print "Waiting for run threads to finish (%d)" %(run_timeout) if run_timeout==0: done=True done_count = 0 for actor_thread in thread_list: if actor_thread.is_alive()==False: done_count = done_count+1 if done_count == len(thread_list): done=True kill_threads = 1 for actor_thread in thread_list: if actor_thread.is_alive()==True: actor_thread.join() def do_thread(queue, node, function, arguments): r = 0 try: node.lock_node() if function=="run_init": r=node.run_init(arguments[0], arguments[1]) elif function=="run_exit": r=node.run_exit() elif function=="experiment_exit": r=node.experiment_exit() elif function=="experiment_init": r=node.experiment_init(arguments[0],arguments[1],arguments[2],arguments[3]) elif function=="fail_start_drop_sd": r=node.fail_start_drop_sd() elif function=="fail_stop_drop_sd": r=node.fail_stop_drop_sd() elif function=="get_topology": r = node.get_topology(arguments[0]) elif function=="traffic_stop_all": r = node.traffic_stop_all() elif function=="get_olsr": r = node.get_olsr() else: print "Function not matched %s"%function r=-5 except socket.error: print "Socket error while trying to exec %s on node %s" %(function, str(node)) if node.type=="env": print "Just an evn node, trying to continue..." error_log("Socket error while trying to exec %s on node %s"%(function,str(node))) r = -3 else: print "Its an acting node, giving up experiment" error_log("Critical:Socket error while trying to exec %s on node ",node %(function)) r = -2 except IOError: print "Socket IO error, issue retry" r = -1 finally: #print "Function:%s" % function node.unlock_node() queue.put(r) def parallel_exec( nodes, function, *args1): ''' executes requests in parallel and waits for all to return returns a list of nodes, that gave a negative result ''' thread_list = [] queue_list = [] failed_list = [] for i in range(len(nodes)): queue_list.append(Queue.Queue()) th = threading.Thread( target=do_thread, kwargs={'queue':queue_list[i], 'node':nodes[i],'function':function, 'arguments': args1} ) thread_list.append(th) #time.sleep(0.15) th.daemon = True th.start() for i in range(len(nodes)): ret=queue_list[i].get() if ret==-1: print "returned -1" failed_list.append(nodes[i]) if ret==-2: print "returned -2" raise Exception if ret==-3: print "gave up due to connect to env node" return failed_list def wait_for_event_new(waiter_name, event_from, event_type, event_param, start_time=0, timeout=0): ''' @param event_from is a list of nodes (strings), when empty this means "any" or "don't care" @param event_param is a list of params (strings) ''' global eh global kill_threads global verbose global simulate if simulate==True: return True if start_time==0: start_time = datetime.datetime.now() if timeout!=0: end_time = datetime.datetime.now()+datetime.timedelta(seconds=timeout) wait_result = 1 eh.event_condition.acquire() if len(event_from)==0: from_list = list(["any"]) else: from_list = list(event_from) if len(event_param)==0: param_list = list(["any"]) else: param_list = list(event_param) dependencies = [] 
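    # Build one dependency per (source, parameter) combination; the loop below
    # scans eh.event_log and drops a dependency as soon as an event matches its
    # source node, type and parameter ("any" acts as a wildcard) and arrived
    # after start_time. Waiting ends when no dependencies remain, or when the
    # timeout expires / kill_threads is set, in which case False is returned.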
for e_from in from_list: for e_param in param_list: dependencies.append( {'from':e_from, 'param':e_param, 'type':event_type} ) print "%s: Waiting for event %s:" %(waiter_name,event_type) while wait_result==1: eh.event_condition.wait(5) #if verbose: #print "%s: Still waiting for %d events" %(waiter_name, len( dependencies)) for (timestamp_master, node,timestamp,type,param) in eh.event_log: removables = [] for depend in dependencies: src_match=0 type_match=0 param_match=0 if depend['from']=="any" or depend['from']==node: src_match=1 if event_type=="any" or event_type==type: type_match=1 if depend['param']=="any" or depend['param']==param or depend['param'].replace("_","")==param: #this is a quirk for the grid10x10 param_match=1 #print "matches: %d %d %d(%s)" %(src_match, type_match, param_match,type) if src_match==1 and type_match==1 and param_match==1: delta=timestamp_master - start_time delay=delta.days*86400*1000+delta.seconds*1000+(delta.microseconds/1000) if delay > 0: if verbose: print "%s: Found new event %s from %s with param %s" %(waiter_name,type,node,param) #print "%s: remaing dependencies: " %waiter_name,dependencies # yed this is bad, but functionally it works dependencies.remove(depend) if len(dependencies)==0: wait_result = 0 break if timeout!=0: if datetime.datetime.now() > end_time: wait_result = -1 if kill_threads == 1: wait_result = -1 eh.event_condition.release() if wait_result==-1: return False else: return True def wait_wrapper(waiter_name, parameters, exp_wait_marker_time): param_dependency="any" event_dependency="any" instance_dependency="all" from_dependency="any" timeout=0 for (name, exp_param) in parameters.items(): if name=="param_dependency": if exp_param.type=="fix": param_dependency=exp_param.data else: param_dependency = exp.get_current_factor_value_by_id(exp_param.data) if name=="event_dependency": if exp_param.type=="fix": event_dependency=exp_param.data else: event_dependency = exp.get_current_factor_value_by_id(exp_param.data) if name=="from_dependency": if exp_param.type=="fix": from_dependency=exp_param.data else: actor_dependency = exp.get_current_factor_value_by_id(exp_param.data) if name=="timeout": if exp_param.type=="fix": timeout=int(exp_param.data) else: timeout = exp.get_current_factor_value_by_id(exp_param.data) # check which node map = exp.get_actor_node_map() from_nodes = [] param_nodes = [] #create a list of all nodes (maybe just one) if from_dependency!="any": from_abstract_nodes = map.get_abstract_nodes(from_dependency["actor"],from_dependency["instance"]) for an in from_abstract_nodes: from_nodes.append(nodeContainer.abstract_node_to_real_node(an).name) if param_dependency!="any": param_abstract_nodes = map.get_abstract_nodes(param_dependency["actor"],param_dependency["instance"]) for an in param_abstract_nodes: param_nodes.append(nodeContainer.abstract_node_to_real_node(an).name) wait_for_event_new(waiter_name,from_nodes,event_dependency, param_nodes, exp_wait_marker_time, timeout) ########################################################################################### ######## exp_thread ######################################################## ########################################################################################### class exp_actor_thread(threading.Thread): ''' This class represents the concrete threads. 
Each instance It utilises the actions that are defined by ExperiMaster This class is also used for the env and the fault threads ''' #exp_wait_marker_time #static exp_description exp_description = None def __init__(self, type, exp_description, id, instance_id, exp_action_list, node): ''' Initialises the thread @param type "sd" or "fault" @param id the abstract thread identity, given from the xml description @param instance_id is the id of the instance of an abstract thread, as given in the xml @param node ''' self.exp_type = type self.exp_id = id self.exp_instance_id = instance_id self.exp_action_list = exp_action_list self.exp_node = node self.exp_description = exp_description self.exp_wait_marker_time = 0 threading.Thread.__init__(self, name="%s.%s" %(id,instance_id)) def exp_execute_action(self, action): global verbose global eh global simulate s = "Thread %s.%s" %(self.exp_id,self.exp_instance_id) if action.action=="sd_init": if verbose: print "%s: sd_init" % s self.exp_node.lock_node() self.exp_node.SD_init() #change self.exp_node.unlock_node() if action.action=="sd_exit": if verbose: print "%s: sd_exit" % s self.exp_node.lock_node() self.exp_node.SD_exit() self.exp_node.unlock_node() if action.action=="sd_start_search": if verbose: print "%s: sd_start_search" % s self.exp_node.lock_node() self.exp_node.SD_start_search() #change self.exp_node.unlock_node() if action.action=="sd_stop_search": if verbose: print "%s: sd_stop_search" % s self.exp_node.lock_node() self.exp_node.SD_stop_search() #change self.exp_node.unlock_node() if action.action=="sd_publish": if verbose: print "%s: sd_publish" % s self.exp_node.lock_node() self.exp_node.SD_publish() #change self.exp_node.unlock_node() if action.action=="sd_unpublish": if verbose: print "%s: sd_unpublish" % s self.exp_node.lock_node() self.exp_node.SD_unpublish() #change self.exp_node.unlock_node() if action.action=="wait_for_event": if verbose: print "%s: wait_for_event" % s wait_wrapper(self.name, action.parameter_list, self.exp_wait_marker_time) self.exp_wait_marker_time = 0 if action.action=="wait_marker": if verbose: print "%s: wait_marker" % s self.exp_wait_marker_time = datetime.datetime.now() if action.action=="wait_time": if verbose: print "%s: wait_time" %s if len(action.parameter_list)>0: if action.parameter_list["value"].type=="ref": print "ALERT! NOT IMPLEMENTED YET!" 
else: if simulate==True: return time.sleep(float(action.parameter_list["value"].data)) if action.action=="fault_drop_sd_start": if verbose: print "%s: fault_drop_sd_start" %s self.exp_node.lock_node() self.exp_node.fail_start_drop_sd() self.exp_node.unlock_node() if action.action=="fault_drop_sd_stop": if verbose: print "%s: fault_drop_sd_stop" %s self.exp_node.lock_node() self.exp_node.fail_stop_drop_sd() self.exp_node.unlock_node() if action.action=="event_flag": if verbose: print "%s: event_flag" flag =action.parameter_list["value"].data eh.inject_event("local","","%s"%flag,"") if action.action=="get_olsr": print "%s: getting_olsr" %s self.exp_node.lock_node() self.exp_node.get_olsr() self.exp_node.unlock_node() def run(self): ''' This starts the execution of the actions ''' global kill_threads for action in self.exp_action_list: if kill_threads!=1: self.exp_execute_action(action) #change print "" ; else: print "Kill_threads flag is active!",kill_threads def do_traffic_thread(queue, start_stop, node, target, bw): r = 0 try: node.lock_node() if start_stop=="start": r = node.traffic_start(target,bw) if start_stop=="stop": r = node.traffic_stop_all() except socket.error: print "Socket error while trying to exec %s on node %s" %(start_stop, str(node)) if node.type=="env": print "Just an evn node, trying to continue..." error_log("Socket error while trying to exec %s on node %s"%(start_stop,str(node))) r = -3 else: print "Its an acting node, giving up experiment" error_log("Critical:Socket error while trying to exec %s on node ",node %(start_stop)) r = -2 except IOError: print "Socket IO error, issue retry" r = -1 finally: #print "Function:%s" % function node.unlock_node() queue.put(r) class exp_env_thread(threading.Thread): ''' This thread represents the changes on the environment ''' factor_value_run = None; def __init__(self, exp): self.exp = exp self.exp_wait_marker_time = 0 self.rand = random.Random() self.load_pairs = [] threading.Thread.__init__(self, name="env") def parallel_traffic( self, start_stop, RPCnodelist, target_list, bw): ''' executes requests in parallel and waits for all to return returns a list of nodes, that gave a negative result ''' thread_list = [] queue_list = [] failed_list = [] if len(RPCnodelist)!=len(target_list): print "error, Both lists need same length" return RPCnodelist for i in range(len(RPCnodelist)): queue_list.append(Queue.Queue()) th = threading.Thread( target=do_traffic_thread, kwargs={'queue':queue_list[i], 'node':RPCnodelist[i],'target':target_list[i],'start_stop':start_stop,'bw':bw} ) thread_list.append(th) #time.sleep(0.15) th.daemon = True th.start() for i in range(len(RPCnodelist)): ret=queue_list[i].get() if ret==-1: print "traffic returned -1" failed_list.append(RPCnodelist[i]) if ret==-2: print "traffic returned -2" raise Exception if ret==-3: print "traffic gave up due to connect to env node" return failed_list def traffic(self, start_stop, exp_action=0): global nodeContainer #TODO choice if len(nodeContainer.all_env())<2: return random_seed = None if (start_stop=="start"): bw = 0 random_pairs = 0 random_switch_amount = 0 for (name, exp_param) in exp_action.parameter_list.items(): if name=="random_pairs": if exp_param.type=="fix": random_pairs=exp_param.data else: random_pairs = exp.get_current_factor_value_by_id(exp_param.data) if name=="random_seed": if exp_param.type=="fix": random_seed=exp_param.data else: random_seed = exp.get_current_factor_value_by_id(exp_param.data) if name=="random_switch_seed": if exp_param.type=="fix": 
random_switch_seed=exp_param.data else: random_switch_seed = exp.get_current_factor_value_by_id(exp_param.data) if name=="random_switch_amount": if exp_param.type=="fix": random_switch_amount=int(exp_param.data) else: random_switch_amount = exp.get_current_factor_value_by_id(exp_param.data) if name=="bw": if exp_param.type=="fix": bw=exp_param.data else: bw = exp.get_current_factor_value_by_id(exp_param.data) # get pseudo randomly assigned pairs print "getting %d pairs, bw=%d" % (random_pairs,bw) if random_pairs==0: return if random_seed==None: self.load_pairs = nodeContainer.get_random_env_pairs(random_pairs) else: self.load_pairs = nodeContainer.get_random_env_pairs(random_pairs, random_seed) if random_switch_amount>0: if random_switch_seed!=None: self.rand.seed(random_switch_seed) for i in range(random_switch_amount): # Choose random pair from current loadnodes currpair = self.rand.randrange(0,len(self.load_pairs)) # Remove chosen pair from current loadnodes del self.load_pairs[currpair] # Now pick a new random pair env_nodes = nodeContainer.all_env() #env_nodes = nodeContainer.all_env() currlist = list(range(len(env_nodes))) newpartner1 = currlist[self.rand.randrange(0, len(currlist))] currlist.remove(newpartner1) newpartner2 = currlist[self.rand.randrange(0, len(currlist))] newpair = [env_nodes[newpartner1],env_nodes [newpartner2]] self.load_pairs.append(newpair) print "ENV: Replaced iperf pair %s with %s." % ( currpair, newpair) # for each pair, start the clients tmp_nodes = [] tmp_targets = [] for pair in self.load_pairs: tmp_nodes.append(pair[0]) tmp_targets.append(pair[1].name) tmp_nodes.append(pair[1]) tmp_targets.append(pair[0].name) self.parallel_traffic("start",tmp_nodes,tmp_targets,bw) # node1=pair[0] # node2=pair[1] # # node1.lock_node() # node1.traffic_start(pair[1].name, bw) # node1.unlock_node() # node2.lock_node() # node2.traffic_start(pair[0].name, bw) # node2.unlock_node() else: tmp_nodes = [] tmp_targets = [] for pair in self.load_pairs: tmp_nodes.append(pair[0]) tmp_targets.append(pair[1].name) tmp_nodes.append(pair[1]) tmp_targets.append(pair[0].name) # node0=pair[0] # node1=pair[1] # node0.lock_node() # node0.traffic_stop_all() # node0.unlock_node() # node1.lock_node() # node1.traffic_stop_all() # node1.unlock_node() #self.parallel_traffic("stop",tmp_nodes,tmp_targets,0) parallel_exec(tmp_nodes, "traffic_stop_all") def drop_sd_start(self): global nodeContainer all = nodeContainer.all() parallel_exec(all,"fail_start_drop_sd") def drop_sd_stop(self): global nodeContainer all = nodeContainer.all() parallel_exec(all,"fail_stop_drop_sd") def get_olsr(self): global nodeContainer all = nodeContainer.all() parallel_exec(all,"get_olsr") def exp_execute_action(self, exp_action): global verbose global simulate s = "ENV_Thread " if exp_action.action=="wait_for_event": if verbose: print "%s: wait_for_event" % s wait_wrapper( self.name, exp_action.parameter_list, self.exp_wait_marker_time) self.exp_wait_marker_time = 0 if exp_action.action=="wait_marker": if verbose: print "%s: wait_marker" % s self.exp_wait_marker_time = datetime.datetime.now() if exp_action.action=="env_traffic_start": print "%s: traffic_start" %s self.traffic("start", exp_action) if exp_action.action=="env_traffic_stop": self.traffic("stop") if exp_action.action=="wait_time": if verbose: print "%s: wait_time" %s if len(exp_action.parameter_list)>0: if exp_action.parameter_list["value"].type=="ref": print "ALERT! NOT IMPLEMENTED YET!" 
else: if simulate==True: return time.sleep(float(exp_action.parameter_list["value"].data)) if exp_action.action=="env_start_drop_sd": if verbose: print "%s: env_start_drop_sd" %s self.drop_sd_start() if exp_action.action=="env_stop_drop_sd": if verbose: print "%s: env_stop_drop_sd" %s self.drop_sd_stop() if exp_action.action=="event_flag": if verbose: print "%s: event_flag" %s flag =exp_action.parameter_list["value"].data eh.inject_event("local","","%s"%flag,"") if exp_action.action=="get_olsr": print "%s: getting_olsr" %s self.get_olsr() def run(self): ''' ''' global kill_threads for action in self.exp.env_process.action_list: if kill_threads!=1: self.exp_execute_action(action) def _create_actor_threads(exp, map): global nodeContainer print "Need to create %d threads " % map.get_num_of_total_instances() actor_list = [] for i in range(map.get_num_of_actors()): aid = map.get_actor_id(i) for j in range(map.get_num_of_instances(aid)): iid = map.get_instance_id(aid, j) abstract_node = map.get_abstract_node(aid,iid) print "Creating thread instance %s for actor id %s on abstract node %s" %(iid,aid,abstract_node) node = nodeContainer.abstract_node_to_real_node(abstract_node) #exp.node_processes.summary() actor = exp.node_processes.get_actor(aid) newactor = exp_actor_thread("sd", exp, aid, iid, actor.sd_action_list, node) actor_list.append(newactor) if actor.has_fault_actions()==True: newactor = exp_actor_thread("fault", exp, aid, iid, actor.fault_action_list, node) actor_list.append(newactor) return actor_list def build_full_run_matrix(full_run_matrix,run_matrix_done): #print full_run_matrix; found_flag =False; #[A]Substract the two matrices and obtain a new_run_matrix #The new_run_matrix contains ['N','N','N','N'] (NOP) in the places of the runs already done before while normal values (values of the full_matrix which is not done yet) #in the places that needs to be done this is done to preserve the same indexing system of the run_number that will be #used later. 
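    # Worked example (values are illustrative): with
    #   full_run_matrix = [[1,1,1,1], [1,1,1,2], [1,1,2,1]]
    #   run_matrix_done = [[1,1,1,2]]
    # the function returns
    #   [[1,1,1,1], ['N','N','N','N'], [1,1,2,1]]
    # i.e. runs that were already completed are replaced by the NOP marker so
    # every remaining run keeps its original index in the full matrix.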
#print step for debugging if verbose: print "#########Full Matrix##################" for run_item_full_run_matrix in full_run_matrix: print run_item_full_run_matrix; for run_item_full_run_matrix in full_run_matrix: for run_item_run_matrix_done in run_matrix_done: if (run_item_full_run_matrix == run_item_run_matrix_done): #Found :Already done before found_flag =True; new_run_matrix.append(NOP); break; #Finished searching if (found_flag == False): new_run_matrix.append(run_item_full_run_matrix); else: # True -> do nothing found_flag = False; #print step for debugging if verbose: print "#########To be done##################" for run_item_new_run_matrix in new_run_matrix: print run_item_new_run_matrix; return new_run_matrix; def sort_xml_file(): tree = ET.parse('run_file.xml'); root = tree.getroot(); #Fetch all the nodes and the runs_nodes =[] ; runs_nodes = root.findall('run_done'); #sort them runs_nodes.sort(key=lambda run_node: int(run_node.get('run_matrix_value'))); #remove what was in the xml file f = open('run_file.xml', 'r+') f.truncate(); f.close(); #Write to the xml file run_file_f = open("run_file.xml","w+"); #Open the file for write and overwrite s= '<run_list>\n'; run_file_f.write(s); s= '</run_list>\n'; run_file_f.write(s); run_file_f.close(); tree = ET.parse('run_file.xml'); root = tree.getroot(); for rundone in runs_nodes: run_done=ET.Element("run_done",run_matrix_value = str(rundone.get('run_matrix_value')),value=str(rundone.get('value'))); root.append(run_done); #tree.write('run_file.xml'); for factor in rundone.findall('factor'): run_factor=ET.Element("factor",id=str(factor.get('id')),index = str(factor.get('index')),size=str(factor.get('size')),value=str(factor.get('value'))); run_done.append(run_factor); #tree.write('run_file.xml'); tree.write('run_file.xml'); indent(root); tree.write('run_file.xml'); def Delete_last_runs(delete_last_runs): ################################################################## #Delete routine :delete the last three runs ################################################################## #check if delete option is specified if(delete_last_runs == True): print "Deleting previous runs" #[1]remove the last three runs from the xml. tree = ET.parse('run_file.xml'); root = tree.getroot(); max_run =0; curr_run =0; #instantiation to use only the methods exp_run = experiment_description(options.experiment_description); #[A] find the max. run for run_done in root.findall('run_done'): curr_run = int(run_done.get('value')) if curr_run > max_run: max_run =curr_run ; ##print step for debugging if verbose: print "Max. 
run found"; print max_run if(max_run == 0): print "No items to delete" #[B] find & delete runs above max_run - 3 from the xml file #[I]case the total number of runs less that 4 elif(max_run < 4): for run_done in root.findall('run_done'): run_time_value = int(run_done.get('value')) run_matrix_value = int(run_done.get('run_matrix_value')); #delete from the xml file root.remove(run_done); #delete from the Masterdir Master_dir_to_be_deleted = "%s/%s" % (experiment_data.get_current_experiment_dir(),exp_run.factor_level_matrix.make_run_identifier(exp_run.run_matrix[run_matrix_value])); #delete from the node dir Node_dir_to_be_deleted = "%s/%s/%s/%s" % (experiment_root_nodes, results_dir_name_nodes,exp_run.experiment_name,exp_run.factor_level_matrix.make_run_identifier(exp_run.run_matrix[run_matrix_value])) ; #shutil.rmtree(Node_dir_to_be_deleted); #shutil.rmtree(Master_dir_to_be_deleted); print Node_dir_to_be_deleted; print Master_dir_to_be_deleted; else: for run_done in root.findall('run_done'): curr_run = int(run_done.get('value')) run_matrix_value = int(run_done.get('run_matrix_value')); if(curr_run > max_run -3): #delete this run the xml file root.remove(run_done); Master_dir_to_be_deleted = "%s/%s" % (experiment_data.get_current_experiment_dir(),exp_run.factor_level_matrix.make_run_identifier(exp_run.run_matrix[run_matrix_value])) Node_dir_to_be_deleted = "%s/%s/%s/%s" % (experiment_root_nodes, results_dir_name_nodes,exp_run.experiment_name,exp_run.factor_level_matrix.make_run_identifier(exp_run.run_matrix[run_matrix_value])) ; print Master_dir_to_be_deleted; print Node_dir_to_be_deleted; #delete Master and Nodes dir #shutil.rmtree(Master_dir_to_be_deleted); #shutil.rmtree(Node_dir_to_be_deleted); #i =i +1; tree.write('run_file.xml'); else: print "Run without deleting previous runs"; def adjust_value_attribute(): tree = ET.parse('run_file.xml'); root = tree.getroot(); #Open the xml file and adjust the value attribute new_actual_run_number =0; for run_done in root.findall('run_done'): run_done.set('value',str(new_actual_run_number)); new_actual_run_number = new_actual_run_number+1; tree.write('run_file.xml'); def build_run_matrix_done(run_matrix_done): old_run_item =None ; old_run_item =[]; tree = ET.parse('run_file.xml'); root = tree.getroot(); runs_done = root.findall('run_done'); for run_done in runs_done : for factor in run_done.findall('factor'): factor_id =factor.get('id'); factor_index =factor.get('index'); factor_value =int(factor.get('value')); old_run_item.append(factor_value); #for i in range(0,len(factor_list_new)): # new_run_item.insert(i,factor_value); #[c]add to the new run matrix an item indicates the run done. run_matrix_done.append(old_run_item); #print old_run_item; old_run_item =[]; if verbose: print "###########Done runs######################" for run_item_run_matrix_done in run_matrix_done: print run_item_run_matrix_done; return run_matrix_done; def run_experiment_new(exp,new_run_matrix): global nodeContainer global eh global kill_threads global forward print "\n======= Starting experiment runs ======\n" NOP_RUN =['N','N','N','N']; actual_run_number =0; done_runs =0; factor_value_run =[]; runs_positions_new_run_matrix =[]; tree = ET.parse('run_file.xml'); root = tree.getroot(); #Process the xml file to adjust it with respect to time (actual_run_number) #compute how many runs already done and to add the value of the run based on the latest run. 
for run_number in range(0, len(new_run_matrix)): if (new_run_matrix[run_number] == NOP_RUN): done_runs = done_runs+1; runs_positions_new_run_matrix.append(run_number); #compute how many runs already done and to add the value of the run based on the latest run. for run_number in range(0, len(new_run_matrix)): if (new_run_matrix[run_number] == NOP_RUN): actual_run_number = actual_run_number+1; #print runs_positions_new_run_matrix; if verbose: for i in range(0,len(runs_positions_new_run_matrix)): print runs_positions_new_run_matrix [i]; #Adjust the value of the run_matrix_value i =0; for run_done in root.findall('run_done'): run_done.set('run_matrix_value',str(runs_positions_new_run_matrix[i])) i = i+1; tree.write('run_file.xml'); #loop over run sequence, resuming automatically with forward variable for run_number in range(0, len(new_run_matrix)): if (new_run_matrix[run_number] == NOP_RUN): if verbose: print "Run number %d already_done" %run_number; run_done.set('run_matrix_value',str(run_number)); if verbose: print run_done.get('run_matrix_value'); tree.write('run_file.xml'); #loop over run sequence, resuming automatically with forward variable for run_number in range(0, len(new_run_matrix)): if (new_run_matrix[run_number] == NOP_RUN): if verbose: print "Run number %d already_done" %run_number; else: ################################################################### #update the xml file which contains information about each run done ################################################################### #[1]From run number get factor combination #Create a new experiment_description just to use methods exp_run = experiment_description(options.experiment_description); #Set run number exp_run.set_run_number(run_number); #get the factor combination : for factor in exp_run.factor_list: factor_value_run.append(exp_run.get_current_factor_level_by_id(factor.id)); if verbose: print factor_value_run ; #[2]Write this combination to the xml file #Build the xml file #build_xml_file(exp_run); #Add elements to factors to it where elements represent runs which has been done #Add a new element to the root represents the new run #Actually run number gives an indication about the order of time at which the run have been done. run_done=ET.Element("run_done",value=str(actual_run_number),run_matrix_value=str(run_number)); root.append(run_done); #Add the factor characterization for each run. 
i =0; for factor in exp_run.factor_list: i =i+1; run_factor=ET.Element("factor",id=str(factor.id),size=str(factor.levels.get_len()),index=str(i),value=str(exp_run.get_current_factor_level_by_id(factor.id))); run_done.append(run_factor); indent(root); tree.write('run_file.xml'); actual_run_number = actual_run_number+1; ################################################################### #Done update the xml ################################################################### ###### SETUP LEVELS OF ALL FACTORS FOR THIS RUN ######## exp.set_run_number(run_number) nodeContainer.update_actor_nodes() run_definition = exp.get_run_identifier() print "run number %d starting with factor combination %s" %(run_number,run_definition) # this used to allow continuation, not needed anymore # if forward!=0: # if run_number<forward or forward==exp.get_run_count()-1: # continue # clear log so the list won't get too long and the list object is blocked so long, # that remote clients time-out eh.clear_log() kill_threads = 0 try: ###### PREPARE NETWORK FOR RUN ################################ parallel_exec(nodeContainer.all(),"fail_start_drop_sd" ) try: #measure clock offsets measure_time_diff_via_communication_channel() time.sleep(1) except: pass finally: parallel_exec(nodeContainer.all(), "fail_stop_drop_sd" ) ################################################################# actor_nodes = nodeContainer.all_actors() env_nodes = nodeContainer.all_env() # init all try: parallel_run_init(actor_nodes,exp.get_current_run_name(), "actor") except: error_log("could not init run on at least one of the acting nodes, aborting") parallel_run_init(env_nodes, exp.get_current_run_name(), "env") map = exp.get_actor_node_map() print "Creating Threads" actor_list = _create_actor_threads(exp, map) env_thread = exp_env_thread( exp) actor_list.append(env_thread) print "Running %d threads" %len(actor_list) # start threads for t in actor_list: t.start() wait_for_threads_or_timeout(actor_list,exp.max_run_time) for t in actor_list: t.join() print "all threads finished" parallel_exec(actor_nodes, "run_exit") parallel_exec(env_nodes, "run_exit") except: raise actor_list =[] print "Leaving run function" def parallel_experiment_init(name, experiment_root, nodes, cap_interface, exp_protocol): print "starting experiment %s" % name failed=parallel_exec(nodes, "experiment_init", experiment_root, name, cap_interface, exp_protocol) if len(failed)!=0: print "First try had fails", failed parallel_exec(failed, "experiment_exit", experiment_root, name, cap_interface, exp_protocol) failed = parallel_exec(failed, "experiment_init", experiment_root, name, cap_interface, exp_protocol) if len(failed)!=0: print "Error, cannot init experiment on nodes ", failed return -1 return 0 def parallel_run_init(nodes, name, role): ''' Initialises the nodes, blocks until all are done or returns an error when not Before it retries automatically once ''' try: failed=parallel_exec(nodes,"run_init",name, role) if len(failed)!=0: parallel_exec(failed, "run_exit") failed = parallel_exec(failed, "run_init",name,role) if len(failed)!=0: print "Error, cannot init run on nodes ", failed return -1 return 0 except Exception, e: raise class exp_experiment_data: ''' This class contains all the data for an experiment (not just the run) and provides management functions relating a whole experiment ''' def __init__(self, experiment_name, exp, xmldescription): self.experiment_dir = "MasterResults/%s" % (experiment_name) if not os.path.exists(self.experiment_dir): 
os.makedirs(self.experiment_dir) self.exp = exp self.xmldescription_file = xmldescription def set_current_experiment_dir(self, dir): self.experiment_dir=dir def get_current_experiment_dir(self): return self.experiment_dir def copy_files(self): print "Copying the used script files into the MasterResults directory" try: shutil.copy2(os.path.realpath(__file__),"%s/ExperiMaster.py" %self.experiment_dir) shutil.copy2("%s/exp_description.py" % os.path.expanduser("~"), "%s/exp_description.py" % self.experiment_dir) shutil.copy2("%s/Master_EventHandler.py" % os.path.expanduser("~"), "%s/Master_EventHandler.py" % self.experiment_dir) shutil.copy2("%s"% self.xmldescription_file , "%s/" % (self.experiment_dir)) except Exception, e: print "There was a problem copying the experiment files to the Results folder", e class RPCNode(xmlrpclib.ServerProxy): ''' This class extends the xmlrpclib.ServerProxy class with a couple of properties and methods that are used for the experimentation system. ''' #ip = "" #name = "" #node = None #state = 0 def __init__(self, node_name, node_ip, abstract_id): self.name = node_name self.ip = node_ip self.abstract_id = abstract_id if abstract_id=="": self.type = "env" else: self.type = "actor" self.action_lock = threading.Lock() xmlrpclib.ServerProxy.__init__(self,"http://%s:8000" % node_name) def lock_node(self): self.action_lock.acquire() #print "lock %s acquired" %self.name def unlock_node(self): self.action_lock.release() #print "lock %s released" %self.name pass class SimuNode(): ''' This is a fake node, that does nothing, it's a simulation replacement for RPCNode ''' def __init__(self, name, node_ip, abstract_id): self.name = name self.ip = node_ip self.abstract_id = abstract_id if abstract_id=="": self.type = "env" else: self.type = "actor" self.action_lock = threading.Lock() pass def lock_node(self): self.action_lock.acquire() def unlock_node(self): self.action_lock.release() def experiment_init (self, root_dir, experiment_name, capture_interface, sdp_type): pass def experiment_exit(self): pass def run_init(self, name, role): pass def run_exit(self): pass def get_topology(self, partners): pass def SD_init(self): pass def SD_exit(self): pass def SD_publish(self): pass def SD_unpublish(self): pass def SD_start_search(self): pass def SD_stop_search(self): pass def capture_start(self): pass def capture_stop(self): pass def traffic_start(self, servernode, bandwidth): pass def traffic_stop_all(self): pass def fail_start_drop_sd(self): pass def fail_stop_drop_sd(self): pass class NodeContainer: """ This Container shall contain all the nodes that belong to an experiment. They can have a mapping to abstract nodes, when they take a role in service discovery, or they are just environment nodes that can be used to create traffic or other possible influences on the service discovery process. For each node, the hostname and the ip should be specified. """ def __init__(self, specs_node_list): global simulate self.rand = random.Random() for node in list(specs_node_list): found = 0 for check in specs_node_list: if node==check: found=found + 1 if found>1: print "Double entry %s in node file" %node exit() self.node_list = [] for node_name in specs_node_list: if simulate==False: self.node_list.append(RPCNode(str(node_name['real_id']), str(node_name['real_ip']), str(node_name['abstract_id']) )) else: self.node_list.append(SimuNode(node_name['real_id'], node_name['real_ip'], node_name['abstract_id'] )) def PrintStatus(self): print "NC: %d nodes in file." 
% (len(self.node_list)) def get_random_env_pairs(self, numberofpairs, random_seed=0): if random_seed!=0: self.rand.seed(random_seed) if len(self.node_list) >= 2: partners=[] for partner in range(numberofpairs): env_nodes = nodeContainer.all_env() #env_nodes = nodeContainer.all_env() currlist = list(range(len(env_nodes))) partner1 = currlist[self.rand.randrange(0, len(currlist))] currlist.remove(partner1) partner2 = currlist[self.rand.randrange(0, len(currlist))] partners.append( [env_nodes[partner1],env_nodes[partner2]]) return partners else: print "NC: Not enough nodes to pick from." return None def all(self): ''' returns the RPC node objects ''' return self.node_list def all_env(self): ''' returns all the RPC objects, that are env ''' result = [] for n in self.node_list: if n.type=="env": result.append(n) return result def all_actors(self): ''' returns all the RPC objects, that are currently (this run) acting ''' result = [] for n in self.node_list: if n.type=="actor": result.append(n) return result def all_can_be_actors(self): ''' returns all the nodes, that can be actor in the experiment (all that have a mapping to an abstract node) ''' result = [] for n in self.node_list: if n.abstract_id!="": result.append(n) return result def abstract_node_to_real_node(self, abstract_node): ''' this mapping should come from a config file ''' for node in self.node_list: if node.abstract_id == abstract_node: return node def update_actor_nodes(self): ''' This function updates the node types according to the current (run) to allow to other functions to return the correct values. ''' map=exp.get_actor_node_map() #set all nodes taking abstract roles to type #depending on whether they are in the current mapping for node in self.node_list: abstract=node.abstract_id if abstract!="": if map.contains_abstract_node(abstract): node.type = "actor" else: node.type = "env" def summary(self): print "NodeContainer" for node in self.node_list: print "\t node %s is abstract node %s, (IP:%s) and type %s" %(node.name,node.abstract_id,node.ip,node.type) def measure_time_diff_via_communication_channel(): ''' This is the function that creates a listing of of the clocking offsets between the master clock and the nodes ''' global nodeContainer global experiment_data global exp os.makedirs("%s/%s" %(experiment_data.get_current_experiment_dir(),exp.get_current_run_name() )) nodes = nodeContainer.all_actors() if verbose: print nodes for node in nodes: cmd = "ping -T tsonly -c 1 %s" % (node.name) ret = 0 output = open("%s/%s/ping_t_%s" %(experiment_data.get_current_experiment_dir(),exp.get_current_run_name(),node.name),"w") args = shlex.split(cmd) if verbose: print "executing ", cmd p = subprocess.Popen(args, stdout=output) p.communicate() output.close() ############################################################ #build_xml_file function ############################################################ ############################################################ def build_xml_file(experiment_xml_file): ##############XML Layout############################## #Write data to the xml file (run_exp_encode) #run_exp_encode :This file contains the runs that have been done with the factors combinations #<!-- Number of factor = 4 --> #<runs_list> # <!-- Run number 0 --> # <run value ="0"> # <factor id="" index="" value=""\> # <factor id="" index="" value=""\> # <factor id="" index="" value=""\> # <factor id="" index="" value=""\> # </run> # # <!--Run number 1 --> # . # . # . 
# #</run_list> ######################################################### xml_run_file='run_file.xml';#create a new file only if it doesn't exists if(os.path.exists(xml_run_file) == False): #create a new file only if it doesn't exists print "creating a new run_file.xml" #Create a new experiment_description just to use its methods exp_run = experiment_description(experiment_xml_file); Exp_Enc_f = open("run_file.xml","w+"); #Open the file for write and overwrite s='<!-- Number of factor = %d -->\n'%len(exp_run.factor_list); Exp_Enc_f.write(s); s= '<run_list>\n'; Exp_Enc_f.write(s); s= '</run_list>\n'; Exp_Enc_f.write(s); Exp_Enc_f.close(); else: #The file already exists print "The file run_file.xml already exists" if __name__ == '__main__': global eh global kill_threads global nodes_list global verbose global nodeContainer global error_log_lock global experiment_data global forward global simulate verbose = True #change eh_verbose = False simulate = False error_log_lock = threading.Lock() kill_threads = 0 # specifics capture_interface = "bmf0" experiment_root = os.path.expanduser("~") experiment_root_nodes = "%s/testbed" % experiment_root # Should be synchronized with Node_ManagerRPC.py on nodes results_dir_name_nodes = "results" forward = 0 forwardelement = "run_0_0_0_0"; factor_level_matrix = None ; factor_weight_matrix = None ; fact_value_run_matrix_Master= None ; old_run_item = None; new_run_item = None; run_matrix_done = None; full_run_matrix = None; new_run_matrix = None; NOP = None; delete_last_runs =False; factor_level_matrix = [] ; factor_weight_matrix = [] ; fact_value_run_matrix_Master = [] ; fact_value_run_matrix_Nodes = [] ; max_current_run_count_Master = 0 ; max_current_run_count_Nodes = 0 ; current_run_count_Master = 0 ; current_run_count_Nodes = 0 ; full_run_matrix =[]; new_run_item =[]; run_matrix_done =[]; new_run_matrix =[]; NOP =['N','N','N','N']; miss_match_flag = False; # Option parser ############################################################ parser = optparse.OptionParser( description='Run Service Discovery Experiments.', prog=os.path.basename(sys.argv[0]), version='%s 0.0.4' % os.path.basename(sys.argv[0]), ) parser.add_option('-f', metavar='experiment_description', dest='experiment_description', help='file with description') parser.add_option('-v', action='store_true', dest='verbose', help='give this option to produce more output') parser.add_option('--ve', action='store_true', dest='eh_verbose', help='activate verbosity on the event handler (EH)') # parser.add_option('--forward', metavar='forward', dest='forward', help='lets the run be forwarded (to continue at a previous state)') parser.add_option('--simulate', action='store_true', dest='simulate', help='simulates a run without calling RPCs and without waiting') parser.add_option('--del', action='store_true',default=False, dest='delete_last_runs', help='Start running without deleting the latest three runs'); options, arguments = parser.parse_args() if options.experiment_description==None: print 'No xml experiment description file given.' 
exit() if options.simulate==None: simulate=False else: simulate=True if options.verbose==None: verbose=False else: verbose=True if options.eh_verbose==None: eh_verbose=False else: eh_verbose=True #Build the xml file which contains the runs that have been already done build_xml_file(options.experiment_description); #instantiation to use only the methods exp_run = experiment_description(options.experiment_description); experiment_name = "%s" % exp_run.experiment_name experiment_data = exp_experiment_data(experiment_name, exp_run, options.experiment_description) # forward option has been disabled as resume is done automatically now # if options.forward!=None: # forward = int(options.forward) # print "Forwarding the experiment to run %d" % forward ############################################################ nodeContainer = None #Adjust the value attribute adjust_value_attribute(); if(options.delete_last_runs == True): #Delete last runs Delete_last_runs(options.delete_last_runs); print "Deleting options is enabled"; else: print "Deleting option is disabled" #Obtain the previous runs tree = ET.parse('run_file.xml'); root = tree.getroot(); sort_xml_file(); run_matrix_done = build_run_matrix_done(run_matrix_done); #Obtain the full_run_matrix exp_with_new_config = experiment_description(options.experiment_description) full_run_matrix = exp_with_new_config.get_run_matrix(); new_run_matrix = build_full_run_matrix(full_run_matrix,run_matrix_done); if (all(x==new_run_matrix[0] for x in new_run_matrix) == True): if(new_run_matrix[0] == NOP): print "The experiment is complete"; exit(); exp = experiment_description(options.experiment_description) if verbose: exp.summary() if exp.sd_protocol.lower() == "zeroconf".lower(): exp_protocol = "avahi" elif exp.sd_protocol.lower() == "slp".lower(): exp_protocol = "slp" else: print "Unsupported protocol set. Change to slp or zeroconf" exit() #run_experiment_new(exp,new_run_matrix); #remove experiment_data.copy_files() nodeContainer = NodeContainer(exp.get_all_spec_nodes()) if verbose: nodeContainer.summary() eh = EventHandler(eh_verbose) eh.start_event_handler() try: if parallel_experiment_init(experiment_name, experiment_root, nodeContainer.all(), capture_interface, exp_protocol)==-1: print "Cannot init experiment on all nodes, aborting!" kill_threads = 1 eh.stop_event_handler() exit() can_be_actors = nodeContainer.all_can_be_actors() can_be_names = [] for can_be in can_be_actors: can_be_names.append(can_be.name) print "Getting Topology information on the nodes that can act",can_be_names parallel_exec( can_be_actors, "get_topology",can_be_names) run_experiment_new(exp,new_run_matrix); except (KeyboardInterrupt, SystemExit): print "Have to quit now..." kill_threads = 1 finally: kill_threads = 1 parallel_exec(nodeContainer.all(), "fail_stop_drop_sd" ) time.sleep(1) parallel_exec(nodeContainer.all(), "experiment_exit") time.sleep(10) eh.stop_event_handler() print "Experiment done with all runs" print "Waiting 5 seconds for all threads to abort" sendmail("Experiment Complete","Experiment %s has finished." % experiment_data.get_current_experiment_dir()) time.sleep(5) exit()
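# --------------------------------------------------------------------------
# Hedged helper sketch (not part of the original experiment script): reading
# completed runs back out of the run_file.xml layout documented in
# build_xml_file(). The helper name read_completed_runs() is hypothetical and
# only illustrates the <run_list>/<run>/<factor> structure described above.
import xml.etree.ElementTree as ET

def read_completed_runs(xml_run_file='run_file.xml'):
    '''Return one list of factor-value strings per recorded <run> element.'''
    root = ET.parse(xml_run_file).getroot()          # <run_list>
    completed = []
    for run in root.findall('run'):                  # <run value="N">
        completed.append([factor.get('value') for factor in run.findall('factor')])
    return completed

# Hypothetical usage: read_completed_runs() returns an empty list for a freshly
# created run_file.xml and one inner list per run appended afterwards.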
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Util functions for the numpy module.""" import ctypes from .. util import is_np_array, is_np_shape from .. base import _LIB, check_call, string_types, c_str_array, DLPackHandle from .. base import c_handle_array, c_str, mx_uint, NDArrayHandle, py_str from ..numpy import ndarray __all__ = ['save', 'load', 'to_dlpack_for_read', 'to_dlpack_for_write', 'from_dlpack'] PyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p) _c_str_dltensor = c_str('dltensor') _c_str_used_dltensor = c_str('used_dltensor') def _dlpack_deleter(pycapsule): pycapsule = ctypes.c_void_p(pycapsule) if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor): ptr = ctypes.c_void_p( ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)) check_call(_LIB.MXNDArrayCallDLPackDeleter(ptr)) _c_dlpack_deleter = PyCapsuleDestructor(_dlpack_deleter) def save(file, arr): """Saves a list of `ndarray`s or a dict of `str`->`ndarray` to file. Examples of filenames: - ``/path/to/file`` - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports) - ``hdfs://path/to/file`` (if compiled with HDFS supports) Parameters ---------- file : str Filename to which the data is saved. arr : `ndarray` or list of `ndarray`s or dict of `str` to `ndarray` The data to be saved. Notes ----- This function can only be called within numpy semantics, i.e., `npx.is_np_shape()` and `npx.is_np_array()` must both return true. """ if not (is_np_shape() and is_np_array()): raise ValueError('Cannot save `mxnet.numpy.ndarray` in legacy mode. Please activate' ' numpy semantics by calling `npx.set_np()` in the global scope' ' before calling this function.') if isinstance(arr, ndarray): arr = [arr] if isinstance(arr, dict): str_keys = arr.keys() nd_vals = arr.values() if any(not isinstance(k, string_types) for k in str_keys) or \ any(not isinstance(v, ndarray) for v in nd_vals): raise TypeError('Only accepts dict str->ndarray or list of ndarrays') keys = c_str_array(str_keys) handles = c_handle_array(nd_vals) elif isinstance(arr, list): if any(not isinstance(v, ndarray) for v in arr): raise TypeError('Only accepts dict str->ndarray or list of ndarrays') keys = None handles = c_handle_array(arr) else: raise ValueError("data needs to either be a ndarray, dict of (str, ndarray) pairs " "or a list of ndarrays.") check_call(_LIB.MXNDArraySave(c_str(file), mx_uint(len(handles)), handles, keys)) def load(file): """Loads an array from file. See more details in ``save``. Parameters ---------- file : str The filename. Returns ------- result : list of ndarrays or dict of str -> ndarray Data stored in the file. Notes ----- This function can only be called within numpy semantics, i.e., `npx.is_np_shape()` and `npx.is_np_array()` must both return true. 
""" if not (is_np_shape() and is_np_array()): raise ValueError('Cannot load `mxnet.numpy.ndarray` in legacy mode. Please activate' ' numpy semantics by calling `npx.set_np()` in the global scope' ' before calling this function.') if not isinstance(file, string_types): raise TypeError('file required to be a string') out_size = mx_uint() out_name_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() names = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXNDArrayLoad(c_str(file), ctypes.byref(out_size), ctypes.byref(handles), ctypes.byref(out_name_size), ctypes.byref(names))) if out_name_size.value == 0: return [ndarray(NDArrayHandle(handles[i])) for i in range(out_size.value)] else: assert out_name_size.value == out_size.value return dict( (py_str(names[i]), ndarray(NDArrayHandle(handles[i]))) for i in range(out_size.value)) def from_dlpack(dlpack): """Returns a np.ndarray backed by a dlpack tensor. Parameters ---------- dlpack: PyCapsule (the pointer of DLManagedTensor) input data Returns ------- np.ndarray an ndarray backed by a dlpack tensor Examples -------- >>> x = mx.np.ones((2,3)) >>> y = mx.npx.to_dlpack_for_read(x) >>> type(y) <class 'PyCapsule'> >>> z = mx.npx.from_dlpack(y) >>> type(z) <class 'mxnet.numpy.ndarray'> >>> z array([[1., 1., 1.], [1., 1., 1.]]) >>> w = mx.npx.to_dlpack_for_write(x) >>> type(w) <class 'PyCapsule'> >>> u = mx.npx.from_dlpack(w) >>> u += 1 >>> x array([[2., 2., 2.], [2., 2., 2.]]) """ handle = NDArrayHandle() dlpack = ctypes.py_object(dlpack) assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError( 'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.') dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor)) check_call(_LIB.MXNDArrayFromDLPackEx(dlpack_handle, False, ctypes.byref(handle))) # Rename PyCapsule (DLPack) ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor) # delete the deleter of the old dlpack ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None) return ndarray(handle=handle) def to_dlpack_for_read(data): """Returns a reference view of np.ndarray that represents as DLManagedTensor until all previous write operations on the current array are finished. Parameters ---------- data: np.ndarray input data. Returns ------- PyCapsule (the pointer of DLManagedTensor) a reference view of ndarray that represents as DLManagedTensor. Examples -------- >>> x = mx.np.ones((2,3)) >>> y = mx.npx.to_dlpack_for_read(x) >>> type(y) <class 'PyCapsule'> >>> z = mx.npx.from_dlpack(y) >>> z array([[1., 1., 1.], [1., 1., 1.]]) """ data.wait_to_read() dlpack = DLPackHandle() check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack))) return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter) def to_dlpack_for_write(data): """Returns a reference view of ndarray that represents as DLManagedTensor until all previous read/write operations on the current array are finished. Parameters ---------- data: np.ndarray input data. Returns ------- PyCapsule (the pointer of DLManagedTensor) a reference view of np.ndarray that represents as DLManagedTensor. 
Examples -------- >>> x = mx.np.ones((2,3)) >>> w = mx.npx.to_dlpack_for_write(x) >>> type(w) <class 'PyCapsule'> >>> u = mx.npx.from_dlpack(w) >>> u += 1 >>> x array([[2., 2., 2.], [2., 2., 2.]]) """ check_call(_LIB.MXNDArrayWaitToWrite(data.handle)) dlpack = DLPackHandle() check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack))) return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter)
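# --------------------------------------------------------------------------
# Hedged usage sketch for the helpers above (assumes an MXNet build where they
# are exposed as mx.npx.save / mx.npx.load / mx.npx.to_dlpack_for_read /
# mx.npx.from_dlpack, as the docstring examples suggest; 'params.ndarray' is
# an arbitrary file name).
if __name__ == '__main__':
    import mxnet as mx

    mx.npx.set_np()                                  # numpy semantics required
    a = mx.np.ones((2, 3))

    mx.npx.save('params.ndarray', {'a': a})          # dict of str -> ndarray
    loaded = mx.npx.load('params.ndarray')           # -> {'a': ndarray}

    # DLPack hand-off: a capsule may be consumed only once.
    capsule = mx.npx.to_dlpack_for_read(loaded['a'])
    view = mx.npx.from_dlpack(capsule)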
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Eager-graph unified check numerics callback.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import threading import numpy as np from tensorflow.core.protobuf import debug_event_pb2 from tensorflow.python.debug.lib import op_callbacks_common from tensorflow.python.debug.lib import source_utils from tensorflow.python.framework import op_callbacks from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_debug_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util.tf_export import tf_export # Many ops have benign NaN outputs, and running them with check_numerics # on will create unwanted errors # TODO(b/142497024): Replace this whitelist with function decorators in the ops IGNORE_OP_OUTPUTS = ( # For FusedBatchNorm, if the input tensor is empty then batch_mean and # batch_variance will be NaN. reserve_space holds intermediate values # derived from batch_mean and batch_variance used for gradient calculation (b"FusedBatchNorm", 1), # batch_mean (b"FusedBatchNorm", 2), # batch_variance (b"FusedBatchNorm", 3), # reserve_space_1 (b"FusedBatchNorm", 4), # reserve_space_2 # Same as above (b"FusedBatchNormV2", 1), # batch_mean (b"FusedBatchNormV2", 2), # batch_variance (b"FusedBatchNormV2", 3), # reserve_space_1 (b"FusedBatchNormV2", 4), # reserve_space_2 # Same as above, but reserve_space_3 holds additional intermediate values (b"FusedBatchNormV3", 1), # batch_mean (b"FusedBatchNormV3", 2), # batch_variance (b"FusedBatchNormV3", 3), # reserve_space_1 (b"FusedBatchNormV3", 4), # reserve_space_2 (b"FusedBatchNormV3", 5), # reserve_space_3 ) # Some frequently used ops are generally safe and we can skip them to reduce # overhead. NOTE: This list is compiled by observing operations called by # models in practice and is not a comprehensive list of safe operations. SAFE_OPS = ( b"Concat", b"ConcatV2", b"ExpandDims", b"Fill", b"Gather", b"Maximum", b"Minimum", b"Reshape", b"Slice", b"Squeeze", b"Stack", b"StridedSlice", b"StridedSliceGrad", b"TensorListConcatV2", b"TensorListGather", b"TensorListGetItem", b"TensorListPopBack", b"TensorListStack", b"Transpose", b"Unpack", ) _state = threading.local() def limit_string_length(string, max_len=50): """Limit the length of input string. Args: string: Input string. max_len: (int or None) If int, the length limit. If None, no limit. Returns: Possibly length-limited string. """ if max_len is None or len(string) <= max_len: return string else: return "..." + string[len(string) - max_len:] # A dictionary that supports looking up the original input tensor names. 
_CHECK_NUMERICS_INPUT_LOOKUP = collections.defaultdict(dict) def _maybe_lookup_original_input_tensor(graph, tensor): if (graph and graph in _CHECK_NUMERICS_INPUT_LOOKUP and tensor.name in _CHECK_NUMERICS_INPUT_LOOKUP[graph]): return _CHECK_NUMERICS_INPUT_LOOKUP[graph][tensor.name] else: return tensor def get_check_numerics_error_message(slot, num_outputs, op_type, tensor, inputs, graph=None, traceback=None, stack_height_limit=30, path_length_limit=50): """Create a meaningful and user-friendly error message about offending tensor. The error message reveals the following info about the op that outputs NaN/Infinity: dtype, shape (to the extent known at graph-construction time), input tensors, stack trace for op creation (if is graph mode). Args: slot: (int) slot index of the tensor output. num_outputs: (int) total number of outputs of the op. op_type: (str) Type of the that generates `tensor`. tensor: (Tensor) the offending tensor, i.e., the tensor that contains Infinities or NaNs. inputs: (array of Tensor) inputs to the op that generates `tensor`. graph: (tf.Graph) the graph object that `tensor` belongs to. Available only under graph mode. traceback: (list of trace frames) the stack trace of the op's creation. Available only under graph model. stack_height_limit: (int or None) If int, limit to the height of the stack trace printed in the error message. If None, no limit to the height. path_length_limit: (int or None) Length limit for file paths included in the formatted stack trace. Returns: (str) A formatted error message. """ eager_vs_graph_qualifier = "graph" if graph else "eagerly-executing" message = "\n" message += ( "\n!!! Detected Infinity or NaN in output %d of " "%s op \"%s\" (# of outputs: %d) !!!\n" % (slot, eager_vs_graph_qualifier, op_type, num_outputs)) message += " dtype: %s\n" % tensor.dtype message += " shape: %s\n" % (tensor.shape,) if not graph: # This is an eager tensor. We can get its numpy value and count # NaNs and Infs. is_inf = np.isinf(tensor) num_neg_inf = np.sum(np.logical_and(np.less(tensor, 0.), is_inf)) num_pos_inf = np.sum(np.logical_and(np.greater(tensor, 0.), is_inf)) num_nan = np.sum(np.isnan(tensor)) if num_neg_inf > 0: message += " # of -Inf elements: %s\n" % num_neg_inf if num_pos_inf > 0: message += " # of +Inf elements: %s\n" % num_pos_inf if num_nan: message += " # of +NaN elements: %s\n" % num_nan if len(inputs) > 1: message += "\n Input tensors (%d):\n" % len(inputs) for slot, input_tensor in enumerate(inputs): message += " %d: %s\n" % ( slot, _maybe_lookup_original_input_tensor(graph, input_tensor)) elif len(inputs) == 1: message += "\n Input tensor: %s\n" % ( _maybe_lookup_original_input_tensor(graph, inputs[0])) if graph and hasattr(graph, "name") and graph.name: message += " Graph name: \"%s\"\n" % graph.name # Format the stack trace for the op's creation. We omit files that # belong to tensorflow itself. if graph and traceback: message += ( "\n Stack trace of op's creation (\"->\": inferred user code):\n") if stack_height_limit is not None and len(traceback) > stack_height_limit: num_omitted_frames = len(traceback) - stack_height_limit message += " + ... 
(Omitted %d frames)\n" % num_omitted_frames for filepath, lineno, function_name, source_line in traceback[ -stack_height_limit:]: user_code_indicator = " " if not source_utils.guess_is_tensorflow_py_library(filepath): user_code_indicator = " -> " message += " + %s (L%d) %s\n" % ( limit_string_length(filepath, path_length_limit), lineno, function_name) if source_line is not None: message += "%s| %s\n" % (user_code_indicator, source_line) message += "\n" return message def _debug_summary(x): return gen_debug_ops.debug_numeric_summary_v2( x, tensor_debug_mode=( debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)) class CheckNumericsCallback(object): """Wrapper for the numerics-checking callback for thread locality.""" def __init__(self, stack_height_limit, path_length_limit): self._stack_height_limit = stack_height_limit self._path_length_limit = path_length_limit # A dict mapping Placeholder tensors to their instrumenting debug tensors. # Used only under V1 graph mode, where we can't rely on auto control # dependency to execute the debug tensors and hence need to attach the debug # tensors as control dependencies of the ops that consume the Placeholder. self._placeholder_to_debug_tensor = dict() def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None): """Eager-function unified callback for checking numerics.""" del attrs, op_name # Unused op_type_bytes = compat.as_bytes(op_type) is_v1_graph_mode = not ops.executing_eagerly_outside_functions() if (op_type_bytes in op_callbacks_common.OP_CALLBACK_SKIP_OPS or op_type_bytes in SAFE_OPS): return None if graph: # Under graph mode. Insert check_numerics op. instrumented_outputs = [] if is_v1_graph_mode: for input_tensor in inputs: if input_tensor in self._placeholder_to_debug_tensor and outputs: outputs[0].op._add_control_input( # pylint: disable=protected-access self._placeholder_to_debug_tensor[input_tensor].op) for slot, output in enumerate(outputs): if (output.dtype.is_floating and (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS): checked_output = array_ops.check_numerics_v2( # TF v2 has automatic control dependencies added to stateful async # ops, which allows us to run check_numerics asynchronously. # In the above case we use debug_summary to reduce all output # tensors asynchronously from the op being checked and then # process the tensor summary with check_numerics. output if is_v1_graph_mode else _debug_summary(output), get_check_numerics_error_message( slot, len(outputs), op_type, output, inputs, graph=graph, traceback=output.op.traceback)) _CHECK_NUMERICS_INPUT_LOOKUP[graph][checked_output.name] = output instrumented_outputs.append(self._get_output_tensor( op_type_bytes, output, checked_output, is_v1_graph_mode)) else: instrumented_outputs.append(output) return instrumented_outputs else: if op_type_bytes == b"CheckNumericsV2": # TODO(b/140334369): Remove this special casing logic once op_callback. # automatically prevents infinite recursion in eager mode. return None # Under eager mode. Eagerly execute check_numerics op. for slot, output in enumerate(outputs): if (output.dtype.is_floating and (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS): array_ops.check_numerics_v2( output, get_check_numerics_error_message( slot, len(outputs), op_type, output, inputs, stack_height_limit=self._stack_height_limit, path_length_limit=self._path_length_limit)) def _get_output_tensor(self, op_type, tensor, checked_tensor, is_v1_graph_mode): """Determine what tensor to output from callback. 
    Args:
      op_type: Type of the op that outputs the original symbolic tensor, as
        `bytes`.
      tensor: The original output symbolic tensor.
      checked_tensor: The debugger-instrumented, numerics-checking tensor.
      is_v1_graph_mode: Whether the debugged program is running under V1 graph
        mode.

    Returns:
      A symbolic tensor to be returned by the dumping op_callback.
    """
    if is_v1_graph_mode:
      # Placeholders need special treatment under V1 graph mode. The
      # callback can't simply override the Placeholder tensor to the debug
      # tensor, as that would cause the Placeholder op to lack a value.
      # The debug tensor is remembered and will be attached as control
      # inputs to ops that consume the Placeholders later.
      if op_type == b"Placeholder":
        self._placeholder_to_debug_tensor[tensor] = checked_tensor
        return tensor
      else:
        return checked_tensor
    else:
      # Under non-v1 graph mode, rely on auto control dependency to run the
      # checked tensor.
      return tensor


@tf_export("debugging.enable_check_numerics")
def enable_check_numerics(stack_height_limit=30, path_length_limit=50):
  r"""Enable tensor numerics checking in an eager/graph unified fashion.

  The numerics checking mechanism will cause any TensorFlow eager execution or
  graph execution to error out as soon as an op's output tensor contains
  infinity or NaN.

  This method is idempotent. Calling it multiple times has the same effect
  as calling it once.

  This method takes effect only on the thread in which it is called.

  When an op's float-type output tensor contains any Infinity or NaN, a
  `tf.errors.InvalidArgumentError` will be thrown, with an error message that
  reveals the following information:
  - The type of the op that generated the tensor with bad numerics.
  - Data type (dtype) of the tensor.
  - Shape of the tensor (to the extent known at the time of eager execution
    or graph construction).
  - Name of the containing graph (if available).
  - (Graph mode only): The stack trace of the intra-graph op's creation, with
    a stack-height limit and a path-length limit for visual clarity. The
    stack frames that belong to the user's code (as opposed to tensorflow's
    internal code) are highlighted with a text arrow ("->").
  - (Eager mode only): How many of the offending tensor's elements are
    `Infinity` and `NaN`, respectively.

  Once enabled, the check-numerics mechanism can be disabled by using
  `tf.debugging.disable_check_numerics()`.

  Example usage:

  1. Catching infinity during the execution of a `tf.function` graph:

     ```py
     import tensorflow as tf

     tf.debugging.enable_check_numerics()

     @tf.function
     def square_log_x_plus_1(x):
       v = tf.math.log(x + 1)
       return tf.math.square(v)

     x = -1.0

     # When the following line runs, a function graph will be compiled
     # from the Python function `square_log_x_plus_1()`. Due to the
     # `enable_check_numerics()` call above, the graph will contain
     # numerics checking ops that will run during the function graph's
     # execution. The function call generates an -infinity when the Log
     # (logarithm) op operates on the output tensor of the Add op.
     # The program errors out at this line, printing an error message.
     y = square_log_x_plus_1(x)

     z = -y
     ```

  2. Catching NaN during eager execution:

     ```py
     import numpy as np
     import tensorflow as tf

     tf.debugging.enable_check_numerics()

     x = np.array([[0.0, -1.0], [4.0, 3.0]])

     # The following line executes the Sqrt op eagerly. Due to the negative
     # element in the input array, a NaN is generated. Due to the
     # `enable_check_numerics()` call above, the program errors immediately
     # at this line, printing an error message.
     y = tf.math.sqrt(x)
     z = tf.matmul(y, y)
     ```

  Args:
    stack_height_limit: Limit to the height of the printed stack trace.
      Applicable only to ops in `tf.function`s (graphs).
    path_length_limit: Limit to the file path included in the printed stack
      trace. Applicable only to ops in `tf.function`s (graphs).
  """
  if not hasattr(_state, "check_numerics_callback"):
    _state.check_numerics_callback = CheckNumericsCallback(
        stack_height_limit, path_length_limit)
  op_callbacks.add_op_callback(_state.check_numerics_callback.callback)
  logging.info(
      "Enabled check-numerics callback in thread %s",
      threading.current_thread().name)


@tf_export("debugging.disable_check_numerics")
def disable_check_numerics():
  """Disable the eager/graph unified numerics checking mechanism.

  This method can be used after a call to
  `tf.debugging.enable_check_numerics()` to disable the numerics-checking
  mechanism that catches infinity and NaN values output by ops executed
  eagerly or in tf.function-compiled graphs.

  This method is idempotent. Calling it multiple times has the same effect
  as calling it once.

  This method takes effect only on the thread in which it is called.
  """
  if not hasattr(_state, "check_numerics_callback"):
    return
  try:
    op_callbacks.remove_op_callback(_state.check_numerics_callback.callback)
    delattr(_state, "check_numerics_callback")
    logging.info(
        "Disabled check-numerics callback in thread %s",
        threading.current_thread().name)
  except KeyError:
    # Tolerate disabling the check numerics callback without
    # enable_check_numerics() being called first.
    pass
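# --------------------------------------------------------------------------
# Hedged usage sketch for the callback above, following the docstring
# examples: enable checking, catch the InvalidArgumentError raised for a NaN
# produced eagerly, then disable checking again.
if __name__ == "__main__":
  import numpy as np
  import tensorflow as tf

  tf.debugging.enable_check_numerics()
  try:
    tf.math.sqrt(np.array([[0.0, -1.0], [4.0, 3.0]]))  # sqrt(-1.0) -> NaN
  except tf.errors.InvalidArgumentError as e:
    print(e.message)  # reports op type, dtype, shape and the NaN/Inf counts
  finally:
    tf.debugging.disable_check_numerics()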
"""Parse (absolute and relative) URLs. urlparse module is based upon the following RFC specifications. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding and L. Masinter, January 2005. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter and L.Masinter, December 1999. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. Berners-Lee, R. Fielding, and L. Masinter, August 1998. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June 1995. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. McCahill, December 1994 RFC 3986 is considered the current standard and any future changes to urlparse module should conform with it. The urlparse module is currently not entirely compliant with this RFC due to defacto scenarios for parsing, and for backward compatibility purposes, some parsing quirks from older RFCs are retained. The testcases in test_urlparse.py provides a good indicator of parsing behavior. """ import re import sys import collections __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", "urlsplit", "urlunsplit", "urlencode", "parse_qs", "parse_qsl", "quote", "quote_plus", "quote_from_bytes", "unquote", "unquote_plus", "unquote_to_bytes", "DefragResult", "ParseResult", "SplitResult", "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"] # A classification of schemes ('' means apply by default) uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap', 'wais', 'file', 'https', 'shttp', 'mms', 'prospero', 'rtsp', 'rtspu', '', 'sftp', 'svn', 'svn+ssh'] uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '', 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh'] uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap', 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 'mms', '', 'sftp', 'tel'] # These are not actually used anymore, but should stay for backwards # compatibility. (They are undocumented, but have a public-looking name.) non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms', 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', ''] uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', 'nntp', 'wais', 'https', 'shttp', 'snews', 'file', 'prospero', ''] # Characters valid in scheme names scheme_chars = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789' '+-.') # XXX: Consider replacing with functools.lru_cache MAX_CACHE_SIZE = 20 _parse_cache = {} def clear_cache(): """Clear the parse cache and the quoters cache.""" _parse_cache.clear() _safe_quoters.clear() # Helpers for bytes handling # For 3.2, we deliberately require applications that # handle improperly quoted URLs to do their own # decoding and encoding. 
If valid use cases are # presented, we may relax this by using latin-1 # decoding internally for 3.3 _implicit_encoding = 'ascii' _implicit_errors = 'strict' def _noop(obj): return obj def _encode_result(obj, encoding=_implicit_encoding, errors=_implicit_errors): return obj.encode(encoding, errors) def _decode_args(args, encoding=_implicit_encoding, errors=_implicit_errors): return tuple(x.decode(encoding, errors) if x else '' for x in args) def _coerce_args(*args): # Invokes decode if necessary to create str args # and returns the coerced inputs along with # an appropriate result coercion function # - noop for str inputs # - encoding function otherwise str_input = isinstance(args[0], str) for arg in args[1:]: # We special-case the empty string to support the # "scheme=''" default argument to some functions if arg and isinstance(arg, str) != str_input: raise TypeError("Cannot mix str and non-str arguments") if str_input: return args + (_noop,) return _decode_args(args) + (_encode_result,) # Result objects are more helpful than simple tuples class _ResultMixinStr(object): """Standard approach to encoding parsed results from str to bytes""" __slots__ = () def encode(self, encoding='ascii', errors='strict'): return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) class _ResultMixinBytes(object): """Standard approach to decoding parsed results from bytes to str""" __slots__ = () def decode(self, encoding='ascii', errors='strict'): return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) class _NetlocResultMixinBase(object): """Shared methods for the parsed result objects containing a netloc element""" __slots__ = () @property def username(self): return self._userinfo[0] @property def password(self): return self._userinfo[1] @property def hostname(self): hostname = self._hostinfo[0] if not hostname: hostname = None elif hostname is not None: hostname = hostname.lower() return hostname @property def port(self): port = self._hostinfo[1] if port is not None: port = int(port, 10) if not ( 0 <= port <= 65535): raise ValueError("Port out of range 0-65535") return port class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition('@') if have_info: username, have_password, password = userinfo.partition(':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition('@') _, have_open_br, bracketed = hostinfo.partition('[') if have_open_br: hostname, _, port = bracketed.partition(']') _, _, port = port.partition(':') else: hostname, _, port = hostinfo.partition(':') if not port: port = None return hostname, port class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): __slots__ = () @property def _userinfo(self): netloc = self.netloc userinfo, have_info, hostinfo = netloc.rpartition(b'@') if have_info: username, have_password, password = userinfo.partition(b':') if not have_password: password = None else: username = password = None return username, password @property def _hostinfo(self): netloc = self.netloc _, _, hostinfo = netloc.rpartition(b'@') _, have_open_br, bracketed = hostinfo.partition(b'[') if have_open_br: hostname, _, port = bracketed.partition(b']') _, _, port = port.partition(b':') else: hostname, _, port = hostinfo.partition(b':') if not port: port = None return hostname, port from 
collections import namedtuple _DefragResultBase = namedtuple('DefragResult', 'url fragment') _SplitResultBase = namedtuple( 'SplitResult', 'scheme netloc path query fragment') _ParseResultBase = namedtuple( 'ParseResult', 'scheme netloc path params query fragment') _DefragResultBase.__doc__ = """ DefragResult(url, fragment) A 2-tuple that contains the url without fragment identifier and the fragment identifier as a separate argument. """ _DefragResultBase.url.__doc__ = """The URL with no fragment identifier.""" _DefragResultBase.fragment.__doc__ = """ Fragment identifier separated from URL, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _SplitResultBase.__doc__ = """ SplitResult(scheme, netloc, path, query, fragment) A 5-tuple that contains the different components of a URL. Similar to ParseResult, but does not split params. """ _SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request.""" _SplitResultBase.netloc.__doc__ = """ Network location where the request is made to. """ _SplitResultBase.path.__doc__ = """ The hierarchical path, such as the path to a file to download. """ _SplitResultBase.query.__doc__ = """ The query component, that contains non-hierarchical data, that along with data in path component, identifies a resource in the scope of URI's scheme and network location. """ _SplitResultBase.fragment.__doc__ = """ Fragment identifier, that allows indirect identification of a secondary resource by reference to a primary resource and additional identifying information. """ _ParseResultBase.__doc__ = """ ParseResult(scheme, netloc, path, params, query, fragment) A 6-tuple that contains components of a parsed URL. """ _ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__ _ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__ _ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__ _ParseResultBase.params.__doc__ = """ Parameters for last path element used to dereference the URI in order to provide access to perform some operation on the resource. 
""" _ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__ _ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__ # For backwards compatibility, alias _NetlocResultMixinStr # ResultBase is no longer part of the documented API, but it is # retained since deprecating it isn't worth the hassle ResultBase = _NetlocResultMixinStr # Structured result objects for string data class DefragResult(_DefragResultBase, _ResultMixinStr): __slots__ = () def geturl(self): if self.fragment: return self.url + '#' + self.fragment else: return self.url class SplitResult(_SplitResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResult(_ParseResultBase, _NetlocResultMixinStr): __slots__ = () def geturl(self): return urlunparse(self) # Structured result objects for bytes data class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): __slots__ = () def geturl(self): if self.fragment: return self.url + b'#' + self.fragment else: return self.url class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunsplit(self) class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): __slots__ = () def geturl(self): return urlunparse(self) # Set up the encode/decode result pairs def _fix_result_transcoding(): _result_pairs = ( (DefragResult, DefragResultBytes), (SplitResult, SplitResultBytes), (ParseResult, ParseResultBytes), ) for _decoded, _encoded in _result_pairs: _decoded._encoded_counterpart = _encoded _encoded._decoded_counterpart = _decoded _fix_result_transcoding() del _fix_result_transcoding def urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) splitresult = urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = splitresult if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' result = ParseResult(scheme, netloc, url, params, query, fragment) return _coerce_result(result) def _splitparams(url): if '/' in url: i = url.find(';', url.rfind('/')) if i < 0: return url, '' else: i = url.find(';') return url[:i], url[i+1:] def _splitnetloc(url, start=0): delim = len(url) # position of end of domain part of url, default is end for c in '/?#': # look for delimiters; the order is NOT important wdelim = url.find(c, start) # find first of this delim if wdelim >= 0: # if found delim = min(delim, wdelim) # use earliest delim position return url[start:delim], url[delim:] # return (domain, rest) def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> Return a 5-tuple: (scheme, netloc, path, query, fragment). Note that we don't break the components up in smaller bits (e.g. 
netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) allow_fragments = bool(allow_fragments) key = url, scheme, allow_fragments, type(url), type(scheme) cached = _parse_cache.get(key, None) if cached: return _coerce_result(cached) if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case scheme = url[:i].lower() url = url[i+1:] if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) for c in url[:i]: if c not in scheme_chars: break else: # make sure "url" is not actually a port number (in which case # "scheme" is really part of the path) rest = url[i+1:] if not rest or any(c not in '0123456789' for c in rest): # not a port number scheme, url = url[:i].lower(), rest if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v return _coerce_result(v) def urlunparse(components): """Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? with an empty query (the draft states that these are equivalent).""" scheme, netloc, url, params, query, fragment, _coerce_result = ( _coerce_args(*components)) if params: url = "%s;%s" % (url, params) return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) def urlunsplit(components): """Combine the elements of a tuple as returned by urlsplit() into a complete URL as a string. The data argument can be any five-item iterable. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had unnecessary delimiters (for example, a ? with an empty query; the RFC states that these are equivalent).""" scheme, netloc, url, query, fragment, _coerce_result = ( _coerce_args(*components)) if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if query: url = url + '?' 
+ query if fragment: url = url + '#' + fragment return _coerce_result(url) def urljoin(base, url, allow_fragments=True): """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter.""" if not base: return url if not url: return base base, url, _coerce_result = _coerce_args(base, url) bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ urlparse(base, '', allow_fragments) scheme, netloc, path, params, query, fragment = \ urlparse(url, bscheme, allow_fragments) if scheme != bscheme or scheme not in uses_relative: return _coerce_result(url) if scheme in uses_netloc: if netloc: return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) netloc = bnetloc if not path and not params: path = bpath params = bparams if not query: query = bquery return _coerce_result(urlunparse((scheme, netloc, path, params, query, fragment))) base_parts = bpath.split('/') if base_parts[-1] != '': # the last item is not a directory, so will not be taken into account # in resolving the relative path del base_parts[-1] # for rfc3986, ignore all base path should the first character be root. if path[:1] == '/': segments = path.split('/') else: segments = base_parts + path.split('/') # filter out elements that would cause redundant slashes on re-joining # the resolved_path segments[1:-1] = filter(None, segments[1:-1]) resolved_path = [] for seg in segments: if seg == '..': try: resolved_path.pop() except IndexError: # ignore any .. segments that would otherwise cause an IndexError # when popped from resolved_path if resolving for rfc3986 pass elif seg == '.': continue else: resolved_path.append(seg) if segments[-1] in ('.', '..'): # do some post-processing here. if the last segment was a relative dir, # then we need to append the trailing '/' resolved_path.append('') return _coerce_result(urlunparse((scheme, netloc, '/'.join( resolved_path) or '/', params, query, fragment))) def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ url, _coerce_result = _coerce_args(url) if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) else: frag = '' defrag = url return _coerce_result(DefragResult(defrag, frag)) _hexdig = '0123456789ABCDEFabcdef' _hextobyte = None def unquote_to_bytes(string): """unquote_to_bytes('abc%20def') -> b'abc def'.""" # Note: strings are encoded as UTF-8. This is only an issue if it contains # unescaped non-ASCII characters, which URIs should not. if not string: # Is it a string-like object? string.split return b'' if isinstance(string, str): string = string.encode('utf-8') bits = string.split(b'%') if len(bits) == 1: return string res = [bits[0]] append = res.append # Delay the initialization of the table to not waste memory # if the function is never called global _hextobyte if _hextobyte is None: _hextobyte = {(a + b).encode(): bytes([int(a + b, 16)]) for a in _hexdig for b in _hexdig} for item in bits[1:]: try: append(_hextobyte[item[:2]]) append(item[2:]) except KeyError: append(b'%') append(item) return b''.join(res) _asciire = re.compile('([\x00-\x7f]+)') def unquote(string, encoding='utf-8', errors='replace'): """Replace %xx escapes by their single-character equivalent. The optional encoding and errors parameters specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. 
By default, percent-encoded sequences are decoded with UTF-8, and invalid sequences are replaced by a placeholder character. unquote('abc%20def') -> 'abc def'. """ if '%' not in string: string.split return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'replace' bits = _asciire.split(string) res = [bits[0]] append = res.append for i in range(1, len(bits), 2): append(unquote_to_bytes(bits[i]).decode(encoding, errors)) append(bits[i + 1]) return ''.join(res) def parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. encoding and errors: specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. """ parsed_result = {} pairs = parse_qsl(qs, keep_blank_values, strict_parsing, encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) else: parsed_result[name] = [value] return parsed_result def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace'): """Parse a query given as a string argument. Arguments: qs: percent-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in percent-encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. encoding and errors: specify how to decode percent-encoded sequences into Unicode characters, as accepted by the bytes.decode() method. Returns a list, as G-d intended. """ qs, _coerce_result = _coerce_args(qs) pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] for name_value in pairs: if not name_value and not strict_parsing: continue nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % (name_value,)) # Handle case of a control-name with no equal sign if keep_blank_values: nv.append('') else: continue if len(nv[1]) or keep_blank_values: name = nv[0].replace('+', ' ') name = unquote(name, encoding=encoding, errors=errors) name = _coerce_result(name) value = nv[1].replace('+', ' ') value = unquote(value, encoding=encoding, errors=errors) value = _coerce_result(value) r.append((name, value)) return r def unquote_plus(string, encoding='utf-8', errors='replace'): """Like unquote(), but also replace plus signs by spaces, as required for unquoting HTML form values. 
unquote_plus('%7e/abc+def') -> '~/abc def' """ string = string.replace('+', ' ') return unquote(string, encoding, errors) _ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' b'abcdefghijklmnopqrstuvwxyz' b'0123456789' b'_.-') _ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE) _safe_quoters = {} class Quoter(collections.defaultdict): """A mapping from bytes (in range(0,256)) to strings. String values are percent-encoded byte values, unless the key < 128, and in the "safe" set (either the specified safe set, or default set). """ # Keeps a cache internally, using defaultdict, for efficiency (lookups # of cached keys don't call Python code at all). def __init__(self, safe): """safe: bytes object.""" self.safe = _ALWAYS_SAFE.union(safe) def __repr__(self): # Without this, will just display as a defaultdict return "<%s %r>" % (self.__class__.__name__, dict(self)) def __missing__(self, b): # Handle a cache miss. Store quoted string in cache and return. res = chr(b) if b in self.safe else '%{:02X}'.format(b) self[b] = res return res def quote(string, safe='/', encoding=None, errors=None): """quote('abc def') -> 'abc%20def' Each part of a URL, e.g. the path info, the query, etc., has a different set of reserved characters that must be quoted. RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists the following reserved characters. reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," Each of these characters is reserved in some component of a URL, but not necessarily in all of them. By default, the quote function is intended for quoting the path section of a URL. Thus, it will not encode '/'. This character is reserved, but in typical usage the quote function is being called on a path where the existing slash characters are used as reserved characters. string and safe may be either str or bytes objects. encoding and errors must not be specified if string is a bytes object. The optional encoding and errors parameters specify how to deal with non-ASCII characters, as accepted by the str.encode method. By default, encoding='utf-8' (characters are encoded with UTF-8), and errors='strict' (unsupported characters raise a UnicodeEncodeError). """ if isinstance(string, str): if not string: return string if encoding is None: encoding = 'utf-8' if errors is None: errors = 'strict' string = string.encode(encoding, errors) else: if encoding is not None: raise TypeError("quote() doesn't support 'encoding' for bytes") if errors is not None: raise TypeError("quote() doesn't support 'errors' for bytes") return quote_from_bytes(string, safe) def quote_plus(string, safe='', encoding=None, errors=None): """Like quote(), but also replace ' ' with '+', as required for quoting HTML form values. Plus signs in the original string are escaped unless they are included in safe. It also does not have safe default to '/'. """ # Check if ' ' in string, where string may either be a str or bytes. If # there are no spaces, the regular quote will produce the right answer. if ((isinstance(string, str) and ' ' not in string) or (isinstance(string, bytes) and b' ' not in string)): return quote(string, safe, encoding, errors) if isinstance(safe, str): space = ' ' else: space = b' ' string = quote(string, safe + space, encoding, errors) return string.replace(' ', '+') def quote_from_bytes(bs, safe='/'): """Like quote(), but accepts a bytes object rather than a str, and does not perform string-to-bytes encoding. It always returns an ASCII string. 
quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' """ if not isinstance(bs, (bytes, bytearray)): raise TypeError("quote_from_bytes() expected bytes") if not bs: return '' if isinstance(safe, str): # Normalize 'safe' by converting to bytes and removing non-ASCII chars safe = safe.encode('ascii', 'ignore') else: safe = bytes([c for c in safe if c < 128]) if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): return bs.decode() try: quoter = _safe_quoters[safe] except KeyError: _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ return ''.join([quoter(char) for char in bs]) def urlencode(query, doseq=False, safe='', encoding=None, errors=None, quote_via=quote_plus): """Encode a dict or sequence of two-element tuples into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. The components of a query arg may each be either a string or a bytes type. The safe, encoding, and errors parameters are passed down to the function specified by quote_via (encoding and errors only if a component is a str). """ if hasattr(query, "items"): query = query.items() else: # It's a bother at times that strings and string-like objects are # sequences. try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and not isinstance(query[0], tuple): raise TypeError # Zero-length sequences of all types will get here and succeed, # but that's a minor nit. Since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: ty, va, tb = sys.exc_info() raise TypeError("not a valid non-string sequence " "or mapping object").with_traceback(tb) l = [] if not doseq: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) else: v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: for k, v in query: if isinstance(k, bytes): k = quote_via(k, safe) else: k = quote_via(str(k), safe, encoding, errors) if isinstance(v, bytes): v = quote_via(v, safe) l.append(k + '=' + v) elif isinstance(v, str): v = quote_via(v, safe, encoding, errors) l.append(k + '=' + v) else: try: # Is this a sufficient test for sequence-ness? x = len(v) except TypeError: # not a sequence v = quote_via(str(v), safe, encoding, errors) l.append(k + '=' + v) else: # loop over the sequence for elt in v: if isinstance(elt, bytes): elt = quote_via(elt, safe) else: elt = quote_via(str(elt), safe, encoding, errors) l.append(k + '=' + elt) return '&'.join(l) def to_bytes(url): """to_bytes(u"URL") --> 'URL'.""" # Most URL schemes require ASCII. If that changes, the conversion # can be relaxed. 
# XXX get rid of to_bytes() if isinstance(url, str): try: url = url.encode("ASCII").decode() except UnicodeError: raise UnicodeError("URL " + repr(url) + " contains non-ASCII characters") return url def unwrap(url): """unwrap('<URL:type://host/path>') --> 'type://host/path'.""" url = str(url).strip() if url[:1] == '<' and url[-1:] == '>': url = url[1:-1].strip() if url[:4] == 'URL:': url = url[4:].strip() return url _typeprog = None def splittype(url): """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" global _typeprog if _typeprog is None: _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL) match = _typeprog.match(url) if match: scheme, data = match.groups() return scheme.lower(), data return None, url _hostprog = None def splithost(url): """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" global _hostprog if _hostprog is None: _hostprog = re.compile('//([^/?]*)(.*)', re.DOTALL) match = _hostprog.match(url) if match: host_port, path = match.groups() if path and path[0] != '/': path = '/' + path return host_port, path return None, url def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" user, delim, host = host.rpartition('@') return (user if delim else None), host def splitpasswd(user): """splitpasswd('user:passwd') -> 'user', 'passwd'.""" user, delim, passwd = user.partition(':') return user, (passwd if delim else None) # splittag('/path#tag') --> '/path', 'tag' _portprog = None def splitport(host): """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: _portprog = re.compile('(.*):([0-9]*)$', re.DOTALL) match = _portprog.match(host) if match: host, port = match.groups() if port: return host, port return host, None def splitnport(host, defport=-1): """Split host and port, returning numeric port. Return given default port if no ':' found; defaults to -1. Return numerical port if a valid number are found after ':'. Return None if ':' but not a valid number.""" host, delim, port = host.rpartition(':') if not delim: host = port elif port: try: nport = int(port) except ValueError: nport = None return host, nport return host, defport def splitquery(url): """splitquery('/path?query') --> '/path', 'query'.""" path, delim, query = url.rpartition('?') if delim: return path, query return url, None def splittag(url): """splittag('/path#tag') --> '/path', 'tag'.""" path, delim, tag = url.rpartition('#') if delim: return path, tag return url, None def splitattr(url): """splitattr('/path;attr1=value1;attr2=value2;...') -> '/path', ['attr1=value1', 'attr2=value2', ...].""" words = url.split(';') return words[0], words[1:] def splitvalue(attr): """splitvalue('attr=value') --> 'attr', 'value'.""" attr, delim, value = attr.partition('=') return attr, (value if delim else None)
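# --------------------------------------------------------------------------
# Hedged illustration of the main helpers defined above (standard
# urllib.parse behaviour; the URLs are arbitrary and the expected results are
# shown in the comments).
if __name__ == '__main__':
    parts = urlsplit('https://user:pw@Example.COM:8042/over/there?name=ferret#nose')
    # parts.scheme == 'https', parts.hostname == 'example.com', parts.port == 8042

    print(urljoin('http://example.com/a/b/c', '../d'))  # http://example.com/a/d
    print(parse_qs('key=v1&key=v2&flag='))              # {'key': ['v1', 'v2']}
    print(quote_plus('name=a b&c'))                     # name%3Da+b%26c
    print(urlencode({'q': 'a b', 'page': 2}))           # q=a+b&page=2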
import os import sys import subprocess import tempfile from time import sleep from os.path import exists, join, abspath from shutil import rmtree, copytree from tempfile import mkdtemp import six from twisted.trial import unittest from twisted.internet import defer import scrapy from scrapy.utils.python import to_native_str from scrapy.utils.python import retry_on_eintr from scrapy.utils.test import get_testenv from scrapy.utils.testsite import SiteTest from scrapy.utils.testproc import ProcessTest class ProjectTest(unittest.TestCase): project_name = 'testproject' def setUp(self): self.temp_path = mkdtemp() self.cwd = self.temp_path self.proj_path = join(self.temp_path, self.project_name) self.proj_mod_path = join(self.proj_path, self.project_name) self.env = get_testenv() def tearDown(self): rmtree(self.temp_path) def call(self, *new_args, **kwargs): with tempfile.TemporaryFile() as out: args = (sys.executable, '-m', 'scrapy.cmdline') + new_args return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd, env=self.env, **kwargs) def proc(self, *new_args, **kwargs): args = (sys.executable, '-m', 'scrapy.cmdline') + new_args p = subprocess.Popen(args, cwd=self.cwd, env=self.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) waited = 0 interval = 0.2 while p.poll() is None: sleep(interval) waited += interval if waited > 15: p.kill() assert False, 'Command took too much time to complete' return p class StartprojectTest(ProjectTest): def test_startproject(self): self.assertEqual(0, self.call('startproject', self.project_name)) assert exists(join(self.proj_path, 'scrapy.cfg')) assert exists(join(self.proj_path, 'testproject')) assert exists(join(self.proj_mod_path, '__init__.py')) assert exists(join(self.proj_mod_path, 'items.py')) assert exists(join(self.proj_mod_path, 'pipelines.py')) assert exists(join(self.proj_mod_path, 'settings.py')) assert exists(join(self.proj_mod_path, 'spiders', '__init__.py')) self.assertEqual(1, self.call('startproject', self.project_name)) self.assertEqual(1, self.call('startproject', 'wrong---project---name')) self.assertEqual(1, self.call('startproject', 'sys')) class StartprojectTemplatesTest(ProjectTest): def setUp(self): super(StartprojectTemplatesTest, self).setUp() self.tmpl = join(self.temp_path, 'templates') self.tmpl_proj = join(self.tmpl, 'project') def test_startproject_template_override(self): copytree(join(scrapy.__path__[0], 'templates'), self.tmpl) with open(join(self.tmpl_proj, 'root_template'), 'w'): pass assert exists(join(self.tmpl_proj, 'root_template')) args = ['--set', 'TEMPLATES_DIR=%s' % self.tmpl] p = self.proc('startproject', self.project_name, *args) out = to_native_str(retry_on_eintr(p.stdout.read)) self.assertIn("New Scrapy project %r, using template directory" % self.project_name, out) self.assertIn(self.tmpl_proj, out) assert exists(join(self.proj_path, 'root_template')) class CommandTest(ProjectTest): def setUp(self): super(CommandTest, self).setUp() self.call('startproject', self.project_name) self.cwd = join(self.temp_path, self.project_name) self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name class GenspiderCommandTest(CommandTest): def test_arguments(self): # only pass one argument. spider script shouldn't be created self.assertEqual(2, self.call('genspider', 'test_name')) assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py')) # pass two arguments <name> <domain>. 
spider script should be created self.assertEqual(0, self.call('genspider', 'test_name', 'test.com')) assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py')) def test_template(self, tplname='crawl'): args = ['--template=%s' % tplname] if tplname else [] spname = 'test_spider' p = self.proc('genspider', spname, 'test.com', *args) out = to_native_str(retry_on_eintr(p.stdout.read)) self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out) self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py'))) p = self.proc('genspider', spname, 'test.com', *args) out = to_native_str(retry_on_eintr(p.stdout.read)) self.assertIn("Spider %r already exists in module" % spname, out) def test_template_basic(self): self.test_template('basic') def test_template_csvfeed(self): self.test_template('csvfeed') def test_template_xmlfeed(self): self.test_template('xmlfeed') def test_list(self): self.assertEqual(0, self.call('genspider', '--list')) def test_dump(self): self.assertEqual(0, self.call('genspider', '--dump=basic')) self.assertEqual(0, self.call('genspider', '-d', 'basic')) def test_same_name_as_project(self): self.assertEqual(2, self.call('genspider', self.project_name)) assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name)) class MiscCommandsTest(CommandTest): def test_list(self): self.assertEqual(0, self.call('list')) class RunSpiderCommandTest(CommandTest): def test_runspider(self): tmpdir = self.mktemp() os.mkdir(tmpdir) fname = abspath(join(tmpdir, 'myspider.py')) with open(fname, 'w') as f: f.write(""" import scrapy class MySpider(scrapy.Spider): name = 'myspider' def start_requests(self): self.logger.debug("It Works!") return [] """) p = self.proc('runspider', fname) log = to_native_str(p.stderr.read()) self.assertIn("DEBUG: It Works!", log) self.assertIn("INFO: Spider opened", log) self.assertIn("INFO: Closing spider (finished)", log) self.assertIn("INFO: Spider closed (finished)", log) def test_runspider_no_spider_found(self): tmpdir = self.mktemp() os.mkdir(tmpdir) fname = abspath(join(tmpdir, 'myspider.py')) with open(fname, 'w') as f: f.write(""" from scrapy.spiders import Spider """) p = self.proc('runspider', fname) log = to_native_str(p.stderr.read()) self.assertIn("No spider found in file", log) def test_runspider_file_not_found(self): p = self.proc('runspider', 'some_non_existent_file') log = to_native_str(p.stderr.read()) self.assertIn("File not found: some_non_existent_file", log) def test_runspider_unable_to_load(self): tmpdir = self.mktemp() os.mkdir(tmpdir) fname = abspath(join(tmpdir, 'myspider.txt')) with open(fname, 'w') as f: f.write("") p = self.proc('runspider', fname) log = to_native_str(p.stderr.read()) self.assertIn("Unable to load", log) class ParseCommandTest(ProcessTest, SiteTest, CommandTest): skip = not six.PY2 command = 'parse' def setUp(self): super(ParseCommandTest, self).setUp() self.spider_name = 'parse_spider' fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py')) with open(fname, 'w') as f: f.write(""" import scrapy class MySpider(scrapy.Spider): name = '{0}' def parse(self, response): if getattr(self, 'test_arg', None): self.logger.debug('It Works!') return [scrapy.Item(), dict(foo='bar')] """.format(self.spider_name)) fname = abspath(join(self.proj_mod_path, 'pipelines.py')) with open(fname, 'w') as f: f.write(""" import logging class MyPipeline(object): component_name = 'my_pipeline' def process_item(self, item, spider): logging.info('It Works!') return item """) 
fname = abspath(join(self.proj_mod_path, 'settings.py')) with open(fname, 'a') as f: f.write(""" ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1} """ % self.project_name) @defer.inlineCallbacks def test_spider_arguments(self): _, _, stderr = yield self.execute(['--spider', self.spider_name, '-a', 'test_arg=1', '-c', 'parse', self.url('/html')]) self.assertIn("DEBUG: It Works!", to_native_str(stderr)) @defer.inlineCallbacks def test_pipelines(self): _, _, stderr = yield self.execute(['--spider', self.spider_name, '--pipelines', '-c', 'parse', self.url('/html')]) self.assertIn("INFO: It Works!", to_native_str(stderr)) @defer.inlineCallbacks def test_parse_items(self): status, out, stderr = yield self.execute( ['--spider', self.spider_name, '-c', 'parse', self.url('/html')] ) self.assertIn("""[{}, {'foo': 'bar'}]""", to_native_str(out)) class BenchCommandTest(CommandTest): def test_run(self): p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001', '-s', 'CLOSESPIDER_TIMEOUT=0.01') log = to_native_str(p.stderr.read()) self.assertIn('INFO: Crawled', log) self.assertNotIn('Unhandled Error', log)
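# A small additional sketch in the same style as the suites above (not part of
# the original file). It assumes "scrapy version" prints the value of
# scrapy.__version__ on stdout; the class and test names are hypothetical.
class VersionCommandTest(CommandTest):

    def test_output(self):
        p = self.proc('version')
        out = to_native_str(retry_on_eintr(p.stdout.read))
        self.assertIn(scrapy.__version__, out)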
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ctc_ops.ctc_loss_op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from six.moves import zip_longest from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import ctc_ops from tensorflow.python.platform import test def grouper(iterable, n, fillvalue=None): """Collect data into fixed-length chunks or blocks.""" # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) def flatten(list_of_lists): """Flatten one level of nesting.""" return itertools.chain.from_iterable(list_of_lists) class CTCGreedyDecoderTest(test.TestCase): def _testCTCDecoder(self, decoder, inputs, seq_lens, log_prob_truth, decode_truth, expected_err_re=None, **decoder_args): inputs_t = [ops.convert_to_tensor(x) for x in inputs] # convert inputs_t into a [max_time x batch_size x depth] tensor # from a len time python list of [batch_size x depth] tensors inputs_t = array_ops.stack(inputs_t) with self.cached_session(use_gpu=False) as sess: decoded_list, log_probability = decoder( inputs_t, sequence_length=seq_lens, **decoder_args) decoded_unwrapped = list( flatten([(st.indices, st.values, st.dense_shape) for st in decoded_list])) if expected_err_re is None: outputs = sess.run(decoded_unwrapped + [log_probability]) # Group outputs into (ix, vals, shape) tuples output_sparse_tensors = list(grouper(outputs[:-1], 3)) output_log_probability = outputs[-1] # Check the number of decoded outputs (top_paths) match self.assertEqual(len(output_sparse_tensors), len(decode_truth)) # For each SparseTensor tuple, compare (ix, vals, shape) for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth, decoded_list): self.assertAllEqual(out_st[0], truth_st[0]) # ix self.assertAllEqual(out_st[1], truth_st[1]) # vals self.assertAllEqual(out_st[2], truth_st[2]) # shape # Compare the shapes of the components with the truth. The # `None` elements are not known statically. 
self.assertEqual([None, truth_st[0].shape[1]], tf_st.indices.get_shape().as_list()) self.assertEqual([None], tf_st.values.get_shape().as_list()) self.assertShapeEqual(truth_st[2], tf_st.dense_shape) # Make sure decoded probabilities match self.assertAllClose(output_log_probability, log_prob_truth, atol=1e-6) else: with self.assertRaisesOpError(expected_err_re): sess.run(decoded_unwrapped + [log_probability]) @test_util.run_deprecated_v1 def testCTCGreedyDecoder(self): """Test two batch entries - best path decoder.""" max_time_steps = 6 # depth == 4 seq_len_0 = 4 input_prob_matrix_0 = np.asarray( [[1.0, 0.0, 0.0, 0.0], # t=0 [0.0, 0.0, 0.4, 0.6], # t=1 [0.0, 0.0, 0.4, 0.6], # t=2 [0.0, 0.9, 0.1, 0.0], # t=3 [0.0, 0.0, 0.0, 0.0], # t=4 (ignored) [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored) dtype=np.float32) input_log_prob_matrix_0 = np.log(input_prob_matrix_0) seq_len_1 = 5 # dimensions are time x depth input_prob_matrix_1 = np.asarray( [ [0.1, 0.9, 0.0, 0.0], # t=0 [0.0, 0.9, 0.1, 0.0], # t=1 [0.0, 0.0, 0.1, 0.9], # t=2 [0.0, 0.9, 0.1, 0.1], # t=3 [0.9, 0.1, 0.0, 0.0], # t=4 [0.0, 0.0, 0.0, 0.0] ], # t=5 (ignored) dtype=np.float32) input_log_prob_matrix_1 = np.log(input_prob_matrix_1) # len max_time_steps array of batch_size x depth matrices inputs = [ np.vstack( [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]]) for t in range(max_time_steps) ] # batch_size length vector of sequence_lengths seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32) # batch_size length vector of negative log probabilities log_prob_truth = np.array([ np.sum(-np.log([1.0, 0.6, 0.6, 0.9])), np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9])) ], np.float32)[:, np.newaxis] # decode_truth: one SparseTensor (ix, vals, shape) decode_truth = [ ( np.array( [ [0, 0], # batch 0, 2 outputs [0, 1], [1, 0], # batch 1, 3 outputs [1, 1], [1, 2] ], dtype=np.int64), np.array( [ 0, 1, # batch 0 1, 1, 0 ], # batch 1 dtype=np.int64), # shape is batch x max_decoded_length np.array( [2, 3], dtype=np.int64)), ] self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens, log_prob_truth, decode_truth) @test_util.run_deprecated_v1 def testCTCDecoderBeamSearch(self): """Test one batch, two beams - hibernating beam search.""" # max_time_steps == 8 depth = 6 seq_len_0 = 5 input_prob_matrix_0 = np.asarray( [ [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908], [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517], [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763], [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655], [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878], # Random entry added in at time=5 [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671] ], dtype=np.float32) # Add arbitrary offset - this is fine input_prob_matrix_0 = input_prob_matrix_0 + 2.0 # len max_time_steps array of batch_size x depth matrices inputs = ([ input_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0) ] # Pad to max_time_steps = 8 + 2 * [np.zeros( (1, depth), dtype=np.float32)]) # batch_size length vector of sequence_lengths seq_lens = np.array([seq_len_0], dtype=np.int32) # batch_size length vector of log probabilities log_prob_truth = np.array( [ -5.811451, # output beam 0 -6.63339 # output beam 1 ], np.float32)[np.newaxis, :] # decode_truth: two SparseTensors, (ix, values, shape) decode_truth = [ # beam 0, batch 0, two outputs decoded (np.array( [[0, 0], [0, 1]], dtype=np.int64), np.array( [1, 0], dtype=np.int64), np.array( [1, 2], dtype=np.int64)), # beam 1, batch 0, one 
output decoded (np.array( [[0, 0]], dtype=np.int64), np.array( [1], dtype=np.int64), np.array( [1, 1], dtype=np.int64)), ] # Test correct decoding. self._testCTCDecoder( ctc_ops.ctc_beam_search_decoder, inputs, seq_lens, log_prob_truth, decode_truth, beam_width=2, top_paths=2) # Requesting more paths than the beam width allows. with self.assertRaisesRegex(errors.InvalidArgumentError, (".*requested more paths than the beam " "width.*")): self._testCTCDecoder( ctc_ops.ctc_beam_search_decoder, inputs, seq_lens, log_prob_truth, decode_truth, beam_width=2, top_paths=3) if __name__ == "__main__": test.main()
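# Illustrative reference (not part of the TensorFlow suite): a plain NumPy
# sketch of greedy (best-path) CTC decoding as exercised in
# testCTCGreedyDecoder -- take the argmax at each time step, collapse repeated
# labels, then drop the blank class (index depth-1). The helper name is
# hypothetical and this is an aid for reading the expected values, not the
# library implementation.
def _np_greedy_ctc_decode(log_prob_matrix, seq_len):
  """log_prob_matrix: [max_time, depth] log-probs for one batch entry.

  For the first batch entry in testCTCGreedyDecoder this yields [0, 1],
  matching the values in decode_truth.
  """
  blank = log_prob_matrix.shape[1] - 1
  best_path = np.argmax(log_prob_matrix[:seq_len], axis=1)
  collapsed = [k for k, _ in itertools.groupby(best_path)]
  return [int(k) for k in collapsed if k != blank]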
import os import astropy.io.fits as pyfits import numpy as np from scipy.interpolate import RegularGridInterpolator, interp1d def gaussian_fwhm(sigma): return 2. * np.sqrt(2. * np.log(2.)) * sigma class FastFiberAcceptance(object): """ This class reads an input fits file generated with specsim.fitgalsim ($DESIMODEL/data/throughput/galsim-fiber-acceptance.fits) and instanciates RegularGridInterpolator objects for 2D and 3D interpolation of the pre-computed galsim fiber acceptance as a function of sigma (atmosphere+telescope blur, in um on focal surface), fiber offset from source (in um on focal surface), and half light radius (in arcsec) from extended source. The average and rms interpolation function for POINT,DISK and BULGE profiles are loaded. """ def __init__(self,filename=None): if filename is None : if not "DESIMODEL" in os.environ : print("need environment variable DESIMODEL or specify filename in constructor") raise RuntimeError("need environment variable DESIMODEL or specify filename in constructor") filename=os.path.join(os.environ["DESIMODEL"],"data/throughput/galsim-fiber-acceptance.fits") hdulist=pyfits.open(filename) sigma=hdulist["SIGMA"].data offset=hdulist["OFFSET"].data hlradius=hdulist["HLRAD"].data self._sigma=sigma self._offset=offset self._hlradius=hlradius self._data = {} self.fiber_acceptance_func = {} self.fiber_acceptance_rms_func = {} self.psf_seeing_func = {} for source in ["POINT","DISK","BULGE"] : data=hdulist[source].data rms=hdulist[source[0]+"RMS"].data dim=len(data.shape) self._data[source] = data if dim == 2 : assert source == 'POINT' # POINT: zero offset. self.psf_seeing_func[source] = interp1d(data[::-1,0], sigma[::-1], kind='linear', copy=True, bounds_error=False, assume_sorted=False, fill_value=(sigma[-1],sigma[0])) self.fiber_acceptance_func[source] = RegularGridInterpolator(points=(sigma,offset),values=data,method="linear",bounds_error=False,fill_value=None) self.fiber_acceptance_rms_func[source] = RegularGridInterpolator(points=(sigma,offset),values=rms,method="linear",bounds_error=False,fill_value=None) elif dim == 3 : # Not POINT self.fiber_acceptance_func[source] = RegularGridInterpolator(points=(hlradius,sigma,offset),values=data,method="linear",bounds_error=False,fill_value=None) self.fiber_acceptance_rms_func[source] = RegularGridInterpolator(points=(hlradius,sigma,offset),values=rms,method="linear",bounds_error=False,fill_value=None) hdulist.close() def psf_seeing_sigma(self, psf_fiberfrac): return self.psf_seeing_func["POINT"](psf_fiberfrac) def psf_seeing_fwhm(self, psf_fiberfrac): sigma = self.psf_seeing_func["POINT"](psf_fiberfrac) return gaussian_fwhm(sigma) def rms(self,source,sigmas,offsets=None,hlradii=None) : """ returns fiber acceptance fraction rms for the given source,sigmas,offsets Args: source (string) : POINT, DISK or BULGE for point source, exponential profile or De Vaucouleurs profile sigmas (np.array) : arbitrary shape, values of sigmas in um for the PSF due to atmosphere and telescope blur Optional: hlradii (np.array) : same shape as sigmas, half light radius in arcsec for source offsets (np.array) : same shape as sigmas, values of offsets on focal surface between fiber and source, in um Returns np.array with same shape as input """ was_scalar = np.isscalar(sigmas) sigmas = np.atleast_1d(sigmas) original_shape = sigmas.shape if offsets is None : offsets=np.zeros(sigmas.shape) else : offsets=np.atleast_1d(offsets) assert(sigmas.shape==offsets.shape) if hlradii is not None : hlradii=np.atleast_1d(hlradii) 
assert(hlradii.shape==sigmas.shape) res = None if source == "POINT" : res = self.fiber_acceptance_rms_func[source](np.array([sigmas.ravel(),offsets.ravel()]).T) else : if hlradii is None : if source == "DISK" : hlradii = 0.45 * np.ones(sigmas.shape) elif source == "BULGE" : hlradii = 1. * np.ones(sigmas.shape) res = self.fiber_acceptance_rms_func[source](np.array([hlradii.ravel(),sigmas.ravel(),offsets.ravel()]).T) res[res<0] = 0. res[res>1] = 1. if was_scalar : return float(res[0]) return res.reshape(original_shape) def value(self,source,sigmas,offsets=None,hlradii=None) : """ returns the fiber acceptance for the given source,sigmas,offsets Args: source (string) : POINT, DISK or BULGE for point source, exponential profile or De Vaucouleurs profile sigmas (np.array) : arbitrary shape, values of sigmas in um for the PSF due to atmosphere and telescope blur offsets (np.array) : same shape as sigmas, values of offsets on focal surface between fiber and source, in um Optional: hlradii (np.array) : same shape as sigmas, half light radius in arcsec for source Returns np.array with same shape as input """ was_scalar = np.isscalar(sigmas) sigmas = np.atleast_1d(sigmas) original_shape = sigmas.shape if offsets is None : offsets=np.zeros(sigmas.shape) else : offsets=np.atleast_1d(offsets) assert(sigmas.shape==offsets.shape) if hlradii is not None : hlradii=np.atleast_1d(hlradii) assert(hlradii.shape==sigmas.shape) res = None if source == "POINT" : res = self.fiber_acceptance_func[source](np.array([sigmas.ravel(),offsets.ravel()]).T) else : if hlradii is None : if source == "DISK" : hlradii = 0.45 * np.ones(sigmas.shape) elif source == "BULGE" : hlradii = 1. * np.ones(sigmas.shape) res = self.fiber_acceptance_func[source](np.array([hlradii.ravel(),sigmas.ravel(),offsets.ravel()]).T) res[res<0] = 0. res[res>1] = 1. if was_scalar : return float(res[0]) return res.reshape(original_shape) if __name__ == '__main__': import numpy as np import pylab as pl from fastfiberacceptance import FastFiberAcceptance x = FastFiberAcceptance() fiberfracs= np.arange(0.0,1.0, 0.01) seeings= x.psf_seeing_sigma(fiberfracs) avg_platescale = 1.52 / 107. # [''/microns]. seeings *= avg_platescale print(x._sigma[::-1]) print(x._sigma[::-1] * avg_platescale) print(x._data['POINT'][::-1,0]) pl.figure() pl.subplot(121) pl.plot(fiberfracs, seeings) pl.plot(x._data['POINT'][::-1,0], x._sigma[::-1] * avg_platescale, marker='^', alpha=0.5) pl.xlabel('PSF FIBERFRAC') pl.ylabel('SEEING SIGMA [ARCSECONDS]') pl.subplot(122) fwhms= x.psf_seeing_fwhm(fiberfracs) fwhms *= avg_platescale pl.plot(fiberfracs, fwhms) pl.axhline(1.1, c='k', lw=0.5) pl.axvline(0.6, c='k', lw=0.5) pl.xlabel('PSF FIBERFRAC') pl.ylabel('SEEING FWHM [ARCSECONDS]') pl.show()
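# Usage sketch (illustrative, all numbers made up): querying the mean fiber
# acceptance and its rms for a point source and a disk profile with the class
# defined above. It assumes $DESIMODEL is set so the default
# galsim-fiber-acceptance.fits file can be found.
def _example_queries():
    fa = FastFiberAcceptance()
    sigmas = np.array([60., 80., 100.])   # um rms on the focal surface
    offsets = np.zeros_like(sigmas)       # fiber centered on the source
    frac_point = fa.value("POINT", sigmas, offsets)
    rms_point = fa.rms("POINT", sigmas, offsets)
    frac_disk = fa.value("DISK", sigmas, offsets,
                         hlradii=0.45*np.ones_like(sigmas))
    return frac_point, rms_point, frac_disk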
import pytest from jinja2 import DictLoader from jinja2 import Environment from jinja2 import PrefixLoader from jinja2 import Template from jinja2 import TemplateAssertionError from jinja2 import TemplateNotFound from jinja2 import TemplateSyntaxError from jinja2.utils import pass_context class TestCorner: def test_assigned_scoping(self, env): t = env.from_string( """ {%- for item in (1, 2, 3, 4) -%} [{{ item }}] {%- endfor %} {{- item -}} """ ) assert t.render(item=42) == "[1][2][3][4]42" t = env.from_string( """ {%- for item in (1, 2, 3, 4) -%} [{{ item }}] {%- endfor %} {%- set item = 42 %} {{- item -}} """ ) assert t.render() == "[1][2][3][4]42" t = env.from_string( """ {%- set item = 42 %} {%- for item in (1, 2, 3, 4) -%} [{{ item }}] {%- endfor %} {{- item -}} """ ) assert t.render() == "[1][2][3][4]42" def test_closure_scoping(self, env): t = env.from_string( """ {%- set wrapper = "<FOO>" %} {%- for item in (1, 2, 3, 4) %} {%- macro wrapper() %}[{{ item }}]{% endmacro %} {{- wrapper() }} {%- endfor %} {{- wrapper -}} """ ) assert t.render() == "[1][2][3][4]<FOO>" t = env.from_string( """ {%- for item in (1, 2, 3, 4) %} {%- macro wrapper() %}[{{ item }}]{% endmacro %} {{- wrapper() }} {%- endfor %} {%- set wrapper = "<FOO>" %} {{- wrapper -}} """ ) assert t.render() == "[1][2][3][4]<FOO>" t = env.from_string( """ {%- for item in (1, 2, 3, 4) %} {%- macro wrapper() %}[{{ item }}]{% endmacro %} {{- wrapper() }} {%- endfor %} {{- wrapper -}} """ ) assert t.render(wrapper=23) == "[1][2][3][4]23" class TestBug: def test_keyword_folding(self, env): env = Environment() env.filters["testing"] = lambda value, some: value + some assert ( env.from_string("{{ 'test'|testing(some='stuff') }}").render() == "teststuff" ) def test_extends_output_bugs(self, env): env = Environment( loader=DictLoader({"parent.html": "(({% block title %}{% endblock %}))"}) ) t = env.from_string( '{% if expr %}{% extends "parent.html" %}{% endif %}' "[[{% block title %}title{% endblock %}]]" "{% for item in [1, 2, 3] %}({{ item }}){% endfor %}" ) assert t.render(expr=False) == "[[title]](1)(2)(3)" assert t.render(expr=True) == "((title))" def test_urlize_filter_escaping(self, env): tmpl = env.from_string('{{ "http://www.example.org/<foo"|urlize }}') assert ( tmpl.render() == '<a href="http://www.example.org/&lt;foo" rel="noopener">' "http://www.example.org/&lt;foo</a>" ) def test_urlize_filter_closing_punctuation(self, env): tmpl = env.from_string( '{{ "(see http://www.example.org/?page=subj_<desc.h>)"|urlize }}' ) assert tmpl.render() == ( '(see <a href="http://www.example.org/?page=subj_&lt;desc.h&gt;" ' 'rel="noopener">http://www.example.org/?page=subj_&lt;desc.h&gt;</a>)' ) def test_loop_call_loop(self, env): tmpl = env.from_string( """ {% macro test() %} {{ caller() }} {% endmacro %} {% for num1 in range(5) %} {% call test() %} {% for num2 in range(10) %} {{ loop.index }} {% endfor %} {% endcall %} {% endfor %} """ ) assert tmpl.render().split() == [str(x) for x in range(1, 11)] * 5 def test_weird_inline_comment(self, env): env = Environment(line_statement_prefix="%") pytest.raises( TemplateSyntaxError, env.from_string, "% for item in seq {# missing #}\n...% endfor", ) def test_old_macro_loop_scoping_bug(self, env): tmpl = env.from_string( "{% for i in (1, 2) %}{{ i }}{% endfor %}" "{% macro i() %}3{% endmacro %}{{ i() }}" ) assert tmpl.render() == "123" def test_partial_conditional_assignments(self, env): tmpl = env.from_string("{% if b %}{% set a = 42 %}{% endif %}{{ a }}") assert tmpl.render(a=23) == "23" assert 
tmpl.render(b=True) == "42" def test_stacked_locals_scoping_bug(self, env): env = Environment(line_statement_prefix="#") t = env.from_string( """\ # for j in [1, 2]: # set x = 1 # for i in [1, 2]: # print x # if i % 2 == 0: # set x = x + 1 # endif # endfor # endfor # if a # print 'A' # elif b # print 'B' # elif c == d # print 'C' # else # print 'D' # endif """ ) assert t.render(a=0, b=False, c=42, d=42.0) == "1111C" def test_stacked_locals_scoping_bug_twoframe(self, env): t = Template( """ {% set x = 1 %} {% for item in foo %} {% if item == 1 %} {% set x = 2 %} {% endif %} {% endfor %} {{ x }} """ ) rv = t.render(foo=[1]).strip() assert rv == "1" def test_call_with_args(self, env): t = Template( """{% macro dump_users(users) -%} <ul> {%- for user in users -%} <li><p>{{ user.username|e }}</p>{{ caller(user) }}</li> {%- endfor -%} </ul> {%- endmacro -%} {% call(user) dump_users(list_of_user) -%} <dl> <dl>Realname</dl> <dd>{{ user.realname|e }}</dd> <dl>Description</dl> <dd>{{ user.description }}</dd> </dl> {% endcall %}""" ) assert [ x.strip() for x in t.render( list_of_user=[ { "username": "apo", "realname": "something else", "description": "test", } ] ).splitlines() ] == [ "<ul><li><p>apo</p><dl>", "<dl>Realname</dl>", "<dd>something else</dd>", "<dl>Description</dl>", "<dd>test</dd>", "</dl>", "</li></ul>", ] def test_empty_if_condition_fails(self, env): pytest.raises(TemplateSyntaxError, Template, "{% if %}....{% endif %}") pytest.raises( TemplateSyntaxError, Template, "{% if foo %}...{% elif %}...{% endif %}" ) pytest.raises(TemplateSyntaxError, Template, "{% for x in %}..{% endfor %}") def test_recursive_loop_compile(self, env): Template( """ {% for p in foo recursive%} {{p.bar}} {% for f in p.fields recursive%} {{f.baz}} {{p.bar}} {% if f.rec %} {{ loop(f.sub) }} {% endif %} {% endfor %} {% endfor %} """ ) Template( """ {% for p in foo%} {{p.bar}} {% for f in p.fields recursive%} {{f.baz}} {{p.bar}} {% if f.rec %} {{ loop(f.sub) }} {% endif %} {% endfor %} {% endfor %} """ ) def test_else_loop_bug(self, env): t = Template( """ {% for x in y %} {{ loop.index0 }} {% else %} {% for i in range(3) %}{{ i }}{% endfor %} {% endfor %} """ ) assert t.render(y=[]).strip() == "012" def test_correct_prefix_loader_name(self, env): env = Environment(loader=PrefixLoader({"foo": DictLoader({})})) with pytest.raises(TemplateNotFound) as e: env.get_template("foo/bar.html") assert e.value.name == "foo/bar.html" def test_pass_context_callable_class(self, env): class CallableClass: @pass_context def __call__(self, ctx): return ctx.resolve("hello") tpl = Template("""{{ callableclass() }}""") output = tpl.render(callableclass=CallableClass(), hello="TEST") expected = "TEST" assert output == expected def test_block_set_with_extends(self): env = Environment( loader=DictLoader({"main": "{% block body %}[{{ x }}]{% endblock %}"}) ) t = env.from_string('{% extends "main" %}{% set x %}42{% endset %}') assert t.render() == "[42]" def test_nested_for_else(self, env): tmpl = env.from_string( "{% for x in y %}{{ loop.index0 }}{% else %}" "{% for i in range(3) %}{{ i }}{% endfor %}" "{% endfor %}" ) assert tmpl.render() == "012" def test_macro_var_bug(self, env): tmpl = env.from_string( """ {% set i = 1 %} {% macro test() %} {% for i in range(0, 10) %}{{ i }}{% endfor %} {% endmacro %}{{ test() }} """ ) assert tmpl.render().strip() == "0123456789" def test_macro_var_bug_advanced(self, env): tmpl = env.from_string( """ {% macro outer() %} {% set i = 1 %} {% macro test() %} {% for i in range(0, 10) %}{{ i }}{% endfor %} 
{% endmacro %}{{ test() }} {% endmacro %}{{ outer() }} """ ) assert tmpl.render().strip() == "0123456789" def test_callable_defaults(self): env = Environment() env.globals["get_int"] = lambda: 42 t = env.from_string( """ {% macro test(a, b, c=get_int()) -%} {{ a + b + c }} {%- endmacro %} {{ test(1, 2) }}|{{ test(1, 2, 3) }} """ ) assert t.render().strip() == "45|6" def test_macro_escaping(self): env = Environment(autoescape=lambda x: False) template = "{% macro m() %}<html>{% endmacro %}" template += "{% autoescape true %}{{ m() }}{% endautoescape %}" assert env.from_string(template).render() def test_macro_scoping(self, env): tmpl = env.from_string( """ {% set n=[1,2,3,4,5] %} {% for n in [[1,2,3], [3,4,5], [5,6,7]] %} {% macro x(l) %} {{ l.pop() }} {% if l %}{{ x(l) }}{% endif %} {% endmacro %} {{ x(n) }} {% endfor %} """ ) assert list(map(int, tmpl.render().split())) == [3, 2, 1, 5, 4, 3, 7, 6, 5] def test_scopes_and_blocks(self): env = Environment( loader=DictLoader( { "a.html": """ {%- set foo = 'bar' -%} {% include 'x.html' -%} """, "b.html": """ {%- set foo = 'bar' -%} {% block test %}{% include 'x.html' %}{% endblock -%} """, "c.html": """ {%- set foo = 'bar' -%} {% block test %}{% set foo = foo %}{% include 'x.html' %}{% endblock -%} """, "x.html": """{{ foo }}|{{ test }}""", } ) ) a = env.get_template("a.html") b = env.get_template("b.html") c = env.get_template("c.html") assert a.render(test="x").strip() == "bar|x" assert b.render(test="x").strip() == "bar|x" assert c.render(test="x").strip() == "bar|x" def test_scopes_and_include(self): env = Environment( loader=DictLoader( { "include.html": "{{ var }}", "base.html": '{% include "include.html" %}', "child.html": '{% extends "base.html" %}{% set var = 42 %}', } ) ) t = env.get_template("child.html") assert t.render() == "42" def test_caller_scoping(self, env): t = env.from_string( """ {% macro detail(icon, value) -%} {% if value -%} <p><span class="fa fa-fw fa-{{ icon }}"></span> {%- if caller is undefined -%} {{ value }} {%- else -%} {{ caller(value, *varargs) }} {%- endif -%}</p> {%- endif %} {%- endmacro %} {% macro link_detail(icon, value, href) -%} {% call(value, href) detail(icon, value, href) -%} <a href="{{ href }}">{{ value }}</a> {%- endcall %} {%- endmacro %} """ ) assert t.module.link_detail("circle", "Index", "/") == ( '<p><span class="fa fa-fw fa-circle"></span><a href="/">Index</a></p>' ) def test_variable_reuse(self, env): t = env.from_string("{% for x in x.y %}{{ x }}{% endfor %}") assert t.render(x={"y": [0, 1, 2]}) == "012" t = env.from_string("{% for x in x.y %}{{ loop.index0 }}|{{ x }}{% endfor %}") assert t.render(x={"y": [0, 1, 2]}) == "0|01|12|2" t = env.from_string("{% for x in x.y recursive %}{{ x }}{% endfor %}") assert t.render(x={"y": [0, 1, 2]}) == "012" def test_double_caller(self, env): t = env.from_string( "{% macro x(caller=none) %}[{% if caller %}" "{{ caller() }}{% endif %}]{% endmacro %}" "{{ x() }}{% call x() %}aha!{% endcall %}" ) assert t.render() == "[][aha!]" def test_double_caller_no_default(self, env): with pytest.raises(TemplateAssertionError) as exc_info: env.from_string( "{% macro x(caller) %}[{% if caller %}" "{{ caller() }}{% endif %}]{% endmacro %}" ) assert exc_info.match( r'"caller" argument must be omitted or ' r"be given a default" ) t = env.from_string( "{% macro x(caller=none) %}[{% if caller %}" "{{ caller() }}{% endif %}]{% endmacro %}" ) with pytest.raises(TypeError) as exc_info: t.module.x(None, caller=lambda: 42) assert exc_info.match( r"\'x\' was invoked with two 
values for the " r"special caller argument" ) def test_macro_blocks(self, env): t = env.from_string( "{% macro x() %}{% block foo %}x{% endblock %}{% endmacro %}{{ x() }}" ) assert t.render() == "x" def test_scoped_block(self, env): t = env.from_string( "{% set x = 1 %}{% with x = 2 %}{% block y scoped %}" "{{ x }}{% endblock %}{% endwith %}" ) assert t.render() == "2" def test_recursive_loop_filter(self, env): t = env.from_string( """ <?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> {%- for page in [site.root] if page.url != this recursive %} <url><loc>{{ page.url }}</loc></url> {{- loop(page.children) }} {%- endfor %} </urlset> """ ) sm = t.render( this="/foo", site={"root": {"url": "/", "children": [{"url": "/foo"}, {"url": "/bar"}]}}, ) lines = [x.strip() for x in sm.splitlines() if x.strip()] assert lines == [ '<?xml version="1.0" encoding="UTF-8"?>', '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">', "<url><loc>/</loc></url>", "<url><loc>/bar</loc></url>", "</urlset>", ] def test_empty_if(self, env): t = env.from_string("{% if foo %}{% else %}42{% endif %}") assert t.render(foo=False) == "42" def test_subproperty_if(self, env): t = env.from_string( "{% if object1.subproperty1 is eq object2.subproperty2 %}42{% endif %}" ) assert ( t.render( object1={"subproperty1": "value"}, object2={"subproperty2": "value"} ) == "42" ) def test_set_and_include(self): env = Environment( loader=DictLoader( { "inc": "bar", "main": '{% set foo = "foo" %}{{ foo }}{% include "inc" %}', } ) ) assert env.get_template("main").render() == "foobar" def test_loop_include(self): env = Environment( loader=DictLoader( { "inc": "{{ i }}", "main": '{% for i in [1, 2, 3] %}{% include "inc" %}{% endfor %}', } ) ) assert env.get_template("main").render() == "123" def test_grouper_repr(self): from jinja2.filters import _GroupTuple t = _GroupTuple("foo", [1, 2]) assert t.grouper == "foo" assert t.list == [1, 2] assert repr(t) == "('foo', [1, 2])" assert str(t) == "('foo', [1, 2])" def test_custom_context(self, env): from jinja2.runtime import Context class MyContext(Context): pass class MyEnvironment(Environment): context_class = MyContext loader = DictLoader({"base": "{{ foobar }}", "test": '{% extends "base" %}'}) env = MyEnvironment(loader=loader) assert env.get_template("test").render(foobar="test") == "test" def test_legacy_custom_context(self, env): from jinja2.runtime import Context, missing with pytest.deprecated_call(): class MyContext(Context): def resolve(self, name): if name == "foo": return 42 return super().resolve(name) x = MyContext(env, parent={"bar": 23}, name="foo", blocks={}) assert x._legacy_resolve_mode assert x.resolve_or_missing("foo") == 42 assert x.resolve_or_missing("bar") == 23 assert x.resolve_or_missing("baz") is missing def test_recursive_loop_bug(self, env): tmpl = env.from_string( "{%- for value in values recursive %}1{% else %}0{% endfor -%}" ) assert tmpl.render(values=[]) == "0" def test_markup_and_chainable_undefined(self): from markupsafe import Markup from jinja2.runtime import ChainableUndefined assert str(Markup(ChainableUndefined())) == "" def test_scoped_block_loop_vars(self, env): tmpl = env.from_string( """\ Start {% for i in ["foo", "bar"] -%} {% block body scoped -%} {{ loop.index }}) {{ i }}{% if loop.last %} last{% endif -%} {%- endblock %} {% endfor -%} End""" ) assert tmpl.render() == "Start\n1) foo\n2) bar last\nEnd" def test_pass_context_loop_vars(self, env): @pass_context def test(ctx): return 
f"{ctx['i']}{ctx['j']}" tmpl = env.from_string( """\ {% set i = 42 %} {%- for idx in range(2) -%} {{ i }}{{ j }} {% set i = idx -%} {%- set j = loop.index -%} {{ test() }} {{ i }}{{ j }} {% endfor -%} {{ i }}{{ j }}""" ) tmpl.globals["test"] = test assert tmpl.render() == "42\n01\n01\n42\n12\n12\n42" def test_pass_context_scoped_loop_vars(self, env): @pass_context def test(ctx): return f"{ctx['i']}" tmpl = env.from_string( """\ {% set i = 42 %} {%- for idx in range(2) -%} {{ i }} {%- set i = loop.index0 -%} {% block body scoped %} {{ test() }} {% endblock -%} {% endfor -%} {{ i }}""" ) tmpl.globals["test"] = test assert tmpl.render() == "42\n0\n42\n1\n42" def test_pass_context_in_blocks(self, env): @pass_context def test(ctx): return f"{ctx['i']}" tmpl = env.from_string( """\ {%- set i = 42 -%} {{ i }} {% block body -%} {% set i = 24 -%} {{ test() }} {% endblock -%} {{ i }}""" ) tmpl.globals["test"] = test assert tmpl.render() == "42\n24\n42" def test_pass_context_block_and_loop(self, env): @pass_context def test(ctx): return f"{ctx['i']}" tmpl = env.from_string( """\ {%- set i = 42 -%} {% for idx in range(2) -%} {{ test() }} {%- set i = idx -%} {% block body scoped %} {{ test() }} {% set i = 24 -%} {{ test() }} {% endblock -%} {{ test() }} {% endfor -%} {{ test() }}""" ) tmpl.globals["test"] = test # values set within a block or loop should not # show up outside of it assert tmpl.render() == "42\n0\n24\n0\n42\n1\n24\n1\n42" @pytest.mark.parametrize("op", ["extends", "include"]) def test_cached_extends(self, op): env = Environment( loader=DictLoader( {"base": "{{ x }} {{ y }}", "main": f"{{% {op} 'base' %}}"} ) ) env.globals["x"] = "x" env.globals["y"] = "y" # template globals overlay env globals tmpl = env.get_template("main", globals={"x": "bar"}) assert tmpl.render() == "bar y" # base was loaded indirectly, it just has env globals tmpl = env.get_template("base") assert tmpl.render() == "x y" # set template globals for base, no longer uses env globals tmpl = env.get_template("base", globals={"x": 42}) assert tmpl.render() == "42 y" # templates are cached, they keep template globals set earlier tmpl = env.get_template("main") assert tmpl.render() == "bar y" tmpl = env.get_template("base") assert tmpl.render() == "42 y" def test_nested_loop_scoping(self, env): tmpl = env.from_string( "{% set output %}{% for x in [1,2,3] %}hello{% endfor %}" "{% endset %}{{ output }}" ) assert tmpl.render() == "hellohellohello" @pytest.mark.parametrize("unicode_char", ["\N{FORM FEED}", "\x85"]) def test_unicode_whitespace(env, unicode_char): content = "Lorem ipsum\n" + unicode_char + "\nMore text" tmpl = env.from_string(content) assert tmpl.render() == content
# # Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # """ This file contains the copy database operation which ensures a database is exactly the same among two servers. """ import sys from mysql.utilities.exception import UtilError from mysql.utilities.common.database import Database from mysql.utilities.common.options import check_engine_options from mysql.utilities.common.server import connect_servers from mysql.utilities.command.dbexport import (get_change_master_command, get_copy_lock, get_gtid_commands) _RPL_COMMANDS, _RPL_FILE = 0, 1 _GTID_WARNING = ("# WARNING: The server supports GTIDs but you have elected " "to skip exexcuting the GTID_EXECUTED statement. Please " "refer to the MySQL online reference manual for more " "information about how to handle GTID enabled servers with " "backup and restore operations.") _GTID_BACKUP_WARNING = ("# WARNING: A partial copy from a server that has " "GTIDs enabled will by default include the GTIDs of " "all transactions, even those that changed suppressed " "parts of the database. If you don't want to generate " "the GTID statement, use the --skip-gtid option. To " "export all databases, use the --all option and do " "not specify a list of databases.") _NON_GTID_WARNING = ("# WARNING: The %s server does not support GTIDs yet the " "%s server does support GTIDs. To suppress this warning, " "use the --skip-gtid option when copying %s a non-GTID " "enabled server.") def _copy_objects(source, destination, db_list, options, show_message=True, do_create=True): """Copy objects for a list of databases This method loops through a list of databases copying the objects as controlled by the skip options. source[in] Server class instance for source destination[in] Server class instance for destination options[in] copy options show_message[in] if True, display copy message Default = True do_create[in] if True, execute create statement for database Default = True """ # Copy objects for db_name in db_list: if show_message: # Display copy message if not options.get('quiet', False): msg = "# Copying database %s " % db_name[0] if db_name[1]: msg += "renamed as %s" % (db_name[1]) print msg # Get a Database class instance db = Database(source, db_name[0], options) # Perform the copy db.init() db.copy_objects(db_name[1], options, destination, options.get("threads", False), do_create) def multiprocess_db_copy_task(copy_db_task): """Multiprocess copy database method. This method wraps the copy_db method to allow its concurrent execution by a pool of processes. copy_db_task[in] dictionary of values required by a process to perform the database copy task, namely: {'source_srv': <dict with source connect values>, 'dest_srv': <dict with destination connect values>, 'db_list': <list of databases to copy>, 'options': <dict of options>, } """ # Get input values to execute task. 
source_srv = copy_db_task.get('source_srv') dest_srv = copy_db_task.get('dest_srv') db_list = copy_db_task.get('db_list') options = copy_db_task.get('options') # Execute copy databases task. # NOTE: Must handle any exception here, because worker processes will not # propagate them to the main process. try: copy_db(source_srv, dest_srv, db_list, options) except UtilError: _, err, _ = sys.exc_info() print("ERROR: {0}".format(err.errmsg)) def copy_db(src_val, dest_val, db_list, options): """Copy a database This method will copy a database and all of its objects and data from one server (source) to another (destination). Options are available to selectively ignore each type of object. The do_drop parameter is used to permit the copy to overwrite an existing destination database (default is to not overwrite). src_val[in] a dictionary containing connection information for the source including: (user, password, host, port, socket) dest_val[in] a dictionary containing connection information for the destination including: (user, password, host, port, socket) options[in] a dictionary containing the options for the copy: (skip_tables, skip_views, skip_triggers, skip_procs, skip_funcs, skip_events, skip_grants, skip_create, skip_data, verbose, do_drop, quiet, connections, debug, exclude_names, exclude_patterns) Notes: do_drop - if True, the database on the destination will be dropped if it exists (default is False) quiet - do not print any information during operation (default is False) Returns bool True = success, False = error """ verbose = options.get("verbose", False) quiet = options.get("quiet", False) do_drop = options.get("do_drop", False) skip_views = options.get("skip_views", False) skip_procs = options.get("skip_procs", False) skip_funcs = options.get("skip_funcs", False) skip_events = options.get("skip_events", False) skip_grants = options.get("skip_grants", False) skip_data = options.get("skip_data", False) skip_triggers = options.get("skip_triggers", False) skip_tables = options.get("skip_tables", False) skip_gtid = options.get("skip_gtid", False) locking = options.get("locking", "snapshot") conn_options = { 'quiet': quiet, 'version': "5.1.30", } servers = connect_servers(src_val, dest_val, conn_options) cloning = (src_val == dest_val) or dest_val is None source = servers[0] if cloning: destination = servers[0] else: destination = servers[1] src_gtid = source.supports_gtid() == 'ON' dest_gtid = destination.supports_gtid() == 'ON'if destination else False # Get list of all databases from source if --all is specified. # Ignore system databases. if options.get("all", False): # The --all option is valid only if not cloning. if not cloning: if not quiet: print "# Including all databases." 
rows = source.get_all_databases() for row in rows: db_list.append((row[0], None)) # Keep same name else: raise UtilError("Cannot copy all databases on the same server.") elif not skip_gtid and src_gtid: # Check to see if this is a full copy (complete backup) all_dbs = source.exec_query("SHOW DATABASES") dbs = [db[0] for db in db_list] for db in all_dbs: if db[0].upper() in ["MYSQL", "INFORMATION_SCHEMA", "PERFORMANCE_SCHEMA"]: continue if not db[0] in dbs: print _GTID_BACKUP_WARNING break # Do error checking and preliminary work: # - Check user permissions on source and destination for all databases # - Check to see if executing on same server but same db name (error) # - Build list of tables to lock for copying data (if no skipping data) # - Check storage engine compatibility for db_name in db_list: source_db = Database(source, db_name[0]) if destination is None: destination = source if db_name[1] is None: db = db_name[0] else: db = db_name[1] dest_db = Database(destination, db) # Make a dictionary of the options access_options = { 'skip_views': skip_views, 'skip_procs': skip_procs, 'skip_funcs': skip_funcs, 'skip_grants': skip_grants, 'skip_events': skip_events, 'skip_triggers': skip_triggers, } source_db.check_read_access(src_val["user"], src_val["host"], access_options) # Make a dictionary containing the list of objects from source db source_objects = { "views": source_db.get_db_objects("VIEW", columns="full"), "procs": source_db.get_db_objects("PROCEDURE", columns="full"), "funcs": source_db.get_db_objects("FUNCTION", columns="full"), "events": source_db.get_db_objects("EVENT", columns="full"), "triggers": source_db.get_db_objects("TRIGGER", columns="full"), } dest_db.check_write_access(dest_val['user'], dest_val['host'], access_options, source_objects, do_drop) # Error is source db and destination db are the same and we're cloning if destination == source and db_name[0] == db_name[1]: raise UtilError("Destination database name is same as " "source - source = %s, destination = %s" % (db_name[0], db_name[1])) # Error is source database does not exist if not source_db.exists(): raise UtilError("Source database does not exist - %s" % db_name[0]) # Check storage engines check_engine_options(destination, options.get("new_engine", None), options.get("def_engine", None), False, options.get("quiet", False)) # Get replication commands if rpl_mode specified. 
# if --rpl specified, dump replication initial commands rpl_info = None # Turn off foreign keys if they were on at the start destination.disable_foreign_key_checks(True) # Get GTID commands if not skip_gtid: gtid_info = get_gtid_commands(source) if src_gtid and not dest_gtid: print _NON_GTID_WARNING % ("destination", "source", "to") elif not src_gtid and dest_gtid: print _NON_GTID_WARNING % ("source", "destination", "from") else: gtid_info = None if src_gtid and not cloning: print _GTID_WARNING # If cloning, turn off gtid generation if gtid_info and cloning: gtid_info = None # if GTIDs enabled, write the GTID commands if gtid_info and dest_gtid: # Check GTID version for complete feature support destination.check_gtid_version() # Check the gtid_purged value too destination.check_gtid_executed() for cmd in gtid_info[0]: print "# GTID operation:", cmd destination.exec_query(cmd, {'fetch': False, 'commit': False}) if options.get("rpl_mode", None): new_opts = options.copy() new_opts['multiline'] = False new_opts['strict'] = True rpl_info = get_change_master_command(src_val, new_opts) destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False}) # Copy (create) objects. # We need to delay trigger and events to after data is loaded new_opts = options.copy() new_opts['skip_triggers'] = True new_opts['skip_events'] = True # Get the table locks unless we are cloning with lock-all if not (cloning and locking == 'lock-all'): my_lock = get_copy_lock(source, db_list, options, True) _copy_objects(source, destination, db_list, new_opts) # If we are cloning, take the write locks prior to copying data if cloning and locking == 'lock-all': my_lock = get_copy_lock(source, db_list, options, True, cloning) # Copy tables data if not skip_data and not skip_tables: # Copy tables for db_name in db_list: # Get a Database class instance db = Database(source, db_name[0], options) # Perform the copy # Note: No longer use threads, use multiprocessing instead. db.init() db.copy_data(db_name[1], options, destination, connections=1, src_con_val=src_val, dest_con_val=dest_val) # if cloning with lock-all unlock here to avoid system table lock conflicts if cloning and locking == 'lock-all': my_lock.unlock() # Create triggers for all databases if not skip_triggers: new_opts = options.copy() new_opts['skip_tables'] = True new_opts['skip_views'] = True new_opts['skip_procs'] = True new_opts['skip_funcs'] = True new_opts['skip_events'] = True new_opts['skip_grants'] = True new_opts['skip_create'] = True _copy_objects(source, destination, db_list, new_opts, False, False) # Create events for all databases if not skip_events: new_opts = options.copy() new_opts['skip_tables'] = True new_opts['skip_views'] = True new_opts['skip_procs'] = True new_opts['skip_funcs'] = True new_opts['skip_triggers'] = True new_opts['skip_grants'] = True new_opts['skip_create'] = True _copy_objects(source, destination, db_list, new_opts, False, False) if not (cloning and locking == 'lock-all'): my_lock.unlock() # if GTIDs enabled, write the GTID-related commands if gtid_info and dest_gtid: print "# GTID operation:", gtid_info[1] destination.exec_query(gtid_info[1]) if options.get("rpl_mode", None): for cmd in rpl_info[_RPL_COMMANDS]: if cmd[0] == '#' and not quiet: print cmd else: if verbose: print cmd destination.exec_query(cmd) destination.exec_query("START SLAVE;") # Turn on foreign keys if they were on at the start destination.disable_foreign_key_checks(False) if not quiet: print "#...done." return True
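# Call sketch (illustrative; server names, credentials and option values are
# hypothetical, not part of the utility). It shows the shape of the arguments
# copy_db() expects: two connection dictionaries, a list of
# (source_db, new_name_or_None) tuples, and an options dictionary. Connection
# keys beyond the ones read explicitly above follow whatever
# connect_servers() accepts.
def _example_copy():
    src_val = {'user': 'root', 'host': 'prod-db', 'port': 3306}
    dest_val = {'user': 'root', 'host': 'staging-db', 'port': 3306}
    db_list = [('sales', None), ('hr', 'hr_archive')]
    options = {'do_drop': True, 'skip_data': False, 'quiet': True}
    return copy_db(src_val, dest_val, db_list, options)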
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import netaddr import six from rally.benchmark import utils as bench_utils from rally.common.i18n import _ from rally.common import log as logging from rally.common import utils from rally import consts from rally import exceptions from neutronclient.common import exceptions as neutron_exceptions from novaclient import exceptions as nova_exceptions LOG = logging.getLogger(__name__) cidr_incr = utils.RAMInt() def generate_cidr(start_cidr="10.2.0.0/24"): """Generate next CIDR for network or subnet, without IP overlapping. This is process and thread safe, because `cidr_incr' points to value stored directly in RAM. This guarantees that CIDRs will be serial and unique even under hard multiprocessing/threading load. :param start_cidr: start CIDR str :returns: next available CIDR str """ cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr))) LOG.debug("CIDR generated: %s" % cidr) return cidr class NetworkWrapperException(exceptions.RallyException): msg_fmt = _("%(message)s") @six.add_metaclass(abc.ABCMeta) class NetworkWrapper(object): """Base class for network service implementations. We actually have two network services implementations, with different API: NovaNetwork and Neutron. The idea is (at least to try) to use unified service, which hides most differences and routines behind the scenes. This allows to significantly re-use and simplify code. """ START_CIDR = "10.2.0.0/24" SERVICE_IMPL = None def __init__(self, clients, config=None): if hasattr(clients, self.SERVICE_IMPL): self.client = getattr(clients, self.SERVICE_IMPL)() else: self.client = clients(self.SERVICE_IMPL) self.config = config or {} self.start_cidr = self.config.get("start_cidr", self.START_CIDR) @abc.abstractmethod def create_network(self): """Create network.""" @abc.abstractmethod def delete_network(self): """Delete network.""" @abc.abstractmethod def list_networks(self): """List networks.""" @abc.abstractmethod def create_floating_ip(self): """Create floating IP.""" @abc.abstractmethod def delete_floating_ip(self): """Delete floating IP.""" @abc.abstractmethod def supports_security_group(self): """Checks whether security group is supported.""" class NovaNetworkWrapper(NetworkWrapper): SERVICE_IMPL = consts.Service.NOVA def __init__(self, *args): super(NovaNetworkWrapper, self).__init__(*args) self.skip_cidrs = [n.cidr for n in self.client.networks.list()] def _generate_cidr(self): cidr = generate_cidr(start_cidr=self.start_cidr) while cidr in self.skip_cidrs: cidr = generate_cidr(start_cidr=self.start_cidr) return cidr def create_network(self, tenant_id, **kwargs): """Create network. 
:param tenant_id: str, tenant ID :param **kwargs: for compatibility, not used here :returns: dict, network data """ cidr = self._generate_cidr() label = utils.generate_random_name("rally_net_") network = self.client.networks.create( tenant_id=tenant_id, cidr=cidr, label=label) return {"id": network.id, "cidr": network.cidr, "name": network.label, "status": "ACTIVE", "external": False, "tenant_id": tenant_id} def delete_network(self, network): return self.client.networks.delete(network["id"]) def list_networks(self): return self.client.networks.list() def create_floating_ip(self, ext_network=None, **kwargs): """Allocate a floating ip from the given nova-network pool :param ext_network: name or external network, str :param **kwargs: for compatibility, not used here :returns: floating IP dict """ if not ext_network: try: ext_network = self.client.floating_ip_pools.list()[0].name except IndexError: raise NetworkWrapperException("No floating IP pools found") fip = self.client.floating_ips.create(ext_network) return {"id": fip.id, "ip": fip.ip} def _get_floating_ip(self, fip_id, do_raise=False): try: fip = self.client.floating_ips.get(fip_id) except nova_exceptions.NotFound: if not do_raise: return None raise exceptions.GetResourceNotFound( resource="Floating IP %s" % fip_id) return fip.id def delete_floating_ip(self, fip_id, wait=False): """Delete floating IP. :param fip_id: int floating IP id :param wait: if True then wait to return until floating ip is deleted """ self.client.floating_ips.delete(fip_id) if not wait: return bench_utils.wait_for_delete( fip_id, update_resource=lambda i: self._get_floating_ip(i, do_raise=True)) def supports_security_group(self): """Check whether security group is supported :return: result tuple. Always (True, "") for nova-network. :rtype: (bool, string) """ return True, "" class NeutronWrapper(NetworkWrapper): SERVICE_IMPL = consts.Service.NEUTRON SUBNET_IP_VERSION = 4 @property def external_networks(self): return self.client.list_networks(**{ "router:external": True})["networks"] def get_network(self, net_id=None, name=None): net = None try: if net_id: net = self.client.show_network(net_id)["network"] else: for net in self.client.list_networks(name=name)["networks"]: break return {"id": net["id"], "name": net["name"], "tenant_id": net["tenant_id"], "status": net["status"], "external": net["router:external"], "subnets": net["subnets"], "router_id": None} except (TypeError, neutron_exceptions.NeutronClientException): raise NetworkWrapperException( "Network not found: %s" % (name or net_id)) def create_router(self, external=False, **kwargs): """Create neutron router. :param external: bool, whether to set setup external_gateway_info :param **kwargs: POST /v2.0/routers request options :returns: neutron router dict """ if "name" not in kwargs: kwargs["name"] = utils.generate_random_name("rally_router_") if external and "external_gateway_info" not in kwargs: for net in self.external_networks: kwargs["external_gateway_info"] = { "network_id": net["id"], "enable_snat": True} return self.client.create_router({"router": kwargs})["router"] def _generate_cidr(self): # TODO(amaretskiy): Generate CIDRs unique for network, not cluster return generate_cidr(start_cidr=self.start_cidr) def create_network(self, tenant_id, **kwargs): """Create network. 
:param tenant_id: str, tenant ID :param **kwargs: extra options :returns: dict, network data """ network_args = { "network": { "tenant_id": tenant_id, "name": utils.generate_random_name("rally_net_") } } network = self.client.create_network(network_args)["network"] router = None if kwargs.get("add_router", False): router = self.create_router(external=True, tenant_id=tenant_id) subnets = [] subnets_num = kwargs.get("subnets_num", 0) for i in range(subnets_num): subnet_args = { "subnet": { "tenant_id": tenant_id, "network_id": network["id"], "name": utils.generate_random_name("rally_subnet_"), "ip_version": self.SUBNET_IP_VERSION, "cidr": self._generate_cidr(), "enable_dhcp": True, "dns_nameservers": kwargs.get("dns_nameservers", ["8.8.8.8", "8.8.4.4"]) } } subnet = self.client.create_subnet(subnet_args)["subnet"] subnets.append(subnet["id"]) if router: self.client.add_interface_router(router["id"], {"subnet_id": subnet["id"]}) return {"id": network["id"], "name": network["name"], "status": network["status"], "subnets": subnets, "external": network.get("router:external", False), "router_id": router and router["id"] or None, "tenant_id": tenant_id} def delete_network(self, network): net_dhcps = self.client.list_dhcp_agent_hosting_networks( network["id"])["agents"] for net_dhcp in net_dhcps: self.client.remove_network_from_dhcp_agent(net_dhcp["id"], network["id"]) router_id = network["router_id"] if router_id: self.client.remove_gateway_router(router_id) for subnet_id in network["subnets"]: self.client.remove_interface_router(router_id, {"subnet_id": subnet_id}) self.client.delete_router(router_id) for port in self.client.list_ports(network_id=network["id"])["ports"]: self.client.delete_port(port["id"]) for subnet_id in network["subnets"]: self._delete_subnet(subnet_id) return self.client.delete_network(network["id"]) def _delete_subnet(self, subnet_id): self.client.delete_subnet(subnet_id) def list_networks(self): return self.client.list_networks()["networks"] def create_port(self, network_id, **kwargs): """Create neutron port. :param network_id: neutron network id :param **kwargs: POST /v2.0/ports request options :returns: neutron port dict """ kwargs["network_id"] = network_id if "name" not in kwargs: kwargs["name"] = utils.generate_random_name("rally_port_") return self.client.create_port({"port": kwargs})["port"] def create_floating_ip(self, ext_network=None, int_network=None, tenant_id=None, port_id=None, **kwargs): """Create Neutron floating IP. 
:param ext_network: floating network name or dict :param int_network: fixed network name or dict :param tenant_id str tenant id :param port_id: str port id :param **kwargs: for compatibility, not used here :returns: floating IP dict """ if not tenant_id: raise ValueError("Missed tenant_id") net_id = None if type(ext_network) is dict: net_id = ext_network["id"] elif ext_network: ext_net = self.get_network(name=ext_network) if not ext_net["external"]: raise NetworkWrapperException("Network is not external: %s" % ext_network) net_id = ext_net["id"] else: ext_networks = self.external_networks if not ext_networks: raise NetworkWrapperException( "Failed to allocate floating IP: " "no external networks found") net_id = ext_networks[0]["id"] if not port_id: if type(int_network) is dict: port_id = self.create_port(int_network["id"])["id"] elif int_network: int_net = self.get_network(name=int_network) if int_net["external"]: raise NetworkWrapperException("Network is external: %s" % int_network) port_id = self.create_port(int_net["id"])["id"] kwargs = {"floatingip": {"floating_network_id": net_id}, "tenant_id": tenant_id, "port_id": port_id} fip = self.client.create_floatingip(kwargs)["floatingip"] return {"id": fip["id"], "ip": fip["floating_ip_address"]} def delete_floating_ip(self, fip_id, **kwargs): """Delete floating IP. :param fip_id: int floating IP id :param **kwargs: for compatibility, not used here """ self.client.delete_floatingip(fip_id) def supports_security_group(self): """Check whether security group is supported :return: result tuple :rtype: (bool, string) """ extensions = self.client.list_extensions().get("extensions", []) use_sg = any(ext.get("alias") == "security-group" for ext in extensions) if use_sg: return True, "" return False, _("neutron driver does not support security groups") def wrap(clients, config=None): """Returns available network wrapper instance. :param clients: rally.osclients.Clients instance :param config: task config dict :returns: NetworkWrapper subclass instance """ if hasattr(clients, "services"): services = clients.services() else: services = clients("services") if consts.Service.NEUTRON in services.values(): return NeutronWrapper(clients, config) return NovaNetworkWrapper(clients, config)
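# Usage sketch (illustrative): selecting the proper wrapper with wrap() and
# exercising the common operations. "clients" stands for a configured
# rally.osclients.Clients instance; the tenant id and CIDR are made up, and
# the floating-ip step assumes a Neutron deployment with an external network.
def _example(clients):
    net_wrapper = wrap(clients, config={"start_cidr": "10.10.0.0/24"})
    net = net_wrapper.create_network("some-tenant-id",
                                     add_router=True, subnets_num=1)
    fip = net_wrapper.create_floating_ip(tenant_id="some-tenant-id",
                                         int_network=net["name"])
    net_wrapper.delete_floating_ip(fip["id"])
    net_wrapper.delete_network(net)
    return net_wrapper.supports_security_group()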
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file. # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. import os import numpy as np import numpy.testing as nt from ..mpi import MPI from .mpi import MPITestCase from ..tod import sim_focalplane as sfp def generate_hex(npix, width, poltype, fwhm): if poltype == "qu": pol_a = sfp.hex_pol_angles_qu(npix) pol_b = sfp.hex_pol_angles_qu(npix, offset=90.0) elif poltype == "radial": pol_a = sfp.hex_pol_angles_radial(npix) pol_b = sfp.hex_pol_angles_radial(npix, offset=90.0) dets_a = sfp.hex_layout(npix, width, "", "A", pol_a) dets_b = sfp.hex_layout(npix, width, "", "B", pol_b) dets = dict() dets.update(dets_a) dets.update(dets_b) # Pol color different for A/B detectors detpolcolor = dict() detpolcolor.update({ x : "red" for x in dets_a.keys() }) detpolcolor.update({ x : "blue" for x in dets_b.keys() }) # set the label to just the detector name detlabels = { x : x for x in dets.keys() } # fwhm and face color the same detfwhm = { x : fwhm for x in dets.keys() } # cycle through some colors just for fun pclr = [ (1.0, 0.0, 0.0, 0.1), (1.0, 0.5, 0.0, 0.1), (0.25, 0.5, 1.0, 0.1), (0.0, 0.75, 0.0, 0.1) ] detcolor = { y : pclr[(x // 2) % 4] for x, y in \ enumerate(sorted(dets.keys())) } # split out quaternions for plotting detquats = { x : dets[x]["quat"] for x in dets.keys() } return dets, detquats, detfwhm, detcolor, detpolcolor, detlabels def generate_rhombus(npix, width, fwhm, prefix, center): pol_a = sfp.rhomb_pol_angles_qu(npix) pol_b = sfp.rhomb_pol_angles_qu(npix, offset=90.0) dets_a = sfp.rhombus_layout(npix, width, prefix, "A", pol_a, center=center) dets_b = sfp.rhombus_layout(npix, width, prefix, "B", pol_b, center=center) dets = dict() dets.update(dets_a) dets.update(dets_b) # Pol color different for A/B detectors detpolcolor = dict() detpolcolor.update({ x : "red" for x in dets_a.keys() }) detpolcolor.update({ x : "blue" for x in dets_b.keys() }) # set the label to just the detector name detlabels = { x : x for x in dets.keys() } # fwhm and face color the same detfwhm = { x : fwhm for x in dets.keys() } # cycle through some colors just for fun pclr = [ (1.0, 0.0, 0.0, 0.1), (1.0, 0.5, 0.0, 0.1), (0.25, 0.5, 1.0, 0.1), (0.0, 0.75, 0.0, 0.1) ] detcolor = { y : pclr[(x // 2) % 4] for x, y in \ enumerate(sorted(dets.keys())) } # split out quaternions for plotting detquats = { x : dets[x]["quat"] for x in dets.keys() } return dets, detquats, detfwhm, detcolor, detpolcolor, detlabels class SimFocalplaneTest(MPITestCase): def setUp(self): self.outdir = "toast_test_output" if self.comm.rank == 0: if not os.path.isdir(self.outdir): os.mkdir(self.outdir) def test_cart_quat(self): xincr = np.linspace(-5.0, 5.0, num=10, endpoint=True) yincr = np.linspace(-5.0, 5.0, num=10, endpoint=True) offsets = list() for x in xincr: for y in yincr: ang = 3.6 * (x - xincr[0]) * (y - yincr[0]) offsets.append([x, y, ang]) quats = sfp.cartesian_to_quat(offsets) detquats = { "{}".format(x) : y for x, y in enumerate(quats) } fwhm = { x : 30.0 for x in detquats.keys() } outfile = os.path.join(self.outdir, "out_test_cart2quat.png") sfp.plot_focalplane(detquats, 12.0, 12.0, outfile, fwhm=fwhm) return def test_hex_nring(self): result = { 1 : 1, 7 : 2, 19 : 3, 37 : 4, 61 : 5, 91 : 6, 127 : 7, 169 : 8, 217 : 9, 271 : 10, 331 : 11, 397 : 12 } for npix, check in result.items(): test = sfp.hex_nring(npix) nt.assert_equal(test, check) return def test_vis_hex_small(self): dets, detquats, detfwhm, 
detcolor, detpolcolor, detlabels = \ generate_hex(7, 5.0, "qu", 15.0) outfile = os.path.join(self.outdir, "out_test_vis_hex_small.png") sfp.plot_focalplane(detquats, 6.0, 6.0, outfile, fwhm=detfwhm, facecolor=detcolor, polcolor=detpolcolor, labels=detlabels) return def test_vis_hex_small_rad(self): dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = \ generate_hex(7, 5.0, "radial", 15.0) outfile = os.path.join(self.outdir, "out_test_vis_hex_small_rad.png") sfp.plot_focalplane(detquats, 6.0, 6.0, outfile, fwhm=detfwhm, facecolor=detcolor, polcolor=detpolcolor, labels=detlabels) return def test_vis_hex_medium(self): dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = \ generate_hex(91, 5.0, "qu", 10.0) outfile = os.path.join(self.outdir, "out_test_vis_hex_medium.png") sfp.plot_focalplane(detquats, 6.0, 6.0, outfile, fwhm=detfwhm, facecolor=detcolor, polcolor=detpolcolor, labels=detlabels) return def test_vis_hex_large(self): dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = \ generate_hex(217, 5.0, "qu", 5.0) outfile = os.path.join(self.outdir, "out_test_vis_hex_large.png") sfp.plot_focalplane(detquats, 6.0, 6.0, outfile, fwhm=detfwhm, facecolor=detcolor, polcolor=detpolcolor, labels=detlabels) return def test_vis_rhombus(self): sixty = np.pi/3.0 thirty = np.pi/6.0 rtthree = np.sqrt(3.0) rdim = 8 rpix = rdim**2 hexwidth = 5.0 rwidth = hexwidth / rtthree # angular separation of rhombi margin = 0.60 * hexwidth centers = [ np.array([0.5*margin, 0.0, 0.0]), np.array([-0.5*np.cos(sixty)*margin, 0.5*np.sin(sixty)*margin, 120.0]), np.array([-0.5*np.cos(sixty)*margin, -0.5*np.sin(sixty)*margin, 240.0]) ] cquats = sfp.cartesian_to_quat(centers) dets = dict() detquats = dict() detfwhm = dict() detcolor = dict() detpolcolor = dict() detlabels = dict() for w, c in enumerate(cquats): wdets, wdetquats, wdetfwhm, wdetcolor, wdetpolcolor, wdetlabels = \ generate_rhombus(rpix, rwidth, 7.0, "{}".format(w), c) dets.update(wdets) detquats.update(wdetquats) detfwhm.update(wdetfwhm) detcolor.update(wdetcolor) detpolcolor.update(wdetpolcolor) detlabels.update(wdetlabels) outfile = os.path.join(self.outdir, "out_test_vis_rhombus.png") sfp.plot_focalplane(detquats, 1.2*hexwidth, 1.2*hexwidth, outfile, fwhm=detfwhm, facecolor=detcolor, polcolor=detpolcolor, labels=detlabels) return
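
# Illustrative sketch, not part of the test suite: building and plotting a
# small hexagonal focalplane with the generate_hex() helper above, outside of
# the MPITestCase harness. The call signatures simply mirror the ones
# exercised by the tests; the output directory is a placeholder and plotting
# requires matplotlib to be available.
def example_plot_small_hex(outdir="toast_test_output"):
    """Generate a 19-pixel hex layout (3 rings) and write a plot of it."""
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    dets, detquats, detfwhm, detcolor, detpolcolor, detlabels = \
        generate_hex(19, 5.0, "qu", 12.0)
    outfile = os.path.join(outdir, "example_hex_19.png")
    sfp.plot_focalplane(detquats, 6.0, 6.0, outfile,
                        fwhm=detfwhm, facecolor=detcolor,
                        polcolor=detpolcolor, labels=detlabels)
    return dets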
#!/usr/bin/env python """Standard actions that happen on the client.""" import cStringIO as StringIO import ctypes import gzip import hashlib import os import platform import socket import sys import time import zlib import psutil import logging from grr.client import actions from grr.client import client_utils_common from grr.client import vfs from grr.client.client_actions import tempfiles from grr.lib import config_lib from grr.lib import flags from grr.lib import rdfvalue from grr.lib import utils from grr.lib.rdfvalues import crypto # We do not send larger buffers than this: MAX_BUFFER_SIZE = 640 * 1024 class ReadBuffer(actions.ActionPlugin): """Reads a buffer from a file and returns it to a server callback.""" in_rdfvalue = rdfvalue.BufferReference out_rdfvalue = rdfvalue.BufferReference def Run(self, args): """Reads a buffer on the client and sends it to the server.""" # Make sure we limit the size of our output if args.length > MAX_BUFFER_SIZE: raise RuntimeError("Can not read buffers this large.") try: fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) fd.Seek(args.offset) offset = fd.Tell() data = fd.Read(args.length) except (IOError, OSError), e: self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e) return # Now return the data to the server self.SendReply(offset=offset, data=data, length=len(data), pathspec=fd.pathspec) HASH_CACHE = utils.FastStore(100) class TransferBuffer(actions.ActionPlugin): """Reads a buffer from a file and returns it to the server efficiently.""" in_rdfvalue = rdfvalue.BufferReference out_rdfvalue = rdfvalue.BufferReference def Run(self, args): """Reads a buffer on the client and sends it to the server.""" # Make sure we limit the size of our output if args.length > MAX_BUFFER_SIZE: raise RuntimeError("Can not read buffers this large.") data = vfs.ReadVFS(args.pathspec, args.offset, args.length, progress_callback=self.Progress) result = rdfvalue.DataBlob( data=zlib.compress(data), compression=rdfvalue.DataBlob.CompressionType.ZCOMPRESSION) digest = hashlib.sha256(data).digest() # Ensure that the buffer is counted against this response. Check network # send limit. self.ChargeBytesToSession(len(data)) # Now return the data to the server into the special TransferStore well # known flow. self.grr_worker.SendReply( result, session_id=rdfvalue.SessionID(flow_name="TransferStore")) # Now report the hash of this blob to our flow as well as the offset and # length. self.SendReply(offset=args.offset, length=len(data), data=digest) class HashBuffer(actions.ActionPlugin): """Hash a buffer from a file and returns it to the server efficiently.""" in_rdfvalue = rdfvalue.BufferReference out_rdfvalue = rdfvalue.BufferReference def Run(self, args): """Reads a buffer on the client and sends it to the server.""" # Make sure we limit the size of our output if args.length > MAX_BUFFER_SIZE: raise RuntimeError("Can not read buffers this large.") data = vfs.ReadVFS(args.pathspec, args.offset, args.length) digest = hashlib.sha256(data).digest() # Now report the hash of this blob to our flow as well as the offset and # length. self.SendReply(offset=args.offset, length=len(data), data=digest) class CopyPathToFile(actions.ActionPlugin): """Copy contents of a pathspec to a file on disk.""" in_rdfvalue = rdfvalue.CopyPathToFileRequest out_rdfvalue = rdfvalue.CopyPathToFileRequest BLOCK_SIZE = 10 * 1024 * 1024 def _Copy(self, dest_fd): """Copy from VFS to file until no more data or self.length is reached. 
Args: dest_fd: file object to write to Returns: self.written: bytes written """ while self.written < self.length: to_read = min(self.length - self.written, self.BLOCK_SIZE) data = self.src_fd.read(to_read) if not data: break dest_fd.write(data) self.written += len(data) # Send heartbeats for long files. self.Progress() return self.written def Run(self, args): """Read from a VFS file and write to a GRRTempFile on disk. If file writing doesn't complete files won't be cleaned up. Args: args: see CopyPathToFile in jobs.proto """ self.src_fd = vfs.VFSOpen(args.src_path, progress_callback=self.Progress) self.src_fd.Seek(args.offset) offset = self.src_fd.Tell() self.length = args.length or (1024 ** 4) # 1 TB self.written = 0 suffix = ".gz" if args.gzip_output else "" self.dest_fd = tempfiles.CreateGRRTempFile(directory=args.dest_dir, lifetime=args.lifetime, suffix=suffix) self.dest_file = self.dest_fd.name with self.dest_fd: if args.gzip_output: gzip_fd = gzip.GzipFile(self.dest_file, "wb", 9, self.dest_fd) # Gzip filehandle needs its own close method called with gzip_fd: self._Copy(gzip_fd) else: self._Copy(self.dest_fd) pathspec_out = rdfvalue.PathSpec( path=self.dest_file, pathtype=rdfvalue.PathSpec.PathType.OS) self.SendReply(offset=offset, length=self.written, src_path=args.src_path, dest_dir=args.dest_dir, dest_path=pathspec_out, gzip_output=args.gzip_output) class ListDirectory(ReadBuffer): """Lists all the files in a directory.""" in_rdfvalue = rdfvalue.ListDirRequest out_rdfvalue = rdfvalue.StatEntry def Run(self, args): """Lists a directory.""" try: directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) except (IOError, OSError), e: self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(directory.ListFiles()) files.sort(key=lambda x: x.pathspec.path) for response in files: self.SendReply(response) class IteratedListDirectory(actions.IteratedAction): """Lists a directory as an iterator.""" in_rdfvalue = rdfvalue.ListDirRequest out_rdfvalue = rdfvalue.StatEntry def Iterate(self, request, client_state): """Restores its way through the directory using an Iterator.""" try: fd = vfs.VFSOpen(request.pathspec, progress_callback=self.Progress) except (IOError, OSError), e: self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(fd.ListFiles()) files.sort(key=lambda x: x.pathspec.path) index = client_state.get("index", 0) length = request.iterator.number for response in files[index:index + length]: self.SendReply(response) # Update the state client_state["index"] = index + length class SuspendableListDirectory(actions.SuspendableAction): """Lists a directory as a suspendable client action.""" in_rdfvalue = rdfvalue.ListDirRequest out_rdfvalue = rdfvalue.StatEntry def Iterate(self): try: fd = vfs.VFSOpen(self.request.pathspec, progress_callback=self.Progress) except (IOError, OSError), e: self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e) return length = self.request.iterator.number for group in utils.Grouper(fd.ListFiles(), length): for response in group: self.SendReply(response) self.Suspend() class StatFile(ListDirectory): """Sends a StatResponse for a single file.""" in_rdfvalue = rdfvalue.ListDirRequest out_rdfvalue = rdfvalue.StatEntry def Run(self, args): """Sends a StatResponse for a single file.""" try: fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) res = fd.Stat() self.SendReply(res) except (IOError, OSError), e: self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e) return class 
ExecuteCommand(actions.ActionPlugin): """Executes one of the predefined commands.""" in_rdfvalue = rdfvalue.ExecuteRequest out_rdfvalue = rdfvalue.ExecuteResponse def Run(self, command): """Run.""" cmd = command.cmd args = command.args time_limit = command.time_limit res = client_utils_common.Execute(cmd, args, time_limit) (stdout, stderr, status, time_used) = res # Limit output to 10MB so our response doesn't get too big. stdout = stdout[:10 * 1024 * 1024] stderr = stderr[:10 * 1024 * 1024] result = rdfvalue.ExecuteResponse( request=command, stdout=stdout, stderr=stderr, exit_status=status, # We have to return microseconds. time_used=int(1e6 * time_used)) self.SendReply(result) class ExecuteBinaryCommand(actions.ActionPlugin): """Executes a command from a passed in binary. Obviously this is a dangerous function, it provides for arbitrary code exec by the server running as root/SYSTEM. This is protected by the CONFIG[PrivateKeys.executable_signing_private_key], which should be stored offline and well protected. This method can be utilized as part of an autoupdate mechanism if necessary. NOTE: If the binary is too large to fit inside a single request, the request will have the more_data flag enabled, indicating more data is coming. """ in_rdfvalue = rdfvalue.ExecuteBinaryRequest out_rdfvalue = rdfvalue.ExecuteBinaryResponse suffix = "" def WriteBlobToFile(self, request, suffix=""): """Writes the blob to a file and returns its path.""" lifetime = 0 # Only set the lifetime thread on the last chunk written. if not request.more_data: lifetime = request.time_limit # Keep the file for at least 5 seconds after execution. if lifetime > 0: lifetime += 5 # First chunk truncates the file, later chunks append. if request.offset == 0: mode = "w+b" else: mode = "r+b" temp_file = tempfiles.CreateGRRTempFile(filename=request.write_path, suffix=suffix, mode=mode) with temp_file: path = temp_file.name temp_file.seek(0, 2) if temp_file.tell() != request.offset: raise IOError("Chunks out of order Error.") # Write the new chunk. temp_file.write(request.executable.data) return path def CleanUp(self, path): """Removes the temp file.""" try: if os.path.exists(path): os.remove(path) except (OSError, IOError), e: logging.info("Failed to remove temporary file %s. Err: %s", path, e) def Run(self, args): """Run.""" # Verify the executable blob. args.executable.Verify(config_lib.CONFIG[ "Client.executable_signing_public_key"]) path = self.WriteBlobToFile(args, self.suffix) # Only actually run the file on the last chunk. if not args.more_data: self.ProcessFile(path, args) self.CleanUp(path) def ProcessFile(self, path, args): res = client_utils_common.Execute(path, args.args, args.time_limit, bypass_whitelist=True) (stdout, stderr, status, time_used) = res # Limit output to 10MB so our response doesn't get too big. stdout = stdout[:10 * 1024 * 1024] stderr = stderr[:10 * 1024 * 1024] result = rdfvalue.ExecuteBinaryResponse( stdout=stdout, stderr=stderr, exit_status=status, # We have to return microseconds. time_used=int(1e6 * time_used)) self.SendReply(result) class ExecutePython(actions.ActionPlugin): """Executes python code with exec. Obviously this is a dangerous function, it provides for arbitrary code exec by the server running as root/SYSTEM. This is protected by CONFIG[PrivateKeys.executable_signing_private_key], which should be stored offline and well protected. 
""" in_rdfvalue = rdfvalue.ExecutePythonRequest out_rdfvalue = rdfvalue.ExecutePythonResponse def Run(self, args): """Run.""" time_start = time.time() class StdOutHook(object): def __init__(self, buf): self.buf = buf def write(self, text): self.buf.write(text) args.python_code.Verify(config_lib.CONFIG[ "Client.executable_signing_public_key"]) # The execed code can assign to this variable if it wants to return data. logging.debug("exec for python code %s", args.python_code.data[0:100]) context = globals().copy() context["py_args"] = args.py_args.ToDict() context["magic_return_str"] = "" # Export the Progress function to allow python hacks to call it. context["Progress"] = self.Progress stdout = StringIO.StringIO() with utils.Stubber(sys, "stdout", StdOutHook(stdout)): exec(args.python_code.data, context) # pylint: disable=exec-used stdout_output = stdout.getvalue() magic_str_output = context.get("magic_return_str") if stdout_output and magic_str_output: output = "Stdout: %s\nMagic Str:%s\n" % (stdout_output, magic_str_output) else: output = stdout_output or magic_str_output time_used = time.time() - time_start # We have to return microseconds. result = rdfvalue.ExecutePythonResponse( time_used=int(1e6 * time_used), return_val=utils.SmartStr(output)) self.SendReply(result) class Segfault(actions.ActionPlugin): """This action is just for debugging. It induces a segfault.""" in_rdfvalue = None out_rdfvalue = None def Run(self, unused_args): """Does the segfaulting.""" if flags.FLAGS.debug: logging.warning("Segfault action requested :(") print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents else: logging.warning("Segfault requested but not running in debug mode.") class ListProcesses(actions.ActionPlugin): """This action lists all the processes running on a machine.""" in_rdfvalue = None out_rdfvalue = rdfvalue.Process def Run(self, unused_arg): # psutil will cause an active loop on Windows 2000 if platform.system() == "Windows" and platform.version().startswith("5.0"): raise RuntimeError("ListProcesses not supported on Windows 2000") for proc in psutil.process_iter(): response = rdfvalue.Process() process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"] for field in process_fields: try: value = getattr(proc, field) if value is None: continue if callable(value): value = value() if not isinstance(value, (int, long)): value = utils.SmartUnicode(value) setattr(response, field, value) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError): pass try: for arg in proc.cmdline(): response.cmdline.append(utils.SmartUnicode(arg)) except (psutil.NoSuchProcess, psutil.AccessDenied): pass try: response.nice = proc.nice() except (psutil.NoSuchProcess, psutil.AccessDenied): pass try: # Not available on Windows. if hasattr(proc, "uids"): (response.real_uid, response.effective_uid, response.saved_uid) = proc.uids() (response.real_gid, response.effective_gid, response.saved_gid) = proc.gids() except (psutil.NoSuchProcess, psutil.AccessDenied): pass try: response.ctime = long(proc.create_time() * 1e6) response.status = str(proc.status()) except (psutil.NoSuchProcess, psutil.AccessDenied): pass try: # Not available on OSX. 
if hasattr(proc, "cwd"): response.cwd = utils.SmartUnicode(proc.cwd()) except (psutil.NoSuchProcess, psutil.AccessDenied): pass try: response.num_threads = proc.num_threads() except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError): pass try: (response.user_cpu_time, response.system_cpu_time) = proc.cpu_times() # This is very time consuming so we do not collect cpu_percent here. # response.cpu_percent = proc.get_cpu_percent() except (psutil.NoSuchProcess, psutil.AccessDenied): pass try: response.RSS_size, response.VMS_size = proc.memory_info() response.memory_percent = proc.memory_percent() except (psutil.NoSuchProcess, psutil.AccessDenied): pass # Due to a bug in psutil, this function is disabled for now # (https://github.com/giampaolo/psutil/issues/340) # try: # for f in proc.open_files(): # response.open_files.append(utils.SmartUnicode(f.path)) # except (psutil.NoSuchProcess, psutil.AccessDenied): # pass try: for c in proc.connections(): conn = response.connections.Append(family=c.family, type=c.type, pid=proc.pid) try: conn.state = c.status except ValueError: logging.info("Encountered unknown connection status (%s).", c.status) try: conn.local_address.ip, conn.local_address.port = c.laddr # Could be in state LISTEN. if c.raddr: conn.remote_address.ip, conn.remote_address.port = c.raddr except AttributeError: conn.local_address.ip, conn.local_address.port = c.local_address # Could be in state LISTEN. if c.remote_address: (conn.remote_address.ip, conn.remote_address.port) = c.remote_address except (psutil.NoSuchProcess, psutil.AccessDenied): pass self.SendReply(response) # Reading information here is slow so we heartbeat between processes. self.Progress() class SendFile(actions.ActionPlugin): """This action encrypts and sends a file to a remote listener.""" in_rdfvalue = rdfvalue.SendFileRequest out_rdfvalue = rdfvalue.StatEntry BLOCK_SIZE = 1024 * 1024 * 10 # 10 MB def Send(self, sock, msg): totalsent = 0 n = len(msg) while totalsent < n: sent = sock.send(msg[totalsent:]) if sent == 0: raise RuntimeError("socket connection broken") totalsent += sent def Run(self, args): """Run.""" # Open the file. fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) if args.address_family == rdfvalue.NetworkAddress.Family.INET: family = socket.AF_INET elif args.address_family == rdfvalue.NetworkAddress.Family.INET6: family = socket.AF_INET6 else: raise RuntimeError("Socket address family not supported.") s = socket.socket(family, socket.SOCK_STREAM) try: s.connect((args.host, args.port)) except socket.error as e: raise RuntimeError(str(e)) cipher = crypto.AES128CBCCipher(args.key, args.iv, crypto.Cipher.OP_ENCRYPT) while True: data = fd.read(self.BLOCK_SIZE) if not data: break self.Send(s, cipher.Update(data)) # Send heartbeats for long files. self.Progress() self.Send(s, cipher.Final()) s.close() self.SendReply(fd.Stat()) class StatFS(actions.ActionPlugin): """Call os.statvfs for a given list of paths. OS X and Linux only. Note that a statvfs call for a network filesystem (e.g. NFS) that is unavailable, e.g. due to no network, will result in the call blocking. 
""" in_rdfvalue = rdfvalue.StatFSRequest out_rdfvalue = rdfvalue.Volume def Run(self, args): if platform.system() == "Windows": raise RuntimeError("os.statvfs not available on Windows") for path in args.path_list: try: fd = vfs.VFSOpen(rdfvalue.PathSpec(path=path, pathtype=args.pathtype), progress_callback=self.Progress) st = fd.StatFS() mount_point = fd.GetMountPoint() except (IOError, OSError), e: self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e) continue unix = rdfvalue.UnixVolume(mount_point=mount_point) # On linux pre 2.6 kernels don't have frsize, so we fall back to bsize. # The actual_available_allocation_units attribute is set to blocks # available to the unprivileged user, root may have some additional # reserved space. result = rdfvalue.Volume(bytes_per_sector=(st.f_frsize or st.f_bsize), sectors_per_allocation_unit=1, total_allocation_units=st.f_blocks, actual_available_allocation_units=st.f_bavail, unix=unix) self.SendReply(result)
#!/usr/bin/env python import numpy as np import jsonrpclib from simplejson import loads def get_named_people_from_sen(sen): """ returns a list of annotated words that correspond to named entities in sen. sen must be an annotated sentence dict. each element of the returned list is a list of annotated words. Each list corresponds to a single entity. result looks like: [[[u'Dan', {u'CharacterOffsetBegin': u'0', u'CharacterOffsetEnd': u'3', u'Lemma': u'Dan', u'NamedEntityTag': u'PERSON', u'PartOfSpeech': u'NNP'}], [u'Calacci', {u'CharacterOffsetBegin': u'4', u'CharacterOffsetEnd': u'11', u'Lemma': u'Calacci', u'NamedEntityTag': u'PERSON', u'PartOfSpeech': u'NNP'}]], [[u'Shane', {u'CharacterOffsetBegin': u'18', u'CharacterOffsetEnd': u'23', u'Lemma': u'Shane', u'NamedEntityTag': u'PERSON', u'PartOfSpeech': u'NNP'}], [u'Boissiere', {u'CharacterOffsetBegin': u'24', u'CharacterOffsetEnd': u'33', u'Lemma': u'Boissiere', u'NamedEntityTag': u'PERSON', u'PartOfSpeech': u'NNP'}]]] """ wordlist = sen['words'] entities = [] named = [] for index, word in enumerate(wordlist): if word[1]['NamedEntityTag'] == 'PERSON': named.append(word) try: next = wordlist[index+1] except: named = [] break if next[1]['NamedEntityTag'] != 'PERSON': if named: entities.append(named) named = [] return entities def get_named_people_by_sentence(sen_dict): """ produces a list of named people for each sentence in the annotated sentence dict. each element of the list is a list of annotated words that correspond to named entities, as returned by 'get_named_people_from_sen'. if there are no named people in a sentence, that sentences' entry will be an empty list. """ named = [get_named_people_from_sen(sen) for sen in sen_dict['sentences']] return named def get_named_people(sen_dict): """ produces a list of all named people in the annotated sentence dict. the result is a list of annotated words that correspond to named entities. """ named = [] for sen in sen_dict['sentences']: named.extend(get_named_people_from_sen(sen)) return named def annotated_words_from_ref(ref_info, sen_dict): """ retrieves the annotated word objects from sen_dict that correspond to the coreference information in ref_info. the coreference info must be in the same form that is returned by stanford-corenlp; something that looks like: [u'Dan Calacci', 0, 1, 0, 2] """ sentence_index = ref_info[1] start_word_index = ref_info[3] end_word_index = ref_info[4] return sen_dict['sentences'][sentence_index]['words'][start_word_index:end_word_index] #for word in sen_dict['sentences'][sentence_index]['words']: def coreferences_for(string, sen_dict): """ returns the list of references to the given string that are in sen_dict. if the given string doesn't have any coreferences, it will return None. otherwise, it'll return a list, where each element is a coreference pair: [[[u'He', 1, 0, 0, 1], [u'Dan Calacci', 0, 1, 0, 2]], [[u'He', 2, 0, 0, 1], [u'Dan Calacci', 0, 1, 0, 2]]] where He -> Dan Calacci, and He -> Dan Calacci """ if not sen_dict.has_key('coref'): return None for coref in sen_dict['coref']: for ref_pair in coref: for ref in ref_pair: if string in ref: return coref def cindices_of_references(string, sen_dict): """ returns the character indices of all references to the given string. this includes the character indices for coreferences as well as actual references. 
returns a list of integer tuples that correspond to the indices: [(0, 11), (64, 66), (35, 37)] """ indices = [] coreferences = coreferences_for(string, sen_dict) for ref_pair in coreferences: for ref in ref_pair: annotated_refs = annotated_words_from_ref(ref, sen_dict) start_index = annotated_refs[0][1]['CharacterOffsetBegin'] end_index = annotated_refs[-1][1]['CharacterOffsetEnd'] interval = (int(start_index), int(end_index)) indices.append(interval) return list(set(indices)) def windices_of_name(string, sen_dict): """ produces the list of word/sentence indices for the given string. This only works for exact matches of string in sen_dict. produces a list-wrapped tuple of [(sen_index, start_index, end_index)]. if it's not found, it returns [(0, 0, 0)] """ sentence_index = 0 start_word_index = 0 end_word_index = 0 for sindex, sentence in enumerate(sen_dict['sentences']): for windex, word in enumerate(sentence['words']): if word[0] == string.split()[0]: matched = True for i, s in enumerate(string.split()[1:]): if sentence['words'][windex+i+1][0] != s: matched = False if matched: start_word_index = windex sentence_index = sindex end_word_index = windex + len(string.split()) return [(sentence_index, start_word_index, end_word_index)] def windices_of_references(string, sen_dict): """ returns a list of word/sentence indices for all coreferences to the given string in sen_dict. returns [(0,0,0)] if there were no coreferences found. """ indices = [] coreferences = coreferences_for(string, sen_dict) if not coreferences: return [(0, 0, 0)] for ref_pair in coreferences: for ref in ref_pair: sen_id = ref[1] start_index = ref[3] end_index = ref[4] interval = (int(sen_id), int(start_index), int(end_index)) indices.append(interval) return list(set(indices)) def windices_of_named_entities_and_references(sen_dict): """ returns the word/sentence indices for each reference to every named entity in sen_dict. the result is a dictionary where the keys are the strings that were recognized as named entities, and the values are a list of word/sentence index tuples: {u'Abarca': [(3, 0, 2), (3, 4, 5), (3, 3, 5)], u'Dan Calacci': [(0, 0, 2), (2, 0, 1), (1, 0, 1)], u'Shane Boissiere': [(0, 3, 5)]} """ named_entities = get_named_people(sen_dict) entity_and_references = {} for name in named_entities: name_as_string = "" for index, token in enumerate(name): if index != 0: name_as_string += (" " + token[0]) else: name_as_string += token[0] if coreferences_for(name_as_string, sen_dict): entity_and_references[name_as_string] = windices_of_references(name_as_string, sen_dict) else: entity_and_references[name_as_string] = windices_of_name(name_as_string, sen_dict) return entity_and_references def get_corenlp_object(speech, server): if len(speech.split()) > 100: return None try: return loads(server.parse(speech)) except (KeyboardInterrupt, SystemExit): raise except: return None def mention_list_by_sentence_no_anaphora(obj): """ returns a list of lists of mentions, where the nth index of the list corresponds to the nth sentence in obj: [['Andrea', 'Dan'], ['Shane'], []] this does not include mentions via anaphora. 
""" indices_and_mentions = windices_of_named_entities_and_references(obj) mentions_by_sentence = range(len(obj['sentences'])) mentions_by_sentence = map(lambda i: [], mentions_by_sentence) for name, indices in indices_and_mentions.items(): for index in indices: sen_index = index[0] mentions_by_sentence[sen_index].append(name) mentions_by_sentence[sen_index] = list(set(mentions_by_sentence[sen_index])) return mentions_by_sentence def mention_list_by_sentence_with_anaphora(a_obj, prev_obj, server): """ returns a list of lists of mentions, where the nth index of the list corresponds to the nth sentence in obj: [['Andrea', 'Dan'], ['Shane'], []] this includes mentions via anaphora. a_obj is the speechact to retrieve mentions from. prev_obj is the previous speechact. """ len_prev = len(prev_obj['sentences']) a_text = " ".join([s['text'] for s in a_obj['sentences']]) prev_text = " ".join([s['text'] for s in prev_obj['sentences']]) combined_obj = get_corenlp_object(prev_text +" "+ a_text, server) if not combined_obj: return np.nan entities_and_refs = windices_of_named_entities_and_references(combined_obj) mentions_by_sentence = range(len(a_obj['sentences'])) mentions_by_sentence = map(lambda i: [], mentions_by_sentence) for entity, references in entities_and_refs.items(): for ref in references: sen_ref = ref[0] if sen_ref < len_prev: # previous speechat. continue else: mentions_by_sentence[sen_ref - len_prev].append(entity) return mentions_by_sentence def mention_list_for_speechact_no_anaphora(obj): return windices_of_named_entities_and_references(obj).keys()
import chainer from chainer.backends.cuda import cupy import chainer.testing import chainer.testing.attr import chainermn from chainermn import nccl import mock import numpy as np import pytest import unittest class ExampleModel(chainer.Chain): def __init__(self): super(ExampleModel, self).__init__() with self.init_scope(): self.a = chainer.links.Linear(2, 3) self.b = chainer.links.Linear(3, 4) self.c = chainer.links.Linear(4, 5) class TestDoubleBufferingOptimizer(unittest.TestCase): def setup(self, batched_copy): if nccl.get_build_version() < 2000: pytest.skip('This test requires NCCL version >= 2.0') self.comm = chainermn.create_communicator('pure_nccl', batched_copy=batched_copy) device = self.comm.intra_rank chainer.cuda.get_device_from_id(device).use() self.target = ExampleModel() self.target.to_device(cupy.cuda.Device()) self.target.a.W.data[:] = self.comm.rank self.target.b.W.data[:] = self.comm.rank + 1 self.target.c.W.data[:] = self.comm.rank + 2 self.target.a.W.grad[:] = 0 self.target.b.W.grad[:] = 0 self.target.c.W.grad[:] = 0 self.actual_optimizer = chainer.GradientMethod() self.actual_optimizer.create_update_rule = mock.MagicMock def check_update(self, batched_copy): self.setup(batched_copy) self.optimizer = chainermn.create_multi_node_optimizer( self.actual_optimizer, self.comm, double_buffering=True) opt = self.optimizer.setup(self.target) assert opt is self.optimizer self.optimizer.update() self.assertEqual(self.actual_optimizer.t, 0) self.optimizer.target.a.W.grad[:] = self.comm.rank self.optimizer.target.b.W.grad[:] = self.comm.rank + 1 self.optimizer.target.c.W.grad[:] = self.comm.rank + 2 self.optimizer.update() self.optimizer.wait() self.assertEqual(self.actual_optimizer.t, 0) base = (self.comm.size - 1.0) / 2 chainer.testing.assert_allclose( self.optimizer.communicated_target.a.W.grad, (base + 0) * np.ones((3, 2))) chainer.testing.assert_allclose( self.optimizer.communicated_target.b.W.grad, (base + 1) * np.ones((4, 3))) chainer.testing.assert_allclose( self.optimizer.communicated_target.c.W.grad, (base + 2) * np.ones((5, 4))) self.optimizer.target.a.W.grad[:] = self.comm.rank + 3 self.optimizer.target.b.W.grad[:] = self.comm.rank + 4 self.optimizer.target.c.W.grad[:] = self.comm.rank + 5 self.optimizer.update() self.optimizer.wait() self.assertEqual(self.actual_optimizer.t, 1) self.optimizer.target.a.W.update_rule.update.assert_called_once_with( self.optimizer.target.a.W) self.optimizer.target.b.W.update_rule.update.assert_called_once_with( self.optimizer.target.b.W) self.optimizer.target.c.W.update_rule.update.assert_called_once_with( self.optimizer.target.c.W) chainer.testing.assert_allclose( self.optimizer.communicated_target.a.W.grad, (base + 3) * np.ones((3, 2))) chainer.testing.assert_allclose( self.optimizer.communicated_target.b.W.grad, (base + 4) * np.ones((4, 3))) chainer.testing.assert_allclose( self.optimizer.communicated_target.c.W.grad, (base + 5) * np.ones((5, 4))) self.comm.finalize() @chainer.testing.attr.gpu def test_update_without_batched_copy(self): self.check_update(False) @chainer.testing.attr.gpu def test_update_with_batched_copy(self): self.check_update(True) class DynamicExampleModel(chainer.Chain): def __init__(self): super(DynamicExampleModel, self).__init__() with self.init_scope(): self.a = chainer.links.Linear(2, 3) self.b = chainer.links.Linear(3, 4) class TestDoubleBufferingOptimizerWithDynamicModel(unittest.TestCase): def setup(self, batched_copy): if nccl.get_build_version() < 2000: pytest.skip('This test requires NCCL version >= 
2.0') self.comm = chainermn.create_communicator('pure_nccl', batched_copy=batched_copy) device = self.comm.intra_rank chainer.cuda.get_device_from_id(device).use() self.target = DynamicExampleModel() self.target.to_device(cupy.cuda.Device()) self.target.a.W.data[:] = self.comm.rank self.target.b.W.data[:] = self.comm.rank + 1 self.target.a.W.grad[:] = 0 self.target.b.W.grad[:] = 0 self.actual_optimizer = chainer.GradientMethod() self.actual_optimizer.create_update_rule = mock.MagicMock def check_update(self, batched_copy): self.setup(batched_copy) self.optimizer = chainermn.create_multi_node_optimizer( self.actual_optimizer, self.comm, double_buffering=True) opt = self.optimizer.setup(self.target) assert opt is self.optimizer self.optimizer.update() self.assertEqual(self.actual_optimizer.t, 0) self.optimizer.target.a.W.grad[:] = self.comm.rank self.optimizer.target.b.W.grad[:] = self.comm.rank + 1 self.optimizer.update() self.optimizer.wait() self.assertEqual(self.actual_optimizer.t, 0) base = (self.comm.size - 1.0) / 2 chainer.testing.assert_allclose( self.optimizer.communicated_target.a.W.grad, (base + 0) * np.ones((3, 2))) chainer.testing.assert_allclose( self.optimizer.communicated_target.b.W.grad, (base + 1) * np.ones((4, 3))) self.optimizer.target.a.W.grad[:] = self.comm.rank + 3 self.optimizer.target.b.W.grad[:] = self.comm.rank + 4 self.optimizer.update() self.optimizer.wait() self.assertEqual(self.actual_optimizer.t, 1) self.optimizer.target.a.W.update_rule.update.assert_called_once_with( self.optimizer.target.a.W) self.optimizer.target.b.W.update_rule.update.assert_called_once_with( self.optimizer.target.b.W) chainer.testing.assert_allclose( self.optimizer.communicated_target.a.W.grad, (base + 3) * np.ones((3, 2))) chainer.testing.assert_allclose( self.optimizer.communicated_target.b.W.grad, (base + 4) * np.ones((4, 3))) with self.target.init_scope(): c = chainer.links.Linear(4, 4) c.to_device(cupy.cuda.Device()) self.target.c = c if self.comm.rank == 0: self.target.c.W.data[:] = self.comm.rank + 2 self.optimizer.setup(self.target) self.optimizer.update() self.assertEqual(self.actual_optimizer.t, 0) send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data) recv_buf = self.comm.mpi_comm.allgather(send_buf) for i in range(1, self.comm.size): chainer.testing.assert_allclose(recv_buf[0], recv_buf[i]) self.optimizer.target.a.W.grad[:] = self.comm.rank + 6 self.optimizer.target.b.W.grad[:] = self.comm.rank + 7 self.optimizer.target.c.W.grad[:] = self.comm.rank + 8 self.optimizer.update() self.optimizer.wait() self.assertEqual(self.actual_optimizer.t, 0) base = (self.comm.size - 1.0) / 2 chainer.testing.assert_allclose( self.optimizer.communicated_target.a.W.grad, (base + 6) * np.ones((3, 2))) chainer.testing.assert_allclose( self.optimizer.communicated_target.b.W.grad, (base + 7) * np.ones((4, 3))) chainer.testing.assert_allclose( self.optimizer.communicated_target.c.W.grad, (base + 8) * np.ones((4, 4))) self.optimizer.target.a.W.grad[:] = self.comm.rank + 9 self.optimizer.target.b.W.grad[:] = self.comm.rank + 10 self.optimizer.target.c.W.grad[:] = self.comm.rank + 11 self.optimizer.update() self.optimizer.wait() self.assertEqual(self.actual_optimizer.t, 1) self.optimizer.target.a.W.update_rule.update.assert_called_once_with( self.optimizer.target.a.W) self.optimizer.target.b.W.update_rule.update.assert_called_once_with( self.optimizer.target.b.W) self.optimizer.target.c.W.update_rule.update.assert_called_once_with( self.optimizer.target.c.W) chainer.testing.assert_allclose( 
self.optimizer.communicated_target.a.W.grad, (base + 9) * np.ones((3, 2))) chainer.testing.assert_allclose( self.optimizer.communicated_target.b.W.grad, (base + 10) * np.ones((4, 3))) chainer.testing.assert_allclose( self.optimizer.communicated_target.c.W.grad, (base + 11) * np.ones((4, 4))) self.comm.finalize() @chainer.testing.attr.gpu def test_update_without_batched_copy(self): self.check_update(False) @chainer.testing.attr.gpu def test_update_with_batched_copy(self): self.check_update(True)
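
# Illustrative sketch, not part of the test suite: how double buffering is
# typically switched on in user training code, mirroring the calls exercised
# by the tests above. The choice of ExampleModel and a plain SGD optimizer is
# an assumption for illustration; nothing invokes this function here.
def example_double_buffering_setup():
    comm = chainermn.create_communicator('pure_nccl')
    chainer.cuda.get_device_from_id(comm.intra_rank).use()

    model = ExampleModel()
    model.to_device(cupy.cuda.Device())

    # double_buffering=True overlaps all-reduce communication of the previous
    # iteration's gradients with the current forward/backward pass.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.SGD(lr=0.01), comm, double_buffering=True)
    optimizer.setup(model)
    return comm, model, optimizer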
from datetime import datetime from django.test import SimpleTestCase, override_settings FULL_RESPONSE = 'Test conditional get response' LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47) LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT' LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT' LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT' EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT' ETAG = '"b4246ffc4f62314ca13147c9d4f76974"' WEAK_ETAG = 'W/"b4246ffc4f62314ca13147c9d4f76974"' # weak match to ETAG EXPIRED_ETAG = '"7fae4cd4b0f81e7d2914700043aa8ed6"' @override_settings(ROOT_URLCONF='conditional_processing.urls') class ConditionalGet(SimpleTestCase): def assertFullResponse(self, response, check_last_modified=True, check_etag=True): self.assertEqual(response.status_code, 200) self.assertEqual(response.content, FULL_RESPONSE.encode()) if check_last_modified: self.assertEqual(response['Last-Modified'], LAST_MODIFIED_STR) if check_etag: self.assertEqual(response['ETag'], ETAG) def assertNotModified(self, response): self.assertEqual(response.status_code, 304) self.assertEqual(response.content, b'') def test_without_conditions(self): response = self.client.get('/condition/') self.assertFullResponse(response) def test_if_modified_since(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertFullResponse(response) def test_if_unmodified_since(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) def test_if_none_match(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertFullResponse(response) # Several etags in If-None-Match is a bit exotic but why not? self.client.defaults['HTTP_IF_NONE_MATCH'] = '%s, %s' % (ETAG, EXPIRED_ETAG) response = self.client.get('/condition/') self.assertNotModified(response) def test_weak_if_none_match(self): """ If-None-Match comparisons use weak matching, so weak and strong ETags with the same value result in a 304 response. 
""" self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/weak_etag/') self.assertNotModified(response) response = self.client.put('/condition/weak_etag/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_NONE_MATCH'] = WEAK_ETAG response = self.client.get('/condition/weak_etag/') self.assertNotModified(response) response = self.client.put('/condition/weak_etag/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) def test_all_if_none_match(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = '*' response = self.client.get('/condition/') self.assertNotModified(response) response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/no_etag/') self.assertFullResponse(response, check_last_modified=False, check_etag=False) def test_if_match(self): self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.put('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG response = self.client.put('/condition/') self.assertEqual(response.status_code, 412) def test_weak_if_match(self): """ If-Match comparisons use strong matching, so any comparison involving a weak ETag return a 412 response. """ self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.get('/condition/weak_etag/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_MATCH'] = WEAK_ETAG response = self.client.get('/condition/weak_etag/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) def test_all_if_match(self): self.client.defaults['HTTP_IF_MATCH'] = '*' response = self.client.get('/condition/') self.assertFullResponse(response) response = self.client.get('/condition/no_etag/') self.assertEqual(response.status_code, 412) def test_both_headers(self): # see https://tools.ietf.org/html/rfc7232#section-6 self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/') self.assertNotModified(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/') self.assertNotModified(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertFullResponse(response) def test_both_headers_2(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = ETAG response = self.client.get('/condition/') self.assertFullResponse(response) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') 
self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/') self.assertEqual(response.status_code, 412) def test_single_condition_1(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertNotModified(response) response = self.client.get('/condition/etag/') self.assertFullResponse(response, check_last_modified=False) def test_single_condition_2(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/etag/') self.assertNotModified(response) response = self.client.get('/condition/last_modified/') self.assertFullResponse(response, check_etag=False) def test_single_condition_3(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertFullResponse(response, check_etag=False) def test_single_condition_4(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/etag/') self.assertFullResponse(response, check_last_modified=False) def test_single_condition_5(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/last_modified2/') self.assertNotModified(response) response = self.client.get('/condition/etag2/') self.assertFullResponse(response, check_last_modified=False) def test_single_condition_6(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/etag2/') self.assertNotModified(response) response = self.client.get('/condition/last_modified2/') self.assertFullResponse(response, check_etag=False) def test_single_condition_7(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/etag/') self.assertEqual(response.status_code, 412) def test_single_condition_8(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.get('/condition/last_modified/') self.assertFullResponse(response, check_etag=False) def test_single_condition_9(self): self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR response = self.client.get('/condition/last_modified2/') self.assertEqual(response.status_code, 412) response = self.client.get('/condition/etag2/') self.assertEqual(response.status_code, 412) def test_single_condition_head(self): self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR response = self.client.head('/condition/') self.assertNotModified(response) def test_unquoted(self): """ The same quoted ETag should be set on the header regardless of whether etag_func() in condition() returns a quoted or an unquoted ETag. """ response_quoted = self.client.get('/condition/etag/') response_unquoted = self.client.get('/condition/unquoted_etag/') self.assertEqual(response_quoted['ETag'], response_unquoted['ETag']) # It's possible that the matching algorithm could use the wrong value even # if the ETag header is set correctly correctly (as tested by # test_unquoted()), so check that the unquoted value is matched. 
def test_unquoted_if_none_match(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG response = self.client.get('/condition/unquoted_etag/') self.assertNotModified(response) response = self.client.put('/condition/unquoted_etag/') self.assertEqual(response.status_code, 412) self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG response = self.client.get('/condition/unquoted_etag/') self.assertFullResponse(response, check_last_modified=False) def test_invalid_etag(self): self.client.defaults['HTTP_IF_NONE_MATCH'] = '"""' response = self.client.get('/condition/etag/') self.assertFullResponse(response, check_last_modified=False)
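
# Illustrative sketch, not part of this test module: the general shape of a
# view these tests exercise through conditional_processing.urls, built with
# Django's condition() decorator. This is a hedged reconstruction of the
# pattern, not the project's actual views file; the names are hypothetical.
from django.http import HttpResponse
from django.views.decorators.http import condition


def example_etag_func(request, *args, **kwargs):
    return ETAG


def example_last_modified_func(request, *args, **kwargs):
    return LAST_MODIFIED


@condition(etag_func=example_etag_func,
           last_modified_func=example_last_modified_func)
def example_conditional_view(request):
    # Django compares the request's conditional headers against the ETag and
    # Last-Modified values returned above and short-circuits with a 304 or
    # 412 response when appropriate.
    return HttpResponse(FULL_RESPONSE)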
# Copyright (c) 2008, Casey Duncan (casey dot duncan at gmail dot com) # see PERLIN_LICENSE.txt for details """Perlin noise -- pure python implementation""" __version__ = '$Id: perlin.py 521 2008-12-15 03:03:52Z casey.duncan $' from math import floor, fmod, sqrt from random import randint # 3D Gradient vectors _GRAD3 = ((1,1,0),(-1,1,0),(1,-1,0),(-1,-1,0), (1,0,1),(-1,0,1),(1,0,-1),(-1,0,-1), (0,1,1),(0,-1,1),(0,1,-1),(0,-1,-1), (1,1,0),(0,-1,1),(-1,1,0),(0,-1,-1), ) # 4D Gradient vectors _GRAD4 = ((0,1,1,1), (0,1,1,-1), (0,1,-1,1), (0,1,-1,-1), (0,-1,1,1), (0,-1,1,-1), (0,-1,-1,1), (0,-1,-1,-1), (1,0,1,1), (1,0,1,-1), (1,0,-1,1), (1,0,-1,-1), (-1,0,1,1), (-1,0,1,-1), (-1,0,-1,1), (-1,0,-1,-1), (1,1,0,1), (1,1,0,-1), (1,-1,0,1), (1,-1,0,-1), (-1,1,0,1), (-1,1,0,-1), (-1,-1,0,1), (-1,-1,0,-1), (1,1,1,0), (1,1,-1,0), (1,-1,1,0), (1,-1,-1,0), (-1,1,1,0), (-1,1,-1,0), (-1,-1,1,0), (-1,-1,-1,0)) # A lookup table to traverse the simplex around a given point in 4D. # Details can be found where this table is used, in the 4D noise method. _SIMPLEX = ( (0,1,2,3),(0,1,3,2),(0,0,0,0),(0,2,3,1),(0,0,0,0),(0,0,0,0),(0,0,0,0),(1,2,3,0), (0,2,1,3),(0,0,0,0),(0,3,1,2),(0,3,2,1),(0,0,0,0),(0,0,0,0),(0,0,0,0),(1,3,2,0), (0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0), (1,2,0,3),(0,0,0,0),(1,3,0,2),(0,0,0,0),(0,0,0,0),(0,0,0,0),(2,3,0,1),(2,3,1,0), (1,0,2,3),(1,0,3,2),(0,0,0,0),(0,0,0,0),(0,0,0,0),(2,0,3,1),(0,0,0,0),(2,1,3,0), (0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0), (2,0,1,3),(0,0,0,0),(0,0,0,0),(0,0,0,0),(3,0,1,2),(3,0,2,1),(0,0,0,0),(3,1,2,0), (2,1,0,3),(0,0,0,0),(0,0,0,0),(0,0,0,0),(3,1,0,2),(0,0,0,0),(3,2,0,1),(3,2,1,0)) # Simplex skew constants _F2 = 0.5 * (sqrt(3.0) - 1.0) _G2 = (3.0 - sqrt(3.0)) / 6.0 _F3 = 1.0 / 3.0 _G3 = 1.0 / 6.0 class BaseNoise: """Noise abstract base class""" permutation = (151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190,6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168,68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152,2,44,154,163,70,221,153,101,155,167,43,172,9, 129,22,39,253,9,98,108,110,79,113,224,232,178,185,112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214,31,181,199,106,157,184,84,204,176,115,121,50,45,127,4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180) period = len(permutation) # Double permutation array so we don't need to wrap permutation = permutation * 2 def __init__(self, period=None, permutation_table=None): """Initialize the noise generator. With no arguments, the default period and permutation table are used (256). The default permutation table generates the exact same noise pattern each time. An integer period can be specified, to generate a random permutation table with period elements. The period determines the (integer) interval that the noise repeats, which is useful for creating tiled textures. period should be a power-of-two, though this is not enforced. 
        Note that the speed of the noise algorithm is independent of the
        period size, though larger periods mean a larger table, which
        consumes more memory.

        A permutation table consisting of an iterable sequence of whole
        numbers can be specified directly. This should have a power-of-two
        length. Typical permutation tables are a sequence of unique integers
        in the range [0,period) in random order, though other arrangements
        could prove useful, but they will not be "pure" simplex noise. The
        largest element in the sequence must be no larger than period-1.

        period and permutation_table may not be specified together.
        """
        if period is not None and permutation_table is not None:
            raise ValueError(
                'Can specify either period or permutation_table, not both')
        if period is not None:
            self.randomize(period)
        elif permutation_table is not None:
            self.permutation = tuple(permutation_table) * 2
            self.period = len(permutation_table)

    def randomize(self, period=None):
        """Randomize the permutation table used by the noise functions.

        This makes them generate a different noise pattern for the same
        inputs.
        """
        if period is not None:
            self.period = period
        # Build the permutation as a mutable list so the in-place swaps below
        # work on both Python 2 and Python 3.
        perm = list(range(self.period))
        perm_right = self.period - 1
        for i in list(perm):
            j = randint(0, perm_right)
            perm[i], perm[j] = perm[j], perm[i]
        self.permutation = tuple(perm) * 2


class SimplexNoise(BaseNoise):
    """Perlin simplex noise generator

    Adapted from Stefan Gustavson's Java implementation described here:
    http://staffwww.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf

    To summarize:

    "In 2001, Ken Perlin presented 'simplex noise', a replacement for his
    classic noise algorithm.  Classic 'Perlin noise' won him an academy award
    and has become an ubiquitous procedural primitive for computer graphics
    over the years, but in hindsight it has quite a few limitations.  Ken
    Perlin himself designed simplex noise specifically to overcome those
    limitations, and he spent a lot of good thinking on it. Therefore, it is
    a better idea than his original algorithm. A few of the more prominent
    advantages are:

    * Simplex noise has a lower computational complexity and requires fewer
      multiplications.
    * Simplex noise scales to higher dimensions (4D, 5D and up) with much
      less computational cost, the complexity is O(N) for N dimensions
      instead of the O(2^N) of classic Noise.
    * Simplex noise has no noticeable directional artifacts.
    * Simplex noise has a well-defined and continuous gradient everywhere
      that can be computed quite cheaply.
    * Simplex noise is easy to implement in hardware."
    """

    def noise2(self, x, y):
        """2D Perlin simplex noise.

        Return a floating point value from -1 to 1 for the given x, y
        coordinate.

        The same value is always returned for a given x, y pair unless the
        permutation table changes (see randomize above).
""" # Skew input space to determine which simplex (triangle) we are in s = (x + y) * _F2 i = floor(x + s) j = floor(y + s) t = (i + j) * _G2 x0 = x - (i - t) # "Unskewed" distances from cell origin y0 = y - (j - t) if x0 > y0: i1 = 1; j1 = 0 # Lower triangle, XY order: (0,0)->(1,0)->(1,1) else: i1 = 0; j1 = 1 # Upper triangle, YX order: (0,0)->(0,1)->(1,1) x1 = x0 - i1 + _G2 # Offsets for middle corner in (x,y) unskewed coords y1 = y0 - j1 + _G2 x2 = x0 + _G2 * 2.0 - 1.0 # Offsets for last corner in (x,y) unskewed coords y2 = y0 + _G2 * 2.0 - 1.0 # Determine hashed gradient indices of the three simplex corners perm = self.permutation ii = int(i) % self.period jj = int(j) % self.period gi0 = perm[ii + perm[jj]] % 12 gi1 = perm[ii + i1 + perm[jj + j1]] % 12 gi2 = perm[ii + 1 + perm[jj + 1]] % 12 # Calculate the contribution from the three corners tt = 0.5 - x0**2 - y0**2 if tt > 0: g = _GRAD3[gi0] noise = tt**4 * (g[0] * x0 + g[1] * y0) else: noise = 0.0 tt = 0.5 - x1**2 - y1**2 if tt > 0: g = _GRAD3[gi1] noise += tt**4 * (g[0] * x1 + g[1] * y1) tt = 0.5 - x2**2 - y2**2 if tt > 0: g = _GRAD3[gi2] noise += tt**4 * (g[0] * x2 + g[1] * y2) return noise * 70.0 # scale noise to [-1, 1] def noise3(self, x, y, z): """3D Perlin simplex noise. Return a floating point value from -1 to 1 for the given x, y, z coordinate. The same value is always returned for a given x, y, z pair unless the permutation table changes (see randomize above). """ # Skew the input space to determine which simplex cell we're in s = (x + y + z) * _F3 i = floor(x + s) j = floor(y + s) k = floor(z + s) t = (i + j + k) * _G3 x0 = x - (i - t) # "Unskewed" distances from cell origin y0 = y - (j - t) z0 = z - (k - t) # For the 3D case, the simplex shape is a slightly irregular tetrahedron. # Determine which simplex we are in. 
if x0 >= y0: if y0 >= z0: i1 = 1; j1 = 0; k1 = 0 i2 = 1; j2 = 1; k2 = 0 elif x0 >= z0: i1 = 1; j1 = 0; k1 = 0 i2 = 1; j2 = 0; k2 = 1 else: i1 = 0; j1 = 0; k1 = 1 i2 = 1; j2 = 0; k2 = 1 else: # x0 < y0 if y0 < z0: i1 = 0; j1 = 0; k1 = 1 i2 = 0; j2 = 1; k2 = 1 elif x0 < z0: i1 = 0; j1 = 1; k1 = 0 i2 = 0; j2 = 1; k2 = 1 else: i1 = 0; j1 = 1; k1 = 0 i2 = 1; j2 = 1; k2 = 0 # Offsets for remaining corners x1 = x0 - i1 + _G3 y1 = y0 - j1 + _G3 z1 = z0 - k1 + _G3 x2 = x0 - i2 + 2.0 * _G3 y2 = y0 - j2 + 2.0 * _G3 z2 = z0 - k2 + 2.0 * _G3 x3 = x0 - 1.0 + 3.0 * _G3 y3 = y0 - 1.0 + 3.0 * _G3 z3 = z0 - 1.0 + 3.0 * _G3 # Calculate the hashed gradient indices of the four simplex corners perm = self.permutation ii = int(i) % self.period jj = int(j) % self.period kk = int(k) % self.period gi0 = perm[ii + perm[jj + perm[kk]]] % 12 gi1 = perm[ii + i1 + perm[jj + j1 + perm[kk + k1]]] % 12 gi2 = perm[ii + i2 + perm[jj + j2 + perm[kk + k2]]] % 12 gi3 = perm[ii + 1 + perm[jj + 1 + perm[kk + 1]]] % 12 # Calculate the contribution from the four corners noise = 0.0 tt = 0.6 - x0**2 - y0**2 - z0**2 if tt > 0: g = _GRAD3[gi0] noise = tt**4 * (g[0] * x0 + g[1] * y0 + g[2] * z0) else: noise = 0.0 tt = 0.6 - x1**2 - y1**2 - z1**2 if tt > 0: g = _GRAD3[gi1] noise += tt**4 * (g[0] * x1 + g[1] * y1 + g[2] * z1) tt = 0.6 - x2**2 - y2**2 - z2**2 if tt > 0: g = _GRAD3[gi2] noise += tt**4 * (g[0] * x2 + g[1] * y2 + g[2] * z2) tt = 0.6 - x3**2 - y3**2 - z3**2 if tt > 0: g = _GRAD3[gi3] noise += tt**4 * (g[0] * x3 + g[1] * y3 + g[2] * z3) return noise * 32.0 def lerp(t, a, b): return a + t * (b - a) def grad3(hash, x, y, z): g = _GRAD3[hash % 16] return x*g[0] + y*g[1] + z*g[2] class TileableNoise(BaseNoise): """Tileable implemention of Perlin "improved" noise. This is based on the reference implementation published here: http://mrl.nyu.edu/~perlin/noise/ """ def noise3(self, x, y, z, repeat, base=0.0): """Tileable 3D noise. repeat specifies the integer interval in each dimension when the noise pattern repeats. base allows a different texture to be generated for the same repeat interval. """ i = int(fmod(floor(x), repeat)) j = int(fmod(floor(y), repeat)) k = int(fmod(floor(z), repeat)) ii = (i + 1) % repeat jj = (j + 1) % repeat kk = (k + 1) % repeat if base: i += base; j += base; k += base ii += base; jj += base; kk += base x -= floor(x); y -= floor(y); z -= floor(z) fx = x**3 * (x * (x * 6 - 15) + 10) fy = y**3 * (y * (y * 6 - 15) + 10) fz = z**3 * (z * (z * 6 - 15) + 10) perm = self.permutation A = perm[i] AA = perm[A + j] AB = perm[A + jj] B = perm[ii] BA = perm[B + j] BB = perm[B + jj] return lerp(fz, lerp(fy, lerp(fx, grad3(perm[AA + k], x, y, z), grad3(perm[BA + k], x - 1, y, z)), lerp(fx, grad3(perm[AB + k], x, y - 1, z), grad3(perm[BB + k], x - 1, y - 1, z))), lerp(fy, lerp(fx, grad3(perm[AA + kk], x, y, z - 1), grad3(perm[BA + kk], x - 1, y, z - 1)), lerp(fx, grad3(perm[AB + kk], x, y - 1, z - 1), grad3(perm[BB + kk], x - 1, y - 1, z - 1))))
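# Illustrative sketch (not part of the original library): the generators
# above are usually layered in "octaves" at doubling frequencies and halving
# amplitudes to get natural-looking fractal detail. The helper below only
# demonstrates that pattern on SimplexNoise.noise2 defined above; its name
# and default parameters are choices made for this example.
def fractal_noise2(gen, x, y, octaves=4, persistence=0.5, lacunarity=2.0):
    """Sum several octaves of 2D simplex noise, normalized to roughly [-1, 1]."""
    total = 0.0
    amplitude = 1.0
    frequency = 1.0
    max_amplitude = 0.0
    for _ in range(octaves):
        total += gen.noise2(x * frequency, y * frequency) * amplitude
        max_amplitude += amplitude
        amplitude *= persistence
        frequency *= lacunarity
    return total / max_amplitude


# Example usage:
#     gen = SimplexNoise()
#     value = fractal_noise2(gen, 0.1, 0.2, octaves=5)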
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# python3
"""Cloud BigQuery module."""

import logging
import os
from typing import Any, Dict, Union

from google.cloud import bigquery
from google.cloud import exceptions

import config_parser

# Main workflow sql.
_MAIN_WORKFLOW_SQL = 'scripts/main_workflow.sql'
_BEST_SELLERS_WORKFLOW_SQL = 'scripts/market_insights/best_sellers_workflow.sql'

# Set logging level.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('googleapiclient.discovery').setLevel(logging.WARNING)


def create_dataset_if_not_exists(project_id: str, dataset_id: str) -> None:
  """Creates BigQuery dataset if it doesn't exist.

  Args:
    project_id: A cloud project id.
    dataset_id: BigQuery dataset id.
  """
  # Construct a BigQuery client object.
  client = bigquery.Client(project=project_id)
  fully_qualified_dataset_id = f'{project_id}.{dataset_id}'
  try:
    client.get_dataset(fully_qualified_dataset_id)
    logging.info('Dataset %s already exists.', fully_qualified_dataset_id)
  except exceptions.NotFound:
    logging.info('Dataset %s is not found.', fully_qualified_dataset_id)
    dataset = bigquery.Dataset(fully_qualified_dataset_id)
    dataset.location = config_parser.get_dataset_location()
    client.create_dataset(dataset)
    logging.info('Dataset %s created.', fully_qualified_dataset_id)


def load_language_codes(project_id: str, dataset_id: str) -> None:
  """Loads language codes."""
  client = bigquery.Client(project=project_id)
  fully_qualified_table_id = f'{project_id}.{dataset_id}.language_codes'
  job_config = bigquery.LoadJobConfig(
      source_format=bigquery.SourceFormat.CSV,
      skip_leading_rows=1,
      autodetect=True,
  )
  file_name = 'data/language_codes.csv'
  with open(file_name, 'rb') as source_file:
    job = client.load_table_from_file(
        source_file, fully_qualified_table_id, job_config=job_config)
  job.result()


def load_geo_targets(project_id: str, dataset_id: str) -> None:
  """Loads geo targets."""
  client = bigquery.Client(project=project_id)
  fully_qualified_table_id = f'{project_id}.{dataset_id}.geo_targets'
  job_config = bigquery.LoadJobConfig(
      source_format=bigquery.SourceFormat.CSV,
      skip_leading_rows=1,
      autodetect=True,
  )
  file_name = 'data/geo_targets.csv'
  with open(file_name, 'rb') as source_file:
    job = client.load_table_from_file(
        source_file, fully_qualified_table_id, job_config=job_config)
  job.result()


def read_file(file_path: str) -> str:
  """Reads and returns contents of the file.

  Args:
    file_path: File path.

  Returns:
    content: File content.

  Raises:
    FileNotFoundError: If the provided file is not found.
  """
  try:
    with open(file_path, 'r') as stream:
      content = stream.read()
  except FileNotFoundError:
    raise FileNotFoundError(f'The file "{file_path}" could not be found.')
  else:
    return content


def configure_sql(sql_path: str, query_params: Dict[str, Any]) -> str:
  """Configures parameters of SQL script with variables supplied.

  Args:
    sql_path: Path to SQL script.
    query_params: Configuration containing query parameter values.
  Returns:
    sql_script: String representation of SQL script with parameters assigned.
  """
  sql_script = read_file(sql_path)
  params = {}
  for param_key, param_value in query_params.items():
    # If given value is a comma-separated string (ex. 'a,b,c'), create a tuple
    # of strings (ex. ('a', 'b', 'c')) to pass to the SQL IN operator.
    if isinstance(param_value, str) and ',' in param_value:
      params[param_key] = tuple(param_value.split(','))
    else:
      params[param_key] = param_value
  return sql_script.format(**params)


def execute_queries(project_id: str, dataset_id: str, merchant_id: str,
                    customer_id: str, enable_market_insights: bool) -> None:
  """Executes list of queries."""
  # SQL files to be executed in this specific order. File names are listed
  # without the "scripts" prefix; it is joined on below when each file is read.
  sql_files = [
      '1_product_view.sql',
      'targeted_products/targeted_product_ddl.sql',
      'targeted_products/construct_parsed_criteria.sql',
      '2_product_metrics_view.sql',
      '3_customer_view.sql',
      '4_product_detailed_view.sql',
      'materialize_product_detailed.sql',
      'materialize_product_historical.sql',
  ]
  if enable_market_insights:
    market_insights_sql_files = [
        'market_insights/snapshot_view.sql',
        'market_insights/historical_view.sql'
    ]
    sql_files.extend(market_insights_sql_files)
  prefix = 'scripts'
  query_params = {
      'project_id': project_id,
      'dataset': dataset_id,
      'merchant_id': merchant_id,
      'external_customer_id': customer_id
  }
  location = config_parser.get_dataset_location()
  client = bigquery.Client(project=project_id)
  for sql_file in sql_files:
    try:
      query = configure_sql(os.path.join(prefix, sql_file), query_params)
      query_job = client.query(query, location=location)
      query_job.result()
    except Exception:
      logging.exception('Error in %s', sql_file)
      raise


def get_main_workflow_sql(project_id: str, dataset_id: str, merchant_id: str,
                          customer_id: str) -> str:
  """Returns main workflow sql.

  Args:
    project_id: A cloud project id.
    dataset_id: BigQuery dataset id.
    merchant_id: Merchant center id.
    customer_id: Google Ads customer id.
  """
  query_params = {
      'project_id': project_id,
      'dataset': dataset_id,
      'merchant_id': merchant_id,
      'external_customer_id': customer_id
  }
  return configure_sql(_MAIN_WORKFLOW_SQL, query_params)


def get_best_sellers_workflow_sql(project_id: str, dataset_id: str,
                                  merchant_id: str) -> str:
  """Returns best sellers workflow sql.

  Args:
    project_id: A cloud project id.
    dataset_id: BigQuery dataset id.
    merchant_id: Merchant center id.
  """
  query_params = {
      'project_id': project_id,
      'dataset': dataset_id,
      'merchant_id': merchant_id
  }
  return configure_sql(_BEST_SELLERS_WORKFLOW_SQL, query_params)
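# Illustrative sketch (not part of the module): demonstrates how
# `configure_sql` expands a comma-separated parameter into a tuple suitable
# for a SQL IN clause. The template text, file handling, and parameter values
# below are made up for this example only; the helper is never called here.
def _demo_configure_sql() -> None:
  import tempfile

  sql_template = (
      'SELECT * FROM `{project_id}.{dataset}.products` '
      'WHERE channel IN {channels}')
  with tempfile.NamedTemporaryFile(
      mode='w', suffix='.sql', delete=False) as tmp:
    tmp.write(sql_template)
    tmp_path = tmp.name
  try:
    query = configure_sql(tmp_path, {
        'project_id': 'my-project',
        'dataset': 'my_dataset',
        'channels': 'online,local',  # rendered as ('online', 'local')
    })
    logging.info('Rendered query: %s', query)
  finally:
    os.remove(tmp_path)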
""" Utilities for local system calls, everything here is cross-platform. become_daemon was originally taken from Django: https://github.com/django/django/commit/5836a5771f2aefca83349b111f4191d6485af1d5#diff-f7d80be2ccf77f4f009d08dcac4b7736 We might want to refactor this into: system/__init__.py system/posix.py system/windows.py etc.. """ from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import logging import os import sys import six from django.db import connections from .conf import KOLIBRI_HOME from .conf import OPTIONS from kolibri.utils.android import on_android logger = logging.getLogger(__name__) def _posix_pid_exists(pid): """Check whether PID exists in the current process table.""" import errno if pid < 0: return False try: # Send signal 0, this is harmless os.kill(pid, 0) except OSError as e: return e.errno == errno.EPERM else: return True def _windows_pid_exists(pid): import ctypes kernel32 = ctypes.windll.kernel32 SYNCHRONIZE = 0x100000 process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid) if process != 0: kernel32.CloseHandle(process) return True return False buffering = int(six.PY3) # No unbuffered text I/O on Python 3 (#20815). def _posix_become_daemon( our_home_dir=".", out_log="/dev/null", err_log="/dev/null", umask=0o022 ): "Robustly turn into a UNIX daemon, running in our_home_dir." # First fork try: if os.fork() > 0: sys.exit(0) # kill off parent except OSError as e: sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror)) sys.exit(1) os.setsid() os.chdir(our_home_dir) os.umask(umask) # Second fork try: if os.fork() > 0: os._exit(0) except OSError as e: sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror)) os._exit(1) if sys.platform != "darwin": # This block breaks on OS X # Fix courtesy of https://github.com/serverdensity/python-daemon/blob/master/daemon.py#L94 si = open("/dev/null", "r") so = open(out_log, "a+", buffering) se = open(err_log, "a+", buffering) os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) # Set custom file descriptors so that they get proper buffering. sys.stdout, sys.stderr = so, se def _windows_become_daemon(our_home_dir=".", out_log=None, err_log=None, umask=0o022): """ If we're not running under a POSIX system, just simulate the daemon mode by doing redirections and directory changing. """ os.chdir(our_home_dir) os.umask(umask) sys.stdin.close() old_stderr = sys.stderr old_stdout = sys.stdout if err_log: sys.stderr = open(err_log, "a", buffering) else: sys.stderr = _WindowsNullDevice() if out_log: sys.stdout = open(out_log, "a", buffering) else: sys.stdout = _WindowsNullDevice() # Redirect stderr and stdout os.dup2(sys.stderr.fileno(), old_stderr.fileno()) os.dup2(sys.stdout.fileno(), old_stdout.fileno()) old_stderr.flush() old_stdout.flush() class _WindowsNullDevice: "A writeable object that writes to nowhere -- like /dev/null." 
    def write(self, s):
        pass


def get_free_space(path=KOLIBRI_HOME):
    while path and not os.path.exists(path):
        path = os.path.dirname(path)  # look to parent if it doesn't exist
    if not path:
        raise Exception("Could not calculate free space")

    if sys.platform.startswith("win"):
        import ctypes

        free = ctypes.c_ulonglong(0)
        check = ctypes.windll.kernel32.GetDiskFreeSpaceExW(
            ctypes.c_wchar_p(path), None, None, ctypes.pointer(free)
        )
        if check == 0:
            raise ctypes.WinError()
        result = free.value
    elif on_android():
        # This is meant for Android, which needs to go through the Android API
        # to determine free space. If we're somehow getting here on
        # non-Android, we've got a problem.
        try:
            from jnius import autoclass

            StatFs = autoclass("android.os.StatFs")
            AndroidString = autoclass("java.lang.String")
            st = StatFs(AndroidString(path))
            try:
                # for api version 18+
                result = st.getFreeBlocksLong() * st.getBlockSizeLong()
            except Exception:
                # for api versions < 18
                result = st.getFreeBlocks() * st.getBlockSize()
        except Exception:
            raise
    else:
        st = os.statvfs(os.path.realpath(path))
        result = st.f_bavail * st.f_frsize

    return max(result - OPTIONS["Deployment"]["MINIMUM_DISK_SPACE"], 0)


_become_daemon_function = None


def become_daemon(**kwargs):
    # close all connections before forking, to avoid SQLite corruption:
    # https://www.sqlite.org/howtocorrupt.html#_carrying_an_open_database_connection_across_a_fork_
    connections.close_all()
    _become_daemon_function(**kwargs)


def _posix_get_fd_limit():
    """
    Determines the File Descriptor (FD) limit
    :return: int
    """
    import resource

    fd_soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    return fd_soft_limit


def _windows_get_fd_limit():
    """
    Determines the File Descriptor (FD) limit
    :return: int
    """
    # TODO: "determine" it
    return 512


# Utility functions
if os.name == "posix":
    pid_exists = _posix_pid_exists
    get_fd_limit = _posix_get_fd_limit
    _become_daemon_function = _posix_become_daemon
else:
    pid_exists = _windows_pid_exists
    get_fd_limit = _windows_get_fd_limit
    _become_daemon_function = _windows_become_daemon
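# Illustrative sketch (not part of the original module): the names above are
# bound to the POSIX or Windows implementation at import time, so callers use
# them without checking the OS themselves. The function below only
# demonstrates the calls and is never invoked from this module.
def _demo_system_helpers():
    current_pid = os.getpid()
    logger.info("PID %s exists: %s", current_pid, pid_exists(current_pid))
    logger.info("File descriptor soft limit: %s", get_fd_limit())
    logger.info("Usable space under KOLIBRI_HOME: %s bytes", get_free_space())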
"""Calendar printing functions Note when comparing these calendars to the ones printed by cal(1): By default, these calendars have Monday as the first day of the week, and Sunday as the last (the European convention). Use setfirstweekday() to set the first day of the week (0=Monday, 6=Sunday).""" import sys import datetime import locale as _locale from itertools import repeat __all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", "firstweekday", "isleap", "leapdays", "weekday", "monthrange", "monthcalendar", "prmonth", "month", "prcal", "calendar", "timegm", "month_name", "month_abbr", "day_name", "day_abbr", "Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar", "LocaleHTMLCalendar", "weekheader"] # Exception raised for bad input (with string parameter for details) error = ValueError # Exceptions raised for bad input class IllegalMonthError(ValueError): def __init__(self, month): self.month = month def __str__(self): return "bad month number %r; must be 1-12" % self.month class IllegalWeekdayError(ValueError): def __init__(self, weekday): self.weekday = weekday def __str__(self): return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday # Constants for months referenced later January = 1 February = 2 # Number of days per month (except for February in leap years) mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # This module used to have hard-coded lists of day and month names, as # English strings. The classes following emulate a read-only version of # that, but supply localized names. Note that the values are computed # fresh on each call, in case the user changes locale between calls. class _localized_month: _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)] _months.insert(0, lambda x: "") def __init__(self, format): self.format = format def __getitem__(self, i): funcs = self._months[i] if isinstance(i, slice): return [f(self.format) for f in funcs] else: return funcs(self.format) def __len__(self): return 13 class _localized_day: # January 1, 2001, was a Monday. _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)] def __init__(self, format): self.format = format def __getitem__(self, i): funcs = self._days[i] if isinstance(i, slice): return [f(self.format) for f in funcs] else: return funcs(self.format) def __len__(self): return 7 # Full and abbreviated names of weekdays day_name = _localized_day('%A') day_abbr = _localized_day('%a') # Full and abbreviated names of months (1-based arrays!!!) month_name = _localized_month('%B') month_abbr = _localized_month('%b') # Constants for weekdays (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) def isleap(year): """Return True for leap years, False for non-leap years.""" return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) def leapdays(y1, y2): """Return number of leap years in range [y1, y2). 
Assume y1 <= y2.""" y1 -= 1 y2 -= 1 return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400) def weekday(year, month, day): """Return weekday (0-6 ~ Mon-Sun) for year, month (1-12), day (1-31).""" if not datetime.MINYEAR <= year <= datetime.MAXYEAR: year = 2000 + year % 400 return datetime.date(year, month, day).weekday() def monthrange(year, month): """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for year, month.""" if not 1 <= month <= 12: raise IllegalMonthError(month) day1 = weekday(year, month, 1) ndays = mdays[month] + (month == February and isleap(year)) return day1, ndays def _monthlen(year, month): return mdays[month] + (month == February and isleap(year)) def _prevmonth(year, month): if month == 1: return year-1, 12 else: return year, month-1 def _nextmonth(year, month): if month == 12: return year+1, 1 else: return year, month+1 class Calendar(object): """ Base calendar class. This class doesn't do any formatting. It simply provides data to subclasses. """ def __init__(self, firstweekday=0): self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday def getfirstweekday(self): return self._firstweekday % 7 def setfirstweekday(self, firstweekday): self._firstweekday = firstweekday firstweekday = property(getfirstweekday, setfirstweekday) def iterweekdays(self): """ Return an iterator for one week of weekday numbers starting with the configured first one. """ for i in range(self.firstweekday, self.firstweekday + 7): yield i%7 def itermonthdates(self, year, month): """ Return an iterator for one month. The iterator will yield datetime.date values and will always iterate through complete weeks, so it will yield dates outside the specified month. """ for y, m, d in self.itermonthdays3(year, month): yield datetime.date(y, m, d) def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ day1, ndays = monthrange(year, month) days_before = (day1 - self.firstweekday) % 7 yield from repeat(0, days_before) yield from range(1, ndays + 1) days_after = (self.firstweekday - day1 - ndays) % 7 yield from repeat(0, days_after) def itermonthdays2(self, year, month): """ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7 def itermonthdays3(self, year, month): """ Like itermonthdates(), but will yield (year, month, day) tuples. Can be used for dates outside of datetime.date range. """ day1, ndays = monthrange(year, month) days_before = (day1 - self.firstweekday) % 7 days_after = (self.firstweekday - day1 - ndays) % 7 y, m = _prevmonth(year, month) end = _monthlen(y, m) + 1 for d in range(end-days_before, end): yield y, m, d for d in range(1, ndays + 1): yield year, month, d y, m = _nextmonth(year, month) for d in range(1, days_after + 1): yield y, m, d def itermonthdays4(self, year, month): """ Like itermonthdates(), but will yield (year, month, day, day_of_week) tuples. Can be used for dates outside of datetime.date range. """ for i, (y, m, d) in enumerate(self.itermonthdays3(year, month)): yield y, m, d, (self.firstweekday + i) % 7 def monthdatescalendar(self, year, month): """ Return a matrix (list of lists) representing a month's calendar. Each row represents a week; week entries are datetime.date values. 
""" dates = list(self.itermonthdates(year, month)) return [ dates[i:i+7] for i in range(0, len(dates), 7) ] def monthdays2calendar(self, year, month): """ Return a matrix representing a month's calendar. Each row represents a week; week entries are (day number, weekday number) tuples. Day numbers outside this month are zero. """ days = list(self.itermonthdays2(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ] def monthdayscalendar(self, year, month): """ Return a matrix representing a month's calendar. Each row represents a week; days outside this month are zero. """ days = list(self.itermonthdays(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ] def yeardatescalendar(self, year, width=3): """ Return the data for the specified year ready for formatting. The return value is a list of month rows. Each month row contains up to width months. Each month contains between 4 and 6 weeks and each week contains 1-7 days. Days are datetime.date objects. """ months = [ self.monthdatescalendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] def yeardays2calendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are (day number, weekday number) tuples. Day numbers outside this month are zero. """ months = [ self.monthdays2calendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] def yeardayscalendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero. """ months = [ self.monthdayscalendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] class TextCalendar(Calendar): """ Subclass of Calendar that outputs a calendar as a simple plain text similar to the UNIX program cal. """ def prweek(self, theweek, width): """ Print a single week (no newline). """ print(self.formatweek(theweek, width), end='') def formatday(self, day, weekday, width): """ Returns a formatted day. """ if day == 0: s = '' else: s = '%2i' % day # right-align single-digit days return s.center(width) def formatweek(self, theweek, width): """ Returns a single week in a string (no newline). """ return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek) def formatweekday(self, day, width): """ Returns a formatted week day name. """ if width >= 9: names = day_name else: names = day_abbr return names[day][:width].center(width) def formatweekheader(self, width): """ Return a header for a week. """ return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays()) def formatmonthname(self, theyear, themonth, width, withyear=True): """ Return a formatted month name. """ s = month_name[themonth] if withyear: s = "%s %r" % (s, theyear) return s.center(width) def prmonth(self, theyear, themonth, w=0, l=0): """ Print a month's calendar. """ print(self.formatmonth(theyear, themonth, w, l), end='') def formatmonth(self, theyear, themonth, w=0, l=0): """ Return a month's calendar string (multi-line). 
""" w = max(2, w) l = max(1, l) s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1) s = s.rstrip() s += '\n' * l s += self.formatweekheader(w).rstrip() s += '\n' * l for week in self.monthdays2calendar(theyear, themonth): s += self.formatweek(week, w).rstrip() s += '\n' * l return s def formatyear(self, theyear, w=2, l=1, c=6, m=3): """ Returns a year's calendar as a multi-line string. """ w = max(2, w) l = max(1, l) c = max(2, c) colwidth = (w + 1) * 7 - 1 v = [] a = v.append a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip()) a('\n'*l) header = self.formatweekheader(w) for (i, row) in enumerate(self.yeardays2calendar(theyear, m)): # months in this row months = range(m*i+1, min(m*(i+1)+1, 13)) a('\n'*l) names = (self.formatmonthname(theyear, k, colwidth, False) for k in months) a(formatstring(names, colwidth, c).rstrip()) a('\n'*l) headers = (header for k in months) a(formatstring(headers, colwidth, c).rstrip()) a('\n'*l) # max number of weeks for this row height = max(len(cal) for cal in row) for j in range(height): weeks = [] for cal in row: if j >= len(cal): weeks.append('') else: weeks.append(self.formatweek(cal[j], w)) a(formatstring(weeks, colwidth, c).rstrip()) a('\n' * l) return ''.join(v) def pryear(self, theyear, w=0, l=0, c=6, m=3): """Print a year's calendar.""" print(self.formatyear(theyear, w, l, c, m), end='') class HTMLCalendar(Calendar): """ This calendar returns complete HTML pages. """ # CSS classes for the day <td>s cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] # CSS classes for the day <th>s cssclasses_weekday_head = cssclasses # CSS class for the days before and after current month cssclass_noday = "noday" # CSS class for the month's head cssclass_month_head = "month" # CSS class for the month cssclass_month = "month" # CSS class for the year's table head cssclass_year_head = "year" # CSS class for the whole year table cssclass_year = "year" def formatday(self, day, weekday): """ Return a day as a table cell. """ if day == 0: # day outside month return '<td class="%s">&nbsp;</td>' % self.cssclass_noday else: return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day) def formatweek(self, theweek): """ Return a complete week as a table row. """ s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) return '<tr>%s</tr>' % s def formatweekday(self, day): """ Return a weekday name as a table header. """ return '<th class="%s">%s</th>' % ( self.cssclasses_weekday_head[day], day_abbr[day]) def formatweekheader(self): """ Return a header for a week as a table row. """ s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) return '<tr>%s</tr>' % s def formatmonthname(self, theyear, themonth, withyear=True): """ Return a month name as a table row. """ if withyear: s = '%s %s' % (month_name[themonth], theyear) else: s = '%s' % month_name[themonth] return '<tr><th colspan="7" class="%s">%s</th></tr>' % ( self.cssclass_month_head, s) def formatmonth(self, theyear, themonth, withyear=True): """ Return a formatted month as a table. """ v = [] a = v.append a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' % ( self.cssclass_month)) a('\n') a(self.formatmonthname(theyear, themonth, withyear=withyear)) a('\n') a(self.formatweekheader()) a('\n') for week in self.monthdays2calendar(theyear, themonth): a(self.formatweek(week)) a('\n') a('</table>') a('\n') return ''.join(v) def formatyear(self, theyear, width=3): """ Return a formatted year as a table of tables. 
""" v = [] a = v.append width = max(width, 1) a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' % self.cssclass_year) a('\n') a('<tr><th colspan="%d" class="%s">%s</th></tr>' % ( width, self.cssclass_year_head, theyear)) for i in range(January, January+12, width): # months in this row months = range(i, min(i+width, 13)) a('<tr>') for m in months: a('<td>') a(self.formatmonth(theyear, m, withyear=False)) a('</td>') a('</tr>') a('</table>') return ''.join(v) def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None): """ Return a formatted year as a complete HTML page. """ if encoding is None: encoding = sys.getdefaultencoding() v = [] a = v.append a('<?xml version="1.0" encoding="%s"?>\n' % encoding) a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n') a('<html>\n') a('<head>\n') a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding) if css is not None: a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css) a('<title>Calendar for %d</title>\n' % theyear) a('</head>\n') a('<body>\n') a(self.formatyear(theyear, width)) a('</body>\n') a('</html>\n') return ''.join(v).encode(encoding, "xmlcharrefreplace") class different_locale: def __init__(self, locale): self.locale = locale def __enter__(self): self.oldlocale = _locale.getlocale(_locale.LC_TIME) _locale.setlocale(_locale.LC_TIME, self.locale) def __exit__(self, *args): _locale.setlocale(_locale.LC_TIME, self.oldlocale) class LocaleTextCalendar(TextCalendar): """ This class can be passed a locale name in the constructor and will return month and weekday names in the specified locale. If this locale includes an encoding all strings containing month and weekday names will be returned as unicode. """ def __init__(self, firstweekday=0, locale=None): TextCalendar.__init__(self, firstweekday) if locale is None: locale = _locale.getdefaultlocale() self.locale = locale def formatweekday(self, day, width): with different_locale(self.locale): return super().formatweekday(day, width) def formatmonthname(self, theyear, themonth, width, withyear=True): with different_locale(self.locale): return super().formatmonthname(theyear, themonth, width, withyear) class LocaleHTMLCalendar(HTMLCalendar): """ This class can be passed a locale name in the constructor and will return month and weekday names in the specified locale. If this locale includes an encoding all strings containing month and weekday names will be returned as unicode. 
""" def __init__(self, firstweekday=0, locale=None): HTMLCalendar.__init__(self, firstweekday) if locale is None: locale = _locale.getdefaultlocale() self.locale = locale def formatweekday(self, day): with different_locale(self.locale): return super().formatweekday(day) def formatmonthname(self, theyear, themonth, withyear=True): with different_locale(self.locale): return super().formatmonthname(theyear, themonth, withyear) # Support for old module level interface c = TextCalendar() firstweekday = c.getfirstweekday def setfirstweekday(firstweekday): if not MONDAY <= firstweekday <= SUNDAY: raise IllegalWeekdayError(firstweekday) c.firstweekday = firstweekday monthcalendar = c.monthdayscalendar prweek = c.prweek week = c.formatweek weekheader = c.formatweekheader prmonth = c.prmonth month = c.formatmonth calendar = c.formatyear prcal = c.pryear # Spacing of month columns for multi-column year calendar _colwidth = 7*3 - 1 # Amount printed by prweek() _spacing = 6 # Number of spaces between columns def format(cols, colwidth=_colwidth, spacing=_spacing): """Prints multi-column formatting for year calendars""" print(formatstring(cols, colwidth, spacing)) def formatstring(cols, colwidth=_colwidth, spacing=_spacing): """Returns a string formatted from n strings, centered within n columns.""" spacing *= ' ' return spacing.join(c.center(colwidth) for c in cols) EPOCH = 1970 _EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal() def timegm(tuple): """Unrelated but handy function to calculate Unix timestamp from GMT.""" year, month, day, hour, minute, second = tuple[:6] days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 hours = days*24 + hour minutes = hours*60 + minute seconds = minutes*60 + second return seconds def main(args): import argparse parser = argparse.ArgumentParser() textgroup = parser.add_argument_group('text only arguments') htmlgroup = parser.add_argument_group('html only arguments') textgroup.add_argument( "-w", "--width", type=int, default=2, help="width of date column (default 2)" ) textgroup.add_argument( "-l", "--lines", type=int, default=1, help="number of lines for each week (default 1)" ) textgroup.add_argument( "-s", "--spacing", type=int, default=6, help="spacing between months (default 6)" ) textgroup.add_argument( "-m", "--months", type=int, default=3, help="months per row (default 3)" ) htmlgroup.add_argument( "-c", "--css", default="calendar.css", help="CSS to use for page" ) parser.add_argument( "-L", "--locale", default=None, help="locale to be used from month and weekday names" ) parser.add_argument( "-e", "--encoding", default=None, help="encoding to use for output" ) parser.add_argument( "-t", "--type", default="text", choices=("text", "html"), help="output type (text or html)" ) parser.add_argument( "year", nargs='?', type=int, help="year number (1-9999)" ) parser.add_argument( "month", nargs='?', type=int, help="month number (1-12, text only)" ) options = parser.parse_args(args[1:]) if options.locale and not options.encoding: parser.error("if --locale is specified --encoding is required") sys.exit(1) locale = options.locale, options.encoding if options.type == "html": if options.locale: cal = LocaleHTMLCalendar(locale=locale) else: cal = HTMLCalendar() encoding = options.encoding if encoding is None: encoding = sys.getdefaultencoding() optdict = dict(encoding=encoding, css=options.css) write = sys.stdout.buffer.write if options.year is None: write(cal.formatyearpage(datetime.date.today().year, **optdict)) elif options.month is None: 
write(cal.formatyearpage(options.year, **optdict)) else: parser.error("incorrect number of arguments") sys.exit(1) else: if options.locale: cal = LocaleTextCalendar(locale=locale) else: cal = TextCalendar() optdict = dict(w=options.width, l=options.lines) if options.month is None: optdict["c"] = options.spacing optdict["m"] = options.months if options.year is None: result = cal.formatyear(datetime.date.today().year, **optdict) elif options.month is None: result = cal.formatyear(options.year, **optdict) else: result = cal.formatmonth(options.year, options.month, **optdict) write = sys.stdout.write if options.encoding: result = result.encode(options.encoding) write = sys.stdout.buffer.write write(result) if __name__ == "__main__": main(sys.argv)
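# Illustrative sketch (not part of the module): the classes above can also be
# used directly from Python code instead of via the command-line interface.
# The calls below exercise only the public API documented in this module and
# are never invoked here.
def _demo_calendar_api():
    # Plain-text month, weeks starting on Sunday.
    cal = TextCalendar(firstweekday=SUNDAY)
    print(cal.formatmonth(2024, 2))

    # Week-by-week day numbers; 0 marks days outside the month.
    print(Calendar(firstweekday=MONDAY).monthdayscalendar(2024, 2))

    # HTML for a whole year, three month columns per row.
    html = HTMLCalendar().formatyear(2024, width=3)
    print(len(html), "characters of HTML")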
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- try: from ._models_py3 import AbsoluteDeleteOption from ._models_py3 import AdHocBackupRuleOptions from ._models_py3 import AdhocBackupTriggerOption from ._models_py3 import AdhocBasedTaggingCriteria from ._models_py3 import AdhocBasedTriggerContext from ._models_py3 import AuthCredentials from ._models_py3 import AzureBackupDiscreteRecoveryPoint from ._models_py3 import AzureBackupFindRestorableTimeRangesRequest from ._models_py3 import AzureBackupFindRestorableTimeRangesRequestResource from ._models_py3 import AzureBackupFindRestorableTimeRangesResponse from ._models_py3 import AzureBackupFindRestorableTimeRangesResponseResource from ._models_py3 import AzureBackupJob from ._models_py3 import AzureBackupJobResource from ._models_py3 import AzureBackupJobResourceList from ._models_py3 import AzureBackupParams from ._models_py3 import AzureBackupRecoveryPoint from ._models_py3 import AzureBackupRecoveryPointBasedRestoreRequest from ._models_py3 import AzureBackupRecoveryPointResource from ._models_py3 import AzureBackupRecoveryPointResourceList from ._models_py3 import AzureBackupRecoveryTimeBasedRestoreRequest from ._models_py3 import AzureBackupRehydrationRequest from ._models_py3 import AzureBackupRestoreRequest from ._models_py3 import AzureBackupRestoreWithRehydrationRequest from ._models_py3 import AzureBackupRule from ._models_py3 import AzureOperationalStoreParameters from ._models_py3 import AzureRetentionRule from ._models_py3 import BackupCriteria from ._models_py3 import BackupInstance from ._models_py3 import BackupInstanceResource from ._models_py3 import BackupInstanceResourceList from ._models_py3 import BackupParameters from ._models_py3 import BackupPolicy from ._models_py3 import BackupSchedule from ._models_py3 import BackupVault from ._models_py3 import BackupVaultResource from ._models_py3 import BackupVaultResourceList from ._models_py3 import BaseBackupPolicy from ._models_py3 import BaseBackupPolicyResource from ._models_py3 import BaseBackupPolicyResourceList from ._models_py3 import BasePolicyRule from ._models_py3 import CheckNameAvailabilityRequest from ._models_py3 import CheckNameAvailabilityResult from ._models_py3 import ClientDiscoveryDisplay from ._models_py3 import ClientDiscoveryForLogSpecification from ._models_py3 import ClientDiscoveryForProperties from ._models_py3 import ClientDiscoveryForServiceSpecification from ._models_py3 import ClientDiscoveryResponse from ._models_py3 import ClientDiscoveryValueForSingleApi from ._models_py3 import CopyOnExpiryOption from ._models_py3 import CopyOption from ._models_py3 import CustomCopyOption from ._models_py3 import DataStoreInfoBase from ._models_py3 import DataStoreParameters from ._models_py3 import Datasource from ._models_py3 import DatasourceSet from ._models_py3 import Day from ._models_py3 import DeleteOption from ._models_py3 import DppBaseResource from ._models_py3 import DppBaseResourceList from ._models_py3 import DppIdentityDetails from ._models_py3 import DppResource from ._models_py3 import DppResourceList from ._models_py3 import 
DppTrackedResource from ._models_py3 import DppTrackedResourceList from ._models_py3 import DppWorkerRequest from ._models_py3 import Error from ._models_py3 import ErrorAdditionalInfo from ._models_py3 import ExportJobsResult from ._models_py3 import FeatureValidationRequest from ._models_py3 import FeatureValidationRequestBase from ._models_py3 import FeatureValidationResponse from ._models_py3 import FeatureValidationResponseBase from ._models_py3 import ImmediateCopyOption from ._models_py3 import InnerError from ._models_py3 import ItemLevelRestoreCriteria from ._models_py3 import ItemLevelRestoreTargetInfo from ._models_py3 import JobExtendedInfo from ._models_py3 import JobSubTask from ._models_py3 import OperationExtendedInfo from ._models_py3 import OperationJobExtendedInfo from ._models_py3 import OperationResource from ._models_py3 import PatchResourceRequestInput from ._models_py3 import PolicyInfo from ._models_py3 import PolicyParameters from ._models_py3 import ProtectionStatusDetails from ._models_py3 import RangeBasedItemLevelRestoreCriteria from ._models_py3 import RecoveryPointDataStoreDetails from ._models_py3 import RecoveryPointsFilters from ._models_py3 import ResourceGuard from ._models_py3 import ResourceGuardOperation from ._models_py3 import ResourceGuardResource from ._models_py3 import ResourceGuardResourceList from ._models_py3 import ResourceMoveDetails from ._models_py3 import RestorableTimeRange from ._models_py3 import RestoreFilesTargetInfo from ._models_py3 import RestoreJobRecoveryPointDetails from ._models_py3 import RestoreTargetInfo from ._models_py3 import RestoreTargetInfoBase from ._models_py3 import RetentionTag from ._models_py3 import ScheduleBasedBackupCriteria from ._models_py3 import ScheduleBasedTriggerContext from ._models_py3 import SecretStoreBasedAuthCredentials from ._models_py3 import SecretStoreResource from ._models_py3 import SourceLifeCycle from ._models_py3 import StorageSetting from ._models_py3 import SupportedFeature from ._models_py3 import SystemData from ._models_py3 import TaggingCriteria from ._models_py3 import TargetCopySetting from ._models_py3 import TargetDetails from ._models_py3 import TriggerBackupRequest from ._models_py3 import TriggerContext from ._models_py3 import UserFacingError from ._models_py3 import ValidateForBackupRequest from ._models_py3 import ValidateRestoreRequestObject except (SyntaxError, ImportError): from ._models import AbsoluteDeleteOption # type: ignore from ._models import AdHocBackupRuleOptions # type: ignore from ._models import AdhocBackupTriggerOption # type: ignore from ._models import AdhocBasedTaggingCriteria # type: ignore from ._models import AdhocBasedTriggerContext # type: ignore from ._models import AuthCredentials # type: ignore from ._models import AzureBackupDiscreteRecoveryPoint # type: ignore from ._models import AzureBackupFindRestorableTimeRangesRequest # type: ignore from ._models import AzureBackupFindRestorableTimeRangesRequestResource # type: ignore from ._models import AzureBackupFindRestorableTimeRangesResponse # type: ignore from ._models import AzureBackupFindRestorableTimeRangesResponseResource # type: ignore from ._models import AzureBackupJob # type: ignore from ._models import AzureBackupJobResource # type: ignore from ._models import AzureBackupJobResourceList # type: ignore from ._models import AzureBackupParams # type: ignore from ._models import AzureBackupRecoveryPoint # type: ignore from ._models import AzureBackupRecoveryPointBasedRestoreRequest # 
type: ignore from ._models import AzureBackupRecoveryPointResource # type: ignore from ._models import AzureBackupRecoveryPointResourceList # type: ignore from ._models import AzureBackupRecoveryTimeBasedRestoreRequest # type: ignore from ._models import AzureBackupRehydrationRequest # type: ignore from ._models import AzureBackupRestoreRequest # type: ignore from ._models import AzureBackupRestoreWithRehydrationRequest # type: ignore from ._models import AzureBackupRule # type: ignore from ._models import AzureOperationalStoreParameters # type: ignore from ._models import AzureRetentionRule # type: ignore from ._models import BackupCriteria # type: ignore from ._models import BackupInstance # type: ignore from ._models import BackupInstanceResource # type: ignore from ._models import BackupInstanceResourceList # type: ignore from ._models import BackupParameters # type: ignore from ._models import BackupPolicy # type: ignore from ._models import BackupSchedule # type: ignore from ._models import BackupVault # type: ignore from ._models import BackupVaultResource # type: ignore from ._models import BackupVaultResourceList # type: ignore from ._models import BaseBackupPolicy # type: ignore from ._models import BaseBackupPolicyResource # type: ignore from ._models import BaseBackupPolicyResourceList # type: ignore from ._models import BasePolicyRule # type: ignore from ._models import CheckNameAvailabilityRequest # type: ignore from ._models import CheckNameAvailabilityResult # type: ignore from ._models import ClientDiscoveryDisplay # type: ignore from ._models import ClientDiscoveryForLogSpecification # type: ignore from ._models import ClientDiscoveryForProperties # type: ignore from ._models import ClientDiscoveryForServiceSpecification # type: ignore from ._models import ClientDiscoveryResponse # type: ignore from ._models import ClientDiscoveryValueForSingleApi # type: ignore from ._models import CopyOnExpiryOption # type: ignore from ._models import CopyOption # type: ignore from ._models import CustomCopyOption # type: ignore from ._models import DataStoreInfoBase # type: ignore from ._models import DataStoreParameters # type: ignore from ._models import Datasource # type: ignore from ._models import DatasourceSet # type: ignore from ._models import Day # type: ignore from ._models import DeleteOption # type: ignore from ._models import DppBaseResource # type: ignore from ._models import DppBaseResourceList # type: ignore from ._models import DppIdentityDetails # type: ignore from ._models import DppResource # type: ignore from ._models import DppResourceList # type: ignore from ._models import DppTrackedResource # type: ignore from ._models import DppTrackedResourceList # type: ignore from ._models import DppWorkerRequest # type: ignore from ._models import Error # type: ignore from ._models import ErrorAdditionalInfo # type: ignore from ._models import ExportJobsResult # type: ignore from ._models import FeatureValidationRequest # type: ignore from ._models import FeatureValidationRequestBase # type: ignore from ._models import FeatureValidationResponse # type: ignore from ._models import FeatureValidationResponseBase # type: ignore from ._models import ImmediateCopyOption # type: ignore from ._models import InnerError # type: ignore from ._models import ItemLevelRestoreCriteria # type: ignore from ._models import ItemLevelRestoreTargetInfo # type: ignore from ._models import JobExtendedInfo # type: ignore from ._models import JobSubTask # type: ignore from ._models import 
OperationExtendedInfo # type: ignore from ._models import OperationJobExtendedInfo # type: ignore from ._models import OperationResource # type: ignore from ._models import PatchResourceRequestInput # type: ignore from ._models import PolicyInfo # type: ignore from ._models import PolicyParameters # type: ignore from ._models import ProtectionStatusDetails # type: ignore from ._models import RangeBasedItemLevelRestoreCriteria # type: ignore from ._models import RecoveryPointDataStoreDetails # type: ignore from ._models import RecoveryPointsFilters # type: ignore from ._models import ResourceGuard # type: ignore from ._models import ResourceGuardOperation # type: ignore from ._models import ResourceGuardResource # type: ignore from ._models import ResourceGuardResourceList # type: ignore from ._models import ResourceMoveDetails # type: ignore from ._models import RestorableTimeRange # type: ignore from ._models import RestoreFilesTargetInfo # type: ignore from ._models import RestoreJobRecoveryPointDetails # type: ignore from ._models import RestoreTargetInfo # type: ignore from ._models import RestoreTargetInfoBase # type: ignore from ._models import RetentionTag # type: ignore from ._models import ScheduleBasedBackupCriteria # type: ignore from ._models import ScheduleBasedTriggerContext # type: ignore from ._models import SecretStoreBasedAuthCredentials # type: ignore from ._models import SecretStoreResource # type: ignore from ._models import SourceLifeCycle # type: ignore from ._models import StorageSetting # type: ignore from ._models import SupportedFeature # type: ignore from ._models import SystemData # type: ignore from ._models import TaggingCriteria # type: ignore from ._models import TargetCopySetting # type: ignore from ._models import TargetDetails # type: ignore from ._models import TriggerBackupRequest # type: ignore from ._models import TriggerContext # type: ignore from ._models import UserFacingError # type: ignore from ._models import ValidateForBackupRequest # type: ignore from ._models import ValidateRestoreRequestObject # type: ignore from ._data_protection_client_enums import ( AbsoluteMarker, CreatedByType, CurrentProtectionState, DataStoreTypes, DayOfWeek, FeatureSupportStatus, FeatureType, Month, ProvisioningState, RecoveryOption, RehydrationPriority, RehydrationStatus, ResourceMoveState, RestoreSourceDataStoreType, RestoreTargetLocationType, SecretStoreType, SourceDataStoreType, Status, StorageSettingStoreTypes, StorageSettingTypes, WeekNumber, ) __all__ = [ 'AbsoluteDeleteOption', 'AdHocBackupRuleOptions', 'AdhocBackupTriggerOption', 'AdhocBasedTaggingCriteria', 'AdhocBasedTriggerContext', 'AuthCredentials', 'AzureBackupDiscreteRecoveryPoint', 'AzureBackupFindRestorableTimeRangesRequest', 'AzureBackupFindRestorableTimeRangesRequestResource', 'AzureBackupFindRestorableTimeRangesResponse', 'AzureBackupFindRestorableTimeRangesResponseResource', 'AzureBackupJob', 'AzureBackupJobResource', 'AzureBackupJobResourceList', 'AzureBackupParams', 'AzureBackupRecoveryPoint', 'AzureBackupRecoveryPointBasedRestoreRequest', 'AzureBackupRecoveryPointResource', 'AzureBackupRecoveryPointResourceList', 'AzureBackupRecoveryTimeBasedRestoreRequest', 'AzureBackupRehydrationRequest', 'AzureBackupRestoreRequest', 'AzureBackupRestoreWithRehydrationRequest', 'AzureBackupRule', 'AzureOperationalStoreParameters', 'AzureRetentionRule', 'BackupCriteria', 'BackupInstance', 'BackupInstanceResource', 'BackupInstanceResourceList', 'BackupParameters', 'BackupPolicy', 'BackupSchedule', 
'BackupVault', 'BackupVaultResource', 'BackupVaultResourceList', 'BaseBackupPolicy', 'BaseBackupPolicyResource', 'BaseBackupPolicyResourceList', 'BasePolicyRule', 'CheckNameAvailabilityRequest', 'CheckNameAvailabilityResult', 'ClientDiscoveryDisplay', 'ClientDiscoveryForLogSpecification', 'ClientDiscoveryForProperties', 'ClientDiscoveryForServiceSpecification', 'ClientDiscoveryResponse', 'ClientDiscoveryValueForSingleApi', 'CopyOnExpiryOption', 'CopyOption', 'CustomCopyOption', 'DataStoreInfoBase', 'DataStoreParameters', 'Datasource', 'DatasourceSet', 'Day', 'DeleteOption', 'DppBaseResource', 'DppBaseResourceList', 'DppIdentityDetails', 'DppResource', 'DppResourceList', 'DppTrackedResource', 'DppTrackedResourceList', 'DppWorkerRequest', 'Error', 'ErrorAdditionalInfo', 'ExportJobsResult', 'FeatureValidationRequest', 'FeatureValidationRequestBase', 'FeatureValidationResponse', 'FeatureValidationResponseBase', 'ImmediateCopyOption', 'InnerError', 'ItemLevelRestoreCriteria', 'ItemLevelRestoreTargetInfo', 'JobExtendedInfo', 'JobSubTask', 'OperationExtendedInfo', 'OperationJobExtendedInfo', 'OperationResource', 'PatchResourceRequestInput', 'PolicyInfo', 'PolicyParameters', 'ProtectionStatusDetails', 'RangeBasedItemLevelRestoreCriteria', 'RecoveryPointDataStoreDetails', 'RecoveryPointsFilters', 'ResourceGuard', 'ResourceGuardOperation', 'ResourceGuardResource', 'ResourceGuardResourceList', 'ResourceMoveDetails', 'RestorableTimeRange', 'RestoreFilesTargetInfo', 'RestoreJobRecoveryPointDetails', 'RestoreTargetInfo', 'RestoreTargetInfoBase', 'RetentionTag', 'ScheduleBasedBackupCriteria', 'ScheduleBasedTriggerContext', 'SecretStoreBasedAuthCredentials', 'SecretStoreResource', 'SourceLifeCycle', 'StorageSetting', 'SupportedFeature', 'SystemData', 'TaggingCriteria', 'TargetCopySetting', 'TargetDetails', 'TriggerBackupRequest', 'TriggerContext', 'UserFacingError', 'ValidateForBackupRequest', 'ValidateRestoreRequestObject', 'AbsoluteMarker', 'CreatedByType', 'CurrentProtectionState', 'DataStoreTypes', 'DayOfWeek', 'FeatureSupportStatus', 'FeatureType', 'Month', 'ProvisioningState', 'RecoveryOption', 'RehydrationPriority', 'RehydrationStatus', 'ResourceMoveState', 'RestoreSourceDataStoreType', 'RestoreTargetLocationType', 'SecretStoreType', 'SourceDataStoreType', 'Status', 'StorageSettingStoreTypes', 'StorageSettingTypes', 'WeekNumber', ]
import pandas as pd df = pd.DataFrame({'age': [24, 42], 'state': ['NY', 'CA'], 'point': [64, 92]}, index=['Alice', 'Bob']) print(df) # age state point # Alice 24 NY 64 # Bob 42 CA 92 for column_name in df: print(type(column_name)) print(column_name) print('======\n') # <class 'str'> # age # ====== # # <class 'str'> # state # ====== # # <class 'str'> # point # ====== # for column_name in df.__iter__(): print(type(column_name)) print(column_name) print('======\n') # <class 'str'> # age # ====== # # <class 'str'> # state # ====== # # <class 'str'> # point # ====== # for column_name, item in df.iteritems(): print(type(column_name)) print(column_name) print('~~~~~~') print(type(item)) print(item) print('------') print(item['Alice']) print(item[0]) print(item.Alice) print('======\n') # <class 'str'> # age # ~~~~~~ # <class 'pandas.core.series.Series'> # Alice 24 # Bob 42 # Name: age, dtype: int64 # ------ # 24 # 24 # 24 # ====== # # <class 'str'> # state # ~~~~~~ # <class 'pandas.core.series.Series'> # Alice NY # Bob CA # Name: state, dtype: object # ------ # NY # NY # NY # ====== # # <class 'str'> # point # ~~~~~~ # <class 'pandas.core.series.Series'> # Alice 64 # Bob 92 # Name: point, dtype: int64 # ------ # 64 # 64 # 64 # ====== # for index, row in df.iterrows(): print(type(index)) print(index) print('~~~~~~') print(type(row)) print(row) print('------') print(row['point']) print(row[2]) print(row.point) print('======\n') # <class 'str'> # Alice # ~~~~~~ # <class 'pandas.core.series.Series'> # age 24 # state NY # point 64 # Name: Alice, dtype: object # ------ # 64 # 64 # 64 # ====== # # <class 'str'> # Bob # ~~~~~~ # <class 'pandas.core.series.Series'> # age 42 # state CA # point 92 # Name: Bob, dtype: object # ------ # 92 # 92 # 92 # ====== # for row in df.itertuples(): print(type(row)) print(row) print('------') print(row[3]) print(row.point) print('======\n') # <class 'pandas.core.frame.Pandas'> # Pandas(Index='Alice', age=24, state='NY', point=64) # ------ # 64 # 64 # ====== # # <class 'pandas.core.frame.Pandas'> # Pandas(Index='Bob', age=42, state='CA', point=92) # ------ # 92 # 92 # ====== # for row in df.itertuples(name=None): print(type(row)) print(row) print('------') print(row[3]) print('======\n') # <class 'tuple'> # ('Alice', 24, 'NY', 64) # ------ # 64 # ====== # # <class 'tuple'> # ('Bob', 42, 'CA', 92) # ------ # 92 # ====== # print(df['age']) # Alice 24 # Bob 42 # Name: age, dtype: int64 print(type(df['age'])) # <class 'pandas.core.series.Series'> for age in df['age']: print(age) # 24 # 42 for age, point in zip(df['age'], df['point']): print(age, point) # 24 64 # 42 92 print(df.index) # Index(['Alice', 'Bob'], dtype='object') print(type(df.index)) # <class 'pandas.core.indexes.base.Index'> for index in df.index: print(index) # Alice # Bob for index, state in zip(df.index, df['state']): print(index, state) # Alice NY # Bob CA for index, row in df.iterrows(): row['point'] += row['age'] print(df) # age state point # Alice 24 NY 64 # Bob 42 CA 92 for index, row in df.iterrows(): df.at[index, 'point'] += row['age'] print(df) # age state point # Alice 24 NY 88 # Bob 42 CA 134 df['point'] += df['age'] print(df) # age state point # Alice 24 NY 112 # Bob 42 CA 176 df['new'] = df['point'] + df['age'] * 2 print(df) # age state point new # Alice 24 NY 112 160 # Bob 42 CA 176 260 df['age_sqrt'] = pd.np.sqrt(df['age']) print(df) # age state point new age_sqrt # Alice 24 NY 112 160 4.898979 # Bob 42 CA 176 260 6.480741 df['state_0'] = df['state'].str.lower().str[0] print(df) # age state point 
new age_sqrt state_0 # Alice 24 NY 112 160 4.898979 n # Bob 42 CA 176 260 6.480741 c
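# Additional illustration (not in the original snippet): DataFrame.apply with
# axis=1 is a row-wise alternative to iterrows() that returns a new Series
# instead of mutating rows in place. The column name below is made up for
# this example.
df['age_plus_point'] = df.apply(lambda row: row['age'] + row['point'], axis=1)
print(df[['age', 'point', 'age_plus_point']])
#        age  point  age_plus_point
# Alice   24    112             136
# Bob     42    176             218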
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Shared code between AMQP based nova.rpc implementations.

The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""

import inspect
import sys
import uuid

from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore

from nova import context
from nova import log as logging
from nova.openstack.common import excutils
from nova.openstack.common import local
import nova.rpc.common as rpc_common

LOG = logging.getLogger(__name__)


class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""

    def __init__(self, conf, connection_cls, *args, **kwargs):
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        LOG.debug('Pool creating new connection')
        return self.connection_cls(self.conf)

    def empty(self):
        while self.free_items:
            self.get().close()


_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool


class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection(). This is essentially a wrapper around Connection
    that supports 'with' and can return a new Connection or one from a
    pool. It will also catch when an instance of this class is to be
    deleted so that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching
    all exceptions and making sure to return a connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            self.connection = connection_pool.connection_cls(
                conf, server_params=server_params)
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
""" if self.connection: if self.pooled: # Reset the connection so it's ready for the next caller # to grab from the pool self.connection.reset() self.connection_pool.put(self.connection) else: try: self.connection.close() except Exception: pass self.connection = None def __exit__(self, exc_type, exc_value, tb): """End of 'with' statement. We're done here.""" self._done() def __del__(self): """Caller is done with this connection. Make sure we cleaned up.""" self._done() def close(self): """Caller is done with this connection.""" self._done() def create_consumer(self, topic, proxy, fanout=False): self.connection.create_consumer(topic, proxy, fanout) def consume_in_thread(self): self.connection.consume_in_thread() def __getattr__(self, key): """Proxy all other calls to the Connection instance""" if self.connection: return getattr(self.connection, key) else: raise rpc_common.InvalidRPCConnectionReuse() def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None, ending=False): """Sends a reply or an error on the channel signified by msg_id. Failure should be a sys.exc_info() tuple. """ with ConnectionContext(conf, connection_pool) as conn: if failure: failure = rpc_common.serialize_remote_exception(failure) try: msg = {'result': reply, 'failure': failure} except TypeError: msg = {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure} if ending: msg['ending'] = True conn.direct_send(msg_id, msg) class RpcContext(context.RequestContext): """Context that supports replying to a rpc.call""" def __init__(self, *args, **kwargs): self.msg_id = kwargs.pop('msg_id', None) self.conf = kwargs.pop('conf') super(RpcContext, self).__init__(*args, **kwargs) def reply(self, reply=None, failure=None, ending=False, connection_pool=None): if self.msg_id: msg_reply(self.conf, self.msg_id, connection_pool, reply, failure, ending) if ending: self.msg_id = None def unpack_context(conf, msg): """Unpack context from msg.""" context_dict = {} for key in list(msg.keys()): # NOTE(vish): Some versions of python don't like unicode keys # in kwargs. key = str(key) if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value context_dict['msg_id'] = msg.pop('_msg_id', None) context_dict['conf'] = conf ctx = RpcContext.from_dict(context_dict) rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) return ctx def pack_context(msg, context): """Pack context into msg. Values for message keys need to be less than 255 chars, so we pull context out into a bunch of separate keys. If we want to support more arguments in rabbit messages, we may want to do the same for args at some point. """ context_d = dict([('_context_%s' % key, value) for (key, value) in context.to_dict().iteritems()]) msg.update(context_d) class ProxyCallback(object): """Calls methods on a proxy object based on method and args.""" def __init__(self, conf, proxy, connection_pool): self.proxy = proxy self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size) self.connection_pool = connection_pool self.conf = conf def __call__(self, message_data): """Consumer callback to call a method on a proxy object. Parses the message for validity and fires off a thread to call the proxy object method. 
Message data should be a dictionary with two keys: method: string representing the method to call args: dictionary of arg: value Example: {'method': 'echo', 'args': {'value': 42}} """ # It is important to clear the context here, because at this point # the previous context is stored in local.store.context if hasattr(local.store, 'context'): del local.store.context rpc_common._safe_log(LOG.debug, _('received %s'), message_data) ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') args = message_data.get('args', {}) version = message_data.get('version', None) if not method: LOG.warn(_('no method for message: %s') % message_data) ctxt.reply(_('No method for message: %s') % message_data, connection_pool=self.connection_pool) return self.pool.spawn_n(self._process_data, ctxt, version, method, args) def _process_data(self, ctxt, version, method, args): """Process a message in a new thread. If the proxy object we have has a dispatch method (see rpc.dispatcher.RpcDispatcher), pass it the version, method, and args and let it dispatch as appropriate. If not, use the old behavior of magically calling the specified method on the proxy we have here. """ ctxt.update_store() try: rval = self.proxy.dispatch(ctxt, version, method, **args) # Check if the result was a generator if inspect.isgenerator(rval): for x in rval: ctxt.reply(x, None, connection_pool=self.connection_pool) else: ctxt.reply(rval, None, connection_pool=self.connection_pool) # This final None tells multicall that it is done. ctxt.reply(ending=True, connection_pool=self.connection_pool) except Exception as e: LOG.exception('Exception during message handling') ctxt.reply(None, sys.exc_info(), connection_pool=self.connection_pool) class MulticallWaiter(object): def __init__(self, conf, connection, timeout): self._connection = connection self._iterator = connection.iterconsume( timeout=timeout or conf.rpc_response_timeout) self._result = None self._done = False self._got_ending = False self._conf = conf def done(self): if self._done: return self._done = True self._iterator.close() self._iterator = None self._connection.close() def __call__(self, data): """The consume() callback will call this. Store the result.""" if data['failure']: failure = data['failure'] self._result = rpc_common.deserialize_remote_exception(self._conf, failure) elif data.get('ending', False): self._got_ending = True else: self._result = data['result'] def __iter__(self): """Return a result until we get a 'None' response from consumer""" if self._done: raise StopIteration while True: try: self._iterator.next() except Exception: with excutils.save_and_reraise_exception(): self.done() if self._got_ending: self.done() raise StopIteration result = self._result if isinstance(result, Exception): self.done() raise result yield result def create_connection(conf, new, connection_pool): """Create a connection""" return ConnectionContext(conf, connection_pool, pooled=not new) def multicall(conf, context, topic, msg, timeout, connection_pool): """Make a call that returns multiple times.""" # Can't use 'with' for multicall, as it returns an iterator # that will continue to use the connection. 
When it's done, # connection.close() will get called which will put it back into # the pool LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) pack_context(msg, context) conn = ConnectionContext(conf, connection_pool) wait_msg = MulticallWaiter(conf, conn, timeout) conn.declare_direct_consumer(msg_id, wait_msg) conn.topic_send(topic, msg) return wait_msg def call(conf, context, topic, msg, timeout, connection_pool): """Sends a message on a topic and wait for a response.""" rv = multicall(conf, context, topic, msg, timeout, connection_pool) # NOTE(vish): return the last result from the multicall rv = list(rv) if not rv: return return rv[-1] def cast(conf, context, topic, msg, connection_pool): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, msg) def fanout_cast(conf, context, topic, msg, connection_pool): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.fanout_send(topic, msg) def cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a topic to a specific server.""" pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: conn.topic_send(topic, msg) def fanout_cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a fanout exchange to a specific server.""" pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: conn.fanout_send(topic, msg) def notify(conf, context, topic, msg, connection_pool): """Sends a notification event on a topic.""" event_type = msg.get('event_type') LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals()) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.notify_send(topic, msg) def cleanup(connection_pool): if connection_pool: connection_pool.empty()
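# --- Supplementary sketch (not part of the original module) -------------------
# pack_context()/unpack_context() above flatten the request context into
# '_context_<key>' entries of the AMQP message and rebuild it on the consumer
# side. This is a minimal, dependency-free illustration of that key scheme using
# plain dicts; the real code works on nova's RequestContext/RpcContext, and the
# function names here are made up for the sketch.
def pack_context_sketch(msg, context_dict):
    # Prefix every context key so it can be told apart from the rpc payload.
    msg.update({'_context_%s' % key: value for key, value in context_dict.items()})
    return msg

def unpack_context_sketch(msg):
    # Strip the '_context_' prefix (9 characters) and pop the keys back out.
    context = {}
    for key in list(msg.keys()):
        if key.startswith('_context_'):
            context[key[9:]] = msg.pop(key)
    return context

message = pack_context_sketch({'method': 'echo', 'args': {'value': 42}},
                              {'user_id': 'alice', 'is_admin': False})
print(message)                           # rpc payload plus _context_* keys
print(unpack_context_sketch(message))    # {'user_id': 'alice', 'is_admin': False}
print(message)                           # context keys have been popped off again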
#!/usr/bin/env python ''' Copyright 2012 Root the Box Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ---------------------------------------------------------------------------- This file is the main starting point for the application, based on the command line arguments it calls various components setup/start/etc. ''' from __future__ import print_function import os import nose import random import logging from datetime import datetime from tornado.options import define, options from libs.ConsoleColors import * from libs.ConfigHelpers import save_config from setup import __version__ current_time = lambda: str(datetime.now()).split(' ')[1].split('.')[0] def start(): ''' Starts the application ''' from handlers import start_server print(INFO + '%s : Starting application ...' % current_time()) start_server() def setup(): ''' Creates/bootstraps the database. If you're a real developer you'll figure out how to remove the warning yourself. Don't merge any code the removes it. ''' is_devel = options.setup.startswith('dev') if is_devel: print("%sWARNING:%s Setup is in development mode %s" % ( WARN + bold, W, WARN, )) message = "I know what the fuck I am doing" resp = raw_input(PROMPT + 'Please type "%s": ' % message) if resp.replace('"', '').lower().strip() != message.lower(): os._exit(1) print(INFO + '%s : Creating the database ...' % current_time()) from setup.create_database import create_tables, engine, metadata create_tables(engine, metadata, is_devel) print(INFO + '%s : Bootstrapping the database ...' % current_time()) import setup.bootstrap # Display Details if is_devel: environ = bold + R + "Developement boot strap" + W details = ", admin password is 'nimda123'." else: environ = bold + "Production boot strap" + W details = '.' print(INFO + '%s completed successfully%s' % (environ, details)) def recovery(): ''' Starts the recovery console ''' from setup.recovery import RecoveryConsole print(INFO + '%s : Starting recovery console ...' % current_time()) console = RecoveryConsole() try: console.cmdloop() except KeyboardInterrupt: print(INFO + "Have a nice day!") def setup_xml(): ''' Imports XML file(s) ''' from setup.xmlsetup import import_xml for index, xml_param in enumerate(xml_params): print(INFO + "Processing %d of %d .xml file(s) ..." % ( index + 1, len(xml_params) )) import_xml(xml_param) print(INFO + "%s : Completed processing of all .xml file(s)" % ( current_time() )) def tests(): ''' Creates a temporary sqlite database and runs the unit tests ''' print(INFO + '%s : Running unit tests ...' % current_time()) from tests import setup_database, teardown_database db_name = 'test-%04s' % random.randint(0, 9999) setup_database(db_name) nose.run(module='tests', argv=[os.getcwd() + '/tests']) teardown_database(db_name) def restart(): ''' Shutdown the actual process and restart the service. Useful for rootthebox.cfg changes. ''' pid = os.getpid() print(INFO + '%s : Restarting the service (%i)...' 
% (current_time(), pid)) os.execl('./setup/restart.sh', '') def version(): from sqlalchemy import __version__ as orm_version from tornado import version as tornado_version print(bold + "Root the Box%s v%s" % (W, __version__)) print(bold + " SQL Alchemy%s v%s" % (W, orm_version)) print(bold + " Torando%s v%s" % (W, tornado_version)) def check_cwd(): ''' Checks to make sure the cwd is the application root directory ''' app_root = os.path.dirname(os.path.abspath(__file__)) if app_root != os.getcwd(): print(INFO + "Switching CWD to '%s'" % app_root) os.chdir(app_root) ######################################################################## # Application Settings ######################################################################## # HTTP Server Settings define("origin", default="ws://localhost:8888", group="server", help="validate websocket connections against this origin") define("listen_port", default=8888, group="server", help="run instances starting the given port", type=int) define("session_age", default=int(60 * 60), group="server", help="max session age (seconds)", type=int) define("session_regeneration_interval", default=int(60 * 60), group="server", help="regenerate session time frame (seconds)", type=int) define("x_headers", default=False, group="server", help="honor the `X-FORWARDED-FOR` and `X-REAL-IP` http headers", type=bool) define("ssl", default=False, group="server", help="enable the use of ssl/tls", type=bool) define("certfile", default="", group="server", help="the certificate file path (for ssl/tls)") define("keyfile", default="", group="server", help="the key file path (for ssl/tls)") define("admin_ips", multiple=True, default=['127.0.0.1', '::1'], group="server", help="whitelist of ip addresses that can access the admin ui") # Application Settings define("debug", default=False, group="application", help="start the application in debugging mode", type=bool) define("avatar_dir", default="./files/avatars", group="application", help="the directory to store avatar files") define("share_dir", default="./files/shares", group="application", help="the directory to store shared files") define("flag_attachment_dir", default="./files/flag_attachments", group="application", help="the directory to store flag attachment files") define("source_code_market_dir", default="./files/source_code_market", group="application", help="the directory to store souce code market files") # ReCAPTCHA define("use_recaptcha", default=True, help="enable the use of recaptcha for bank passwords", group='recaptcha', type=bool) define("recaptcha_api_key", default="6LcJJ88SAAAAAFzcmQqDPWGKRQXmJ0DCiZoPWTZf", group='recaptcha', help="recaptcha api key") # Database settings define("sql_dialect", default="mysql", group="database", help="define the type of database (mysql|postgres|sqlite)") define("sql_database", default="rootthebox", group="database", help="the sql database name") define("sql_host", default="127.0.0.1", group="database", help="database sever hostname") define("sql_port", default=3306, group="database", help="database tcp port", type=int) define("sql_user", default="rtb", group="database", help="database username") define("sql_password", default="rtb", group="database", help="database password, if applicable") define("log_sql", default=False, group="database", help="Log SQL queries for debugging") # Memcached settings define("memcached", default="127.0.0.1", group="cache", help="memcached sever hostname") define("memcached_port", default=11011, group="cache", help="memcached tcp port", type=int) 
# Game Settings define("game_name", default="Root the Box", group="game", help="the name of the current game", type=basestring) define("restrict_registration", default=False, group="game", help="require registration tokens", type=bool) define("public_teams", default=True, group="game", help="allow anyone to create a new team", type=bool) define("max_team_size", default=4, group="game", help="max number of players on any one team", type=int) define("min_user_password_length", default=16, group="game", help="min user password length", type=int) define("max_password_length", default=7, group="game", help="max bank password length", type=int) define("use_bots", default=True, group="game", help="enable the use of botnets", type=bool) define("botnet_db", default="./files/botnet.db", group="game", help="botnet database path") define("bot_reward", default=50, group="game", help="the reward per-interval for a single bot", type=int) define("use_black_market", default=True, group="game", help="enable the use of the black market", type=bool) define("password_upgrade_cost", default=1000, group="game", help="price to upgrade a password hash algorithm") define("bribe_cost", default=2500, group="game", help="the base bribe cost to swat another player") define("whitelist_box_ips", default=False, group="game", help="whitelist box ip addresses (for botnets)", type=bool) define("dynamic_flag_value", default=True, group="game", help="decrease reward for flags based on captures", type=bool) define("flag_value_decrease", default=10, group="game", help="decrease flag reward by this percent per capture", type=int) define("default_theme", default="Cyborg", group="game", help="the default css theme") define("rank_by", default="flags", group="game", help="rank teams by (flags or money)") # I/O Loop Settings define("history_snapshot_interval", default=int(60000 * 5), group="game", help="interval to create history snapshots (milliseconds)", type=int) define("bot_reward_interval", default=int(60000 * 15), group="game", help="interval for rewarding botnets (milliseconds)", type=int) # Process modes define("setup", default="", help="setup a database (prod|devel)") define("xml", multiple=True, default=[], help="import xml file(s)") define("recovery", default=False, help="start the recovery console", type=bool) define("start", default=False, help="start the server", type=bool) define("restart", default=False, help="restart the server", type=bool) define("version", default=False, help="display version information and exit", type=bool) define("save", default=False, help="save the current configuration to file", type=bool) define("config", default="files/rootthebox.cfg", help="root the box configuration file") if __name__ == '__main__': # We need this to pull the --config option options.parse_command_line() check_cwd() if os.path.isfile(options.config): logging.debug("Parsing config file `%s`" % ( os.path.abspath(options.config), )) options.parse_config_file(options.config) # Make sure that cli args always have president over the file options.parse_command_line() if options.save: save_config() if options.setup.lower()[:3] in ['pro', 'dev']: setup() elif options.start: start() elif options.restart: restart() elif options.recovery: recovery() elif options.version: version()
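# --- Supplementary sketch (not part of the original script) -------------------
# The __main__ block above parses argv twice on purpose: once to discover where
# --config points, and once more after parse_config_file() so that command-line
# flags take precedence over values read from the file. The sketch below shows
# the same tornado.options pattern in isolation; the option name and argv values
# are made up for illustration.
from tornado.options import define, options, parse_command_line, parse_config_file

define("listen_port_demo", default=8888, type=int, help="demo option only")

def load_settings(argv, config_path=None):
    parse_command_line(argv)              # first pass: pick up flags such as --config
    if config_path:
        parse_config_file(config_path)    # file values fill in the defaults
    parse_command_line(argv)              # second pass: CLI flags win over the file
    return options.listen_port_demo

if __name__ == '__main__':
    print(load_settings(['prog', '--listen_port_demo=9999']))   # prints 9999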
#/usr/bin/python import urllib, urllib2 import csv import os import geojson from geojson import MultiPoint from datetime import datetime import string from PIL import Image from copy import deepcopy north = -1.3000 south = -1.3232 east = 36.8079 west = 36.7663 def readfile(filename): with open(filename, 'r') as f: read_data = f.read() f.closed return read_data def writefile(file_name, buf): myFile = open(file_name, 'w') myFile.write(buf) myFile.close() def url2file(url,file_name): req = urllib2.Request(url) try: rsp = urllib2.urlopen(req) except urllib2.HTTPError, err: print str(err.code) + " " + url return myFile = open(file_name, 'w') myFile.write(rsp.read()) myFile.close() def sync_osm(): kibera = "36.7651,-1.3211,36.8178,-1.3009" mathare = "36.8427,-1.2673,36.8792,-1.2479" url_base = "http://overpass-api.de/api/interpreter?data=[bbox];node['education:type'];out%20meta;&bbox=" url2file(url_base + kibera,"kibera-schools-osm.xml") url2file(url_base + mathare,"mathare-schools-osm.xml") def kenyaopendata(): #https://www.opendata.go.ke/Education/Kenya-Secondary-Schools-2007/i6vz-a543 url2file('https://www.opendata.go.ke/api/views/i6vz-a543/rows.csv?accessType=DOWNLOAD','kenya-secondary-schools.csv') #https://www.opendata.go.ke/Education/Kenya-Primary-Schools-2007/p452-xb7c url2file('https://www.opendata.go.ke/api/views/p452-xb7c/rows.csv?accessType=DOWNLOAD','kenya-primary-schools.csv') def filter_data(input_file,output_file,division_column,location_column,write_id,other_columns): f = open(input_file) #header = f.readline() #header_list = f.readline.replace("\n","").split(',') reader = csv.DictReader(f) column_list = ['official_name', 'lat', 'lon'] column_list.extend(other_columns) writer = csv.DictWriter(open(output_file,'w'),column_list) header = dict() for item in column_list: header[ item ] = item writer.writerow(header) for row in reader: [lat,lon] = row[location_column].replace('(','').replace(')','').split(',') # TODO check if we really need all of "KIBERA" if (row[division_column] == 'GOLF COURSE' or row[division_column] == "KENYATTA/GOLF COURSE" or row[division_column] == "KIBERA" or row[division_column] == "LAINI SABA" or row[division_column] == "MAKINA" or row[division_column] == "MUGUMOINI" or row[division_column] == "OLYMPIC" or row[division_column] == "SARANGOMBE" or row[division_column] == "SERA NGOMBE") or ((float(lat) <= north and float(lat) >= south) and (float(lon) <= east and float(lon) >= west)): out_row = dict() out_row['official_name'] = row[write_id] out_row['lat'] = lat out_row['lon'] = lon for h in other_columns: out_row[h] = row[h] writer.writerow(out_row) def filter_kenyaopendata(): other_columns = ['Level of Education', 'Status of School','Sponsor of School','School Institution Type_1','School Institution Type_2','School Institution Type_3','Pupil Teacher Ratio','Pupil Classroom Ratio','Pupil Toilet Ratio','Total Number of Classrooms','Boys Toilets','Girls Toilets','Teachers Toilets','Total Toilets','Total Boys','Total Girls','Total Enrolment','GOK TSC Male','GOK TSC Female','Local Authority Male','Local Authority Female','PTA BOG Male','PTA BOG Female','Others Male','Others Female','Non-Teaching Staff Male','Non-Teaching Staff Female','Province','District','Division','Location','Costituency'] filter_data('kenya-primary-schools.csv','kibera-primary-schools.csv','Location','Geolocation','Name of School', other_columns) other_columns = ['Code','School Address','Public or Private','School Sponsor','Girls/Boys/Mixed','Day or Boarding','Ordinary or Special','Total 
Enrolment 2007','Total Teaching staff','Pupil Teacher Ratio','Acreage per enrolment','TSC Male Teachers','TSC Female Teachers','Local Authority Male Teachers','Local Authority Female Teachers','PTA Board of Governors Male Teacher','PTA Board of Governors Female Teacher','Other Male Teachers','Other Female Teachers','Non Teaching Staff Male','Non Teaching Staff Female','Acreage','District','Location','Sublocation','School Zone','Costituency','Province'] filter_data('kenya-secondary-schools.csv','kibera-secondary-schools.csv','Location','Location 1','Name of School', other_columns) #Code?? def convert2geojson(): #KOD os.system("rm kibera-primary-schools.geojson") os.system("ogr2ogr -f GeoJSON kibera-primary-schools.geojson kibera-primary-schools.vrt") os.system("rm kibera-secondary-schools.geojson") os.system("ogr2ogr -f GeoJSON kibera-secondary-schools.geojson kibera-secondary-schools.vrt") kod_primary = geojson.loads(readfile('kibera-primary-schools.geojson')) kod_secondary = geojson.loads(readfile('kibera-secondary-schools.geojson')) kod_primary.features.extend(kod_secondary.features) dump = geojson.dumps(kod_primary, sort_keys=True, indent=2) writefile('kibera-primary-secondary-schools.geojson',dump) #OSM os.system("osmtogeojson -e kibera-schools-osm.xml > kibera-schools-osm.geojson") os.system("osmtogeojson -e mathare-schools-osm.xml > mathare-schools-osm.geojson") clean_osm('kibera-schools-osm.geojson') clean_osm('mathare-schools-osm.geojson') osm_kibera = geojson.loads(readfile('kibera-schools-osm.geojson')) osm_mathare = geojson.loads(readfile('mathare-schools-osm.geojson')) osm_kibera.features.extend(osm_mathare.features) dump = geojson.dumps(osm_kibera, sort_keys=True, indent=2) writefile('nairobi-schools-osm.geojson', dump) def clean_osm(file): osm = geojson.loads(readfile(file)) for feature in osm.features: properties = {} #properties = feature.properties for osm_property in feature.properties['tags'].keys(): properties[ "osm:" + osm_property ] = feature.properties['tags'][ osm_property ] properties[ "osm:_user" ] = feature.properties['meta']['user'] properties[ "osm:_timestamp" ] = datetime.strptime(feature.properties['meta']['timestamp'],'%Y-%m-%dT%H:%M:%SZ').strftime('%Y-%m-%d') properties[ "osm:id" ] = feature['id'] #TODO change to "_id"? 
properties[ "osm:location" ] = os.path.splitext(os.path.basename(file))[0].split('-')[0] feature.properties = properties for prop in feature.properties.keys(): if prop.startswith('osm:polling_station:'): feature.properties.pop(prop, None) dump = geojson.dumps(osm, sort_keys=True, indent=2) writefile(file,dump) def compare_osm_kenyaopendata(): osm = geojson.loads(readfile('nairobi-schools-osm.geojson')) kod = geojson.loads(readfile('kibera-primary-secondary-schools.geojson')) result = {} result['type'] = 'FeatureCollection' result['features'] = [] kibera = deepcopy(result) mathare = deepcopy(result) #TODO make sure all features in KOD are in OSM (through osmly) for feature in osm.features: points = [(feature.geometry.coordinates[0], feature.geometry.coordinates[1])] if 'osm:official_name' in feature.properties: found_match = False for kod_feature in kod.features: if 'official_name' in kod_feature.properties and kod_feature.properties['official_name'] == feature.properties['osm:official_name']: #print feature.properties['official_name'] found_match = True points.append((kod_feature.geometry.coordinates[0], kod_feature.geometry.coordinates[1])) for kod_property in kod_feature.properties.keys(): if kod_property != 'lat' and kod_property != 'lon': feature.properties[ "kenyaopendata:" + kod_property] = kod_feature.properties[ kod_property ] if found_match == False: print "WARN: OSM official_name has no match: " + feature.properties['osm:name'] + ", " + feature.properties['osm:official_name'] + ", " + feature['id'] geom = MultiPoint(points) result['features'].append( { "type": "Feature", "properties": feature.properties, "geometry": geom }) if feature.properties['osm:location'] == 'kibera': kibera['features'].append( { "type": "Feature", "properties": feature.properties, "geometry": geom }) else: mathare['features'].append( { "type": "Feature", "properties": feature.properties, "geometry": geom }) dump = geojson.dumps(result, sort_keys=True, indent=2) writefile('nairobi-combined-schools.geojson',dump) dump = geojson.dumps(kibera, sort_keys=True, indent=2) writefile('kibera-schools.geojson',dump) dump = geojson.dumps(mathare, sort_keys=True, indent=2) writefile('mathare-schools.geojson',dump) def slug_image(img_url): valid_chars = "%s%s" % (string.ascii_letters, string.digits) slug = ''.join(c for c in img_url if c in valid_chars) return slug def cache_image(osm_id, osm_name, img_type, img_url): slug = slug_image(img_url) cache_dir = "../content/images/cache/" + osm_id + '/' + slug + '/' if not os.path.exists(cache_dir): os.makedirs(cache_dir) fileName, fileExtension = os.path.splitext(img_url) if not fileExtension: fileExtension = ".png" if not os.path.exists(cache_dir + 'orig' + fileExtension): url2file(img_url, cache_dir + 'orig' + fileExtension) if os.path.exists(cache_dir + 'orig' + fileExtension): try: im = Image.open(cache_dir + 'orig' + fileExtension) except IOError: print "IMAGE ERROR,can't open image," + osm_name + ",http://www.osm.org/" + osm_id + "," + img_type + "," + img_url #print "orig image error " + cache_dir + 'orig' + fileExtension return size = 300, 225 if not os.path.exists(cache_dir + 'med' + fileExtension): try: im.thumbnail(size) im.save(cache_dir + 'med' + fileExtension) except KeyError: print "IMAGE ERROR,unknown extension," + osm_name + ",http://www.osm.org/" + osm_id + "," + img_type + "," + img_url #print "unknown extension error " + cache_dir + 'med' + fileExtension return size = 1200, 900 if not os.path.exists(cache_dir + 'large' + fileExtension): try: 
im.thumbnail(size) im.save(cache_dir + 'large' + fileExtension) except KeyError: print "IMAGE ERROR,unknown extension," + osm_name + ",http://www.osm.org/" + osm_id + "," + img_type + "," + img_url #print "unknown extension error " + cache_dir + 'med' + fileExtension return else: print "IMAGE ERROR,orig missing," + osm_name + ",http://www.osm.org/" + osm_id + "," + img_type + "," + img_url #print "orig image missing " + cache_dir + 'orig' + fileExtension def get_image_cache(osm_id, img_type, img_url, cache_size): slug = slug_image(img_url) cache_path = "/data/images/cache/" + osm_id + '/' + slug + '/' fileName, fileExtension = os.path.splitext(img_url) return cache_path + cache_size + fileExtension def cache_images(): combined = geojson.loads(readfile('nairobi-combined-schools.geojson')) for index, feature in enumerate(combined.features): images = [] large_images = [] for prop in ["osm:image:classroom","osm:image:compound","osm:image:other", "osm:image:outside"]: if prop in feature['properties']: #cache_image(feature['properties']['osm:id'], feature['properties']['osm:name'], prop, feature['properties'][prop]) image = get_image_cache(feature['properties']['osm:id'], prop, feature['properties'][prop], 'med') images.append(image) image = get_image_cache(feature['properties']['osm:id'], prop, feature['properties'][prop], 'large') large_images.append(image) if len(images) > 0: combined.features[index]['properties']['osm:images'] = ','.join(images) combined.features[index]['properties']['osm:large_images'] = ','.join(large_images) dump = geojson.dumps(combined, sort_keys=True, indent=2) writefile('nairobi-combined-schools.geojson',dump) def deploy(): os.system("rm nairobi-combined-schools.csv") os.system("ogr2ogr -f CSV nairobi-combined-schools.csv nairobi-combined-schools.geojson -lco GEOMETRY=AS_WKT") os.system("cp nairobi-combined-schools.geojson ../content/schools/") os.system("cp nairobi-combined-schools.csv ../content/download/") os.system("ogr2ogr -f CSV kibera-schools.csv kibera-schools.geojson -lco GEOMETRY=AS_WKT") os.system("ogr2ogr -f CSV mathare-schools.csv mathare-schools.geojson -lco GEOMETRY=AS_WKT") os.system("cp kibera-schools.csv ../content/download/") os.system("cp mathare-schools.csv ../content/download/") #TODO make command line configurable .. Fabric? #kenyaopendata() filter_kenyaopendata() sync_osm() convert2geojson() compare_osm_kenyaopendata() cache_images() deploy() #TODO generate statistics on each run of comparison results #TODO generate list of ODK schools unmapped
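# --- Supplementary sketch (not part of the original script) -------------------
# filter_data() above keeps a row when its division matches the Kibera whitelist
# or when its "(lat, lon)" geolocation falls inside the north/south/east/west
# bounding box defined at the top of the file. This pulls that predicate out
# into a small, testable function; the function name is made up for the sketch.
NORTH, SOUTH, EAST, WEST = -1.3000, -1.3232, 36.8079, 36.7663

def in_kibera_bbox(geolocation):
    # geolocation looks like "(-1.31, 36.79)" in the Kenya Open Data CSVs.
    lat, lon = [float(part) for part in
                geolocation.replace('(', '').replace(')', '').split(',')]
    return SOUTH <= lat <= NORTH and WEST <= lon <= EAST

print(in_kibera_bbox('(-1.3100, 36.7900)'))   # True: inside the Kibera box
print(in_kibera_bbox('(-1.2500, 36.8500)'))   # False: outside the box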
from collections import Counter import json from django.db import models from django.db.models import DateTimeField, TextField, CharField, ForeignKey, IntegerField, BooleanField, F, \ ManyToManyField, OneToOneField, FloatField from django.utils import timezone from django.db import transaction import sqlparse # Seperated out so can use in tests w/o models def _time_taken(start_time, end_time): d = end_time - start_time return d.seconds * 1000 + d.microseconds / 1000 def time_taken(self): return _time_taken(self.start_time, self.end_time) class CaseInsensitiveDictionary(dict): def __getitem__(self, key): return super(CaseInsensitiveDictionary, self).__getitem__(key.lower()) def __setitem__(self, key, value): super(CaseInsensitiveDictionary, self).__setitem__(key.lower(), value) def update(self, other=None, **kwargs): for k, v in other.items(): self[k] = v for k, v in kwargs.items(): self[k] = v def __init__(self, d): super(CaseInsensitiveDictionary, self).__init__() for k, v in d.items(): self[k] = v class Request(models.Model): path = CharField(max_length=300, db_index=True) query_params = TextField(blank=True, default='') raw_body = TextField(blank=True, default='') body = TextField(blank=True, default='') method = CharField(max_length=10) start_time = DateTimeField(default=timezone.now) view_name = CharField(max_length=300, db_index=True, blank=True, default='') end_time = DateTimeField(null=True, blank=True) time_taken = FloatField(blank=True, null=True) encoded_headers = TextField(blank=True, default='') meta_time = FloatField(null=True, blank=True) meta_num_queries = IntegerField(null=True, blank=True) meta_time_spent_queries = FloatField(null=True, blank=True) @property def total_meta_time(self): return (self.meta_time or 0) + (self.meta_time_spent_queries or 0) # defined in atomic transaction within SQLQuery save()/delete() as well # as in bulk_create of SQLQueryManager # TODO: This is probably a bad way to do this, .count() will prob do? num_sql_queries = IntegerField(default=0) @property def time_spent_on_sql_queries(self): # TODO: Perhaps there is a nicer way to do this with Django aggregates? # My initial thought was to perform: # SQLQuery.objects.filter.aggregate(Sum(F('end_time')) - Sum(F('start_time'))) # However this feature isnt available yet, however there has been talk for use of F objects # within aggregates for four years here: https://code.djangoproject.com/ticket/14030 # It looks like this will go in soon at which point this should be changed. 
return sum(x.time_taken for x in SQLQuery.objects.filter(request=self)) @property def headers(self): if self.encoded_headers: raw = json.loads(self.encoded_headers) else: raw = {} return CaseInsensitiveDictionary(raw) @property def content_type(self): return self.headers.get('content-type', None) def save(self, *args, **kwargs): if self.end_time and self.start_time: interval = self.end_time - self.start_time self.time_taken = interval.total_seconds() * 1000 super(Request, self).save(*args, **kwargs) class Response(models.Model): request = OneToOneField('Request', related_name='response') status_code = IntegerField() raw_body = TextField(blank=True, default='') body = TextField(blank=True, default='') encoded_headers = TextField(blank=True, default='') @property def content_type(self): return self.headers.get('content-type', None) @property def headers(self): if self.encoded_headers: raw = json.loads(self.encoded_headers) else: raw = {} return CaseInsensitiveDictionary(raw) class SQLQueryManager(models.Manager): def bulk_create(self, *args, **kwargs): """ensure that num_sql_queries remains consistent. Bulk create does not call the model save() method and hence we must add this logic here too""" if len(args): objs = args[0] else: objs = kwargs.get('objs') with transaction.commit_on_success(): request_counter = Counter([x.request_id for x in objs]) requests = Request.objects.filter(pk__in=request_counter.keys()) # TODO: Not that there is ever more than one request (but there could be eventually) # but perhaps there is a cleaner way of apply the increment from the counter without iterating # and saving individually? e.g. bulk update but with diff. increments. Couldn't come up with this # off hand. for r in requests: r.num_sql_queries = F('num_sql_queries') + request_counter[r.pk] r.save() save = super(SQLQueryManager, self).bulk_create(*args, **kwargs) return save class SQLQuery(models.Model): query = TextField() start_time = DateTimeField(null=True, blank=True, default=timezone.now) end_time = DateTimeField(null=True, blank=True) time_taken = FloatField(blank=True, null=True) request = ForeignKey('Request', related_name='queries', null=True, blank=True) traceback = TextField() objects = SQLQueryManager() @property def traceback_ln_only(self): return '\n'.join(self.traceback.split('\n')[::2]) @property def formatted_query(self): return sqlparse.format(self.query, reindent=True, keyword_case='upper') # TODO: Surely a better way to handle this? May return false positives @property def num_joins(self): return self.query.lower().count('join ') @property def tables_involved(self): """A rreally ather rudimentary way to work out tables involved in a query. TODO: Can probably parse the SQL using sqlparse etc and pull out table info that way?""" components = [x.strip() for x in self.query.split()] tables = [] for idx, c in enumerate(components): # TODO: If django uses aliases on column names they will be falsely identified as tables... 
if c.lower() == 'from' or c.lower() == 'join' or c.lower() == 'as': try: nxt = components[idx + 1] if not nxt.startswith('('): # Subquery stripped = nxt.strip().strip(',') if stripped: tables.append(stripped) except IndexError: # Reach the end pass return tables @transaction.commit_on_success() def save(self, *args, **kwargs): if self.end_time and self.start_time: interval = self.end_time - self.start_time self.time_taken = interval.total_seconds() * 1000 if not self.pk: if self.request: self.request.num_sql_queries += 1 self.request.save() super(SQLQuery, self).save(*args, **kwargs) @transaction.commit_on_success() def delete(self, *args, **kwargs): self.request.num_sql_queries -= 1 self.request.save() super(SQLQuery, self).delete(*args, **kwargs) class BaseProfile(models.Model): name = CharField(max_length=300, blank=True, default='') start_time = DateTimeField(default=timezone.now) end_time = DateTimeField(null=True, blank=True) request = ForeignKey('Request', null=True, blank=True) time_taken = FloatField(blank=True, null=True) class Meta: abstract = True def save(self, *args, **kwargs): if self.end_time and self.start_time: interval = self.end_time - self.start_time self.time_taken = interval.total_seconds() * 1000 super(BaseProfile, self).save(*args, **kwargs) class Profile(BaseProfile): file_path = CharField(max_length=300, blank=True, default='') line_num = IntegerField(null=True, blank=True) end_line_num = IntegerField(null=True, blank=True) func_name = CharField(max_length=300, blank=True, default='') exception_raised = BooleanField(default=False) queries = ManyToManyField('SQLQuery', related_name='profiles') dynamic = BooleanField(default=False) @property def is_function_profile(self): return self.func_name is not None @property def is_context_profile(self): return self.func_name is None @property def time_spent_on_sql_queries(self): time_spent = sum(x.time_taken for x in self.queries.all()) return time_spent
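# --- Supplementary sketch (not part of the original module) -------------------
# Two of the helpers above are plain Python and can be exercised without the
# Django models: _time_taken() converts a start/end pair into milliseconds
# (note it uses d.seconds, so whole days are ignored), and the case-insensitive
# dict lowercases keys so header lookups such as 'content-type' succeed no
# matter how the header was originally cased. The names here are copies made
# for the sketch, not the model classes themselves.
from datetime import datetime, timedelta

def _time_taken_sketch(start_time, end_time):
    d = end_time - start_time
    return d.seconds * 1000 + d.microseconds / 1000

start = datetime(2024, 1, 1, 12, 0, 0)
print(_time_taken_sketch(start, start + timedelta(seconds=1, microseconds=250000)))  # 1250.0

class CaseInsensitiveDictSketch(dict):
    def __getitem__(self, key):
        return super(CaseInsensitiveDictSketch, self).__getitem__(key.lower())

    def __setitem__(self, key, value):
        super(CaseInsensitiveDictSketch, self).__setitem__(key.lower(), value)

headers = CaseInsensitiveDictSketch()
headers['Content-Type'] = 'application/json'
print(headers['content-type'])   # 'application/json'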
from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo, TableInfo, ) from django.db.models import Index class DatabaseIntrospection(BaseDatabaseIntrospection): # Maps type codes to Django Field types. data_types_reverse = { 16: 'BooleanField', 17: 'BinaryField', 20: 'BigIntegerField', 21: 'SmallIntegerField', 23: 'IntegerField', 25: 'TextField', 700: 'FloatField', 701: 'FloatField', 869: 'GenericIPAddressField', 1042: 'CharField', # blank-padded 1043: 'CharField', 1082: 'DateField', 1083: 'TimeField', 1114: 'DateTimeField', 1184: 'DateTimeField', 1186: 'DurationField', 1266: 'TimeField', 1700: 'DecimalField', 2950: 'UUIDField', } ignored_tables = [] def get_field_type(self, data_type, description): field_type = super().get_field_type(data_type, description) if description.default and 'nextval' in description.default: if field_type == 'IntegerField': return 'AutoField' elif field_type == 'BigIntegerField': return 'BigAutoField' elif field_type == 'SmallIntegerField': return 'SmallAutoField' return field_type def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute(""" SELECT c.relname, CASE WHEN {} THEN 'p' WHEN c.relkind IN ('m', 'v') THEN 'v' ELSE 't' END FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v') AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid) """.format('c.relispartition' if self.connection.features.supports_table_partitions else 'FALSE')) return [TableInfo(*row) for row in cursor.fetchall() if row[0] not in self.ignored_tables] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. """ # Query the pg_catalog tables as cursor.description does not reliably # return the nullable property and information_schema.columns does not # contain details of materialized views. 
cursor.execute(""" SELECT a.attname AS column_name, NOT (a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)) AS is_nullable, pg_get_expr(ad.adbin, ad.adrelid) AS column_default FROM pg_attribute a LEFT JOIN pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum JOIN pg_type t ON a.atttypid = t.oid JOIN pg_class c ON a.attrelid = c.oid JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v') AND c.relname = %s AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid) """, [table_name]) field_map = {line[0]: line[1:] for line in cursor.fetchall()} cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) return [ FieldInfo( line.name, line.type_code, line.display_size, line.internal_size, line.precision, line.scale, *field_map[line.name], ) for line in cursor.description ] def get_sequences(self, cursor, table_name, table_fields=()): cursor.execute(""" SELECT s.relname as sequence_name, col.attname FROM pg_class s JOIN pg_namespace sn ON sn.oid = s.relnamespace JOIN pg_depend d ON d.refobjid = s.oid AND d.refclassid = 'pg_class'::regclass JOIN pg_attrdef ad ON ad.oid = d.objid AND d.classid = 'pg_attrdef'::regclass JOIN pg_attribute col ON col.attrelid = ad.adrelid AND col.attnum = ad.adnum JOIN pg_class tbl ON tbl.oid = ad.adrelid WHERE s.relkind = 'S' AND d.deptype in ('a', 'n') AND pg_catalog.pg_table_is_visible(tbl.oid) AND tbl.relname = %s """, [table_name]) return [ {'name': row[0], 'table': table_name, 'column': row[1]} for row in cursor.fetchall() ] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. """ return {row[0]: (row[2], row[1]) for row in self.get_key_columns(cursor, table_name)} def get_key_columns(self, cursor, table_name): cursor.execute(""" SELECT a1.attname, c2.relname, a2.attname FROM pg_constraint con LEFT JOIN pg_class c1 ON con.conrelid = c1.oid LEFT JOIN pg_class c2 ON con.confrelid = c2.oid LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1] LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1] WHERE c1.relname = %s AND con.contype = 'f' AND c1.relnamespace = c2.relnamespace AND pg_catalog.pg_table_is_visible(c1.oid) """, [table_name]) return cursor.fetchall() def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. Also retrieve the definition of expression-based indexes. """ constraints = {} # Loop over the key table, collecting things as constraints. The column # array must return column names in the same order in which they were # created. cursor.execute(""" SELECT c.conname, array( SELECT attname FROM unnest(c.conkey) WITH ORDINALITY cols(colid, arridx) JOIN pg_attribute AS ca ON cols.colid = ca.attnum WHERE ca.attrelid = c.conrelid ORDER BY cols.arridx ), c.contype, (SELECT fkc.relname || '.' 
|| fka.attname FROM pg_attribute AS fka JOIN pg_class AS fkc ON fka.attrelid = fkc.oid WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]), cl.reloptions FROM pg_constraint AS c JOIN pg_class AS cl ON c.conrelid = cl.oid WHERE cl.relname = %s AND pg_catalog.pg_table_is_visible(cl.oid) """, [table_name]) for constraint, columns, kind, used_cols, options in cursor.fetchall(): constraints[constraint] = { "columns": columns, "primary_key": kind == "p", "unique": kind in ["p", "u"], "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None, "check": kind == "c", "index": False, "definition": None, "options": options, } # Now get indexes cursor.execute(""" SELECT indexname, array_agg(attname ORDER BY arridx), indisunique, indisprimary, array_agg(ordering ORDER BY arridx), amname, exprdef, s2.attoptions FROM ( SELECT c2.relname as indexname, idx.*, attr.attname, am.amname, CASE WHEN idx.indexprs IS NOT NULL THEN pg_get_indexdef(idx.indexrelid) END AS exprdef, CASE am.amname WHEN 'btree' THEN CASE (option & 1) WHEN 1 THEN 'DESC' ELSE 'ASC' END END as ordering, c2.reloptions as attoptions FROM ( SELECT * FROM pg_index i, unnest(i.indkey, i.indoption) WITH ORDINALITY koi(key, option, arridx) ) idx LEFT JOIN pg_class c ON idx.indrelid = c.oid LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid LEFT JOIN pg_am am ON c2.relam = am.oid LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key WHERE c.relname = %s AND pg_catalog.pg_table_is_visible(c.oid) ) s2 GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions; """, [table_name]) for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall(): if index not in constraints: basic_index = type_ == 'btree' and not index.endswith('_btree') and options is None constraints[index] = { "columns": columns if columns != [None] else [], "orders": orders if orders != [None] else [], "primary_key": primary, "unique": unique, "foreign_key": None, "check": False, "index": True, "type": Index.suffix if basic_index else type_, "definition": definition, "options": options, } return constraints
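# --- Supplementary sketch (not part of the original module) -------------------
# get_relations() above only reshapes the rows that get_key_columns() fetches
# from pg_constraint: each (local_column, other_table, other_column) row becomes
# local_column -> (other_column, other_table). The real methods need a live
# database cursor, so this replays the reshaping on hard-coded rows; the table
# and column names are made up.
key_columns = [
    ('author_id', 'auth_user', 'id'),         # books.author_id -> auth_user.id
    ('publisher_id', 'app_publisher', 'id'),  # books.publisher_id -> app_publisher.id
]

relations = {row[0]: (row[2], row[1]) for row in key_columns}
print(relations)
# {'author_id': ('id', 'auth_user'), 'publisher_id': ('id', 'app_publisher')}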
#!/usr/bin/env python ''' client.py: general client for metadata and images in datastore Copyright (c) 2017 Vanessa Sochat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' from .models import ( Collection, Entity, Object ) from .models import DataStoreManager from som.api.google.storage.client import StorageClientBase from google.cloud import datastore from som.logger import bot import six ###################################################################################### # Specs and base data structures for a general model ###################################################################################### def entity(uid,collection): '''entity returns an entity object parent is a collection ''' fields = [{'key':'uid','required':True,'value':uid}] if type(collection) not in six.string_types: collection = collection.get_name() model = {'fields':fields, 'key':['Collection',collection,'Entity', uid]} return model def collection(uid): '''entity returns an entity object parent is an owner ''' fields = [{'key':'uid','required':True,'value':uid}] model = {'fields':fields, 'key':['Collection', uid]} return model def storageObject(uid,entity,url,storage_type): '''image returns an image object. 
entity is the parent ''' fields = [{'key':'uid','required':True,'value': uid}, {'key':'url','required':True,'value': url}] collection = entity.collection.get_name() entity = entity.get_name() storage_type = storage_type.replace(' ','-').lower().capitalize() model = {'fields':fields, 'exclude_from_indexes': ['url'], 'key':['Collection', collection, 'Entity',entity, storage_type, uid]} return model ###################################################################################### # Client to interact with Models ###################################################################################### class DataStoreClient(StorageClientBase): ''' a DataStore Client is a wrapper for DataStore and google Storage, with a general stratgy to upload to storage, and save metadata in Datastore ''' def __init__(self, project, bucket_name, **kwargs): super(DataStoreClient, self).__init__(project, bucket_name, **kwargs) self.datastore = datastore.Client(self.project) self.name = "datastore" self.batch = DataStoreManager(client=self.datastore) ################################################################### ## Create ######################################################### ################################################################### def create_collection(self, uid, create=True,fields=None): return Collection(uid=uid, collection=collection, create=create, fields=fields) def create_entity(self,collection,uid,create=True,fields=None): return Entity(collection=collection, uid=uid, create=create, fields=fields) def create_object(self,uid,entity,url,object_type,create=True,fields=None): '''Object type should be one in Image or Text''' return Object(object_type=object_type, uid=uid, entity=entity, url=url, create=create, fields=fields) def create_object(self,uid,entity,url,object_type,create=True,fields=None): '''Object type should be one in Image or Text''' return Object(object_type=object_type, uid=uid, entity=entity, url=url, create=create, fields=fields) ################################################################### ## Get ############################################################ ################################################################### def get_storage_path(self,file_path,entity,return_folder=False): ''' get_storage_path will return the human readable path of a file associated with an entity ''' folder = '/'.join(entity.get_keypath()) bucket_path = "%s/%s" %(folder,os.path.basename(file_path)) if return_folder: return os.path.dirname(bucket_path) return bucket_path def get_collections(self,uids=None,limit=None,keys_only=False): return self.batch.get(kind="Collection", keys=uids, limit=limit, keys_only=keys_only) def get_entities(self,collection=None,field=None,uids=None,limit=None,keys_only=False): '''eg: pmc_articles = client.get_entities(uids=pmc_keys,field="pmcid") ''' ancestor = None if collection is not None: ancestor = self.batch.client.key("Collection", collection) return self.batch.get(kind="Entity", limit=limit, field=field, ancestor=ancestor, keys=uids, keys_only=keys_only) def get_images(self,entity,limit=None,keys_only=False): return self.batch.query(kind="Image", limit=limit, keys_only=keys_only, ancestor=entity.key) def get_text(self,entity,limit=None,keys_only=False): return self.batch.query(kind="Text", limit=limit, keys_only=keys_only, ancestor=entity.key) ################################################################### ## Upload ######################################################### ################################################################### def 
upload_object(self,file_path,entity, object_type=None, batch=True, permission=None, mimetype=None, fields=None): '''upload_object will add a general object to the batch manager The object is uploaded to Google Storage, returning storage fields. If the user has provided additional fields, these are added to the call to create a new object''' if object_type is None: bot.warning("You must specify object_type. Image or Text.") return None uid = self.get_storage_path(file_path,entity) bucket_folder = self.get_storage_path(file_path, entity, return_folder=True) # Returns none on httperror, if object already exists storage_obj = self.put_object(file_path=file_path, bucket_folder=bucket_folder, permission=permission, mimetype=mimetype) # We created it if storage_obj is None: return storage_obj storage_fields = get_storage_fields(storage_obj) if fields is not None: storage_fields.update(fields) fields = storage_fields url = "https://storage.googleapis.com/%s/%s" %(self.bucket['name'], storage_obj['name']) new_object = self.create_object(uid=uid, entity=entity, url=url, fields=fields, object_type=object_type, create=not batch) if batch: self.batch.add(new_object) return new_object def upload_text(self,text,entity,batch=True,fields=None,permission=None,mimetype=None): '''upload_text will add a text object to the batch manager''' new_object = self.upload_object(file_path=text, entity=entity, fields=fields, mimetype=mimetype, permission=permission, object_type="Text", batch=batch) bot.debug('TEXT: %s' %new_object) return new_object def upload_image(self,image,entity,batch=True,fields=None,permission=None,mimetype=None): '''upload_images will add an image object to the batch manager ''' new_object = self.upload_object(file_path=image, entity=entity, fields=fields, mimetype=mimetype, permission=permission, object_type="Image", batch=batch) bot.debug('IMAGE: %s' %new_object) return new_object def upload_dataset(self,uid,collection, images=None, texts=None, permission=None, texts_mimetype=None, images_mimetype=None, entity_metadata=None, images_metadata=None, texts_metadata=None, batch=True): '''upload takes a list of images, texts, and optional metadata and uploads to datastore (metadata) and storage (images) :param uid: should be the unique id for the entity, with one or more images/texts :param entity_metadata: a single dictionary of keys/values for the entity :param texts_metadata: a dictionary with keys corresponding to text paths of key value pairs for the text in question :param images_metadata: the same, but for images :param collection: should be the collection to add the entity to :param batch: add entities in batches (recommended, default True) ''' if permission is None: permission = "projectPrivate" if images_metadata is None: images_metadata = {} if texts_metadata is None: texts_metadata = {} # Add entity entity = self.create_entity(collection=collection,uid=uid) if entity_metadata is not None: entity.update(fields=entity_metadata) if texts is not None: for text in texts: fields = None # metadata provided for the text? if text in texts_metadata: fields = texts_metadata[text] self.upload_text(text=text, entity=entity, mimetype=texts_mimetype, fields=fields, batch=batch, permission=permission) if images is not None: for img in images: fields = None # metadata provided for the image? 
if img in images_metadata: fields = images_metadata[img] self.upload_image(image=img, entity=entity, fields=fields, mimetype=images_mimetype, batch=batch, permission=permission) # Run a transaction for put (insert) images and text, and clears queue if batch: self.batch.runInsert()
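# --- Supplementary sketch (not part of the original module) -------------------
# The collection()/entity()/storageObject() helpers above all follow the same
# ancestor key-path convention: ['Collection', <uid>] for a collection,
# ['Collection', c, 'Entity', e] for an entity, and
# ['Collection', c, 'Entity', e, <StorageType>, <uid>] for an uploaded object.
# This standalone sketch rebuilds those model dicts without the google-cloud
# imports, just to make the nesting visible; the uids are made up.
def collection_model(uid):
    return {'fields': [{'key': 'uid', 'required': True, 'value': uid}],
            'key': ['Collection', uid]}

def entity_model(collection_uid, uid):
    return {'fields': [{'key': 'uid', 'required': True, 'value': uid}],
            'key': ['Collection', collection_uid, 'Entity', uid]}

def object_model(collection_uid, entity_uid, uid, url, storage_type):
    storage_type = storage_type.replace(' ', '-').lower().capitalize()  # e.g. 'Image'
    return {'fields': [{'key': 'uid', 'required': True, 'value': uid},
                       {'key': 'url', 'required': True, 'value': url}],
            'exclude_from_indexes': ['url'],
            'key': ['Collection', collection_uid, 'Entity', entity_uid,
                    storage_type, uid]}

print(entity_model('study-A', 'patient-1')['key'])
# ['Collection', 'study-A', 'Entity', 'patient-1']
print(object_model('study-A', 'patient-1', 'scan.png',
                   'https://storage.googleapis.com/bucket/scan.png', 'image')['key'])
# ['Collection', 'study-A', 'Entity', 'patient-1', 'Image', 'scan.png']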
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # This is a dummy migration pass def backwards(self, orm): pass models = { 'sentry.activity': { 'Meta': {'object_name': 'Activity'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.apikey': { 'Meta': {'object_name': 'ApiKey'}, 'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.auditlogentry': { 'Meta': {'object_name': 'AuditLogEntry'}, 'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}), 'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.authidentity': { 'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'}, 'auth_provider': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}), 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.authprovider': { 'Meta': {'object_name': 'AuthProvider'}, 'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.broadcast': { 'Meta': {'object_name': 'Broadcast'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 4, 4, 0, 0)', 'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}) }, 'sentry.broadcastseen': { 'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'}, 'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}), 'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.counter': { 'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 
'unique': 'True'}), 'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"}, 'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'}) }, 'sentry.eventmapping': { 'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventtag': { 'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventuser': { 'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}) }, 'sentry.file': { 'Meta': {'object_name': 'File'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': 
"orm['sentry.FileBlob']"}), 'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}), 'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.fileblob': { 'Meta': {'object_name': 'FileBlob'}, 'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}) }, 'sentry.fileblobindex': { 'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.globaldsymfile': { 'Meta': {'object_name': 'GlobalDSymFile'}, 'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'object_name': ('django.db.models.fields.TextField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'sentry.groupassignee': { 'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupemailthread': { 'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'}, 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', 
[], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"}) }, 'sentry.grouphash': { 'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.groupredirect': { 'Meta': {'object_name': 'GroupRedirect'}, 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'}) }, 'sentry.groupresolution': { 'Meta': {'object_name': 'GroupResolution'}, 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.grouprulestatus': { 'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}) }, 'sentry.groupseen': { 'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'}) }, 'sentry.groupsnooze': { 'Meta': {'object_name': 'GroupSnooze'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': 
"orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'until': ('django.db.models.fields.DateTimeField', [], {}) }, 'sentry.grouptagkey': { 'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.grouptagvalue': { 'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.helppage': { 'Meta': {'object_name': 'HelpPage'}, 'content': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}), 'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': {'object_name': 'Organization'}, 'date_added': ('django.db.models.fields.DateTimeField', [], 
{'default': 'datetime.datetime.now'}), 'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.organizationaccessrequest': { 'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationmember': { 'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'}, 'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}), 'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.organizationmemberteam': { 'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"}, 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationonboardingtask': { 'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'}, 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'date_completed': ('django.db.models.fields.DateTimeField', [], 
{'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.organizationoption': { 'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.project': { 'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.projectbookmark': { 'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.projectdsymfile': { 'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'}, 'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'object_name': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'date_added': ('django.db.models.fields.DateTimeField', 
[], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.release': { 'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'}, 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.releasefile': { 'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'}, 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'name': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.rule': { 'Meta': {'object_name': 'Rule'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.savedsearch': { 'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'query': ('django.db.models.fields.TextField', [], {}) }, 'sentry.savedsearchuserdefault': { 'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.tagkey': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.tagvalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.team': { 'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'slug': ('django.db.models.fields.SlugField', [], 
{'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.userreport': { 'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"}, 'comments': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) } } complete_apps = ['sentry']
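# Hedged illustration, not part of the migration above: forwards()/backwards()
# there are intentionally no-ops. A South migration that actually changes the
# schema pairs a db.add_column() call with the matching db.delete_column().
# The 'example_flag' column is hypothetical, and the class is named
# ExampleColumnMigration only to avoid shadowing the real Migration class
# (South itself expects the class to be called Migration).
class ExampleColumnMigration(SchemaMigration):

    def forwards(self, orm):
        # Add a boolean column with a default so existing rows stay valid.
        db.add_column('sentry_activity', 'example_flag',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Exact mirror of forwards(): drop the column again.
        db.delete_column('sentry_activity', 'example_flag')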
""" Customisable progressbar decorator for iterators. Includes a default (x)range iterator printing to stderr. Usage: >>> from tqdm import trange[, tqdm] >>> for i in trange(10): #same as: for i in tqdm(xrange(10)) ... ... """ # future division is important to divide integers and get as # a result precise floating numbers (instead of truncated int) from __future__ import division, absolute_import # import compatibility functions and utilities from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \ _term_move_up, _unicode, WeakSet import sys from time import time __author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim", "casperdcl", "lrq3000"]} __all__ = ['tqdm', 'trange'] class tqdm(object): """ Decorate an iterable object, returning an iterator which acts exactly like the original iterable, but prints a dynamically updating progressbar every time a value is requested. """ @staticmethod def format_sizeof(num, suffix=''): """ Formats a number (greater than unity) with SI Order of Magnitude prefixes. Parameters ---------- num : float Number ( >= 1) to format. suffix : str, optional Post-postfix [default: '']. Returns ------- out : str Number with Order of Magnitude SI unit postfix. """ for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 999.95: if abs(num) < 99.95: if abs(num) < 9.995: return '{0:1.2f}'.format(num) + unit + suffix return '{0:2.1f}'.format(num) + unit + suffix return '{0:3.0f}'.format(num) + unit + suffix num /= 1000.0 return '{0:3.1f}Y'.format(num) + suffix @staticmethod def format_interval(t): """ Formats a number of seconds as a clock time, [H:]MM:SS Parameters ---------- t : int Number of seconds. Returns ------- out : str [H:]MM:SS """ mins, s = divmod(int(t), 60) h, m = divmod(mins, 60) if h: return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s) else: return '{0:02d}:{1:02d}'.format(m, s) @staticmethod def status_printer(file): """ Manage the printing and in-place updating of a line of characters. Note that if the string is longer than a line, then in-place updating may not work (it will print a new line at each refresh). """ fp = file fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover def fp_write(s): fp.write(_unicode(s)) fp_flush() last_len = [0] def print_status(s): len_s = len(s) fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0))) last_len[0] = len_s return print_status @staticmethod def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it', unit_scale=False, rate=None, bar_format=None): """ Return a string-based progress bar given some parameters Parameters ---------- n : int Number of finished iterations. total : int The expected total number of iterations. If meaningless (), only basic progress statistics are displayed (no ETA). elapsed : float Number of seconds passed since start. ncols : int, optional The width of the entire output message. If specified, dynamically resizes the progress meter to stay within this bound [default: None]. The fallback meter width is 10 for the progress bar + no limit for the iterations counter and statistics. If 0, will not print any meter (only stats). prefix : str, optional Prefix message (included in total width) [default: '']. ascii : bool, optional If not set, use unicode (smooth blocks) to fill the meter [default: False]. The fallback is to use ASCII characters (1-9 #). unit : str, optional The iteration unit [default: 'it']. 
unit_scale : bool, optional If set, the number of iterations will printed with an appropriate SI metric prefix (K = 10^3, M = 10^6, etc.) [default: False]. rate : float, optional Manual override for iteration rate. If [default: None], uses n/elapsed. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar is '{desc}{percentage:3.0f}%|' and r_bar is '| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]' Possible vars: bar, n, n_fmt, total, total_fmt, percentage, rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc. Returns ------- out : Formatted meter and stats, ready to display. """ # sanity check: total if total and n > total: total = None format_interval = tqdm.format_interval elapsed_str = format_interval(elapsed) # if unspecified, attempt to use rate = average speed # (we allow manual override since predicting time is an arcane art) if rate is None and elapsed: rate = n / elapsed inv_rate = 1 / rate if (rate and (rate < 1)) else None format_sizeof = tqdm.format_sizeof rate_fmt = ((format_sizeof(inv_rate if inv_rate else rate) if unit_scale else '{0:5.2f}'.format(inv_rate if inv_rate else rate)) if rate else '?') \ + ('s' if inv_rate else unit) + '/' + (unit if inv_rate else 's') if unit_scale: n_fmt = format_sizeof(n) total_fmt = format_sizeof(total) if total else None else: n_fmt = str(n) total_fmt = str(total) # total is known: we can predict some stats if total: # fractional and percentage progress frac = n / total percentage = frac * 100 remaining_str = format_interval((total - n) / rate) \ if rate else '?' # format the stats displayed to the left and right sides of the bar l_bar = (prefix if prefix else '') + \ '{0:3.0f}%|'.format(percentage) r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format( n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt) if ncols == 0: return l_bar[:-1] + r_bar[1:] if bar_format: # Custom bar formatting # Populate a dict with all available progress indicators bar_args = {'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt, 'percentage': percentage, 'rate': rate if inv_rate is None else inv_rate, 'rate_noinv': rate, 'rate_noinv_fmt': ((format_sizeof(rate) if unit_scale else '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s', 'rate_fmt': rate_fmt, 'elapsed': elapsed_str, 'remaining': remaining_str, 'l_bar': l_bar, 'r_bar': r_bar, 'desc': prefix if prefix else '', # 'bar': full_bar # replaced by procedure below } # Interpolate supplied bar format with the dict if '{bar}' in bar_format: # Format left/right sides of the bar, and format the bar # later in the remaining space (avoid breaking display) l_bar_user, r_bar_user = bar_format.split('{bar}') l_bar = l_bar_user.format(**bar_args) r_bar = r_bar_user.format(**bar_args) else: # Else no progress bar, we can just format and return return bar_format.format(**bar_args) # Formatting progress bar # space available for bar's display N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \ else 10 # format bar depending on availability of unicode/ascii chars if ascii: bar_length, frac_bar_length = divmod( int(frac * N_BARS * 10), 10) bar = '#' * bar_length frac_bar = chr(48 + frac_bar_length) if frac_bar_length \ else ' ' else: bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8) bar = _unich(0x2588) * bar_length frac_bar = _unich(0x2590 - frac_bar_length) \ if frac_bar_length else ' ' # whitespace padding if bar_length < N_BARS: full_bar = bar + frac_bar + \ ' ' * max(N_BARS - bar_length - 1, 0) 
else: full_bar = bar + \ ' ' * max(N_BARS - bar_length, 0) # Piece together the bar parts return l_bar + full_bar + r_bar # no total: no progressbar, ETA, just progress stats else: return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format( n_fmt, unit, elapsed_str, rate_fmt) def __new__(cls, *args, **kwargs): # Create a new instance instance = object.__new__(cls) # Add to the list of instances if "_instances" not in cls.__dict__: cls._instances = WeakSet() cls._instances.add(instance) # Return the instance return instance @classmethod def _get_free_pos(cls, instance=None): """ Skips specified instance """ try: return max(inst.pos for inst in cls._instances if inst is not instance) + 1 except ValueError as e: if "arg is an empty sequence" in str(e): return 0 raise # pragma: no cover @classmethod def _decr_instances(cls, instance): """ Remove from list and reposition other bars so that newer bars won't overlap previous bars """ try: # in case instance was explicitly positioned, it won't be in set cls._instances.remove(instance) for inst in cls._instances: if inst.pos > instance.pos: inst.pos -= 1 except KeyError: pass @classmethod def write(cls, s, file=sys.stdout, end="\n"): """ Print a message via tqdm (without overlap with bars) """ fp = file # Clear all bars inst_cleared = [] for inst in cls._instances: # Clear instance if in the target output file # or if write output + tqdm output are both either # sys.stdout or sys.stderr (because both are mixed in terminal) if inst.fp == fp or all(f in (sys.stdout, sys.stderr) for f in (fp, inst.fp)): inst.clear() inst_cleared.append(inst) # Write the message fp.write(s) fp.write(end) # Force refresh display of bars we cleared for inst in inst_cleared: inst.refresh() # TODO: make list of all instances incl. absolutely positioned ones? def __init__(self, iterable=None, desc=None, total=None, leave=True, file=sys.stderr, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, gui=False, **kwargs): """ Parameters ---------- iterable : iterable, optional Iterable to decorate with a progressbar. Leave blank to manually manage the updates. desc : str, optional Prefix for the progressbar. total : int, optional The number of expected iterations. If unspecified, len(iterable) is used if possible. As a last resort, only basic progress statistics are displayed (no ETA, no progressbar). If `gui` is True and this parameter needs subsequent updating, specify an initial arbitrary large positive integer, e.g. int(9e9). leave : bool, optional If [default: True], keeps all traces of the progressbar upon termination of iteration. file : `io.TextIOWrapper` or `io.StringIO`, optional Specifies where to output the progress messages [default: sys.stderr]. Uses `file.write(str)` and `file.flush()` methods. ncols : int, optional The width of the entire output message. If specified, dynamically resizes the progressbar to stay within this bound. If unspecified, attempts to use environment width. The fallback is a meter width of 10 and no limit for the counter and statistics. If 0, will not print any meter (only stats). mininterval : float, optional Minimum progress update interval, in seconds [default: 0.1]. maxinterval : float, optional Maximum progress update interval, in seconds [default: 10.0]. miniters : int, optional Minimum progress update interval, in iterations. If specified, will set `mininterval` to 0. 
ascii : bool, optional If unspecified or False, use unicode (smooth blocks) to fill the meter. The fallback is to use ASCII characters `1-9 #`. disable : bool, optional Whether to disable the entire progressbar wrapper [default: False]. unit : str, optional String that will be used to define the unit of each iteration [default: it]. unit_scale : bool, optional If set, the number of iterations will be reduced/scaled automatically and a metric prefix following the International System of Units standard will be added (kilo, mega, etc.) [default: False]. dynamic_ncols : bool, optional If set, constantly alters `ncols` to the environment (allowing for window resizes) [default: False]. smoothing : float, optional Exponential moving average smoothing factor for speed estimates (ignored in GUI mode). Ranges from 0 (average speed) to 1 (current/instantaneous speed) [default: 0.3]. bar_format : str, optional Specify a custom bar string formatting. May impact performance. If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is '{desc}{percentage:3.0f}%|' and r_bar is '| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]' Possible vars: bar, n, n_fmt, total, total_fmt, percentage, rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc. initial : int, optional The initial counter value. Useful when restarting a progress bar [default: 0]. position : int, optional Specify the line offset to print this bar (starting from 0) Automatic if unspecified. Useful to manage multiple bars at once (eg, from threads). gui : bool, optional WARNING: internal parameter - do not use. Use tqdm_gui(...) instead. If set, will attempt to use matplotlib animations for a graphical output [default: False]. Returns ------- out : decorated iterator. """ if disable: self.iterable = iterable self.disable = disable self.pos = self._get_free_pos(self) self._instances.remove(self) return if kwargs: self.disable = True self.pos = self._get_free_pos(self) self._instances.remove(self) raise (DeprecationWarning("nested is deprecated and" " automated.\nUse position instead" " for manual control") if "nested" in kwargs else Warning("Unknown argument(s): " + str(kwargs))) # Preprocess the arguments if total is None and iterable is not None: try: total = len(iterable) except (TypeError, AttributeError): total = None if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \ dynamic_ncols: # pragma: no cover if dynamic_ncols: dynamic_ncols = _environ_cols_wrapper() ncols = dynamic_ncols(file) else: ncols = _environ_cols_wrapper()(file) if miniters is None: miniters = 0 dynamic_miniters = True else: dynamic_miniters = False if mininterval is None: mininterval = 0 if maxinterval is None: maxinterval = 0 if ascii is None: ascii = not _supports_unicode(file) if bar_format and not ascii: # Convert bar format into unicode since terminal uses unicode bar_format = _unicode(bar_format) if smoothing is None: smoothing = 0 # Store the arguments self.iterable = iterable self.desc = desc + ': ' if desc else '' self.total = total self.leave = leave self.fp = file self.ncols = ncols self.mininterval = mininterval self.maxinterval = maxinterval self.miniters = miniters self.dynamic_miniters = dynamic_miniters self.ascii = ascii self.disable = disable self.unit = unit self.unit_scale = unit_scale self.gui = gui self.dynamic_ncols = dynamic_ncols self.smoothing = smoothing self.avg_time = None self._time = time self.bar_format = bar_format # Init the iterations counters self.last_print_n = initial self.n = initial # if nested, at 
initial sp() call we replace '\r' by '\n' to # not overwrite the outer progress bar self.pos = self._get_free_pos(self) if position is None else position if not gui: # Initialize the screen printer self.sp = self.status_printer(self.fp) if self.pos: self.moveto(self.pos) self.sp(self.format_meter(self.n, total, 0, (dynamic_ncols(file) if dynamic_ncols else ncols), self.desc, ascii, unit, unit_scale, None, bar_format)) if self.pos: self.moveto(-self.pos) # Init the time counter self.start_t = self.last_print_t = self._time() def __len__(self): return len(self.iterable) if self.iterable else self.total def __enter__(self): return self def __exit__(self, *exc): self.close() return False def __del__(self): self.close() def __repr__(self): return self.format_meter(self.n, self.total, time() - self.last_print_t, self.ncols, self.desc, self.ascii, self.unit, self.unit_scale, 1 / self.avg_time if self.avg_time else None, self.bar_format) def __lt__(self, other): # try: return self.pos < other.pos # except AttributeError: # return self.start_t < other.start_t def __le__(self, other): return (self < other) or (self == other) def __eq__(self, other): # try: return self.pos == other.pos # except AttributeError: # return self.start_t == other.start_t def __ne__(self, other): return not (self == other) def __gt__(self, other): return not (self <= other) def __ge__(self, other): return not (self < other) def __hash__(self): return id(self) def __iter__(self): ''' Backward-compatibility to use: for x in tqdm(iterable) ''' # Inlining instance variables as locals (speed optimisation) iterable = self.iterable # If the bar is disabled, then just walk the iterable # (note: keep this check outside the loop for performance) if self.disable: for obj in iterable: yield obj else: ncols = self.ncols mininterval = self.mininterval maxinterval = self.maxinterval miniters = self.miniters dynamic_miniters = self.dynamic_miniters unit = self.unit unit_scale = self.unit_scale ascii = self.ascii start_t = self.start_t last_print_t = self.last_print_t last_print_n = self.last_print_n n = self.n dynamic_ncols = self.dynamic_ncols smoothing = self.smoothing avg_time = self.avg_time bar_format = self.bar_format _time = self._time format_meter = self.format_meter try: sp = self.sp except AttributeError: raise DeprecationWarning('Please use tqdm_gui(...)' ' instead of tqdm(..., gui=True)') for obj in iterable: yield obj # Update and print the progressbar. # Note: does not call self.update(1) for speed optimisation. n += 1 # check the counter first (avoid calls to time()) if n - last_print_n >= miniters: delta_it = n - last_print_n cur_t = _time() delta_t = cur_t - last_print_t if delta_t >= mininterval: elapsed = cur_t - start_t # EMA (not just overall average) if smoothing and delta_t: avg_time = delta_t / delta_it \ if avg_time is None \ else smoothing * delta_t / delta_it + \ (1 - smoothing) * avg_time if self.pos: self.moveto(self.pos) # Printing the bar's update sp(format_meter( n, self.total, elapsed, (dynamic_ncols(self.fp) if dynamic_ncols else ncols), self.desc, ascii, unit, unit_scale, 1 / avg_time if avg_time else None, bar_format)) if self.pos: self.moveto(-self.pos) # If no `miniters` was specified, adjust automatically # to the maximum iteration rate seen so far. 
if dynamic_miniters: if maxinterval and delta_t > maxinterval: # Set miniters to correspond to maxinterval miniters = delta_it * maxinterval / delta_t elif mininterval and delta_t: # EMA-weight miniters to converge # towards the timeframe of mininterval miniters = smoothing * delta_it * mininterval \ / delta_t + (1 - smoothing) * miniters else: miniters = smoothing * delta_it + \ (1 - smoothing) * miniters # Store old values for next call self.n = self.last_print_n = last_print_n = n self.last_print_t = last_print_t = cur_t # Closing the progress bar. # Update some internal variables for close(). self.last_print_n = last_print_n self.n = n self.close() def update(self, n=1): """ Manually update the progress bar, useful for streams such as reading files. E.g.: >>> t = tqdm(total=filesize) # Initialise >>> for current_buffer in stream: ... ... ... t.update(len(current_buffer)) >>> t.close() The last line is highly recommended, but possibly not necessary if `t.update()` will be called in such a way that `filesize` will be exactly reached and printed. Parameters ---------- n : int Increment to add to the internal counter of iterations [default: 1]. """ if self.disable: return if n < 0: raise ValueError("n ({0}) cannot be negative".format(n)) self.n += n delta_it = self.n - self.last_print_n # should be n? if delta_it >= self.miniters: # We check the counter first, to reduce the overhead of time() cur_t = self._time() delta_t = cur_t - self.last_print_t if delta_t >= self.mininterval: elapsed = cur_t - self.start_t # EMA (not just overall average) if self.smoothing and delta_t: self.avg_time = delta_t / delta_it \ if self.avg_time is None \ else self.smoothing * delta_t / delta_it + \ (1 - self.smoothing) * self.avg_time if not hasattr(self, "sp"): raise DeprecationWarning('Please use tqdm_gui(...)' ' instead of tqdm(..., gui=True)') if self.pos: self.moveto(self.pos) # Print bar's update self.sp(self.format_meter( self.n, self.total, elapsed, (self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols), self.desc, self.ascii, self.unit, self.unit_scale, 1 / self.avg_time if self.avg_time else None, self.bar_format)) if self.pos: self.moveto(-self.pos) # If no `miniters` was specified, adjust automatically to the # maximum iteration rate seen so far. # e.g.: After running `tqdm.update(5)`, subsequent # calls to `tqdm.update()` will only cause an update after # at least 5 more iterations. if self.dynamic_miniters: if self.maxinterval and delta_t > self.maxinterval: self.miniters = self.miniters * self.maxinterval \ / delta_t elif self.mininterval and delta_t: self.miniters = self.smoothing * delta_it \ * self.mininterval / delta_t + \ (1 - self.smoothing) * self.miniters else: self.miniters = self.smoothing * delta_it + \ (1 - self.smoothing) * self.miniters # Store old values for next call self.last_print_n = self.n self.last_print_t = cur_t def close(self): """ Cleanup and (if leave=False) close the progressbar. 
""" if self.disable: return # Prevent multiple closures self.disable = True # decrement instance pos and remove from internal set pos = self.pos self._decr_instances(self) # GUI mode if not hasattr(self, "sp"): return # annoyingly, _supports_unicode isn't good enough def fp_write(s): self.fp.write(_unicode(s)) try: fp_write('') except ValueError as e: if 'closed' in str(e): return raise # pragma: no cover if pos: self.moveto(pos) if self.leave: if self.last_print_n < self.n: cur_t = self._time() # stats for overall rate (no weighted average) self.sp(self.format_meter( self.n, self.total, cur_t - self.start_t, (self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols), self.desc, self.ascii, self.unit, self.unit_scale, None, self.bar_format)) if pos: self.moveto(-pos) else: fp_write('\n') else: self.sp('') # clear up last bar if pos: self.moveto(-pos) else: fp_write('\r') def unpause(self): """ Restart tqdm timer from last print time. """ cur_t = self._time() self.start_t += cur_t - self.last_print_t self.last_print_t = cur_t def set_description(self, desc=None): """ Set/modify description of the progress bar. """ self.desc = desc + ': ' if desc else '' def moveto(self, n): self.fp.write(_unicode('\n' * n + _term_move_up() * -n)) def clear(self, nomove=False): """ Clear current bar display """ if not nomove: self.moveto(self.pos) # clear up the bar (can't rely on sp('')) self.fp.write('\r') self.fp.write(' ' * (self.ncols if self.ncols else 10)) self.fp.write('\r') # place cursor back at the beginning of line if not nomove: self.moveto(-self.pos) def refresh(self): """ Force refresh the display of this bar """ self.moveto(self.pos) # clear up this line's content (whatever there was) self.clear(nomove=True) # Print current/last bar state self.fp.write(self.__repr__()) self.moveto(-self.pos) def trange(*args, **kwargs): """ A shortcut for tqdm(xrange(*args), **kwargs). On Python3+ range is used instead of xrange. """ return tqdm(_range(*args), **kwargs)
# Copyright (c) 2012 Cloudera, Inc. All rights reserved. # # This test suite validates the scanners by running queries against ALL file formats and # their permutations (e.g. compression codec/compression type). This works by exhaustively # generating the table format test vectors for this specific test suite. This way, other # tests can run with the normal exploration strategy and the overall test runtime doesn't # explode. import logging import pytest from copy import deepcopy from subprocess import call, check_call from testdata.common import widetable from tests.common.test_vector import * from tests.common.impala_test_suite import * from tests.util.test_file_parser import * from tests.util.filesystem_utils import WAREHOUSE from tests.common.test_dimensions import create_single_exec_option_dimension from tests.common.skip import SkipIfS3, SkipIfIsilon class TestScannersAllTableFormats(ImpalaTestSuite): BATCH_SIZES = [0, 1, 16] @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestScannersAllTableFormats, cls).add_test_dimensions() if cls.exploration_strategy() == 'core': # The purpose of this test is to get some base coverage of all the file formats. # Even in 'core', we'll test each format by using the pairwise strategy. cls.TestMatrix.add_dimension(cls.create_table_info_dimension('pairwise')) cls.TestMatrix.add_dimension( TestDimension('batch_size', *TestScannersAllTableFormats.BATCH_SIZES)) def test_scanners(self, vector): new_vector = deepcopy(vector) new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size') self.run_test_case('QueryTest/scanners', new_vector) # Test all the scanners with a simple limit clause. The limit clause triggers # cancellation in the scanner code paths. class TestScannersAllTableFormatsWithLimit(ImpalaTestSuite): @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestScannersAllTableFormatsWithLimit, cls).add_test_dimensions() def test_limit(self, vector): # Use a small batch size so changing the limit affects the timing of cancellation vector.get_value('exec_option')['batch_size'] = 100 iterations = 50 query_template = "select * from alltypes limit %s" for i in range(1, iterations): # Vary the limit to vary the timing of cancellation query = query_template % ((iterations * 100) % 1000 + 1) self.execute_query(query, vector.get_value('exec_option'), table_format=vector.get_value('table_format')) # Test case to verify the scanners work properly when the table metadata (specifically the # number of columns in the table) does not match the number of columns in the data file. class TestUnmatchedSchema(ImpalaTestSuite): @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestUnmatchedSchema, cls).add_test_dimensions() cls.TestMatrix.add_dimension(create_single_exec_option_dimension()) # Avro has a more advanced schema evolution process which is covered in more depth # in the test_avro_schema_evolution test suite. cls.TestMatrix.add_constraint(\ lambda v: v.get_value('table_format').file_format != 'avro') def _get_table_location(self, table_name, vector): result = self.execute_query_using_client(self.client, "describe formatted %s" % table_name, vector) for row in result.data: if 'Location:' in row: return row.split('\t')[1] # This should never happen. 
assert 0, 'Unable to get location for table: ' + table_name def _create_test_table(self, vector): """ Creates the test table Cannot be done in a setup method because we need access to the current test vector """ self._drop_test_table(vector) self.execute_query_using_client(self.client, "create external table jointbl_test like jointbl", vector) # Update the location of the new table to point the same location as the old table location = self._get_table_location('jointbl', vector) self.execute_query_using_client(self.client, "alter table jointbl_test set location '%s'" % location, vector) def _drop_test_table(self, vector): self.execute_query_using_client(self.client, "drop table if exists jointbl_test", vector) def test_unmatched_schema(self, vector): table_format = vector.get_value('table_format') # jointbl has no columns with unique values. When loaded in hbase, the table looks # different, as hbase collapses duplicates. if table_format.file_format == 'hbase': pytest.skip() self._create_test_table(vector) self.run_test_case('QueryTest/test-unmatched-schema', vector) self._drop_test_table(vector) # Tests that scanners can read a single-column, single-row, 10MB table class TestWideRow(ImpalaTestSuite): @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestWideRow, cls).add_test_dimensions() # I can't figure out how to load a huge row into hbase cls.TestMatrix.add_constraint( lambda v: v.get_value('table_format').file_format != 'hbase') def test_wide_row(self, vector): new_vector = deepcopy(vector) # Use a 5MB scan range, so we will have to perform 5MB of sync reads new_vector.get_value('exec_option')['max_scan_range_length'] = 5 * 1024 * 1024 # We need > 10 MB of memory because we're creating extra buffers: # - 10 MB table / 5 MB scan range = 2 scan ranges, each of which may allocate ~20MB # - Sync reads will allocate ~5MB of space # The 80MB value used here was determined empirically by raising the limit until the # query succeeded for all file formats -- I don't know exactly why we need this much. # TODO: figure out exact breakdown of memory usage (IMPALA-681) new_vector.get_value('exec_option')['mem_limit'] = 80 * 1024 * 1024 self.run_test_case('QueryTest/wide-row', new_vector) class TestWideTable(ImpalaTestSuite): # TODO: expand this to more rows when we have the capability NUM_COLS = [250, 500, 1000] @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestWideTable, cls).add_test_dimensions() cls.TestMatrix.add_dimension(TestDimension("num_cols", *cls.NUM_COLS)) # To cut down on test execution time, only run in exhaustive. if cls.exploration_strategy() != 'exhaustive': cls.TestMatrix.add_constraint(lambda v: False) def test_wide_table(self, vector): NUM_COLS = vector.get_value('num_cols') # Due to the way HBase handles duplicate row keys, we have different number of # rows in HBase tables compared to HDFS tables. 
NUM_ROWS = 10 if vector.get_value('table_format').file_format != 'hbase' else 2 DB_NAME = QueryTestSectionReader.get_db_name(vector.get_value('table_format')) TABLE_NAME = "%s.widetable_%s_cols" % (DB_NAME, NUM_COLS) result = self.client.execute("select count(*) from %s " % TABLE_NAME) assert result.data == [str(NUM_ROWS)] expected_result = widetable.get_data(NUM_COLS, NUM_ROWS, quote_strings=True) result = self.client.execute("select * from %s" % TABLE_NAME) if vector.get_value('table_format').file_format == 'hbase': assert len(result.data) == NUM_ROWS return types = parse_column_types(result.schema) labels = parse_column_labels(result.schema) expected = QueryTestResult(expected_result, types, labels, order_matters=False) actual = QueryTestResult(parse_result_rows(result), types, labels, order_matters=False) assert expected == actual class TestParquet(ImpalaTestSuite): @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestParquet, cls).add_test_dimensions() cls.TestMatrix.add_constraint( lambda v: v.get_value('table_format').file_format == 'parquet') def test_parquet(self, vector): self.run_test_case('QueryTest/parquet', vector) @SkipIfS3.hdfs_block_size @SkipIfIsilon.hdfs_block_size def test_verify_runtime_profile(self, vector): # For IMPALA-1881. The table functional_parquet.lineitem_multiblock has 3 blocks, so # we verify if each impalad reads one block by checking if each impalad reads at # least one row group. DB_NAME = 'functional_parquet' TABLE_NAME = 'lineitem_multiblock' query = 'select count(l_orderkey) from %s.%s' % (DB_NAME, TABLE_NAME) result = self.client.execute(query) assert len(result.data) == 1 assert result.data[0] == '20000' runtime_profile = str(result.runtime_profile) num_row_groups_list = re.findall('NumRowGroups: ([0-9]*)', runtime_profile) scan_ranges_complete_list = re.findall('ScanRangesComplete: ([0-9]*)', runtime_profile) # This will fail if the number of impalads != 3 # The fourth fragment is the "Averaged Fragment" assert len(num_row_groups_list) == 4 assert len(scan_ranges_complete_list) == 4 # Skip the Averaged Fragment; it comes first in the runtime profile. for num_row_groups in num_row_groups_list[1:]: assert int(num_row_groups) > 0 for scan_ranges_complete in scan_ranges_complete_list[1:]: assert int(scan_ranges_complete) == 1 # Missing coverage: Impala can query a table with complex types created by Hive on a # non-hdfs filesystem. @SkipIfS3.hive @SkipIfIsilon.hive class TestParquetComplexTypes(ImpalaTestSuite): COMPLEX_COLUMN_TABLE = "functional_parquet.nested_column_types" @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestParquetComplexTypes, cls).add_test_dimensions() cls.TestMatrix.add_dimension(create_single_exec_option_dimension()) # Only run on delimited text with no compression. cls.TestMatrix.add_dimension(create_parquet_dimension(cls.get_workload())) # This tests we can read the scalar-typed columns from a Parquet table that also has # complex-typed columns. 
# TODO: remove this when we can read complex-typed columns (complex types testing should # supercede this) def test_complex_column_types(self, vector): self._drop_complex_column_table() # Partitioned case create_table_stmt = """ CREATE TABLE IF NOT EXISTS {0} ( a int, b ARRAY<STRUCT<c:INT, d:STRING>>, e MAP<STRING,INT>, f string, g ARRAY<INT>, h STRUCT<i:DOUBLE> ) PARTITIONED BY (p1 INT, p2 STRING) STORED AS PARQUET; """.format(self.COMPLEX_COLUMN_TABLE) insert_stmt = """ INSERT OVERWRITE TABLE {0} PARTITION (p1=1, p2="partition1") SELECT 1, array(named_struct("c", 2, "d", "foo")), map("key1", 10, "key2", 20), "bar", array(2,3,4,5), named_struct("i", 1.23) FROM functional_parquet.tinytable limit 2; """.format(self.COMPLEX_COLUMN_TABLE) check_call(["hive", "-e", create_table_stmt]) check_call(["hive", "-e", insert_stmt]) self.execute_query("invalidate metadata %s" % self.COMPLEX_COLUMN_TABLE) result = self.execute_query("select count(*) from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 1) assert(result.data[0] == "2") result = self.execute_query("select a from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 2) assert(result.data[1] == "1") result = self.execute_query( "select p1, a from %s where p1 = 1" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 2) assert(result.data[1] == "1\t1") result = self.execute_query("select f from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 2) assert(result.data[1] == "bar") result = self.execute_query( "select p2, f from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 2) assert(result.data[1] == "partition1\tbar") # Unpartitioned case self._drop_complex_column_table() create_table_stmt = """ CREATE TABLE IF NOT EXISTS {0} ( a int, b ARRAY<STRUCT<c:INT, d:STRING>>, e MAP<STRING,INT>, f string, g ARRAY<INT>, h STRUCT<i:DOUBLE> ) STORED AS PARQUET; """.format(self.COMPLEX_COLUMN_TABLE) insert_stmt = """ INSERT OVERWRITE TABLE {0} SELECT 1, array(named_struct("c", 2, "d", "foo")), map("key1", 10, "key2", 20), "bar", array(2,3,4,5), named_struct("i", 1.23) FROM functional_parquet.tinytable limit 2; """.format(self.COMPLEX_COLUMN_TABLE) check_call(["hive", "-e", create_table_stmt]) check_call(["hive", "-e", insert_stmt]) self.execute_query("invalidate metadata %s" % self.COMPLEX_COLUMN_TABLE) result = self.execute_query("select count(*) from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 1) assert(result.data[0] == "2") result = self.execute_query("select a from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 2) assert(result.data[1] == "1") result = self.execute_query("select f from %s" % self.COMPLEX_COLUMN_TABLE) assert(len(result.data) == 2) assert(result.data[1] == "bar") @classmethod def teardown_class(cls): cls._drop_complex_column_table() @classmethod def _drop_complex_column_table(cls): cls.client.execute("drop table if exists %s" % cls.COMPLEX_COLUMN_TABLE) # We use various scan range lengths to exercise corner cases in the HDFS scanner more # thoroughly. In particular, it will exercise: # 1. default scan range # 2. scan range with no tuple # 3. tuple that span across multiple scan ranges # 4. 
scan range length = 16 for ParseSse() execution path MAX_SCAN_RANGE_LENGTHS = [0, 1, 2, 5, 16, 17, 32] class TestScanRangeLengths(ImpalaTestSuite): @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestScanRangeLengths, cls).add_test_dimensions() cls.TestMatrix.add_dimension( TestDimension('max_scan_range_length', *MAX_SCAN_RANGE_LENGTHS)) def test_scan_ranges(self, vector): vector.get_value('exec_option')['max_scan_range_length'] =\ vector.get_value('max_scan_range_length') self.run_test_case('QueryTest/hdfs-tiny-scan', vector) # More tests for text scanner # 1. Test file that ends w/o tuple delimiter # 2. Test file with escape character class TestTextScanRangeLengths(ImpalaTestSuite): ESCAPE_TABLE_LIST = ["testescape_16_lf", "testescape_16_crlf", "testescape_17_lf", "testescape_17_crlf", "testescape_32_lf", "testescape_32_crlf"] @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestTextScanRangeLengths, cls).add_test_dimensions() cls.TestMatrix.add_dimension( TestDimension('max_scan_range_length', *MAX_SCAN_RANGE_LENGTHS)) cls.TestMatrix.add_constraint(lambda v:\ v.get_value('table_format').file_format == 'text' and\ v.get_value('table_format').compression_codec == 'none') def test_text_scanner(self, vector): vector.get_value('exec_option')['max_scan_range_length'] =\ vector.get_value('max_scan_range_length') self.execute_query_expect_success(self.client, "drop stats " "functional.table_no_newline_part") self.execute_query_expect_success(self.client, "compute stats " "functional.table_no_newline_part") self.run_test_case('QueryTest/hdfs-text-scan', vector) # Test various escape char cases. We have to check the count(*) result against # the count(col) result because if the scan range is split right after the escape # char, the escape char has no effect because we cannot scan backwards to the # previous scan range. for t in self.ESCAPE_TABLE_LIST: expected_result = self.client.execute("select count(col) from " + t) result = self.client.execute("select count(*) from " + t) assert result.data == expected_result.data # Missing Coverage: No coverage for truncated files errors or scans. @SkipIfS3.hive @SkipIfIsilon.hive @pytest.mark.execute_serially class TestScanTruncatedFiles(ImpalaTestSuite): TEST_DB = 'test_truncated_file' @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestScanTruncatedFiles, cls).add_test_dimensions() cls.TestMatrix.add_dimension(create_single_exec_option_dimension()) # This test takes about a minute to complete due to the Hive commands that are # executed. To cut down on runtime, limit the test to exhaustive exploration # strategy. 
# TODO: Test other file formats if cls.exploration_strategy() == 'exhaustive': cls.TestMatrix.add_constraint(lambda v:\ v.get_value('table_format').file_format == 'text' and\ v.get_value('table_format').compression_codec == 'none') else: cls.TestMatrix.add_constraint(lambda v: False) def setup_method(self, method): self.cleanup_db(TestScanTruncatedFiles.TEST_DB) self.client.execute("create database %s location '%s/%s.db'" % (TestScanTruncatedFiles.TEST_DB, WAREHOUSE, TestScanTruncatedFiles.TEST_DB)) def teardown_method(self, method): self.cleanup_db(TestScanTruncatedFiles.TEST_DB) def test_scan_truncated_file_empty(self, vector): self.scan_truncated_file(0) def test_scan_truncated_file(self, vector): self.scan_truncated_file(10) def scan_truncated_file(self, num_rows): db_name = TestScanTruncatedFiles.TEST_DB tbl_name = "tbl" self.execute_query("use %s" % db_name) self.execute_query("create table %s (s string)" % tbl_name) call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s SELECT string_col from "\ "functional.alltypes" % (db_name, tbl_name)]) # Update the Impala metadata self.execute_query("refresh %s" % tbl_name) # Insert overwrite with a truncated file call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s SELECT string_col from "\ "functional.alltypes limit %s" % (db_name, tbl_name, num_rows)]) result = self.execute_query("select count(*) from %s" % tbl_name) assert(len(result.data) == 1) assert(result.data[0] == str(num_rows))
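# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the suite above: the minimal shape of an
# additional scanner test class in this framework.  The dimension values, the
# 'num_nodes' exec option and the 'QueryTest/my-scanner-case' test file name
# are hypothetical placeholders; the structure mirrors
# TestScannersAllTableFormats.
class TestScannerSketch(ImpalaTestSuite):
  NUM_NODES = [0, 1]

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestScannerSketch, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.TestMatrix.add_dimension(TestDimension('num_nodes', *cls.NUM_NODES))
    # Restrict to uncompressed text to keep the test matrix small.
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and\
        v.get_value('table_format').compression_codec == 'none')

  def test_sketch(self, vector):
    # Copy the vector before mutating exec options, as the suites above do.
    new_vector = deepcopy(vector)
    new_vector.get_value('exec_option')['num_nodes'] = vector.get_value('num_nodes')
    self.run_test_case('QueryTest/my-scanner-case', new_vector)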
########################################################################## # # Copyright (c) 2014, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import IECore import Gaffer import GafferUI import GafferScene class SceneReaderPathPreview( GafferUI.PathPreviewWidget ) : def __init__( self, path ) : column = GafferUI.SplitContainer( GafferUI.SplitContainer.Orientation.Vertical ) GafferUI.PathPreviewWidget.__init__( self, column, path ) self.__script = Gaffer.ScriptNode( "scenePreview" ) # for reading IECore.SceneInterface files (scc, lscc) self.__script["SceneReader"] = GafferScene.SceneReader() # for reading Alembic files (abc) self.__script["AlembicSource"] = GafferScene.AlembicSource() # for reading more generic single object files (cob, ptc, pdc, etc) ## \todo: can we unify all file input to SceneReader by creating a SceneInterface that makes # single object scenes using Reader ops behind the scenes? 
self.__script["ObjectPreview"] = _ObjectPreview() # display points and curves GL style rather than disks and ribbons self.__script["OpenGLAttributes"] = GafferScene.OpenGLAttributes( "OpenGLAttributes" ) self.__script["OpenGLAttributes"]["attributes"]["pointsPrimitiveUseGLPoints"]["value"].setValue( 'forAll' ) self.__script["OpenGLAttributes"]["attributes"]["pointsPrimitiveUseGLPoints"]["enabled"].setValue( True ) self.__script["OpenGLAttributes"]["attributes"]["curvesPrimitiveUseGLLines"]["enabled"].setValue( True ) self.__script["camera"] = _Camera() self.__script["camera"]["in"].setInput( self.__script["OpenGLAttributes"]["out"] ) self.__viewer = GafferUI.Viewer( self.__script ) column.append( self.__viewer ) column.append( GafferUI.Timeline( self.__script ) ) self.__script.selection().add( self.__script["camera"] ) self._updateFromPath() def isValid( self ) : path = self.getPath() if not isinstance( path, ( Gaffer.FileSystemPath, Gaffer.SequencePath ) ) or not path.isLeaf() : return False if isinstance( path, Gaffer.SequencePath ) : try : sequence = IECore.FileSequence( str(path) ) ext = sequence.fileName.split( "." )[-1] except : return False else : ext = str(path).split( "." )[-1] supported = set( [ "abc" ] ) supported.update( GafferScene.SceneReader.supportedExtensions() ) supported.update( IECore.Reader.supportedExtensions() ) # no reason to preview a single image as a 3D scene supported.difference_update( IECore.Reader.supportedExtensions( IECore.TypeId.ImageReader ) ) return ext in supported def _updateFromPath( self ) : self.__script["SceneReader"]["fileName"].setValue( "" ) self.__script["AlembicSource"]["fileName"].setValue( "" ) self.__script["ObjectPreview"]["fileName"].setValue( "" ) if not self.isValid() : return path = self.getPath() if isinstance( path, Gaffer.SequencePath ) : try : sequence = IECore.FileSequence( str(path) ) except : return fileName = str(sequence) ext = sequence.fileName.split( "." )[-1] calc = IECore.OversamplesCalculator() if isinstance( sequence.frameList, IECore.FrameRange ) and sequence.frameList.step == 1 : calc.setTicksPerSecond( 24 ) frames = sequence.frameList.asList() startFrame = int( calc.ticksToFrames( min(frames) ) ) endFrame = int( calc.ticksToFrames( max(frames) ) ) else : fileName = str(path) ext = str(path).split( "." 
)[-1] startFrame = None endFrame = None outPlug = None if ext in GafferScene.SceneReader.supportedExtensions() : self.__script["SceneReader"]["fileName"].setValue( fileName ) outPlug = self.__script["SceneReader"]["out"] scene = IECore.SharedSceneInterfaces.get( fileName ) if hasattr( scene, "numBoundSamples" ) : numSamples = scene.numBoundSamples() if numSamples > 1 : startFrame = int( round( scene.boundSampleTime( 0 ) * 24.0 ) ) endFrame = int( round( scene.boundSampleTime( numSamples - 1 ) * 24.0 ) ) elif ext in IECore.Reader.supportedExtensions() : self.__script["ObjectPreview"]["fileName"].setValue( fileName ) outPlug = self.__script["ObjectPreview"]["out"] elif ext == "abc" : self.__script["AlembicSource"]["fileName"].setValue( fileName ) outPlug = self.__script["AlembicSource"]["out"] ## \todo: determine the frame range from the abc file self.__script["OpenGLAttributes"]["in"].setInput( outPlug ) # update the timeline if startFrame is not None and endFrame is not None : self.__script.context().setFrame( startFrame ) self.__script["frameRange"]["start"].setValue( startFrame ) self.__script["frameRange"]["end"].setValue( endFrame ) GafferUI.Playback.acquire( self.__script.context() ).setFrameRange( startFrame, endFrame ) # focus the viewer with self.__script.context() : self.__viewer.viewGadgetWidget().getViewportGadget().frame( self.__script["OpenGLAttributes"]["out"].bound( "/" ) ) GafferUI.PathPreviewWidget.registerType( "Scene", SceneReaderPathPreview ) class _Camera( Gaffer.Node ) : def __init__( self, name = "_Camera" ) : Gaffer.Node.__init__( self, name ) self["in"] = GafferScene.ScenePlug() self["addCamera"] = Gaffer.BoolPlug( defaultValue = False ) self["lookAt"] = Gaffer.StringPlug( defaultValue = "/" ) self["depth"] = Gaffer.FloatPlug( defaultValue = 20, minValue = 0 ) self["angle"] = Gaffer.FloatPlug() self["elevation"] = Gaffer.FloatPlug( defaultValue = 10, minValue = -90, maxValue = 90 ) self["camera"] = GafferScene.Camera() self["camera"]["name"].setValue( "previewCamera" ) self["parent"] = GafferScene.Parent() self["parent"]["in"].setInput( self["in"] ) self["parent"]["parent"].setValue( "/" ) self["parent"]["child"].setInput( self["camera"]["out"] ) self["cameraFilter"] = GafferScene.PathFilter() self["cameraFilter"]["paths"].setValue( IECore.StringVectorData( [ "/previewCamera" ] ) ) self["parentConstraint"] = GafferScene.ParentConstraint() self["parentConstraint"]["in"].setInput( self["parent"]["out"] ) self["parentConstraint"]["target"].setInput( self["lookAt"] ) self["parentConstraint"]["targetMode"].setValue( self["parentConstraint"].TargetMode.BoundCenter ) self["parentConstraint"]["filter"].setInput( self["cameraFilter"]["out"] ) self["cameraRotate"] = GafferScene.Transform() self["cameraRotate"]["in"].setInput( self["parentConstraint"]["out"] ) self["cameraRotate"]["transform"]["rotate"]["y"].setInput( self["angle"] ) self["cameraRotate"]["filter"].setInput( self["cameraFilter"]["out"] ) self["elevationExpression"] = Gaffer.Expression() self["elevationExpression"].setExpression( 'parent["cameraRotate"]["transform"]["rotate"]["x"] = -parent["elevation"]' ) self["cameraTranslate"] = GafferScene.Transform() self["cameraTranslate"]["in"].setInput( self["cameraRotate"]["out"] ) self["cameraTranslate"]["transform"]["translate"]["z"].setInput( self["depth"] ) self["cameraTranslate"]["filter"].setInput( self["cameraFilter"]["out"] ) self["options"] = GafferScene.StandardOptions() self["options"]["options"]["renderCamera"]["enabled"].setValue( True ) 
self["options"]["options"]["renderCamera"]["value"].setValue( "/previewCamera" ) self["options"]["in"].setInput( self["cameraTranslate"]["out"] ) self["switch"] = GafferScene.SceneSwitch() self["switch"]["in"].setInput( self["in"] ) self["switch"]["in1"].setInput( self["options"]["out"] ) self["switch"]["index"].setInput( self["addCamera"] ) self["out"] = GafferScene.ScenePlug( direction = Gaffer.Plug.Direction.Out ) self["out"].setInput( self["switch"]["out"] ) IECore.registerRunTimeTyped( _Camera ) Gaffer.Metadata.registerNode( _Camera, "nodeToolbar:top:type", "GafferUI.StandardNodeToolbar.top", plugs = { "*" : [ "toolbarLayout:section", "Top", ], "in" : [ "plugValueWidget:type", "", ], "out" : [ "plugValueWidget:type", "", ], "user" : [ "plugValueWidget:type", "", ], "lookAt" : [ "plugValueWidget:type", "GafferSceneUI.ScenePathPlugValueWidget", ], "depth" : [ "numericPlugValueWidget:fixedCharacterWidth", 5, ], "angle" : [ "numericPlugValueWidget:fixedCharacterWidth", 5, ], "elevation" : [ "numericPlugValueWidget:fixedCharacterWidth", 5, ], } ) # Utility node for previewing single objects from a file or # sequence (cob, ptc, pdc, etc), as though they were a scene class _ObjectPreview( Gaffer.Node ) : def __init__( self, name = "_ObjectPreview" ) : Gaffer.Node.__init__( self, name ) self["fileName"] = Gaffer.StringPlug( defaultValue = "", substitutions = Gaffer.Context.Substitutions.NoSubstitutions ) self["frameRate"] = Gaffer.FloatPlug( defaultValue = 24.0 ) self["samplesPerFrame"] = Gaffer.IntPlug( defaultValue = 1, minValue = 1 ) # single object scenes using Reader ops behind the scenes? self["ObjectReader"] = Gaffer.ObjectReader() self["ObjectReaderExpression"] = Gaffer.Expression( "Expression" ) self["ObjectReaderExpression"].setExpression( ''' import IECore fileName = parent['fileName'] try : sequence = IECore.FileSequence( fileName ) calc = IECore.OversamplesCalculator( frameRate = parent["frameRate"], samplesPerFrame = parent["samplesPerFrame"] ) if isinstance( sequence.frameList, IECore.FrameRange ) and sequence.frameList.step == 1 : calc.setTicksPerSecond( 24 ) result = sequence.fileNameForFrame( calc.framesToTicks( context['frame'] ) ) except : result = fileName parent['ObjectReader']['fileName'] = result ''' ) self["ObjectToScene"] = GafferScene.ObjectToScene( "ObjectToScene" ) self["ObjectToScene"]["object"].setInput( self["ObjectReader"]["out"] ) self["out"] = GafferScene.ScenePlug( direction = Gaffer.Plug.Direction.Out ) self["out"].setInput( self["ObjectToScene"]["out"] ) IECore.registerRunTimeTyped( _ObjectPreview )
""" These classes are wrappers for `XGBoost library <https://github.com/dmlc/xgboost>`_. """ from __future__ import division, print_function, absolute_import import tempfile import os from abc import ABCMeta import pandas import numpy from sklearn.utils import check_random_state from .utils import normalize_weights, remove_first_line from .interface import Classifier, Regressor from .utils import check_inputs __author__ = 'Mikhail Hushchyn, Alex Rogozhnikov' __all__ = ['XGBoostBase', 'XGBoostClassifier', 'XGBoostRegressor'] try: import xgboost as xgb except ImportError as e: raise ImportError("please install xgboost") class XGBoostBase(object): """ A base class for the XGBoostClassifier and XGBoostRegressor. XGBoost tree booster is used. :param int n_estimators: number of trees built. :param int nthreads: number of parallel threads used to run XGBoost. :param num_feature: feature dimension used in boosting, set to maximum dimension of the feature (set automatically by XGBoost, no need to be set by user). :type num_feature: None or int :param float gamma: minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be. :type gamma: None or float :param float eta: (or learning rate) step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features and eta actually shrinkages the feature weights to make the boosting process more conservative. :param int max_depth: maximum depth of a tree. :param float scale_pos_weight: ration of weights of the class 1 to the weights of the class 0. :param float min_child_weight: minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. .. note:: weights are normalized so that mean=1 before fitting. Roughly min_child_weight is equal to the number of events. :param float subsample: subsample ratio of the training instance. Setting it to 0.5 means that XGBoost randomly collected half of the data instances to grow trees and this will prevent overfitting. :param float colsample: subsample ratio of columns when constructing each tree. :param float base_score: the initial prediction score of all instances, global bias. :param random_state: state for a pseudo random generator :type random_state: None or int or RandomState :param boot verbose: if 1, will print messages during training :param float missing: the number considered by XGBoost as missing value. """ __metaclass__ = ABCMeta def __init__(self, n_estimators=100, nthreads=16, num_feature=None, gamma=None, eta=0.3, max_depth=6, scale_pos_weight=1., min_child_weight=1., subsample=1., colsample=1., base_score=0.5, verbose=0, missing=-999., random_state=0): self.n_estimators = n_estimators self.missing = missing self.nthreads = nthreads self.num_feature = num_feature self.gamma = gamma self.eta = eta self.max_depth = max_depth self.scale_pos_weight = scale_pos_weight self.min_child_weight = min_child_weight self.subsample = subsample self.colsample = colsample self.objective = None self.base_score = base_score self.verbose = verbose self.random_state = random_state self._num_class = None self.xgboost_estimator = None def _make_dmatrix(self, X, y=None, sample_weight=None): """ Create XGBoost data from initial data. 
:return: XGBoost DMatrix """ feature_names = [str(i) for i in range(X.shape[1])] matrix = xgb.DMatrix(data=X, label=y, weight=sample_weight, missing=self.missing, feature_names=feature_names) return matrix def _check_fitted(self): assert self.xgboost_estimator is not None, "Classifier wasn't fitted, please call `fit` first" def _fit(self, X, y, estimator_type, sample_weight=None, **kwargs): """ Train a classification/regression model on the data. :param pandas.DataFrame X: data of shape [n_samples, n_features] :param y: labels of samples, array-like of shape [n_samples] :param sample_weight: weight of samples, array-like of shape [n_samples] or None if all weights are equal :param str estimator_type: type of the estimator (binary, reg or mult) :param dict kwargs: additional parameters :return: self """ if self.random_state is None: seed = 0 elif isinstance(self.random_state, int): seed = self.random_state else: seed = check_random_state(self.random_state).randint(0, 10000) self.objective = estimator_type params = {"nthread": self.nthreads, "eta": self.eta, "max_depth": self.max_depth, "scale_pos_weight": self.scale_pos_weight, "min_child_weight": self.min_child_weight, "subsample": self.subsample, "colsample_bytree": self.colsample, "objective": self.objective, "base_score": self.base_score, "silent": int(not self.verbose), "seed": seed} for key, value in kwargs.items(): params[key] = value if key == 'num_class': self._num_class = value if self.num_feature is not None: params["num_feature"] = self.num_feature if self.gamma is not None: params["gamma"] = self.gamma xgboost_matrix = self._make_dmatrix(X, y, sample_weight) self.xgboost_estimator = xgb.train(params, xgboost_matrix, num_boost_round=self.n_estimators) return self def __getstate__(self): result = self.__dict__.copy() del result['xgboost_estimator'] if self.xgboost_estimator is None: result['dumped_xgboost'] = None else: with tempfile.NamedTemporaryFile() as dump: self._save_model(dump.name) with open(dump.name, 'rb') as dumpfile: result['dumped_xgboost'] = dumpfile.read() return result def __setstate__(self, dict): self.__dict__ = dict if dict['dumped_xgboost'] is None: self.xgboost_estimator = None else: with tempfile.NamedTemporaryFile() as dump: with open(dump.name, 'wb') as dumpfile: dumpfile.write(dict['dumped_xgboost']) self._load_model(dump.name) # HACK error in xgboost reloading if '_num_class' in dict: self.xgboost_estimator.set_param({'num_class': dict['_num_class']}) del dict['dumped_xgboost'] def _save_model(self, path_to_dump): """ Save XGBoost model""" self._check_fitted() self.xgboost_estimator.save_model(path_to_dump) def _load_model(self, path_to_dumped_model): """ Load XGBoost model to estimator """ assert os.path.exists(path_to_dumped_model), 'there is no such file: {}'.format(path_to_dumped_model) self.xgboost_estimator = xgb.Booster({'nthread': self.nthreads}, model_file=path_to_dumped_model) def get_feature_importances(self): """ Get features importances. :rtype: pandas.DataFrame with `index=self.features` """ self._check_fitted() feature_score = self.xgboost_estimator.get_fscore() reordered_scores = numpy.zeros(len(self.features)) for name, score in feature_score.items(): reordered_scores[int(name)] = score return pandas.DataFrame({'effect': reordered_scores}, index=self.features) @property def feature_importances_(self): """Sklearn-way of returning feature importance. 
This returned as numpy.array, assuming that initially passed train_features=None """ self._check_fitted() return self.get_feature_importances().ix[self.features, 'effect'].values class XGBoostClassifier(XGBoostBase, Classifier): __doc__ = 'Implements classification model from XGBoost library. \n'\ + remove_first_line(XGBoostBase.__doc__) def __init__(self, features=None, n_estimators=100, nthreads=16, num_feature=None, gamma=None, eta=0.3, max_depth=6, scale_pos_weight=1., min_child_weight=1., subsample=1., colsample=1., base_score=0.5, verbose=0, missing=-999., random_state=0): XGBoostBase.__init__(self, n_estimators=n_estimators, nthreads=nthreads, num_feature=num_feature, gamma=gamma, eta=eta, max_depth=max_depth, scale_pos_weight=scale_pos_weight, min_child_weight=min_child_weight, subsample=subsample, colsample=colsample, base_score=base_score, verbose=verbose, missing=missing, random_state=random_state) Classifier.__init__(self, features=features) def fit(self, X, y, sample_weight=None): X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False) sample_weight = normalize_weights(y, sample_weight=sample_weight, per_class=False) X = self._get_features(X) self._set_classes(y) if self.n_classes_ >= 2: return self._fit(X, y, 'multi:softprob', sample_weight=sample_weight, num_class=self.n_classes_) fit.__doc__ = Classifier.fit.__doc__ def predict_proba(self, X): self._check_fitted() X_dmat = self._make_dmatrix(self._get_features(X)) prediction = self.xgboost_estimator.predict(X_dmat, ntree_limit=0) if self.n_classes_ >= 2: return prediction.reshape(X.shape[0], self.n_classes_) predict_proba.__doc__ = Classifier.predict_proba.__doc__ def staged_predict_proba(self, X, step=None): """ Predict probabilities for data for each class label on each stage.. :param pandas.DataFrame X: data of shape [n_samples, n_features] :param int step: step for returned iterations (None by default). XGBoost does not implement this functionality and we need to predict from the beginning each time. With `None` passed step is chosen to have 10 points in the learning curve. :return: iterator .. warning: this method may be very slow, it takes iterations^2 / step time. """ self._check_fitted() X_dmat = self._make_dmatrix(self._get_features(X)) if step is None: step = max(self.n_estimators // 10, 1) # TODO use applying tree-by-tree for i in range(1, self.n_estimators // step + 1): prediction = self.xgboost_estimator.predict(X_dmat, ntree_limit=i * step) yield prediction.reshape(X.shape[0], self.n_classes_) class XGBoostRegressor(XGBoostBase, Regressor): __doc__ = 'Implements regression model from XGBoost library. 
\n' + remove_first_line(XGBoostBase.__doc__) def __init__(self, features=None, n_estimators=100, nthreads=16, num_feature=None, gamma=None, eta=0.3, max_depth=6, min_child_weight=1., subsample=1., colsample=1., objective_type='linear', base_score=0.5, verbose=0, missing=-999., random_state=0): XGBoostBase.__init__(self, n_estimators=n_estimators, nthreads=nthreads, num_feature=num_feature, gamma=gamma, eta=eta, max_depth=max_depth, min_child_weight=min_child_weight, subsample=subsample, colsample=colsample, base_score=base_score, verbose=verbose, missing=missing, random_state=random_state) Regressor.__init__(self, features=features) self.objective_type = objective_type def fit(self, X, y, sample_weight=None): X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False) sample_weight = normalize_weights(y, sample_weight=sample_weight, per_class=False) X = self._get_features(X) assert self.objective_type in {'linear', 'logistic'}, 'Objective parameter is not valid' return self._fit(X, y, "reg:{}".format(self.objective_type), sample_weight=sample_weight) fit.__doc__ = Regressor.fit.__doc__ def predict(self, X): self._check_fitted() X_dmat = self._make_dmatrix(self._get_features(X)) return self.xgboost_estimator.predict(X_dmat, ntree_limit=0) predict.__doc__ = Regressor.predict.__doc__ def staged_predict(self, X, step=None): """ Predicts values for data on each stage. :param X: pandas.DataFrame of shape [n_samples, n_features] :param int step: step for returned iterations (None by default). XGBoost does not implement this functionality and we need to predict from the beginning each time. With `None` passed step is chosen to have 10 points in the learning curve. :return: iterator .. warning: this method may be very slow, it takes iterations^2 / step time. """ self._check_fitted() X_dmat = self._make_dmatrix(self._get_features(X)) if step is None: step = max(self.n_estimators // 10, 1) # TODO use applying tree-by-tree for i in range(1, self.n_estimators // step + 1): yield self.xgboost_estimator.predict(X_dmat, ntree_limit=i * step)
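# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the wrappers above: fitting the
# classifier and regressor on a small synthetic dataset.  The column names,
# labels and hyperparameter values are arbitrary placeholders; the wrappers
# expose the sklearn-like fit/predict interface defined by Classifier and
# Regressor.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    data = pandas.DataFrame({'a': rng.normal(size=200),
                             'b': rng.normal(size=200)})
    labels = (data['a'] + data['b'] > 0).astype(int)

    clf = XGBoostClassifier(features=['a', 'b'], n_estimators=20, max_depth=3)
    clf.fit(data, labels)
    proba = clf.predict_proba(data)            # shape: [n_samples, n_classes]
    print(clf.get_feature_importances())

    reg = XGBoostRegressor(features=['a', 'b'], n_estimators=20,
                           objective_type='linear')
    reg.fit(data, data['a'] + data['b'])
    predictions = reg.predict(data)            # shape: [n_samples]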
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from nova import exception from nova import test from nova.tests import fakeguestfs from nova.virt.disk import api as diskapi from nova.virt.disk.vfs import guestfs as vfsguestfs class VirtDiskTest(test.TestCase): def setUp(self): super(VirtDiskTest, self).setUp() sys.modules['guestfs'] = fakeguestfs vfsguestfs.guestfs = fakeguestfs def test_inject_data(self): self.assertTrue(diskapi.inject_data("/some/file", use_cow=True)) self.assertTrue(diskapi.inject_data("/some/file", mandatory=('files',))) self.assertTrue(diskapi.inject_data("/some/file", key="mysshkey", mandatory=('key',))) os_name = os.name os.name = 'nt' # Cause password injection to fail self.assertRaises(exception.NovaException, diskapi.inject_data, "/some/file", admin_password="p", mandatory=('admin_password',)) self.assertFalse(diskapi.inject_data("/some/file", admin_password="p")) os.name = os_name def test_inject_data_key(self): vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2") vfs.setup() diskapi._inject_key_into_fs("mysshkey", vfs) self.assertTrue("/root/.ssh" in vfs.handle.files) self.assertEquals(vfs.handle.files["/root/.ssh"], {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0700}) self.assertTrue("/root/.ssh/authorized_keys" in vfs.handle.files) self.assertEquals(vfs.handle.files["/root/.ssh/authorized_keys"], {'isdir': False, 'content': "Hello World\n# The following ssh " + "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, 'mode': 0700}) vfs.teardown() def test_inject_data_key_with_selinux(self): vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2") vfs.setup() vfs.make_path("etc/selinux") vfs.make_path("etc/rc.d") diskapi._inject_key_into_fs("mysshkey", vfs) self.assertTrue("/etc/rc.d/rc.local" in vfs.handle.files) self.assertEquals(vfs.handle.files["/etc/rc.d/rc.local"], {'isdir': False, 'content': "Hello World#!/bin/sh\n# Added by " + "Nova to ensure injected ssh keys " + "have the right context\nrestorecon " + "-RF root/.ssh 2>/dev/null || :\n", 'gid': 100, 'uid': 100, 'mode': 0700}) self.assertTrue("/root/.ssh" in vfs.handle.files) self.assertEquals(vfs.handle.files["/root/.ssh"], {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0700}) self.assertTrue("/root/.ssh/authorized_keys" in vfs.handle.files) self.assertEquals(vfs.handle.files["/root/.ssh/authorized_keys"], {'isdir': False, 'content': "Hello World\n# The following ssh " + "key was injected by Nova\nmysshkey\n", 'gid': 100, 'uid': 100, 'mode': 0700}) vfs.teardown() def test_inject_data_key_with_selinux_append_with_newline(self): vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2") vfs.setup() vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done") vfs.make_path("etc/selinux") vfs.make_path("etc/rc.d") diskapi._inject_key_into_fs("mysshkey", vfs) self.assertTrue("/etc/rc.d/rc.local" in vfs.handle.files) self.assertEquals(vfs.handle.files["/etc/rc.d/rc.local"], {'isdir': False, 'content': "#!/bin/sh\necho done\n# Added 
" "by Nova to ensure injected ssh keys have " "the right context\nrestorecon -RF " "root/.ssh 2>/dev/null || :\n", 'gid': 100, 'uid': 100, 'mode': 0700}) vfs.teardown() def test_inject_net(self): vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2") vfs.setup() diskapi._inject_net_into_fs("mynetconfig", vfs) self.assertTrue("/etc/network/interfaces" in vfs.handle.files) self.assertEquals(vfs.handle.files["/etc/network/interfaces"], {'content': 'mynetconfig', 'gid': 100, 'isdir': False, 'mode': 0700, 'uid': 100}) vfs.teardown() def test_inject_metadata(self): vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2") vfs.setup() diskapi._inject_metadata_into_fs([{"key": "foo", "value": "bar"}, {"key": "eek", "value": "wizz"}], vfs) self.assertTrue("/meta.js" in vfs.handle.files) self.assertEquals(vfs.handle.files["/meta.js"], {'content': '{"foo": "bar", ' + '"eek": "wizz"}', 'gid': 100, 'isdir': False, 'mode': 0700, 'uid': 100}) vfs.teardown() def test_inject_admin_password(self): vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2") vfs.setup() def fake_salt(): return "1234567890abcdef" self.stubs.Set(diskapi, '_generate_salt', fake_salt) vfs.handle.write("/etc/shadow", "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n") vfs.handle.write("/etc/passwd", "root:x:0:0:root:/root:/bin/bash\n" + "bin:x:1:1:bin:/bin:/sbin/nologin\n" + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n") diskapi._inject_admin_password_into_fs("123456", vfs) self.assertEquals(vfs.handle.files["/etc/passwd"], {'content': "root:x:0:0:root:/root:/bin/bash\n" + "bin:x:1:1:bin:/bin:/sbin/nologin\n" + "daemon:x:2:2:daemon:/sbin:" + "/sbin/nologin\n", 'gid': 100, 'isdir': False, 'mode': 0700, 'uid': 100}) shadow = vfs.handle.files["/etc/shadow"] # if the encrypted password is only 13 characters long, then # nova.virt.disk.api:_set_password fell back to DES. if len(shadow['content']) == 91: self.assertEquals(shadow, {'content': "root:12tir.zIbWQ3c" + ":14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n", 'gid': 100, 'isdir': False, 'mode': 0700, 'uid': 100}) else: self.assertEquals(shadow, {'content': "root:$1$12345678$a4ge4d5iJ5vw" + "vbFS88TEN0:14917:0:99999:7:::\n" + "bin:*:14495:0:99999:7:::\n" + "daemon:*:14495:0:99999:7:::\n", 'gid': 100, 'isdir': False, 'mode': 0700, 'uid': 100}) vfs.teardown()
import asyncio import pytest from dask import delayed from distributed import Client from distributed.client import futures_of from distributed.metrics import time from distributed.protocol import Serialized from distributed.utils_test import gen_cluster, inc @gen_cluster() async def test_publish_simple(s, a, b): c = Client(s.address, asynchronous=True) f = Client(s.address, asynchronous=True) await asyncio.gather(c, f) data = await c.scatter(range(3)) await c.publish_dataset(data=data) assert "data" in s.extensions["publish"].datasets assert isinstance(s.extensions["publish"].datasets["data"]["data"], Serialized) with pytest.raises(KeyError) as exc_info: await c.publish_dataset(data=data) assert "exists" in str(exc_info.value) assert "data" in str(exc_info.value) result = await c.scheduler.publish_list() assert result == ("data",) result = await f.scheduler.publish_list() assert result == ("data",) await asyncio.gather(c.close(), f.close()) @gen_cluster() async def test_publish_non_string_key(s, a, b): async with Client(s.address, asynchronous=True) as c: for name in [("a", "b"), 9.0, 8]: data = await c.scatter(range(3)) await c.publish_dataset(data, name=name) assert name in s.extensions["publish"].datasets assert isinstance( s.extensions["publish"].datasets[name]["data"], Serialized ) datasets = await c.scheduler.publish_list() assert name in datasets @gen_cluster() async def test_publish_roundtrip(s, a, b): c = await Client(s.address, asynchronous=True) f = await Client(s.address, asynchronous=True) data = await c.scatter([0, 1, 2]) await c.publish_dataset(data=data) assert "published-data" in s.who_wants[data[0].key] result = await f.get_dataset(name="data") assert len(result) == len(data) out = await f.gather(result) assert out == [0, 1, 2] with pytest.raises(KeyError) as exc_info: await f.get_dataset(name="nonexistent") assert "not found" in str(exc_info.value) assert "nonexistent" in str(exc_info.value) await c.close() await f.close() @gen_cluster(client=True) async def test_unpublish(c, s, a, b): data = await c.scatter([0, 1, 2]) await c.publish_dataset(data=data) key = data[0].key del data await c.scheduler.publish_delete(name="data") assert "data" not in s.extensions["publish"].datasets start = time() while key in s.who_wants: await asyncio.sleep(0.01) assert time() < start + 5 with pytest.raises(KeyError) as exc_info: await c.get_dataset(name="data") assert "not found" in str(exc_info.value) assert "data" in str(exc_info.value) def test_unpublish_sync(client): data = client.scatter([0, 1, 2]) client.publish_dataset(data=data) client.unpublish_dataset(name="data") with pytest.raises(KeyError) as exc_info: client.get_dataset(name="data") assert "not found" in str(exc_info.value) assert "data" in str(exc_info.value) @gen_cluster(client=True) async def test_publish_multiple_datasets(c, s, a, b): x = delayed(inc)(1) y = delayed(inc)(2) await c.publish_dataset(x=x, y=y) datasets = await c.scheduler.publish_list() assert set(datasets) == {"x", "y"} def test_unpublish_multiple_datasets_sync(client): x = delayed(inc)(1) y = delayed(inc)(2) client.publish_dataset(x=x, y=y) client.unpublish_dataset(name="x") with pytest.raises(KeyError) as exc_info: client.get_dataset(name="x") datasets = client.list_datasets() assert set(datasets) == {"y"} assert "not found" in str(exc_info.value) assert "x" in str(exc_info.value) client.unpublish_dataset(name="y") with pytest.raises(KeyError) as exc_info: client.get_dataset(name="y") assert "not found" in str(exc_info.value) assert "y" in 
str(exc_info.value) @gen_cluster() async def test_publish_bag(s, a, b): db = pytest.importorskip("dask.bag") c = await Client(s.address, asynchronous=True) f = await Client(s.address, asynchronous=True) bag = db.from_sequence([0, 1, 2]) bagp = c.persist(bag) assert len(futures_of(bagp)) == 3 keys = {f.key for f in futures_of(bagp)} assert keys == set(bag.dask) await c.publish_dataset(data=bagp) # check that serialization didn't affect original bag's dask assert len(futures_of(bagp)) == 3 result = await f.get_dataset("data") assert set(result.dask.keys()) == set(bagp.dask.keys()) assert {f.key for f in result.dask.values()} == {f.key for f in bagp.dask.values()} out = await f.compute(result) assert out == [0, 1, 2] await c.close() await f.close() def test_datasets_setitem(client): for key in ["key", ("key", "key"), 1]: value = "value" client.datasets[key] = value assert client.get_dataset(key) == value assert client.get_dataset(key, default="something else") == value def test_datasets_getitem(client): for key in ["key", ("key", "key"), 1]: value = "value" client.publish_dataset(value, name=key) assert client.datasets[key] == value assert client.datasets.get(key) == value assert client.datasets.get(key, default="something else") == value def test_datasets_getitem_default(client): with pytest.raises(KeyError) as exc_info: client.get_dataset("key") assert client.datasets.get("key", default="value") == "value" assert client.datasets.get("key", default=None) is None assert client.get_dataset("key", default="value") == "value" def test_datasets_delitem(client): for key in ["key", ("key", "key"), 1]: value = "value" client.publish_dataset(value, name=key) del client.datasets[key] assert key not in client.list_datasets() def test_datasets_keys(client): client.publish_dataset(**{str(n): n for n in range(10)}) keys = list(client.datasets.keys()) assert keys == [str(n) for n in range(10)] def test_datasets_contains(client): key, value = "key", "value" client.publish_dataset(key=value) assert key in client.datasets def test_datasets_republish(client): key, value, value2 = "key", "value", "value2" client.publish_dataset(key=value) assert client.get_dataset(key) == value with pytest.raises(KeyError) as exc_info: client.publish_dataset(key=value) client.publish_dataset(key=value2, override=True) assert client.get_dataset(key) == value2 def test_datasets_iter(client): keys = [n for n in range(10)] client.publish_dataset(**{str(key): key for key in keys}) for n, key in enumerate(client.datasets): assert key == str(n) with pytest.raises(TypeError): client.datasets.__aiter__() @gen_cluster(client=True) async def test_datasets_async(c, s, a, b): await c.publish_dataset(foo=1, bar=2) assert await c.datasets["foo"] == 1 assert {k async for k in c.datasets} == {"foo", "bar"} with pytest.raises(TypeError): c.datasets["baz"] = 3 with pytest.raises(TypeError): del c.datasets["foo"] with pytest.raises(TypeError): next(iter(c.datasets)) with pytest.raises(TypeError): len(c.datasets) @gen_cluster(client=True) async def test_pickle_safe(c, s, a, b): async with Client(s.address, asynchronous=True, serializers=["msgpack"]) as c2: await c2.publish_dataset(x=[1, 2, 3]) result = await c2.get_dataset("x") assert result == [1, 2, 3] with pytest.raises(TypeError): await c2.publish_dataset(y=lambda x: x) await c.publish_dataset(z=lambda x: x) # this can use pickle with pytest.raises(TypeError): await c2.get_dataset("z") @gen_cluster(client=True) async def test_deserialize_client(c, s, a, b): """Test that the client attached to 
Futures returned by Client.get_dataset is always the instance of the client that invoked the method. Specifically: - when the client is defined by hostname, test that it is not accidentally reinitialised by IP; - when multiple clients are connected to the same scheduler, test that they don't interfere with each other. See: test_client.test_serialize_future See: https://github.com/dask/distributed/issues/3227 """ future = await c.scatter("123") await c.publish_dataset(foo=future) future = await c.get_dataset("foo") assert future.client is c for addr in (s.address, "localhost:" + s.address.split(":")[-1]): async with Client(addr, asynchronous=True) as c2: future = await c.get_dataset("foo") assert future.client is c future = await c2.get_dataset("foo") assert future.client is c2 # Ensure cleanup from distributed.client import _current_client assert _current_client.get() is None
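# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the test module above: the
# publish/get round trip these tests exercise, run against a throwaway local
# cluster.  The dataset name and values are arbitrary placeholders.
def _publish_sketch():
    from distributed import LocalCluster

    with LocalCluster(n_workers=1, processes=False) as cluster:
        with Client(cluster) as client:
            futures = client.scatter([0, 1, 2])
            client.publish_dataset(numbers=futures)       # name via keyword
            assert "numbers" in client.list_datasets()

            same = client.get_dataset("numbers")          # or client.datasets["numbers"]
            assert client.gather(same) == [0, 1, 2]

            client.unpublish_dataset("numbers")
            assert "numbers" not in client.list_datasets()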
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import threading import time import warnings import pandas as pd import apache_beam as beam from apache_beam.dataframe.frame_base import DeferredBase from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload from apache_beam.runners.interactive import background_caching_job as bcj from apache_beam.runners.interactive import interactive_environment as ie from apache_beam.runners.interactive import interactive_runner as ir from apache_beam.runners.interactive import pipeline_fragment as pf from apache_beam.runners.interactive import pipeline_instrument as pi from apache_beam.runners.interactive import utils from apache_beam.runners.runner import PipelineState _LOGGER = logging.getLogger(__name__) class ElementStream: """A stream of elements from a given PCollection.""" def __init__( self, pcoll, # type: beam.pvalue.PCollection var, # type: str cache_key, # type: str max_n, # type: int max_duration_secs # type: float ): self._pcoll = pcoll self._cache_key = cache_key self._pipeline = pcoll.pipeline self._var = var self._n = max_n self._duration_secs = max_duration_secs # A small state variable that when True, indicates that no more new elements # will be yielded if read() is called again. self._done = False @property def var(self): # type: () -> str """Returns the variable named that defined this PCollection.""" return self._var @property def pcoll(self): # type: () -> beam.pvalue.PCollection """Returns the PCollection that supplies this stream with data.""" return self._pcoll @property def cache_key(self): # type: () -> str """Returns the cache key for this stream.""" return self._cache_key def display_id(self, suffix): # type: (str) -> str """Returns a unique id able to be displayed in a web browser.""" return utils.obfuscate(self._cache_key, suffix) def is_computed(self): # type: () -> boolean """Returns True if no more elements will be recorded.""" return self._pcoll in ie.current_env().computed_pcollections def is_done(self): # type: () -> boolean """Returns True if no more new elements will be yielded.""" return self._done def read(self, tail=True): # type: (boolean) -> Any """Reads the elements currently recorded.""" # Get the cache manager and wait until the file exists. cache_manager = ie.current_env().get_cache_manager(self._pipeline) # Retrieve the coder for the particular PCollection which will be used to # decode elements read from cache. coder = cache_manager.load_pcoder('full', self._cache_key) # Read the elements from the cache. # Import limiters here to prevent a circular import. 
from apache_beam.runners.interactive.options.capture_limiters import CountLimiter from apache_beam.runners.interactive.options.capture_limiters import ProcessingTimeLimiter reader, _ = cache_manager.read('full', self._cache_key, tail=tail) # Because a single TestStreamFileRecord can yield multiple elements, we # limit the count again here in the to_element_list call. # # There are two ways of exiting this loop either a limiter was triggered or # all elements from the cache were read. In the latter situation, it may be # the case that the pipeline was still running. Thus, another invocation of # `read` will yield new elements. count_limiter = CountLimiter(self._n) time_limiter = ProcessingTimeLimiter(self._duration_secs) limiters = (count_limiter, time_limiter) for e in utils.to_element_list(reader, coder, include_window_info=True, n=self._n, include_time_events=True): # From the to_element_list we either get TestStreamPayload.Events if # include_time_events or decoded elements from the reader. Make sure we # only count the decoded elements to break early. if isinstance(e, TestStreamPayload.Event): time_limiter.update(e) else: count_limiter.update(e) yield e if any(l.is_triggered() for l in limiters): break # A limiter being triggered means that we have fulfilled the user's request. # This implies that reading from the cache again won't yield any new # elements. WLOG, this applies to the user pipeline being terminated. if any(l.is_triggered() for l in limiters) or ie.current_env().is_terminated(self._pipeline): self._done = True class Recording: """A group of PCollections from a given pipeline run.""" def __init__( self, user_pipeline, # type: beam.Pipeline pcolls, # type: List[beam.pvalue.PCollection] result, # type: beam.runner.PipelineResult pipeline_instrument, # type: beam.runners.interactive.PipelineInstrument max_n, # type: int max_duration_secs, # type: float ): self._user_pipeline = user_pipeline self._result = result self._result_lock = threading.Lock() self._pcolls = pcolls pcoll_var = lambda pcoll: pipeline_instrument.cacheable_var_by_pcoll_id( pipeline_instrument.pcolls_to_pcoll_id.get(str(pcoll), None)) self._streams = { pcoll: ElementStream( pcoll, pcoll_var(pcoll), pipeline_instrument.cache_key(pcoll), max_n, max_duration_secs) for pcoll in pcolls } self._start = time.time() self._duration_secs = max_duration_secs self._set_computed = bcj.is_cache_complete(str(id(user_pipeline))) # Run a separate thread for marking the PCollections done. This is because # the pipeline run may be asynchronous. self._mark_computed = threading.Thread(target=self._mark_all_computed) self._mark_computed.daemon = True self._mark_computed.start() def _mark_all_computed(self): # type: () -> None """Marks all the PCollections upon a successful pipeline run.""" if not self._result: return while not PipelineState.is_terminal(self._result.state): with self._result_lock: bcj = ie.current_env().get_background_caching_job(self._user_pipeline) if bcj and bcj.is_done(): self._result.wait_until_finish() elif time.time() - self._start >= self._duration_secs: self._result.cancel() self._result.wait_until_finish() elif all(s.is_done() for s in self._streams.values()): self._result.cancel() self._result.wait_until_finish() time.sleep(0.1) # Mark the PCollection as computed so that Interactive Beam wouldn't need to # re-compute. 
if self._result.state is PipelineState.DONE and self._set_computed: ie.current_env().mark_pcollection_computed(self._pcolls) def is_computed(self): # type: () -> boolean """Returns True if all PCollections are computed.""" return all(s.is_computed() for s in self._streams.values()) def stream(self, pcoll): # type: (beam.pvalue.PCollection) -> ElementStream """Returns an ElementStream for a given PCollection.""" return self._streams[pcoll] def computed(self): # type: () -> None """Returns all computed ElementStreams.""" return {p: s for p, s in self._streams.items() if s.is_computed()} def uncomputed(self): # type: () -> None """Returns all uncomputed ElementStreams.""" return {p: s for p, s in self._streams.items() if not s.is_computed()} def cancel(self): # type: () -> None """Cancels the recording.""" with self._result_lock: self._result.cancel() def wait_until_finish(self): # type: () -> None """Waits until the pipeline is done and returns the final state. This also marks any PCollections as computed right away if the pipeline is successful. """ if not self._result: return beam.runners.runner.PipelineState.DONE self._mark_computed.join() return self._result.state def describe(self): # type: () -> dict[str, int] """Returns a dictionary describing the cache and recording.""" cache_manager = ie.current_env().get_cache_manager(self._user_pipeline) size = sum( cache_manager.size('full', s.cache_key) for s in self._streams.values()) return {'size': size, 'duration': self._duration_secs} class RecordingManager: """Manages recordings of PCollections for a given pipeline.""" def __init__(self, user_pipeline, pipeline_var=None, test_limiters=None): # type: (beam.Pipeline, str, list[Limiter]) -> None self.user_pipeline = user_pipeline # type: beam.Pipeline self.pipeline_var = pipeline_var if pipeline_var else '' # type: str self._recordings = set() # type: set[Recording] self._start_time_sec = 0 # type: float self._test_limiters = test_limiters if test_limiters else [] def _watch(self, pcolls): # type: (List[beam.pvalue.PCollection]) -> None """Watch any pcollections not being watched. This allows for the underlying caching layer to identify the PCollection as something to be cached. """ watched_pcollections = set() watched_dataframes = set() for watching in ie.current_env().watching(): for _, val in watching: if isinstance(val, beam.pvalue.PCollection): watched_pcollections.add(val) elif isinstance(val, DeferredBase): watched_dataframes.add(val) # Convert them one-by-one to generate a unique label for each. This allows # caching at a more fine-grained granularity. # # TODO(BEAM-12388): investigate the mixing pcollections in multiple # pipelines error when using the default label. for df in watched_dataframes: pcoll, _ = utils.deferred_df_to_pcollection(df) watched_pcollections.add(pcoll) for pcoll in pcolls: if pcoll not in watched_pcollections: ie.current_env().watch( {'anonymous_pcollection_{}'.format(id(pcoll)): pcoll}) def _clear(self, pipeline_instrument): # type: (List[beam.pvalue.PCollection]) -> None """Clears the recording of all non-source PCollections.""" cache_manager = ie.current_env().get_cache_manager(self.user_pipeline) # Only clear the PCollections that aren't being populated from the # BackgroundCachingJob. 
computed = ie.current_env().computed_pcollections cacheables = [ c for c in pipeline_instrument.cacheables.values() if c.pcoll.pipeline is self.user_pipeline and c.pcoll not in computed ] all_cached = set(str(c.to_key()) for c in cacheables) source_pcolls = getattr(cache_manager, 'capture_keys', set()) to_clear = all_cached - source_pcolls self._clear_pcolls(cache_manager, set(to_clear)) def _clear_pcolls(self, cache_manager, pcolls): for pc in pcolls: cache_manager.clear('full', pc) def clear(self): # type: () -> None """Clears all cached PCollections for this RecordingManager.""" cache_manager = ie.current_env().get_cache_manager(self.user_pipeline) if cache_manager: cache_manager.cleanup() def cancel(self): # type: (None) -> None """Cancels the current background recording job.""" bcj.attempt_to_cancel_background_caching_job(self.user_pipeline) for r in self._recordings: r.wait_until_finish() self._recordings = set() # The recordings rely on a reference to the BCJ to correctly finish. So we # evict the BCJ after they complete. ie.current_env().evict_background_caching_job(self.user_pipeline) def describe(self): # type: () -> dict[str, int] """Returns a dictionary describing the cache and recording.""" cache_manager = ie.current_env().get_cache_manager(self.user_pipeline) capture_size = getattr(cache_manager, 'capture_size', 0) descriptions = [r.describe() for r in self._recordings] size = sum(d['size'] for d in descriptions) + capture_size start = self._start_time_sec bcj = ie.current_env().get_background_caching_job(self.user_pipeline) if bcj: state = bcj.state else: state = PipelineState.STOPPED return { 'size': size, 'start': start, 'state': state, 'pipeline_var': self.pipeline_var } def record_pipeline(self): # type: () -> bool """Starts a background caching job for this RecordingManager's pipeline.""" runner = self.user_pipeline.runner if isinstance(runner, ir.InteractiveRunner): runner = runner._underlying_runner # Make sure that sources without a user reference are still cached. ie.current_env().add_user_pipeline(self.user_pipeline) pi.watch_sources(self.user_pipeline) # Attempt to run background caching job to record any sources. if ie.current_env().is_in_ipython: warnings.filterwarnings( 'ignore', 'options is deprecated since First stable release. References to ' '<pipeline>.options will not be supported', category=DeprecationWarning) if bcj.attempt_to_run_background_caching_job( runner, self.user_pipeline, options=self.user_pipeline.options, limiters=self._test_limiters): self._start_time_sec = time.time() return True return False def record(self, pcolls, max_n, max_duration): # type: (List[beam.pvalue.PCollection], int, Union[int,str]) -> Recording """Records the given PCollections.""" # Assert that all PCollection come from the same user_pipeline. for pcoll in pcolls: assert pcoll.pipeline is self.user_pipeline, ( '{} belongs to a different user-defined pipeline ({}) than that of' ' other PCollections ({}).'.format( pcoll, pcoll.pipeline, self.user_pipeline)) if isinstance(max_duration, str) and max_duration != 'inf': max_duration_secs = pd.to_timedelta(max_duration).total_seconds() else: max_duration_secs = max_duration # Make sure that all PCollections to be shown are watched. If a PCollection # has not been watched, make up a variable name for that PCollection and # watch it. No validation is needed here because the watch logic can handle # arbitrary variables. 
    self._watch(pcolls)

    pipeline_instrument = pi.PipelineInstrument(self.user_pipeline)

    self.record_pipeline()

    # Get the subset of computed PCollections. These do not need to be
    # recomputed.
    computed_pcolls = set(
        pcoll for pcoll in pcolls
        if pcoll in ie.current_env().computed_pcollections)

    # Start a pipeline fragment to start computing the PCollections.
    uncomputed_pcolls = set(pcolls).difference(computed_pcolls)
    if uncomputed_pcolls:
      # Clear the cache of the given uncomputed PCollections because they are
      # incomplete.
      self._clear(pipeline_instrument)

      warnings.filterwarnings(
          'ignore',
          'options is deprecated since First stable release. References to '
          '<pipeline>.options will not be supported',
          category=DeprecationWarning)
      pf.PipelineFragment(list(uncomputed_pcolls),
                          self.user_pipeline.options).run()
      result = ie.current_env().pipeline_result(self.user_pipeline)
    else:
      result = None

    recording = Recording(
        self.user_pipeline,
        pcolls,
        result,
        pipeline_instrument,
        max_n,
        max_duration_secs)
    self._recordings.add(recording)

    return recording
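# The sketch below is a hedged illustration of how the classes above fit
# together, reusing this module's imports. It assumes an interactive
# (notebook-like) session where the interactive environment is initialised;
# the pipeline and variable names are made up for the example.
def _recording_sketch():
  # Build a small pipeline on the InteractiveRunner so its PCollections can be
  # cached and replayed by the recording machinery above.
  p = beam.Pipeline(ir.InteractiveRunner())
  squares = (
      p
      | 'Create' >> beam.Create(range(5))
      | 'Square' >> beam.Map(lambda x: x * x))

  # Record at most 5 elements or 60 seconds of data per PCollection, mirroring
  # the record(pcolls, max_n, max_duration) signature above.
  manager = RecordingManager(p)
  recording = manager.record([squares], max_n=5, max_duration=60)

  # Each recorded PCollection is exposed as an ElementStream whose read()
  # yields elements decoded from the cache.
  for element in recording.stream(squares).read(tail=False):
    print(element)

  recording.wait_until_finish()
  print(recording.describe())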
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for debug_gradients module.""" import tempfile from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.debug.lib import debug_data from tensorflow.python.debug.lib import debug_gradients from tensorflow.python.debug.lib import debug_utils from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.training import gradient_descent @test_util.run_v1_only("Sessions are not available in TF 2.x") class IdentifyGradientTest(test_util.TensorFlowTestCase): def setUp(self): rewriter_config = rewriter_config_pb2.RewriterConfig( disable_model_pruning=True, dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF) graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config) config = config_pb2.ConfigProto(graph_options=graph_options) self.sess = session.Session(config=config) with self.sess.as_default(): self.u = variables.Variable(2.0, name="u") self.v = variables.Variable(3.0, name="v") self.w = math_ops.multiply(self.u.value(), self.v.value(), name="w") def tearDown(self): ops.reset_default_graph() debug_gradients.clear_gradient_debuggers() def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self): grad_debugger = debug_gradients.GradientsDebugger() id_grad_w = grad_debugger.identify_gradient(self.w) y = math_ops.add(id_grad_w, -1.0, name="y") grads = gradients_impl.gradients(y, [self.u, self.v]) self.assertEqual(2, len(grads)) u_grad = grads[0] v_grad = grads[1] self.sess.run(variables.global_variables_initializer()) self.assertAllClose(5.0, self.sess.run(y)) self.assertAllClose(3.0, self.sess.run(u_grad)) self.assertAllClose(2.0, self.sess.run(v_grad)) # Fetch the gradient tensor with the x-tensor object. w_grad = grad_debugger.gradient_tensor(self.w) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) # Fetch the gradient tensor with the x-tensor's name. w_grad = grad_debugger.gradient_tensor(self.w.name) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) # Fetch the gradient tensor with the x-tensor name. 
w_grad = grad_debugger.gradient_tensor(self.w.name) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self): grad_debugger = debug_gradients.GradientsDebugger() id_grad_w = grad_debugger.identify_gradient(self.w) y = math_ops.add(id_grad_w, -1.0, name="y") with grad_debugger: grads = gradients_impl.gradients(y, [self.u, self.v]) self.assertEqual(2, len(grads)) u_grad = grads[0] v_grad = grads[1] self.sess.run(variables.global_variables_initializer()) self.assertAllClose(5.0, self.sess.run(y)) self.assertAllClose(3.0, self.sess.run(u_grad)) self.assertAllClose(2.0, self.sess.run(v_grad)) # Fetch the gradient tensor with the x-tensor object. w_grad = grad_debugger.gradient_tensor(self.w) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) # Fetch the gradient tensor with the x-tensor's name. w_grad = grad_debugger.gradient_tensor(self.w.name) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) # Fetch the gradient tensor with the x-tensor name. w_grad = grad_debugger.gradient_tensor(self.w.name) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self): grad_debugger = debug_gradients.GradientsDebugger() grad_debugger.identify_gradient(self.w) with self.assertRaisesRegex(ValueError, "The graph already contains an op named .*"): grad_debugger.identify_gradient(self.w) def testIdentifyGradientWorksOnMultipleLosses(self): grad_debugger_1 = debug_gradients.GradientsDebugger() grad_debugger_2 = debug_gradients.GradientsDebugger() y = math_ops.add(self.w, -1.0, name="y") debug_y = grad_debugger_1.identify_gradient(y) z1 = math_ops.square(debug_y, name="z1") debug_y = grad_debugger_2.identify_gradient(y) z2 = math_ops.sqrt(debug_y, name="z2") with grad_debugger_1: gradient_descent.GradientDescentOptimizer(0.1).minimize(z1) with grad_debugger_2: gradient_descent.GradientDescentOptimizer(0.1).minimize(z2) dz1_dy = grad_debugger_1.gradient_tensor(y) dz2_dy = grad_debugger_2.gradient_tensor(y) self.assertIsInstance(dz1_dy, ops.Tensor) self.assertIsInstance(dz2_dy, ops.Tensor) self.assertIsNot(dz1_dy, dz2_dy) self.sess.run(variables.global_variables_initializer()) self.assertAllClose(5.0**2, self.sess.run(z1)) self.assertAllClose(5.0**0.5, self.sess.run(z2)) self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy)) self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy)) def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self): grad_debugger_1 = debug_gradients.GradientsDebugger() grad_debugger_2 = debug_gradients.GradientsDebugger() id_grad_w = grad_debugger_1.identify_gradient(self.w) y = math_ops.add(id_grad_w, -1.0, name="y") # There are >1 gradient debuggers registered, and grad_debugger is not used # as a context manager here, so the gradient w.r.t. self.w will not be # registered. 
gradients_impl.gradients(y, [self.u, self.v]) with self.assertRaisesRegex( LookupError, r"This GradientsDebugger has not received any gradient tensor for "): grad_debugger_1.gradient_tensor(self.w) with self.assertRaisesRegex( LookupError, r"This GradientsDebugger has not received any gradient tensor for "): grad_debugger_2.gradient_tensor(self.w) def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self): grad_debugger = debug_gradients.GradientsDebugger() with self.assertRaisesRegex( TypeError, r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead " r"has type .*Operation.*"): grad_debugger.gradient_tensor(variables.global_variables_initializer()) def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self): grad_debugger = debug_gradients.GradientsDebugger() id_grad_w = grad_debugger.identify_gradient(self.w) y = math_ops.add(id_grad_w, -1.0, name="y") with grad_debugger: gradient_descent.GradientDescentOptimizer(0.1).minimize(y) self.sess.run(variables.global_variables_initializer()) # Fetch the gradient tensor with the x-tensor object. w_grad = grad_debugger.gradient_tensor(self.w) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) def testWatchGradientsByXTensorNamesWorks(self): y = math_ops.add(self.w, -1.0, name="y") # The constructrion of the forward graph has completed. # But we can still get the gradient tensors by using # watch_gradients_by_tensor_names(). grad_debugger = debug_gradients.GradientsDebugger() with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$"): grads = gradients_impl.gradients(y, [self.u, self.v]) self.assertEqual(2, len(grads)) u_grad = grads[0] v_grad = grads[1] self.sess.run(variables.global_variables_initializer()) self.assertAllClose(5.0, self.sess.run(y)) self.assertAllClose(3.0, self.sess.run(u_grad)) self.assertAllClose(2.0, self.sess.run(v_grad)) w_grad = grad_debugger.gradient_tensor(self.w) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) w_grad = grad_debugger.gradient_tensor("w:0") self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self): y = math_ops.add(self.w, -1.0, name="y") # The constructrion of the forward graph has completed. # But we can still get the gradient tensors by using # watch_gradients_by_tensor_names(). 
grad_debugger = debug_gradients.GradientsDebugger() grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$") grads = gradients_impl.gradients(y, [self.u, self.v]) self.assertEqual(2, len(grads)) u_grad = grads[0] v_grad = grads[1] self.sess.run(variables.global_variables_initializer()) self.assertAllClose(5.0, self.sess.run(y)) self.assertAllClose(3.0, self.sess.run(u_grad)) self.assertAllClose(2.0, self.sess.run(v_grad)) w_grad = grad_debugger.gradient_tensor(self.w) self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) w_grad = grad_debugger.gradient_tensor("w:0") self.assertIsInstance(w_grad, ops.Tensor) self.assertAllClose(1.0, self.sess.run(w_grad)) def testWatchGradientsWorksOnRefTensor(self): y = math_ops.add(self.w, -1.0, name="y") grad_debugger = debug_gradients.GradientsDebugger() with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"): grads = gradients_impl.gradients(y, [self.u, self.v]) self.assertEqual(2, len(grads)) u_grad = grads[0] v_grad = grads[1] self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0")) self.sess.run(variables.global_variables_initializer()) self.assertAllClose(3.0, self.sess.run(u_grad)) self.assertAllClose(2.0, self.sess.run(v_grad)) self.assertAllClose(3.0, self.sess.run( grad_debugger.gradient_tensor("u:0"))) def testWatchGradientsWorksOnMultipleTensors(self): y = math_ops.add(self.w, -1.0, name="y") grad_debugger = debug_gradients.GradientsDebugger() with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "(u|w):0$"): grads = gradients_impl.gradients(y, [self.u, self.v]) self.assertEqual(2, len(grads)) u_grad = grads[0] self.assertEqual(2, len(grad_debugger.gradient_tensors())) self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0")) self.assertIsInstance(grad_debugger.gradient_tensor("w:0"), ops.Tensor) self.sess.run(variables.global_variables_initializer()) self.assertAllClose(1.0, self.sess.run( grad_debugger.gradient_tensor("w:0"))) self.assertAllClose(3.0, self.sess.run( grad_debugger.gradient_tensor("u:0"))) def testWatchGradientsByXTensorsWorks(self): y = math_ops.add(self.w, -1.0, name="foo/y") z = math_ops.square(y, name="foo/z") # The constructrion of the forward graph has completed. # But we can still get the gradient tensors by using # watch_gradients_by_x_tensors(). 
grad_debugger = debug_gradients.GradientsDebugger() with grad_debugger.watch_gradients_by_tensors(self.sess.graph, [self.w, self.u, y]): gradient_descent.GradientDescentOptimizer(0.1).minimize(z) self.assertEqual(3, len(grad_debugger.gradient_tensors())) u_grad = grad_debugger.gradient_tensor(self.u) w_grad = grad_debugger.gradient_tensor(self.w) y_grad = grad_debugger.gradient_tensor(y) self.sess.run(variables.global_variables_initializer()) self.assertAllClose(10.0, self.sess.run(y_grad)) self.assertAllClose(10.0, self.sess.run(w_grad)) self.assertAllClose(30.0, self.sess.run(u_grad)) def testWatchGradientsByTensorCanWorkOnMultipleLosses(self): y = math_ops.add(self.w, -1.0, name="y") z1 = math_ops.square(y, name="z1") z2 = math_ops.sqrt(y, name="z2") grad_debugger_1 = debug_gradients.GradientsDebugger() with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y): gradient_descent.GradientDescentOptimizer(0.1).minimize(z1) grad_debugger_2 = debug_gradients.GradientsDebugger() with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y): gradient_descent.GradientDescentOptimizer(0.1).minimize(z2) dz1_dy = grad_debugger_1.gradient_tensor(y) dz2_dy = grad_debugger_2.gradient_tensor(y) self.assertIsInstance(dz1_dy, ops.Tensor) self.assertIsInstance(dz2_dy, ops.Tensor) self.assertIsNot(dz1_dy, dz2_dy) self.sess.run(variables.global_variables_initializer()) self.assertAllClose(5.0**2, self.sess.run(z1)) self.assertAllClose(5.0**0.5, self.sess.run(z2)) self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy)) self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy)) def testGradientsValuesFromDumpWorks(self): y = math_ops.add(self.w, -1.0, name="y") z = math_ops.square(y, name="z") grad_debugger = debug_gradients.GradientsDebugger() with grad_debugger.watch_gradients_by_tensors(self.sess.graph, [self.w, self.u, y]): train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z) self.sess.run(variables.global_variables_initializer()) run_options = config_pb2.RunOptions(output_partition_graphs=True) dump_dir = tempfile.mkdtemp() debug_url = "file://" + dump_dir debug_utils.watch_graph(run_options, self.sess.graph, debug_urls=debug_url) run_metadata = config_pb2.RunMetadata() self.assertAllClose(2.0, self.sess.run(self.u)) self.sess.run(train_op, options=run_options, run_metadata=run_metadata) self.assertAllClose(-1.0, self.sess.run(self.u)) dump = debug_data.DebugDumpDir( dump_dir, partition_graphs=run_metadata.partition_graphs) dump.set_python_graph(self.sess.graph) y_grad_values = debug_gradients.gradient_values_from_dump( grad_debugger, y, dump) self.assertEqual(1, len(y_grad_values)) self.assertAllClose(10.0, y_grad_values[0]) w_grad_values = debug_gradients.gradient_values_from_dump( grad_debugger, self.w, dump) self.assertEqual(1, len(w_grad_values)) self.assertAllClose(10.0, w_grad_values[0]) u_grad_values = debug_gradients.gradient_values_from_dump( grad_debugger, self.u, dump) self.assertEqual(1, len(u_grad_values)) self.assertAllClose(30.0, u_grad_values[0]) with self.assertRaisesRegex( LookupError, r"This GradientsDebugger has not received any gradient tensor for " r"x-tensor v:0"): debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump) # Cleanup. file_io.delete_recursively(dump_dir) if __name__ == "__main__": googletest.main()
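# A condensed, hedged sketch (not an additional test case) of the
# GradientsDebugger workflow exercised by the cases above. It reuses the
# imports at the top of this module and assumes TF1 graph mode, as the
# run_v1_only decorator on the test class implies; variable names are
# illustrative.
def _gradients_debugger_sketch():
  with ops.Graph().as_default():
    u = variables.Variable(2.0, name="u")
    v = variables.Variable(3.0, name="v")
    w = math_ops.multiply(u.value(), v.value(), name="w")

    # Wrap the tensor of interest so its gradient gets captured.
    grad_debugger = debug_gradients.GradientsDebugger()
    y = math_ops.add(grad_debugger.identify_gradient(w), -1.0, name="y")

    # Build the gradient graph inside the debugger's context manager so the
    # gradient w.r.t. w is registered with this debugger.
    with grad_debugger:
      grads = gradients_impl.gradients(y, [u, v])

    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      # For y = u * v - 1: dy/dw = 1.0, dy/du = v = 3.0, dy/dv = u = 2.0.
      print(sess.run(grad_debugger.gradient_tensor(w)))
      print(sess.run(grads))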
"""The tests for the REST switch platform.""" import asyncio import aiohttp from homeassistant.components.rest import DOMAIN import homeassistant.components.rest.switch as rest from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN from homeassistant.const import ( CONF_HEADERS, CONF_NAME, CONF_PARAMS, CONF_PLATFORM, CONF_RESOURCE, CONTENT_TYPE_JSON, HTTP_INTERNAL_SERVER_ERROR, HTTP_NOT_FOUND, HTTP_OK, ) from homeassistant.helpers.template import Template from homeassistant.setup import async_setup_component from tests.common import assert_setup_component """Tests for setting up the REST switch platform.""" NAME = "foo" METHOD = "post" RESOURCE = "http://localhost/" STATE_RESOURCE = RESOURCE HEADERS = {"Content-type": CONTENT_TYPE_JSON} AUTH = None PARAMS = None async def test_setup_missing_config(hass): """Test setup with configuration missing required entries.""" assert not await rest.async_setup_platform(hass, {CONF_PLATFORM: DOMAIN}, None) async def test_setup_missing_schema(hass): """Test setup with resource missing schema.""" assert not await rest.async_setup_platform( hass, {CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "localhost"}, None, ) async def test_setup_failed_connect(hass, aioclient_mock): """Test setup when connection error occurs.""" aioclient_mock.get("http://localhost", exc=aiohttp.ClientError) assert not await rest.async_setup_platform( hass, {CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "http://localhost"}, None, ) async def test_setup_timeout(hass, aioclient_mock): """Test setup when connection timeout occurs.""" aioclient_mock.get("http://localhost", exc=asyncio.TimeoutError()) assert not await rest.async_setup_platform( hass, {CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "http://localhost"}, None, ) async def test_setup_minimum(hass, aioclient_mock): """Test setup with minimum configuration.""" aioclient_mock.get("http://localhost", status=HTTP_OK) with assert_setup_component(1, SWITCH_DOMAIN): assert await async_setup_component( hass, SWITCH_DOMAIN, { SWITCH_DOMAIN: { CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "http://localhost", } }, ) await hass.async_block_till_done() assert aioclient_mock.call_count == 1 async def test_setup_query_params(hass, aioclient_mock): """Test setup with query params.""" aioclient_mock.get("http://localhost/?search=something", status=HTTP_OK) with assert_setup_component(1, SWITCH_DOMAIN): assert await async_setup_component( hass, SWITCH_DOMAIN, { SWITCH_DOMAIN: { CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "http://localhost", CONF_PARAMS: {"search": "something"}, } }, ) await hass.async_block_till_done() print(aioclient_mock) assert aioclient_mock.call_count == 1 async def test_setup(hass, aioclient_mock): """Test setup with valid configuration.""" aioclient_mock.get("http://localhost", status=HTTP_OK) assert await async_setup_component( hass, SWITCH_DOMAIN, { SWITCH_DOMAIN: { CONF_PLATFORM: DOMAIN, CONF_NAME: "foo", CONF_RESOURCE: "http://localhost", CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON}, rest.CONF_BODY_ON: "custom on text", rest.CONF_BODY_OFF: "custom off text", } }, ) await hass.async_block_till_done() assert aioclient_mock.call_count == 1 assert_setup_component(1, SWITCH_DOMAIN) async def test_setup_with_state_resource(hass, aioclient_mock): """Test setup with valid configuration.""" aioclient_mock.get("http://localhost", status=HTTP_NOT_FOUND) aioclient_mock.get("http://localhost/state", status=HTTP_OK) assert await async_setup_component( hass, SWITCH_DOMAIN, { SWITCH_DOMAIN: { CONF_PLATFORM: DOMAIN, CONF_NAME: "foo", CONF_RESOURCE: 
"http://localhost", rest.CONF_STATE_RESOURCE: "http://localhost/state", CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON}, rest.CONF_BODY_ON: "custom on text", rest.CONF_BODY_OFF: "custom off text", } }, ) await hass.async_block_till_done() assert aioclient_mock.call_count == 1 assert_setup_component(1, SWITCH_DOMAIN) """Tests for REST switch platform.""" def _setup_test_switch(hass): body_on = Template("on", hass) body_off = Template("off", hass) switch = rest.RestSwitch( NAME, RESOURCE, STATE_RESOURCE, METHOD, HEADERS, PARAMS, AUTH, body_on, body_off, None, 10, True, ) switch.hass = hass return switch, body_on, body_off def test_name(hass): """Test the name.""" switch, body_on, body_off = _setup_test_switch(hass) assert NAME == switch.name def test_is_on_before_update(hass): """Test is_on in initial state.""" switch, body_on, body_off = _setup_test_switch(hass) assert switch.is_on is None async def test_turn_on_success(hass, aioclient_mock): """Test turn_on.""" aioclient_mock.post(RESOURCE, status=HTTP_OK) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_turn_on() assert body_on.template == aioclient_mock.mock_calls[-1][2].decode() assert switch.is_on async def test_turn_on_status_not_ok(hass, aioclient_mock): """Test turn_on when error status returned.""" aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_turn_on() assert body_on.template == aioclient_mock.mock_calls[-1][2].decode() assert switch.is_on is None async def test_turn_on_timeout(hass, aioclient_mock): """Test turn_on when timeout occurs.""" aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_turn_on() assert switch.is_on is None async def test_turn_off_success(hass, aioclient_mock): """Test turn_off.""" aioclient_mock.post(RESOURCE, status=HTTP_OK) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_turn_off() assert body_off.template == aioclient_mock.mock_calls[-1][2].decode() assert not switch.is_on async def test_turn_off_status_not_ok(hass, aioclient_mock): """Test turn_off when error status returned.""" aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_turn_off() assert body_off.template == aioclient_mock.mock_calls[-1][2].decode() assert switch.is_on is None async def test_turn_off_timeout(hass, aioclient_mock): """Test turn_off when timeout occurs.""" aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError()) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_turn_on() assert switch.is_on is None async def test_update_when_on(hass, aioclient_mock): """Test update when switch is on.""" switch, body_on, body_off = _setup_test_switch(hass) aioclient_mock.get(RESOURCE, text=body_on.template) await switch.async_update() assert switch.is_on async def test_update_when_off(hass, aioclient_mock): """Test update when switch is off.""" switch, body_on, body_off = _setup_test_switch(hass) aioclient_mock.get(RESOURCE, text=body_off.template) await switch.async_update() assert not switch.is_on async def test_update_when_unknown(hass, aioclient_mock): """Test update when unknown status returned.""" aioclient_mock.get(RESOURCE, text="unknown status") switch, body_on, body_off = _setup_test_switch(hass) await switch.async_update() assert switch.is_on is None async def test_update_timeout(hass, aioclient_mock): """Test 
update when timeout occurs.""" aioclient_mock.get(RESOURCE, exc=asyncio.TimeoutError()) switch, body_on, body_off = _setup_test_switch(hass) await switch.async_update() assert switch.is_on is None
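# A hedged sketch of one more setup case in the same style as the tests above,
# reusing this module's imports; the templated on/off bodies and the name
# "templated" are illustrative, not taken from the platform's documentation.
async def test_setup_with_templated_bodies_sketch(hass, aioclient_mock):
    """Test setup when the on/off bodies are given as templates."""
    aioclient_mock.get("http://localhost", status=HTTP_OK)
    assert await async_setup_component(
        hass,
        SWITCH_DOMAIN,
        {
            SWITCH_DOMAIN: {
                CONF_PLATFORM: DOMAIN,
                CONF_NAME: "templated",
                CONF_RESOURCE: "http://localhost",
                rest.CONF_BODY_ON: "{{ 'on' }}",
                rest.CONF_BODY_OFF: "{{ 'off' }}",
            }
        },
    )
    await hass.async_block_till_done()

    # Setup performs a single GET against the configured resource.
    assert aioclient_mock.call_count == 1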
# Copyright 2015 Brocade Communications System, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from neutron import context from neutron.db import db_base_plugin_v2 from neutron.db import external_net_db from neutron.db import models_v2 from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.extensions import l3 from neutron.openstack.common import uuidutils from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron.tests.unit.extensions import test_l3 as test_l3_plugin from neutron.tests.unit import testlib_api from networking_brocade.vyatta.common import utils as vyatta_utils from networking_brocade.vyatta.vrouter import neutron_plugin as vrouter_plugin _uuid = uuidutils.generate_uuid class FakeVRouterDriver(mock.Mock): def create_router(self, *args, **kwargs): return _uuid() class VRouterTestPlugin(vrouter_plugin.VyattaVRouterMixin, db_base_plugin_v2.NeutronDbPluginV2, external_net_db.External_net_db_mixin, sg_db_rpc.SecurityGroupServerRpcMixin): def delete_port(self, context, port_id, l3_port_check=False): super(VRouterTestPlugin, self).delete_port(context, port_id) class TestVyattaVRouterPlugin(testlib_api.SqlTestCase): def setUp(self): super(TestVyattaVRouterPlugin, self).setUp() self.setup_coreplugin(__name__ + '.' 
+ VRouterTestPlugin.__name__) self._mock('eventlet.greenthread.sleep') self.driver = mock.Mock(wraps=FakeVRouterDriver()) fake_driver_mock = mock.Mock() fake_driver_mock.return_value = self.driver self._mock( 'networking_brocade.vyatta.vrouter.driver.VyattaVRouterDriver', fake_driver_mock) self.context = context.get_admin_context() self.plugin = VRouterTestPlugin() session = self.context.session with session.begin(subtransactions=True): self.ext_net = self._make_net('ext', is_external=True) self.ext_subnet = self._make_subnet( 'ext', '10.10.10', self.ext_net['id']) self.ext_port = self._make_port('f0', self.ext_net['id']) self._make_fixed_ip( self.ext_port['id'], self.ext_net['id'], self.ext_subnet['id'], '10.10.10.22') def _mock(self, target, new=mock.DEFAULT): patcher = mock.patch(target, new) return patcher.start() def _mock_object(self, target, attribute, new=mock.DEFAULT): patcher = mock.patch.object(target, attribute, new) return patcher.start() def _make_net(self, n, is_shared=False, is_external=False): session = self.context.session network = models_v2.Network(tenant_id='fake-tenant-id', name='test-network-{0}'.format(n), status='ACTIVE', admin_state_up=True, shared=is_shared) session.add(network) session.flush() if is_external: extnet = external_net_db.ExternalNetwork( network_id=network['id']) session.add(extnet) session.flush() return network def _make_subnet(self, n, cidr_prefix, network_id): session = self.context.session subnet = models_v2.Subnet(tenant_id='fake-tenant-id', name='test-subnet-{0}'.format(n), network_id=network_id, ip_version=4, cidr='{0}.0/24'.format(cidr_prefix), gateway_ip='{0}.1'.format(cidr_prefix), enable_dhcp=True, shared=False) session.add(subnet) session.flush() ippool = models_v2.IPAllocationPool( subnet_id=subnet['id'], first_ip='{0}.1'.format(cidr_prefix), last_ip='{0}.254'.format(cidr_prefix)) session.add(ippool) session.flush() iprange = models_v2.IPAvailabilityRange( allocation_pool_id=ippool['id'], first_ip='{0}.1'.format(cidr_prefix), last_ip='{0}.254'.format(cidr_prefix)) session.add(iprange) session.flush() return subnet def _make_fixed_ip(self, port_id, network_id, subnet_id, ip): session = self.context.session ip_allocation = models_v2.IPAllocation( port_id=port_id, ip_address=ip, subnet_id=subnet_id, network_id=network_id) session.add(ip_allocation) session.flush() return ip_allocation def _make_port(self, port, network_id, device_id=None, device_owner=None): session = self.context.session port = models_v2.Port(tenant_id='fake-tenant-id', name='', network_id=network_id, mac_address='aa:bb:cc:dd:ee:{0}'.format(port), admin_state_up=True, status='ACTIVE', device_id=device_id or '', device_owner=device_owner or '') session.add(port) session.flush() return port def test_create_router(self): router_data = { 'router': {'name': 'test_router1', 'admin_state_up': True}} result = self.plugin.create_router(self.context, router_data) self.assertTrue(uuidutils.is_uuid_like(result.get('id'))) self.driver.create_router.assert_called_once_with(mock.ANY) def test_update_router1(self): router_data = { 'router': { 'name': 'test_router1', 'admin_state_up': True, 'external_gateway_info': {}, } } router = self.plugin.create_router(self.context, router_data) router_new = self.plugin.update_router(self.context, router['id'], { 'router': { 'name': 'router2', 'external_gateway_info': {}, } }) self.assertEqual(router_new['name'], 'router2') def test_update_router2(self): self._mock_object(self.plugin, '_validate_routes_nexthop') router_data = { 'router': { 'name': 
'test_router2', 'admin_state_up': True, 'external_gateway_info': {}, }, } router = self.plugin.create_router(self.context, router_data) routes = [ {'destination': '10.1.0.0/24', 'nexthop': '192.168.1.1'}, {'destination': '10.2.0.0/24', 'nexthop': '192.168.1.1'}, {'destination': '10.3.0.0/24', 'nexthop': '192.168.1.1'} ] set_routes = [] update_data = { 'router': { 'id': router['id'], 'routes': set_routes, }, } RouteRule = vyatta_utils.RouteRule for rules_add in routes: rules_add = [rules_add] set_routes.extend(rules_add) rules_del = set_routes[:-2] set_routes[:-2] = [] self.plugin.update_router(self.context, router['id'], update_data) rules_add = tuple(RouteRule(dest_cidr=x['destination'], next_hop=x['nexthop']) for x in rules_add) rules_del = tuple(RouteRule(dest_cidr=x['destination'], next_hop=x['nexthop']) for x in rules_del) self.driver.update_static_routes.assert_called_once_with( self.context, router['id'], rules_add, rules_del) self.driver.reset_mock() def test_get_router(self): router_data = { 'router': {'name': 'test_router1', 'admin_state_up': True}} router = self.plugin.create_router(self.context, router_data) router = self.plugin.get_router(self.context, router['id']) self.assertTrue(uuidutils.is_uuid_like(router.get('id'))) self.assertRaises(l3.RouterNotFound, self.plugin.get_router, self.context, uuidutils.generate_uuid()) def test_delete_router(self): router_data = { 'router': {'name': 'test_router1', 'admin_state_up': True}} router = self.plugin.create_router(self.context, router_data) self.plugin.delete_router(self.context, router['id']) self.driver.delete_router.assert_called_once_with( self.context, router['id']) self.assertRaises( l3.RouterNotFound, self.plugin.delete_router, self.context, router['id']) def test_router_interface_by_subnet(self): router_data = { 'router': {'name': 'test_router1', 'admin_state_up': True}} router = self.plugin.create_router(self.context, router_data) result = self.plugin.add_router_interface(self.context, router['id'], { 'subnet_id': self.ext_subnet['id'], }) self.driver.attach_interface.assert_called_once_with( self.context, router['id'], result['port_id']) result = self.plugin.remove_router_interface( self.context, router['id'], { 'subnet_id': self.ext_subnet['id']}) self.driver.detach_interface.assert_called_once_with( self.context, router['id'], result['port_id']) def test_router_interface_by_port(self): router_data = { 'router': {'name': 'test_router1', 'admin_state_up': True}} router = self.plugin.create_router(self.context, router_data) self.plugin.add_router_interface(self.context, router['id'], { 'port_id': self.ext_port['id'], }) self.driver.attach_interface.assert_called_once_with( self.context, router['id'], self.ext_port['id']) self.plugin.remove_router_interface( self.context, router['id'], { 'port_id': self.ext_port['id'] }) self.driver.detach_interface.assert_called_once_with( self.context, router['id'], self.ext_port['id']) def test_floatingip(self): router_data = { 'router': {'name': 'test_router1', 'admin_state_up': True}} router = self.plugin.create_router(self.context, router_data) floatingip = self.plugin.create_floatingip( self.context, {'floatingip': {'floating_network_id': self.ext_net['id']}}) self.addCleanup(self.plugin.delete_floatingip, self.context, floatingip['id']) self.assertTrue( floatingip['floating_ip_address'].startswith('10.10.10.')) self.plugin.associate_floatingip( self.context, router['id'], floatingip) self.driver.assign_floating_ip.assert_called_once_with( self.context, router['id'], 
floatingip['floating_ip_address'], None) self.plugin.disassociate_floatingip( self.context, router['id'], floatingip) self.driver.unassign_floating_ip.assert_called_once_with( self.context, router['id'], floatingip['floating_ip_address'], None) self.plugin.update_floatingip(self.context, floatingip['id'], { 'floatingip': { 'router_id': router['id'], }}) self.driver.assign_floating_ip.assert_called_once_with( self.context, router['id'], floatingip['floating_ip_address'], None) CORE_PLUGIN_CLASS = ( "networking_brocade.vyatta.tests.test_vrouter_neutron_plugin" ".TestVRouterNatPlugin") L3_PLUGIN_CLASS = ( "networking_brocade.vyatta.vrouter.neutron_plugin.VyattaVRouterMixin") class TestVRouterNatPlugin(test_l3_plugin.TestL3NatBasePlugin, sg_db_rpc.SecurityGroupServerRpcMixin): supported_extension_aliases = ["external-net"] class VRouterTestCase(test_db_plugin.NeutronDbPluginV2TestCase, test_l3_plugin.L3NatTestCaseBase): def setUp(self, core_plugin=None, l3_plugin=None, ext_mgr=None): if not core_plugin: core_plugin = CORE_PLUGIN_CLASS if not l3_plugin: l3_plugin = L3_PLUGIN_CLASS service_plugins = {'l3_plugin_name': l3_plugin} self._mock('eventlet.greenthread.sleep') self._mock( 'networking_brocade.vyatta.vrouter.driver.' 'VyattaVRouterDriver', FakeVRouterDriver) cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_override('tenant_id', 'tenant_a', 'VROUTER') super(VRouterTestCase, self).setUp( plugin=core_plugin, service_plugins=service_plugins, ext_mgr=test_l3_plugin.L3TestExtensionManager()) self.setup_notification_driver() def _mock(self, target, new=mock.DEFAULT): patcher = mock.patch(target, new) return patcher.start() def test_router_add_interface_ipv6_subnet(self): self.skipTest("Fails because router port is created with" " empty device owner") def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self): self.skipTest("Fails because router port is created with" " empty device owner") def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): self.skipTest("Fails because router port is created with" " empty device owner") def test_router_add_gateway_no_subnet(self): self.skipTest("Skip because it is not supported.") def test_router_specify_id_backend(self): self.skipTest("Router id is autogenerated") def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): self.skipTest("Router external gateway supports only one IP address")
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator, ) from google.cloud.resourcemanager_v3.types import folders class ListFoldersPager: """A pager for iterating through ``list_folders`` requests. This class thinly wraps an initial :class:`google.cloud.resourcemanager_v3.types.ListFoldersResponse` object, and provides an ``__iter__`` method to iterate through its ``folders`` field. If there are more pages, the ``__iter__`` method will make additional ``ListFolders`` requests and continue to iterate through the ``folders`` field on the corresponding responses. All the usual :class:`google.cloud.resourcemanager_v3.types.ListFoldersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., folders.ListFoldersResponse], request: folders.ListFoldersRequest, response: folders.ListFoldersResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.resourcemanager_v3.types.ListFoldersRequest): The initial request object. response (google.cloud.resourcemanager_v3.types.ListFoldersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = folders.ListFoldersRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterator[folders.ListFoldersResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterator[folders.Folder]: for page in self.pages: yield from page.folders def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListFoldersAsyncPager: """A pager for iterating through ``list_folders`` requests. This class thinly wraps an initial :class:`google.cloud.resourcemanager_v3.types.ListFoldersResponse` object, and provides an ``__aiter__`` method to iterate through its ``folders`` field. If there are more pages, the ``__aiter__`` method will make additional ``ListFolders`` requests and continue to iterate through the ``folders`` field on the corresponding responses. All the usual :class:`google.cloud.resourcemanager_v3.types.ListFoldersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. 
""" def __init__( self, method: Callable[..., Awaitable[folders.ListFoldersResponse]], request: folders.ListFoldersRequest, response: folders.ListFoldersResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.resourcemanager_v3.types.ListFoldersRequest): The initial request object. response (google.cloud.resourcemanager_v3.types.ListFoldersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = folders.ListFoldersRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterator[folders.ListFoldersResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterator[folders.Folder]: async def async_generator(): async for page in self.pages: for response in page.folders: yield response return async_generator() def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class SearchFoldersPager: """A pager for iterating through ``search_folders`` requests. This class thinly wraps an initial :class:`google.cloud.resourcemanager_v3.types.SearchFoldersResponse` object, and provides an ``__iter__`` method to iterate through its ``folders`` field. If there are more pages, the ``__iter__`` method will make additional ``SearchFolders`` requests and continue to iterate through the ``folders`` field on the corresponding responses. All the usual :class:`google.cloud.resourcemanager_v3.types.SearchFoldersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., folders.SearchFoldersResponse], request: folders.SearchFoldersRequest, response: folders.SearchFoldersResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.resourcemanager_v3.types.SearchFoldersRequest): The initial request object. response (google.cloud.resourcemanager_v3.types.SearchFoldersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = folders.SearchFoldersRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterator[folders.SearchFoldersResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterator[folders.Folder]: for page in self.pages: yield from page.folders def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class SearchFoldersAsyncPager: """A pager for iterating through ``search_folders`` requests. 
This class thinly wraps an initial :class:`google.cloud.resourcemanager_v3.types.SearchFoldersResponse` object, and provides an ``__aiter__`` method to iterate through its ``folders`` field. If there are more pages, the ``__aiter__`` method will make additional ``SearchFolders`` requests and continue to iterate through the ``folders`` field on the corresponding responses. All the usual :class:`google.cloud.resourcemanager_v3.types.SearchFoldersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., Awaitable[folders.SearchFoldersResponse]], request: folders.SearchFoldersRequest, response: folders.SearchFoldersResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.resourcemanager_v3.types.SearchFoldersRequest): The initial request object. response (google.cloud.resourcemanager_v3.types.SearchFoldersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = folders.SearchFoldersRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterator[folders.SearchFoldersResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterator[folders.Folder]: async def async_generator(): async for page in self.pages: for response in page.folders: yield response return async_generator() def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
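# A brief usage sketch: the pagers above are normally obtained from the
# generated FoldersClient rather than constructed directly. This assumes the
# google-cloud-resourcemanager client library is installed and credentials are
# configured; the parent resource name below is illustrative.
def _list_folders_sketch(parent: str = "organizations/123456789"):
    from google.cloud import resourcemanager_v3

    client = resourcemanager_v3.FoldersClient()

    # Iterating the returned ListFoldersPager transparently follows
    # next_page_token, issuing further ListFolders requests as needed.
    for folder in client.list_folders(parent=parent):
        print(folder.name, folder.display_name)

    # Pages can also be walked explicitly when page-level data is needed.
    for page in client.list_folders(parent=parent).pages:
        print(len(page.folders))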
from collections import deque import logging log = logging.getLogger(__name__) def table_entry_size(name, value): """ Calculates the size of a single entry This size is mostly irrelevant to us and defined specifically to accommodate memory management for lower level implementions. The 32 extra bytes are considered the "maximum" overhead that would be required to represent each entry in the table. See RFC7541 Section 4.1 """ return 32 + len(name) + len(value) class HeaderTable(object): """ Implements the combined static and dynamic header table The name and value arguments for all the functions should ONLY be byte strings (b'') however this is not strictly enforced in the interface. See RFC7541 Section 2.3 """ #: Default maximum size of the dynamic table. See #: RFC7540 Section 6.5.2. DEFAULT_SIZE = 4096 #: Constant list of static headers. See RFC7541 Section #: 2.3.1 and Appendix A STATIC_TABLE = ( (b':authority' , b'' ), (b':method' , b'GET' ), (b':method' , b'POST' ), (b':path' , b'/' ), (b':path' , b'/index.html' ), (b':scheme' , b'http' ), (b':scheme' , b'https' ), (b':status' , b'200' ), (b':status' , b'204' ), (b':status' , b'206' ), (b':status' , b'304' ), (b':status' , b'400' ), (b':status' , b'404' ), (b':status' , b'500' ), (b'accept-charset' , b'' ), (b'accept-encoding' , b'gzip, deflate'), (b'accept-language' , b'' ), (b'accept-ranges' , b'' ), (b'accept' , b'' ), (b'access-control-allow-origin' , b'' ), (b'age' , b'' ), (b'allow' , b'' ), (b'authorization' , b'' ), (b'cache-control' , b'' ), (b'content-disposition' , b'' ), (b'content-encoding' , b'' ), (b'content-language' , b'' ), (b'content-length' , b'' ), (b'content-location' , b'' ), (b'content-range' , b'' ), (b'content-type' , b'' ), (b'cookie' , b'' ), (b'date' , b'' ), (b'etag' , b'' ), (b'expect' , b'' ), (b'expires' , b'' ), (b'from' , b'' ), (b'host' , b'' ), (b'if-match' , b'' ), (b'if-modified-since' , b'' ), (b'if-none-match' , b'' ), (b'if-range' , b'' ), (b'if-unmodified-since' , b'' ), (b'last-modified' , b'' ), (b'link' , b'' ), (b'location' , b'' ), (b'max-forwards' , b'' ), (b'proxy-authenticate' , b'' ), (b'proxy-authorization' , b'' ), (b'range' , b'' ), (b'referer' , b'' ), (b'refresh' , b'' ), (b'retry-after' , b'' ), (b'server' , b'' ), (b'set-cookie' , b'' ), (b'strict-transport-security' , b'' ), (b'transfer-encoding' , b'' ), (b'user-agent' , b'' ), (b'vary' , b'' ), (b'via' , b'' ), (b'www-authenticate' , b'' ), ) def __init__(self): self._maxsize = HeaderTable.DEFAULT_SIZE self.resized = False self.dynamic_entries = deque() def get_by_index(self, index): """ Returns the entry specified by index Note that the table is 1-based ie an index of 0 is invalid. This is due to the fact that a zero value index signals that a completely unindexed header follows. The entry will either be from the static table or the dynamic table depending on the value of index. """ index -= 1 if index < 0: return None # TODO throw HPACKException here if index < len(HeaderTable.STATIC_TABLE): return HeaderTable.STATIC_TABLE[index] index -= len(HeaderTable.STATIC_TABLE) if index < len(self.dynamic_entries): return self.dynamic_entries[index] return None # TODO throw HPACKException here def __repr__(self): return "HeaderTable(%d, %s, %r)" % ( self._maxsize, self.resized, self.dynamic_entries ) def add(self, name, value): """ Adds a new entry to the table We reduce the table size if the entry will make the table size greater than maxsize. 
""" # We just clear the table if the entry is too big if table_entry_size(name, value) > self._maxsize: self.dynamic_entries.clear() # Add new entry if the table actually has a size elif self._maxsize > 0: self.dynamic_entries.appendleft((name, value)) self._shrink() def search(self, name, value): """ Searches the table for the entry specified by name and value Returns one of the following: - ``None``, no match at all - ``(index, name, None)`` for partial matches on name only. - ``(index, name, value)`` for perfect matches. """ offset = len(HeaderTable.STATIC_TABLE) partial = None for (i, (n, v)) in enumerate(HeaderTable.STATIC_TABLE): if n == name: if v == value: return (i + 1, n, v) elif partial is None: partial = (i + 1, n, None) for (i, (n, v)) in enumerate(self.dynamic_entries): if n == name: if v == value: return (i + offset + 1, n, v) elif partial is None: partial = (i + offset + 1, n, None) return partial @property def maxsize(self): return self._maxsize @maxsize.setter def maxsize(self, newmax): newmax = int(newmax) log.debug("Resizing header table to %d from %d", newmax, self._maxsize) oldmax = self._maxsize self._maxsize = newmax self.resized = (newmax != oldmax) if newmax <= 0: self.dynamic_entries.clear() elif oldmax > newmax: self._shrink() def _size(self): """ Calculates the size of the dynamic table. See table_entry_size See RFC7541 Section 4.1 """ return sum(table_entry_size(*entry) for entry in self.dynamic_entries) def _shrink(self): """ Shrinks the dynamic table to be at or below maxsize """ cursize = self._size() while cursize > self._maxsize: (name, value) = self.dynamic_entries.pop() cursize -= table_entry_size(name, value) log.debug("Evicting %s: %s from the header table", name, value)
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import time from oslo_concurrency import processutils as putils import six from cinder import exception from cinder.openstack.common import fileutils from cinder.i18n import _, _LI, _LW, _LE from cinder.openstack.common import log as logging from cinder import utils from cinder.volume.targets import iscsi from cinder.volume import utils as vutils LOG = logging.getLogger(__name__) class TgtAdm(iscsi.ISCSITarget): """Target object for block storage devices. Base class for target object, where target is data transport mechanism (target) specific calls. This includes things like create targets, attach, detach etc. """ VOLUME_CONF = """ <target %s> backing-store %s driver iscsi write-cache %s </target> """ VOLUME_CONF_WITH_CHAP_AUTH = """ <target %s> backing-store %s driver iscsi %s write-cache %s </target> """ def __init__(self, *args, **kwargs): super(TgtAdm, self).__init__(*args, **kwargs) self.volumes_dir = self.configuration.safe_get('volumes_dir') def _get_target(self, iqn): (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line: parsed = line.split() tid = parsed[1] return tid[:-1] return None def _verify_backing_lun(self, iqn, tid): backing_lun = True capture = False target_info = [] (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line and "Target %s" % tid in line: capture = True if capture: target_info.append(line) if iqn not in line and 'Target ' in line: capture = False if ' LUN: 1' not in target_info: backing_lun = False return backing_lun def _recreate_backing_lun(self, iqn, tid, name, path): LOG.warning(_LW('Attempting recreate of backing lun...')) # Since we think the most common case of this is a dev busy # (create vol from snapshot) we're going to add a sleep here # this will hopefully give things enough time to stabilize # how long should we wait?? 
I have no idea, let's go big # and error on the side of caution time.sleep(10) try: (out, err) = utils.execute('tgtadm', '--lld', 'iscsi', '--op', 'new', '--mode', 'logicalunit', '--tid', tid, '--lun', '1', '-b', path, run_as_root=True) LOG.debug('StdOut from recreate backing lun: %s' % out) LOG.debug('StdErr from recreate backing lun: %s' % err) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to recover attempt to create " "iscsi backing lun for volume " "id:%(vol_id)s: %(e)s") % {'vol_id': name, 'e': e}) def _iscsi_location(self, ip, target, iqn, lun=None): return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port, target, iqn, lun) def _get_iscsi_target(self, context, vol_id): return 0 def _get_target_and_lun(self, context, volume): lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 iscsi_target = 0 # NOTE(jdg): Not used by tgtadm return iscsi_target, lun def _ensure_iscsi_targets(self, context, host): """Ensure that target ids have been created in datastore.""" # NOTE(jdg): tgtadm doesn't use the iscsi_targets table # TODO(jdg): In the future move all of the dependent stuff into the # cooresponding target admin class host_iscsi_targets = self.db.iscsi_target_count_by_host(context, host) if host_iscsi_targets >= self.configuration.iscsi_num_targets: return # NOTE(vish): Target ids start at 1, not 0. target_end = self.configuration.iscsi_num_targets + 1 for target_num in xrange(1, target_end): target = {'host': host, 'target_num': target_num} self.db.iscsi_target_create_safe(context, target) def _get_target_chap_auth(self, name): volumes_dir = self.volumes_dir vol_id = name.split(':')[1] volume_path = os.path.join(volumes_dir, vol_id) try: with open(volume_path, 'r') as f: volume_conf = f.read() except Exception as e: LOG.debug('Failed to open config for %(vol_id)s: %(e)s' % {'vol_id': vol_id, 'e': six.text_type(e)}) return None m = re.search('incominguser (\w+) (\w+)', volume_conf) if m: return (m.group(1), m.group(2)) LOG.debug('Failed to find CHAP auth from config for %s' % vol_id) return None def ensure_export(self, context, volume, volume_path): chap_auth = None old_name = None # FIXME (jdg): This appears to be broken in existing code # we recreate the iscsi target but we pass in None # for CHAP, so we just recreated without CHAP even if # we had it set on initial create iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) iscsi_write_cache = self.configuration.get('iscsi_write_cache', 'on') self.create_iscsi_target( iscsi_name, 1, 0, volume_path, chap_auth, check_exit_code=False, old_name=old_name, iscsi_write_cache=iscsi_write_cache) def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): # Note(jdg) tid and lun aren't used by TgtAdm but remain for # compatibility # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078 # for now, since we intermittently hit target already exists we're # adding some debug info to try and pinpoint what's going on (out, err) = utils.execute('tgtadm', '--lld', 'iscsi', '--op', 'show', '--mode', 'target', run_as_root=True) LOG.debug("Targets prior to update: %s" % out) fileutils.ensure_tree(self.volumes_dir) vol_id = name.split(':')[1] write_cache = kwargs.get('iscsi_write_cache', 'on') if chap_auth is None: volume_conf = self.VOLUME_CONF % (name, path, write_cache) else: chap_str = re.sub('^IncomingUser ', 'incominguser ', chap_auth) volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name, path, chap_str, write_cache) LOG.debug('Creating iscsi_target for: %s', 
vol_id) volumes_dir = self.volumes_dir volume_path = os.path.join(volumes_dir, vol_id) if os.path.exists(volume_path): LOG.warning(_LW('Persistence file already exists for volume, ' 'found file at: %s'), volume_path) f = open(volume_path, 'w+') f.write(volume_conf) f.close() LOG.debug(('Created volume path %(vp)s,\n' 'content: %(vc)s'), {'vp': volume_path, 'vc': volume_conf}) old_persist_file = None old_name = kwargs.get('old_name', None) if old_name is not None: LOG.debug('Detected old persistence file for volume ' '%{vol}s at %{old_name}s', {'vol': vol_id, 'old_name': old_name}) old_persist_file = os.path.join(volumes_dir, old_name) try: # With the persistent tgts we create them # by creating the entry in the persist file # and then doing an update to get the target # created. (out, err) = utils.execute('tgt-admin', '--update', name, run_as_root=True) LOG.debug("StdOut from tgt-admin --update: %s", out) LOG.debug("StdErr from tgt-admin --update: %s", err) except putils.ProcessExecutionError as e: if "target already exists" in e.stderr: # Adding the additional Warning message below for a clear # ER marker (Ref bug: #1398078). LOG.warning(_LW('Could not create target because ' 'it already exists for volume: %s'), vol_id) LOG.debug('Exception was: %s', e) pass else: LOG.error(_LE("Failed to create iscsi target for volume " "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Grab targets list for debug # Consider adding a check for lun 0 and 1 for tgtadm # before considering this as valid (out, err) = utils.execute('tgtadm', '--lld', 'iscsi', '--op', 'show', '--mode', 'target', run_as_root=True) LOG.debug("Targets after update: %s" % out) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.error(_LE("Failed to create iscsi target for volume " "id:%(vol_id)s. 
Please ensure your tgtd config file " "contains 'include %(volumes_dir)s/*'") % { 'vol_id': vol_id, 'volumes_dir': volumes_dir, }) raise exception.NotFound() # NOTE(jdg): Sometimes we have some issues with the backing lun # not being created, believe this is due to a device busy # or something related, so we're going to add some code # here that verifies the backing lun (lun 1) was created # and we'll try and recreate it if it's not there if not self._verify_backing_lun(iqn, tid): try: self._recreate_backing_lun(iqn, tid, name, path) except putils.ProcessExecutionError: os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Finally check once more and if no go, fail and punt if not self._verify_backing_lun(iqn, tid): os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) if old_persist_file is not None and os.path.exists(old_persist_file): os.unlink(old_persist_file) return tid def create_export(self, context, volume, volume_path): """Creates an export for a logical volume.""" iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, volume['name']) iscsi_target, lun = self._get_target_and_lun(context, volume) # Verify we haven't setup a CHAP creds file already # if DNE no big deal, we'll just create it current_chap_auth = self._get_target_chap_auth(iscsi_name) if current_chap_auth: (chap_username, chap_password) = current_chap_auth else: chap_username = vutils.generate_username() chap_password = vutils.generate_password() chap_auth = self._iscsi_authentication('IncomingUser', chap_username, chap_password) # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need # should clean this all up at some point in the future iscsi_write_cache = self.configuration.get('iscsi_write_cache', 'on') tid = self.create_iscsi_target(iscsi_name, iscsi_target, 0, volume_path, chap_auth, iscsi_write_cache=iscsi_write_cache) data = {} data['location'] = self._iscsi_location( self.configuration.iscsi_ip_address, tid, iscsi_name, lun) LOG.debug('Set provider_location to: %s', data['location']) data['auth'] = self._iscsi_authentication( 'CHAP', chap_username, chap_password) return data def remove_export(self, context, volume): try: iscsi_target = self._get_iscsi_target(context, volume['id']) except exception.NotFound: LOG.info(_LI("Skipping remove_export. No iscsi_target " "provisioned for volume: %s"), volume['id']) return try: # NOTE: provider_location may be unset if the volume hasn't # been exported location = volume['provider_location'].split(' ') iqn = location[1] # ietadm show will exit with an error # this export has already been removed self.show_target(iscsi_target, iqn=iqn) except Exception: LOG.info(_LI("Skipping remove_export. 
No iscsi_target " "is presently exported for volume: %s"), volume['id']) return self.remove_iscsi_target(iscsi_target, 0, volume['id'], volume['name']) def initialize_connection(self, volume, connector): iscsi_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties } def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info(_LI('Removing iscsi_target for: %s') % vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self.volumes_dir, vol_uuid_file) if not os.path.exists(volume_path): LOG.warning(_LW('Volume path %s does not exist, ' 'nothing to remove.') % volume_path) return if os.path.isfile(volume_path): iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: # NOTE(vish): --force is a workaround for bug: # https://bugs.launchpad.net/cinder/+bug/1159948 utils.execute('tgt-admin', '--force', '--delete', iqn, run_as_root=True) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to remove iscsi target for volume " "id:%(vol_id)s: %(e)s") % {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # NOTE(jdg): There's a bug in some versions of tgt that # will sometimes fail silently when using the force flag # https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343 # For now work-around by checking if the target was deleted, # if it wasn't, try again without the force. # This will NOT do any good for the case of mutliple sessions # which the force was aded for but it will however address # the cases pointed out in bug: # https://bugs.launchpad.net/cinder/+bug/1304122 if self._get_target(iqn): try: LOG.warning(_LW('Silent failure of target removal ' 'detected, retry....')) utils.execute('tgt-admin', '--delete', iqn, run_as_root=True) except putils.ProcessExecutionError as e: LOG.error(_LE("Failed to remove iscsi target for volume " "id:%(vol_id)s: %(e)s") % {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # NOTE(jdg): This *should* be there still but incase # it's not we don't care, so just ignore it if was # somehow deleted between entry of this method # and here if os.path.exists(volume_path): os.unlink(volume_path) else: LOG.debug('Volume path %s not found at end, ' 'of remove_iscsi_target.' % volume_path) def show_target(self, tid, iqn=None, **kwargs): if iqn is None: raise exception.InvalidParameterValue( err=_('valid iqn needed for show_target')) tid = self._get_target(iqn) if tid is None: raise exception.NotFound()
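# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original driver: it mirrors how
# _get_target() above pulls a target id out of `tgt-admin --show` output and
# how _iscsi_location() formats the provider_location string.  The sample
# output line and the IP/IQN/LUN values below are made up for the example;
# only the parsing and formatting logic is taken from the methods above.
def _tgtadm_demo():
    sample_iqn = 'iqn.2010-10.org.openstack:volume-0000002a'
    sample_show = 'Target 1: %s\n    System information:\n' % sample_iqn

    # Same approach as TgtAdm._get_target(): find the line containing the
    # IQN, take the second whitespace-separated token ("1:") and strip the
    # trailing colon.
    tid = None
    for line in sample_show.split('\n'):
        if sample_iqn in line:
            tid = line.split()[1][:-1]
            break
    assert tid == '1'

    # provider_location is "<ip>:<port>,<target> <iqn> <lun>" as built by
    # _iscsi_location().
    location = '%s:%s,%s %s %s' % ('10.0.0.1', 3260, tid, sample_iqn, 1)
    assert location == '10.0.0.1:3260,1 %s 1' % sample_iqn
    return location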
# Copyright (C) 2013 VMware, Inc # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from oslo_log import log as logging from nova import exception from nova.network import linux_net from nova.network import manager from nova.network import model as network_model from nova import utils from novadocker.i18n import _ from novadocker.virt.docker import network from oslo_config import cfg import random # We need config opts from manager, but pep8 complains, this silences it. assert manager CONF = cfg.CONF CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('vlan_interface', 'nova.manager') CONF.import_opt('flat_interface', 'nova.manager') CONF.import_opt('network_device_mtu', 'nova.objects.network') LOG = logging.getLogger(__name__) class DockerGenericVIFDriver(object): def plug(self, instance, vif): vif_type = vif['type'] LOG.debug('plug vif_type=%(vif_type)s instance=%(instance)s ' 'vif=%(vif)s', {'vif_type': vif_type, 'instance': instance, 'vif': vif}) if vif_type is None: raise exception.NovaException( _("vif_type parameter must be present " "for this vif_driver implementation")) if vif_type == network_model.VIF_TYPE_BRIDGE: self.plug_bridge(instance, vif) elif vif_type == network_model.VIF_TYPE_OVS: if self.ovs_hybrid_required(vif): self.plug_ovs_hybrid(instance, vif) else: self.plug_ovs(instance, vif) elif vif_type == network_model.VIF_TYPE_MIDONET: self.plug_midonet(instance, vif) elif vif_type == network_model.VIF_TYPE_IOVISOR: self.plug_iovisor(instance, vif) else: raise exception.NovaException( _("Unexpected vif_type=%s") % vif_type) def plug_iovisor(self, instance, vif): """Plug docker vif into IOvisor Creates a port on IOvisor and onboards the interface """ if_local_name = 'tap%s' % vif['id'][:11] if_remote_name = 'ns%s' % vif['id'][:11] iface_id = vif['id'] net_id = vif['network']['id'] tenant_id = instance['project_id'] # Device already exists so return. if linux_net.device_exists(if_local_name): return undo_mgr = utils.UndoManager() try: utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type', 'veth', 'peer', 'name', if_remote_name, run_as_root=True) utils.execute('ifc_ctl', 'gateway', 'add_port', if_local_name, run_as_root=True) utils.execute('ifc_ctl', 'gateway', 'ifup', if_local_name, 'access_vm', vif['network']['label'] + "_" + iface_id, vif['address'], 'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id, run_as_root=True) utils.execute('ip', 'link', 'set', if_local_name, 'up', run_as_root=True) except Exception: LOG.exception("Failed to configure network on IOvisor") msg = _('Failed to setup the network, rolling back') undo_mgr.rollback_and_reraise(msg=msg, instance=instance) def plug_ovs(self, instance, vif): if_local_name = 'tap%s' % vif['id'][:11] if_remote_name = 'ns%s' % vif['id'][:11] bridge = vif['network']['bridge'] # Device already exists so return. 
if linux_net.device_exists(if_local_name): return undo_mgr = utils.UndoManager() try: utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type', 'veth', 'peer', 'name', if_remote_name, run_as_root=True) linux_net.create_ovs_vif_port(bridge, if_local_name, network.get_ovs_interfaceid(vif), vif['address'], instance['uuid']) utils.execute('ip', 'link', 'set', if_local_name, 'up', run_as_root=True) except Exception: LOG.exception("Failed to configure network") msg = _('Failed to setup the network, rolling back') undo_mgr.rollback_and_reraise(msg=msg, instance=instance) def plug_midonet(self, instance, vif): """Plug into MidoNet's network port This accomplishes binding of the vif to a MidoNet virtual port """ if_local_name = 'tap%s' % vif['id'][:11] if_remote_name = 'ns%s' % vif['id'][:11] port_id = network.get_ovs_interfaceid(vif) # Device already exists so return. if linux_net.device_exists(if_local_name): return undo_mgr = utils.UndoManager() try: utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type', 'veth', 'peer', 'name', if_remote_name, run_as_root=True) undo_mgr.undo_with(lambda: utils.execute( 'ip', 'link', 'delete', if_local_name, run_as_root=True)) utils.execute('ip', 'link', 'set', if_local_name, 'up', run_as_root=True) utils.execute('mm-ctl', '--bind-port', port_id, if_local_name, run_as_root=True) except Exception: LOG.exception("Failed to configure network") msg = _('Failed to setup the network, rolling back') undo_mgr.rollback_and_reraise(msg=msg, instance=instance) def plug_ovs_hybrid(self, instance, vif): """Plug using hybrid strategy Create a per-VIF linux bridge, then link that bridge to the OVS integration bridge via a veth device, setting up the other end of the veth device just like a normal OVS port. Then boot the VIF on the linux bridge. and connect the tap port to linux bridge """ if_local_name = 'tap%s' % vif['id'][:11] if_remote_name = 'ns%s' % vif['id'][:11] iface_id = self.get_ovs_interfaceid(vif) br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) # Device already exists so return. if linux_net.device_exists(if_local_name): return undo_mgr = utils.UndoManager() try: if not linux_net.device_exists(br_name): utils.execute('brctl', 'addbr', br_name, run_as_root=True) # Incase of failure undo the Steps # Deleting/Undoing the interface will delete all # associated resources undo_mgr.undo_with(lambda: utils.execute( 'brctl', 'delbr', br_name, run_as_root=True)) # LOG.exception('Throw Test exception with bridgename %s' # % br_name) utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True) utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True) utils.execute('tee', ('/sys/class/net/%s/bridge/multicast_snooping' % br_name), process_input='0', run_as_root=True, check_exit_code=[0, 1]) if not linux_net.device_exists(v2_name): linux_net._create_veth_pair(v1_name, v2_name) undo_mgr.undo_with(lambda: utils.execute( 'ip', 'link', 'delete', v1_name, run_as_root=True)) utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True) undo_mgr.undo_with(lambda: utils.execute('ip', 'link', 'set', br_name, 'down', run_as_root=True)) # Deleting/Undoing the interface will delete all # associated resources (remove from the bridge, its # pair, etc...) 
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True) linux_net.create_ovs_vif_port(self.get_bridge_name(vif), v2_name, iface_id, vif['address'], instance['uuid']) undo_mgr.undo_with( lambda: utils.execute('ovs-vsctl', 'del-port', self.get_bridge_name(vif), v2_name, run_as_root=True)) utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type', 'veth', 'peer', 'name', if_remote_name, run_as_root=True) undo_mgr.undo_with( lambda: utils.execute('ip', 'link', 'delete', if_local_name, run_as_root=True)) # Deleting/Undoing the interface will delete all # associated resources (remove from the bridge, its pair, etc...) utils.execute('brctl', 'addif', br_name, if_local_name, run_as_root=True) utils.execute('ip', 'link', 'set', if_local_name, 'up', run_as_root=True) except Exception: msg = "Failed to configure Network." \ " Rolling back the network interfaces %s %s %s %s " % ( br_name, if_local_name, v1_name, v2_name) undo_mgr.rollback_and_reraise(msg=msg, instance=instance) # We are creating our own mac's now because the linux bridge interface # takes on the lowest mac that is assigned to it. By using FE range # mac's we prevent the interruption and possible loss of networking # from changing mac addresses. def _fe_random_mac(self): mac = [0xfe, 0xed, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) def plug_bridge(self, instance, vif): if_local_name = 'tap%s' % vif['id'][:11] if_remote_name = 'ns%s' % vif['id'][:11] bridge = vif['network']['bridge'] gateway = network.find_gateway(instance, vif['network']) vlan = vif.get('vlan') if vlan is not None: iface = (CONF.vlan_interface or vif['network'].get_meta('bridge_interface')) linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge( vlan, bridge, iface, net_attrs=vif, mtu=vif.get('mtu')) iface = 'vlan%s' % vlan else: iface = (CONF.flat_interface or vif['network'].get_meta('bridge_interface')) LOG.debug('Ensuring bridge for %s - %s' % (iface, bridge)) linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( bridge, iface, net_attrs=vif, gateway=gateway) # Device already exists so return. if linux_net.device_exists(if_local_name): return undo_mgr = utils.UndoManager() try: utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type', 'veth', 'peer', 'name', if_remote_name, run_as_root=True) undo_mgr.undo_with(lambda: utils.execute( 'ip', 'link', 'delete', if_local_name, run_as_root=True)) # NOTE(samalba): Deleting the interface will delete all # associated resources (remove from the bridge, its pair, etc...) 
utils.execute('ip', 'link', 'set', if_local_name, 'address', self._fe_random_mac(), run_as_root=True) utils.execute('brctl', 'addif', bridge, if_local_name, run_as_root=True) utils.execute('ip', 'link', 'set', if_local_name, 'up', run_as_root=True) except Exception: LOG.exception("Failed to configure network") msg = _('Failed to setup the network, rolling back') undo_mgr.rollback_and_reraise(msg=msg, instance=instance) def unplug(self, instance, vif): vif_type = vif['type'] LOG.debug('vif_type=%(vif_type)s instance=%(instance)s ' 'vif=%(vif)s', {'vif_type': vif_type, 'instance': instance, 'vif': vif}) if vif_type is None: raise exception.NovaException( _("vif_type parameter must be present " "for this vif_driver implementation")) if vif_type == network_model.VIF_TYPE_BRIDGE: self.unplug_bridge(instance, vif) elif vif_type == network_model.VIF_TYPE_OVS: if self.ovs_hybrid_required(vif): self.unplug_ovs_hybrid(instance, vif) else: self.unplug_ovs(instance, vif) elif vif_type == network_model.VIF_TYPE_MIDONET: self.unplug_midonet(instance, vif) elif vif_type == network_model.VIF_TYPE_IOVISOR: self.unplug_iovisor(instance, vif) else: raise exception.NovaException( _("Unexpected vif_type=%s") % vif_type) def unplug_iovisor(self, instance, vif): """Unplug vif from IOvisor Offboard an interface and deletes port from IOvisor """ if_local_name = 'tap%s' % vif['id'][:11] iface_id = vif['id'] try: utils.execute('ifc_ctl', 'gateway', 'ifdown', if_local_name, 'access_vm', vif['network']['label'] + "_" + iface_id, vif['address'], run_as_root=True) utils.execute('ifc_ctl', 'gateway', 'del_port', if_local_name, run_as_root=True) linux_net.delete_net_dev(if_local_name) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_ovs(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" try: linux_net.delete_ovs_vif_port(vif['network']['bridge'], vif['devname']) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_midonet(self, instance, vif): """Unplug into MidoNet's network port This accomplishes unbinding of the vif from its MidoNet virtual port """ try: utils.execute('mm-ctl', '--unbind-port', network.get_ovs_interfaceid(vif), run_as_root=True) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_ovs_hybrid(self, instance, vif): """UnPlug using hybrid strategy Unhook port from OVS, unhook port from bridge, delete bridge, and delete both veth devices. """ try: br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) if linux_net.device_exists(br_name): utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True) utils.execute('ip', 'link', 'set', br_name, 'down', run_as_root=True) utils.execute('brctl', 'delbr', br_name, run_as_root=True) linux_net.delete_ovs_vif_port(self.get_bridge_name(vif), v2_name) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_bridge(self, instance, vif): # NOTE(arosen): nothing has to be done in the linuxbridge case # as when the veth is deleted it automatically is removed from # the bridge. 
pass def attach(self, instance, vif, container_id): vif_type = vif['type'] if_remote_name = 'ns%s' % vif['id'][:11] LOG.debug('attach vif_type=%(vif_type)s instance=%(instance)s ' 'vif=%(vif)s', {'vif_type': vif_type, 'instance': instance, 'vif': vif}) try: utils.execute('ip', 'link', 'set', if_remote_name, 'netns', container_id, run_as_root=True) utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link', 'set', if_remote_name, 'address', vif['address'], run_as_root=True) utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link', 'set', if_remote_name, 'up', run_as_root=True) for subnet in vif['network']['subnets']: gateway = network.find_gateway(instance, subnet) ip = network.find_fixed_ip(instance, subnet) utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'addr', 'add', ip, 'dev', if_remote_name, run_as_root=True) if gateway is not None: utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'route', 'replace', 'default', 'via', gateway, 'dev', if_remote_name, run_as_root=True) # Setup MTU on if_remote_name is required if it is a non # default value mtu = CONF.network_device_mtu if vif.get('mtu') is not None: mtu = vif.get('mtu') if mtu is not None: utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link', 'set', if_remote_name, 'mtu', mtu, run_as_root=True) except Exception: LOG.exception("Failed to attach vif") def get_bridge_name(self, vif): return vif['network']['bridge'] def get_ovs_interfaceid(self, vif): return vif.get('ovs_interfaceid') or vif['id'] def get_br_name(self, iface_id): return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN] def get_veth_pair_names(self, iface_id): return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN], ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN]) def ovs_hybrid_required(self, vif): ovs_hybrid_required = self.get_firewall_required(vif) or \ self.get_hybrid_plug_enabled(vif) return ovs_hybrid_required def get_firewall_required(self, vif): if vif.get('details'): enabled = vif['details'].get('port_filter', False) if enabled: return False if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver": return True return False def get_hybrid_plug_enabled(self, vif): if vif.get('details'): return vif['details'].get('ovs_hybrid_plug', False) return False
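# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original driver: the device-naming
# scheme the DockerGenericVIFDriver above derives from a VIF id (tap/ns veth
# ends from the first 11 characters, qbr/qvb/qvo names truncated to the NIC
# name length).  The VIF id is made up, and NIC_NAME_LEN is assumed to be 14
# here rather than taken from network_model.
def _vif_naming_demo():
    vif_id = 'a1b2c3d4-e5f6-7890-abcd-ef0123456789'
    nic_name_len = 14

    tap_name = 'tap%s' % vif_id[:11]              # host end of the veth pair
    ns_name = 'ns%s' % vif_id[:11]                # end moved into the container
    br_name = ('qbr' + vif_id)[:nic_name_len]     # per-VIF linux bridge
    v1_name = ('qvb%s' % vif_id)[:nic_name_len]   # bridge side of hybrid veth
    v2_name = ('qvo%s' % vif_id)[:nic_name_len]   # OVS side of hybrid veth

    assert tap_name == 'tapa1b2c3d4-e5'
    assert ns_name == 'nsa1b2c3d4-e5'
    assert br_name == 'qbra1b2c3d4-e5'
    assert (v1_name, v2_name) == ('qvba1b2c3d4-e5', 'qvoa1b2c3d4-e5')
    return tap_name, ns_name, br_name, v1_name, v2_name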
#!/usr/bin/env python ############################################################################# # dictReader.py # 2015 James A. Stapleton # # This program parses JSON-dumped barcode-hashed short-read dictionaries # created by barcodeHasher.py and sends files to SPAdes or Velvet # for assembly. # # Options: # --makeHistogram: counts the number of reads in each barcode-defined # group, returns a file histogram.txt with one # count per line that can be used to make a # histogram and estimate the sample complexity. # The constant MIN_NUMBER_OF_SEQUENCES defines the # cutoff below which a bin is ignored. # # --runVelvet: creates a fasta file from the reads in each barcode- # defined group and sends it to Velvet for assembly. # # --diginorm: performs digital normalization on each read group before # sending it for assembly. # # --runSpades: creates fasta or fastq files from the reads in each # barcode-defined group and sends them to SPAdes for # assembly. # # --runTruSpades: creates fastq files from the reads in each # barcode-defined group and sends them to truSPAdes # for assembly. # # --HPCC: calls SPAdes with one thread as required by some HPCCs. # # --quality: creates fastq files rather than fasta files and uses # SPAdes's built-in error correction mode. # # --TRUNCATED_BARCODE_LENGTH: BARCODE_LENGTH - BARCODE_TRUNCATE from # the barcodeHasher.py run, default 16 # # --MIN_NUMBER_OF_SEQUENCES: minimum number of reads in a bin to count # in --makeHistogram # # --threads: number of threads to use when running SPAdes, default 8, # --HPCC sets to 1 # # --runCelera: calls the Celera assembler # # --runTadpole: calls the Tadpole assember from bbmap # ############################################################################# from __future__ import division from itertools import izip import argparse import subprocess import os def main(infile, makeHistogram, runVelvet, diginorm, runSpades, runTruSpades, HPCC, quality, TRUNCATED_BARCODE_LENGTH, MIN_NUMBER_OF_SEQUENCES, threads, runTadpole, runCelera): KMER_LENGTH = 99 # for diginorm MIN_CONTIG_LENGTH = 350 # for Velvet if not runVelvet and not makeHistogram and not runSpades and not runTruSpades and not runTadpole and not runCelera: print "Not doing anything!" 
return 0 if makeHistogram: #histogram = collections.Counter() histoOut = open("histogram.txt", 'w') goodReads = 0 totalReads = 0 if (runVelvet + runSpades + runTruSpades): if os.path.exists('./contig_list.txt'): subprocess.call(['rm', 'contig_list.txt']) if runTruSpades: quality = 1 if not os.path.exists('./truspades_input'): os.makedirs('./truspades_input') if runTadpole or runCelera: quality = 1 with open(infile, 'r') as data: # parse JSON-dumped dictionary (uniqueDict.txt, unpairedDict.txt, etc) # Consider one barcode-defined group at a time seq_list = [] for line in data: if line[-2] == '}': # end of the file break elif line[0] == '{': # start of the file continue # start of a barcode, example: "AAAAAACGTTATGCAG": [ elif line[-2] == '[': barcode = line[5:5+TRUNCATED_BARCODE_LENGTH] # start of a sequence, example: # "GGAAACTATACTAAAACTTGCTAAAAGCCATGATAAACTGAT", elif line[-3] == '"': sequence = line[9:-3] seq_list.append(sequence) # start of the last sequence in a group # no comma at the end, so -2 instead of -3 elif line[-2] == '"': sequence = line[9:-2] seq_list.append(sequence) # end of a barcode-defined group, example: ], elif line[-3] == ']' or line[-2] == ']': complete_list = seq_list seq_list = [] if makeHistogram: # this is the old way of printing a # histogram of barcode abundance: # returns pairs like 100, 20 # but this is hard to visualize # numBarcodes = len(complete_list) # histogram[numBarcodes] += 1 # new way: just print the number # of sequences in the barcode # group to a file, plot a histogram with Python # if len(complete_list) > 10 and len(complete_list) < 10000: if quality: if len(complete_list) > 2 * MIN_NUMBER_OF_SEQUENCES: # divide by two here to get number of PE read pairs print >> histoOut, len(complete_list)/4 totalReads += len(complete_list)/2 if len(complete_list) > 1000: goodReads += 500 elif len(complete_list) > 200: goodReads += len(complete_list)/2 else: if len(complete_list) > MIN_NUMBER_OF_SEQUENCES: # divide by two here to get number of PE read pairs print >> histoOut, len(complete_list)/2 totalReads += len(complete_list) if len(complete_list) > 500: goodReads += 500 elif len(complete_list) > 100: goodReads += len(complete_list) # write sequences from each items[barcode] to a fasta file, # send to assembler, overwrite fasta with next barcode sequnces # only run assembler if there are enough reads # for good coverage if (((not quality) and (len(complete_list) > MIN_NUMBER_OF_SEQUENCES)) or (quality and (len(complete_list) > 2 * MIN_NUMBER_OF_SEQUENCES))): if runVelvet: # open an output file that will be # overwritten each time through # the loop with a fasta list of sequences # to feed to Velvet with open('fasta.txt', 'w') as fasta: i = 1 for seq in complete_list: print >> fasta, ('>' + str(i)) if seq == '': fasta.write('A\n') else: fasta.write(seq+'\n') i += 1 # send fasta to Velveth # Diginorm if diginorm: subprocess.call(["normalize-by-median.py", "-k", "20", "-C", "20", "-N", "4", "-x", "5e8", "fasta.txt"]) subprocess.call(["velveth", "seqData", str(KMER_LENGTH), "fasta.txt.keep"]) else: subprocess.call(["velveth", "seqData", str(KMER_LENGTH), "-shortPaired", "fasta.txt"]) # get back files from Velveth # send these files to Velvetg subprocess.call(["velvetg", "seqData", "-exp_cov", "auto", "-cov_cutoff", 'auto', "-min_contig_lgth", str(MIN_CONTIG_LENGTH)]) # append contigs.fa to a growing file of contigs with open("seqData/contigs.fa", "r") as fin: data = fin.read() with open("contig_list.txt", "a") as fout: fout.write('>' + "Barcode: " + barcode + 
"\n") fout.write(data + "\n") with open("seqData/stats.txt", "r") as statsin: stats = statsin.read() with open("stats_list.txt", "a") as statsout: statsout.write('>'+"Barcode: " + barcode + "\n") statsout.write(stats + "\n") if runSpades or runTadpole or runCelera: # open an output file that will be # overwritten each time through # the loop with a fasta list of # sequences to feed to Spades if quality: with open('left.fq', 'w') as left,\ open('right.fq', 'w') as right,\ open('unpaired.fq', 'w') as unpaired: i = 1 for seq1, qual1, seq2, qual2 in\ grouper(4, complete_list): if (seq1 == "") and (seq2 == ""): continue elif seq1 == "": unpaired.write('@Seq_ID' + str(i) + '\n') unpaired.write(seq2 + '\n') unpaired.write('+\n') unpaired.write(qual2 + '\n') elif seq2 == "": unpaired.write('@Seq_ID' + str(i) + '\n') unpaired.write(seq1 + '\n') unpaired.write('+\n') unpaired.write(qual1 + '\n') else: left.write('@Seq_ID' + str(i) + '\n') left.write(seq1 + '\n') left.write('+\n') left.write(qual1 + '\n') right.write('@Seq_ID' + str(i) + '\n') right.write(seq2 + '\n') right.write('+\n') right.write(qual2 + '\n') i += 1 else: with open('fasta.fa', 'w') as fasta: i = 1 for seq in complete_list: print >> fasta, ('>' + str(i)) if seq == "": print >> fasta, "A" else: print >> fasta, seq i += 1 if runSpades: # send fasta to Spades if HPCC: threads = 1 if quality: if diginorm: subprocess.call(["normalize-by-median.py", "-k", "21", "-C", "20", "-N", "4", "-x", "5e8", "-p", "-s", "normC20k20.kh", "fastq.fq"]) subprocess.call(["filter-abund.py", "-V", "normC20k20.kh", "fastq.fq.keep"]) subprocess.call(["mv", "fastq.fq.keep", "fastq.fq"]) subprocess.call(["spades.py", "-k", "21,33,55,77,99,127", "-t", str(threads), "--careful", "--sc", "--pe1-12", "fastq.fq", "-o", "spades_output"]) else: subprocess.call(["spades.py", "-k", "21,33,55,77,99,127", "-t", str(threads), "--careful", "--sc", "--pe1-1", "left.fq", "--pe1-2", "right.fq", "--pe1-s", "unpaired.fq", "-o", "spades_output", "--disable-gzip-output"]) else: if diginorm: subprocess.call(["normalize-by-median.py", "-k", "21", "-C", "20", "-N", "4", "-x", "5e8", "-p", "-s", "normC20k20.kh", "fasta.fa"]) subprocess.call(["filter-abund.py", "-V", "normC20k20.kh", "fast.fa.keep"]) subprocess.call(["mv", "fasta.fa.keep", "fasta.fa"]) subprocess.call(["spades.py", "-k", "21,33,55,77,99,127", "-t", str(threads), "--careful", "--sc", "--pe1-12", "fasta.fa", "-o", "spades_output"]) else: subprocess.call(["spades.py", "-k", "21,33,55,77,99,127", "-t", str(threads), "--careful", "--only-assembler", "--sc", "--pe1-12", "fasta.fa", "-o", "spades_output"]) # append contigs.fasta to a growing file of contigs if os.path.exists("./spades_output/contigs.fasta"): with open("./spades_output/contigs.fasta", "r") as fin: data = fin.read() with open('contig_list.txt', 'a') as fout: fout.write('>' + "Barcode: " + barcode + "\n") fout.write(data + "\n") if runTruSpades: # open an output file that will be # overwritten each time through # the loop with a fastq list of # sequences to feed to truSpades with open('./truspades_input/reads_L1_R1.fastq', 'w') as left,\ open('./truspades_input/reads_L1_R2.fastq', 'w') as right: i = 1 for seq1, qual1, seq2, qual2 in\ grouper(4, complete_list): if (seq1 == "") or (seq2 == ""): continue left.write('@Seq_ID' + str(i) + '\n') left.write(seq1 + '\n') left.write('+\n') left.write(qual1 + '\n') right.write('@Seq_ID' + str(i) + '\n') right.write(seq2 + '\n') right.write('+\n') right.write(qual2 + '\n') i += 1 # write dataset file with 
open('dataset_file.txt', 'w') as dataset_file: current_path = os.path.dirname(os.path.realpath('dataset_file.txt')) dataset_file.write(barcode + ' ' + current_path + '/truspades_input/reads_L1_R1.fastq ' \ + current_path + '/truspades_input/reads_L1_R2.fastq') # run truSpades subprocess.call(["truspades.py", "--dataset", "dataset_file.txt", "-t", str(threads), "-o", "truspades_output"]) # append contigs.fasta to a growing file of contigs if os.path.exists("./truspades_output/TSLR.fasta"): with open("./truspades_output/TSLR.fasta", "r") as fin: data = fin.read() with open('contig_list.txt', 'a') as fout: fout.write('>' + "Barcode: " + barcode + "\n") fout.write(data + "\n") if runTadpole: subprocess.call(["tadpole.sh", "in=left.fq", "in2=right.fq", "overwrite=true", "oute1=correct1.fq", "oute2=correct2.fq", "mode=extend", "el=50", "er=50", "k=62", "ecc=t"]) subprocess.call(["tadpole.sh", "in=correct1.fq", "in2=correct2.fq", "out=tadpole.fa", "overwrite=true", "k=90", "rinse=t", "shave=t", "mincontig=250", "mincov=3", "bm1=8"]) # append contigs.fasta to a growing file of contigs if os.path.exists("tadpole.fa"): with open("tadpole.fa", "r") as fin: data = fin.read() with open('contig_list_tadpole.txt', 'a') as fout: fout.write('>' + "Barcode: " + barcode + "\n") fout.write(data + "\n") if runCelera: with open('leftright.frg', 'w') as outfile: subprocess.call(["fastqToCA", "-insertsize", "250", "25", "-libraryname", "celera", "-technology", "illumina", "-mates", "left.fq,right.fq", "-nonrandom"], stdout=outfile) subprocess.call(["runCA", "-d", "celera", "-p", "celera", "leftright.frg"]) if os.path.exists("celera/9-terminator/celera.utg.fasta"): with open("celera/9-terminator/celera.utg.fasta", "r") as fin: data = fin.read() with open('contig_list_celera.txt', 'a') as fout: fout.write('>' + "Barcode: " + barcode + "\n") fout.write(data + "\n") subprocess.call(["rm", "-r", "celera", "leftright.frg"]) if makeHistogram: # for numBarcodes in histogram: # print >> histoOut, (str(numBarcodes) + ',' # + str(histogram[numBarcodes])) histoOut.close() # del histogram print "efficiency score: ", goodReads/totalReads print "total number of reads: ", totalReads return 0 def grouper(n, iterable): "s -> (s0,s1,...sn-1), (sn,sn+1,...s2n-1), (s2n,s2n+1,...s3n-1), ..." return izip(*[iter(iterable)]*n) # http://stackoverflow.com/questions/4356329/ # creating-a-python-dictionary-from-a-line-of-text/4356415#4356415 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('infile') parser.add_argument('--makeHistogram', action='store_true', default=False, help='counts the number of reads in each barcode-defined group,\ returns a file histogram.txt with one count per line that can be used to\ make a histogram and estimate the sample complexity. 
The constant\
 MIN_NUMBER_OF_SEQUENCES defines the cutoff below which a bin is ignored.')
    parser.add_argument('--runVelvet', action='store_true', default=False,
                        help='creates a fasta file from the reads in each barcode-defined group and sends it to Velvet for assembly.')
    parser.add_argument('--diginorm', action='store_true', default=False,
                        help='performs digital normalization on each read group before sending it for assembly.')
    parser.add_argument('--runSpades', action='store_true', default=False,
                        help='creates fasta or fastq files from the reads in each barcode-defined group and sends them to SPAdes for assembly.')
    parser.add_argument('--runTruSpades', action='store_true', default=False,
                        help='creates fastq files from the reads in each barcode-defined group and sends them to truSPAdes for assembly.')
    parser.add_argument('--HPCC', action='store_true', default=False,
                        help='calls SPAdes with one thread as required by some HPCCs.')
    parser.add_argument('--quality', action='store_true', default=False,
                        help='creates fastq files rather than fasta files and uses SPAdes\'s built-in error correction mode.')
    parser.add_argument('--TRUNCATED_BARCODE_LENGTH', action='store',
                        dest="TRUNCATED_BARCODE_LENGTH", type=int, default=16,
                        help='BARCODE_LENGTH - BARCODE_TRUNCATE from the barcodeHasher.py run, default 16')
    parser.add_argument('--MIN_NUMBER_OF_SEQUENCES', action='store',
                        dest="MIN_NUMBER_OF_SEQUENCES", type=int, default=100,
                        help='minimum number of reads in a bin to count in --makeHistogram')
    parser.add_argument('--threads', action='store', dest="threads",
                        type=int, default=8,
                        help='number of threads to use when running SPAdes, default 8, --HPCC sets to 1')
    parser.add_argument('--runTadpole', action='store_true', default=False,
                        help='calls the Tadpole assembler from bbmap')
    parser.add_argument('--runCelera', action='store_true', default=False,
                        help='calls the Celera assembler')
    args = parser.parse_args()
    # The final positional argument is runCelera, matching main()'s signature
    # (infile, ..., threads, runTadpole, runCelera).
    main(args.infile, args.makeHistogram, args.runVelvet, args.diginorm,
         args.runSpades, args.runTruSpades, args.HPCC, args.quality,
         args.TRUNCATED_BARCODE_LENGTH, args.MIN_NUMBER_OF_SEQUENCES,
         args.threads, args.runTadpole, args.runCelera)
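# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: how grouper(4, ...)
# above turns the flat per-barcode list built in --quality mode into
# (seq1, qual1, seq2, qual2) read pairs before they are written to left.fq,
# right.fq, and unpaired.fq.  The reads and quality strings below are made
# up, and this helper is never called by the script itself.
def _demo_quality_pairing():
    flat_group = ['ACGT', 'IIII',   # read 1 of pair 1 and its qualities
                  'TTGC', 'IIII',   # read 2 of pair 1
                  'GGAA', 'HHHH',   # read 1 of pair 2
                  '', '']           # read 2 of pair 2 is empty, so the
                                    # surviving read goes to unpaired.fq
    pairs = list(grouper(4, flat_group))
    assert pairs == [('ACGT', 'IIII', 'TTGC', 'IIII'),
                     ('GGAA', 'HHHH', '', '')]
    return pairs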
# -*- coding: utf-8 -*- """ Tests of the neo.core.baseneo.BaseNeo class and related functions """ from datetime import datetime, date, time, timedelta from decimal import Decimal from fractions import Fraction import sys try: import unittest2 as unittest except ImportError: import unittest import numpy as np import quantities as pq try: from IPython.lib.pretty import pretty except ImportError as err: HAVE_IPYTHON = False else: HAVE_IPYTHON = True from neo.core.baseneo import (BaseNeo, _check_annotations, merge_annotations, merge_annotation) from neo.test.tools import assert_arrays_equal if sys.version_info[0] >= 3: _bytes = bytes long = int def bytes(s): return _bytes(s, encoding='ascii') class Test_check_annotations(unittest.TestCase): ''' TestCase to make sure _check_annotations works ''' def setUp(self): self.values = [1, 2.2, 3 + 2j, 'test', r'test', b'test', None, datetime(year=2008, month=12, day=3, hour=10, minute=4), timedelta(weeks=2, days=7, hours=18, minutes=28, seconds=18, milliseconds=28, microseconds=45), time(hour=10, minute=4), Decimal("3.14"), Fraction(13, 21), np.array([1.1, 1.2, 1.3]), np.array([1, 2, 3]), np.array('test', dtype='S'), np.array([True, False])] def test__check_annotations__invalid_ValueError(self): value = set([]) self.assertRaises(ValueError, _check_annotations, value) def test__check_annotations__invalid_dtype_ValueError(self): value = np.array([], dtype='O') self.assertRaises(ValueError, _check_annotations, value) def test__check_annotations__valid_dtypes(self): for value in self.values: _check_annotations(value) def test__check_annotations__list(self): _check_annotations(self.values) def test__check_annotations__tuple(self): _check_annotations(tuple(self.values)) _check_annotations((self.values, self.values)) def test__check_annotations__dict(self): names = ['value%s' % i for i in range(len(self.values))] values = dict(zip(names, self.values)) _check_annotations(values) class TestBaseNeo(unittest.TestCase): ''' TestCase to make sure basic initialization and methods work ''' def test_init(self): '''test to make sure initialization works properly''' base = BaseNeo(name='a base', description='this is a test') self.assertEqual(base.name, 'a base') self.assertEqual(base.description, 'this is a test') self.assertEqual(base.file_origin, None) def test_annotate(self): '''test to make sure annotation works properly''' base = BaseNeo() base.annotate(test1=1, test2=1) result1 = {'test1': 1, 'test2': 1} self.assertDictEqual(result1, base.annotations) base.annotate(test3=2, test4=3) result2 = {'test3': 2, 'test4': 3} result2a = dict(list(result1.items()) + list(result2.items())) self.assertDictContainsSubset(result1, base.annotations) self.assertDictContainsSubset(result2, base.annotations) self.assertDictEqual(result2a, base.annotations) base.annotate(test1=5, test2=8) result3 = {'test1': 5, 'test2': 8} result3a = dict(list(result3.items()) + list(result2.items())) self.assertDictContainsSubset(result2, base.annotations) self.assertDictContainsSubset(result3, base.annotations) self.assertDictEqual(result3a, base.annotations) self.assertNotEqual(base.annotations['test1'], result1['test1']) self.assertNotEqual(base.annotations['test2'], result1['test2']) def test__children(self): base = BaseNeo() self.assertEqual(base._single_parent_objects, ()) self.assertEqual(base._multi_parent_objects, ()) self.assertEqual(base._single_parent_containers, ()) self.assertEqual(base._multi_parent_containers, ()) self.assertEqual(base._parent_objects, ()) 
self.assertEqual(base._parent_containers, ()) self.assertEqual(base.parents, ()) class Test_BaseNeo_merge_annotations_merge(unittest.TestCase): ''' TestCase to make sure merge_annotations and merge methods work ''' def setUp(self): self.name1 = 'a base 1' self.name2 = 'a base 2' self.description1 = 'this is a test 1' self.description2 = 'this is a test 2' self.base1 = BaseNeo(name=self.name1, description=self.description1) self.base2 = BaseNeo(name=self.name2, description=self.description2) def test_merge_annotations__dict(self): self.base1.annotations = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1', 'val4': [.4], 'val5': {0: 0, 1: {0: 0}}, 'val6': np.array([0, 1, 2])} self.base2.annotations = {'val2': 2.2, 'val3': 'test2', 'val4': [4, 4.4], 'val5': {1: {1: 1}, 2: 2}, 'val6': np.array([4, 5, 6]), 'val7': True} ann1 = self.base1.annotations ann2 = self.base2.annotations ann1c = self.base1.annotations.copy() ann2c = self.base2.annotations.copy() targ = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1;test2', 'val4': [.4, 4, 4.4], 'val5': {0: 0, 1: {0: 0, 1: 1}, 2: 2}, 'val7': True} self.base1.merge_annotations(self.base2) val6t = np.array([0, 1, 2, 4, 5, 6]) val61 = ann1.pop('val6') val61c = ann1c.pop('val6') val62 = ann2.pop('val6') val62c = ann2c.pop('val6') self.assertEqual(ann1, self.base1.annotations) self.assertNotEqual(ann1c, self.base1.annotations) self.assertEqual(ann2c, self.base2.annotations) self.assertEqual(targ, self.base1.annotations) assert_arrays_equal(val61, val6t) self.assertRaises(AssertionError, assert_arrays_equal, val61c, val6t) assert_arrays_equal(val62, val62c) self.assertEqual(self.name1, self.base1.name) self.assertEqual(self.name2, self.base2.name) self.assertEqual(self.description1, self.base1.description) self.assertEqual(self.description2, self.base2.description) def test_merge_annotations__func__dict(self): ann1 = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1', 'val4': [.4], 'val5': {0: 0, 1: {0: 0}}, 'val6': np.array([0, 1, 2])} ann2 = {'val2': 2.2, 'val3': 'test2', 'val4': [4, 4.4], 'val5': {1: {1: 1}, 2: 2}, 'val6': np.array([4, 5, 6]), 'val7': True} ann1c = ann1.copy() ann2c = ann2.copy() targ = {'val0': 'val0', 'val1': 1, 'val2': 2.2, 'val3': 'test1;test2', 'val4': [.4, 4, 4.4], 'val5': {0: 0, 1: {0: 0, 1: 1}, 2: 2}, 'val7': True} res = merge_annotations(ann1, ann2) val6t = np.array([0, 1, 2, 4, 5, 6]) val6r = res.pop('val6') val61 = ann1.pop('val6') val61c = ann1c.pop('val6') val62 = ann2.pop('val6') val62c = ann2c.pop('val6') self.assertEqual(ann1, ann1c) self.assertEqual(ann2, ann2c) self.assertEqual(res, targ) assert_arrays_equal(val6r, val6t) self.assertRaises(AssertionError, assert_arrays_equal, val61, val6t) assert_arrays_equal(val61, val61c) assert_arrays_equal(val62, val62c) def test_merge_annotation__func__str(self): ann1 = 'test1' ann2 = 'test2' targ = 'test1;test2' res = merge_annotation(ann1, ann2) self.assertEqual(res, targ) def test_merge_annotation__func__ndarray(self): ann1 = np.array([0, 1, 2]) ann2 = np.array([4, 5, 6]) ann1c = ann1.copy() ann2c = ann2.copy() targ = np.array([0, 1, 2, 4, 5, 6]) res = merge_annotation(ann1, ann2) assert_arrays_equal(res, targ) assert_arrays_equal(ann1, ann1c) assert_arrays_equal(ann2, ann2c) def test_merge_annotation__func__list(self): ann1 = [0, 1, 2] ann2 = [4, 5, 6] ann1c = ann1[:] ann2c = ann2[:] targ = [0, 1, 2, 4, 5, 6] res = merge_annotation(ann1, ann2) self.assertEqual(res, targ) self.assertEqual(ann1, ann1c) self.assertEqual(ann2, ann2c) def 
test_merge_annotation__func__dict(self): ann1 = {0: 0, 1: {0: 0}} ann2 = {1: {1: 1}, 2: 2} ann1c = ann1.copy() ann2c = ann2.copy() targ = {0: 0, 1: {0: 0, 1: 1}, 2: 2} res = merge_annotation(ann1, ann2) self.assertEqual(res, targ) self.assertEqual(ann1, ann1c) self.assertEqual(ann2, ann2c) def test_merge_annotation__func__int(self): ann1 = 1 ann2 = 1 ann3 = 3 targ = 1 res = merge_annotation(ann1, ann2) self.assertEqual(res, targ) self.assertRaises(AssertionError, merge_annotation, ann1, ann3) def test_merge_annotation__func__float(self): ann1 = 1.1 ann2 = 1.1 ann3 = 1.3 targ = 1.1 res = merge_annotation(ann1, ann2) self.assertEqual(res, targ) self.assertRaises(AssertionError, merge_annotation, ann1, ann3) def test_merge_annotation__func__bool(self): ann1 = False ann2 = False ann3 = True ann4 = True targ1 = False targ2 = True res1 = merge_annotation(ann1, ann2) res2 = merge_annotation(ann3, ann4) self.assertEqual(res1, targ1) self.assertEqual(res2, targ2) self.assertRaises(AssertionError, merge_annotation, ann1, ann3) self.assertRaises(AssertionError, merge_annotation, ann2, ann4) class TestBaseNeoCoreTypes(unittest.TestCase): ''' TestCase to make sure annotations are properly checked for core built-in python data types ''' def setUp(self): '''create the instance to be tested, called before every test''' self.base = BaseNeo() def test_python_nonetype(self): '''test to make sure None type data is accepted''' value = None self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_int(self): '''test to make sure int type data is accepted''' value = 10 self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_long(self): '''test to make sure long type data is accepted''' value = long(7) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_float(self): '''test to make sure float type data is accepted''' value = 9.2 self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_complex(self): '''test to make sure complex type data is accepted''' value = complex(23.17, 11.29) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_string(self): '''test to make sure string type data is accepted''' value = 'this is a test' self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_unicode(self): '''test to make sure unicode type data is accepted''' value = u'this is also a test' self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_bytes(self): '''test to make sure bytes type data is accepted''' value = bytes('1,2,3,4,5') self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) class TestBaseNeoStandardLibraryTypes(unittest.TestCase): ''' TestCase to make sure 
annotations are properly checked for data types from the python standard library that are not core built-in data types ''' def setUp(self): '''create the instance to be tested, called before every test''' self.base = BaseNeo() def test_python_fraction(self): '''test to make sure Fraction type data is accepted''' value = Fraction(13, 21) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_decimal(self): '''test to make sure Decimal type data is accepted''' value = Decimal("3.14") self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_datetime(self): '''test to make sure datetime type data is accepted''' value = datetime(year=2008, month=12, day=3, hour=10, minute=4) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_date(self): '''test to make sure date type data is accepted''' value = date(year=2008, month=12, day=3) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_time(self): '''test to make sure time type data is accepted''' value = time(hour=10, minute=4) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_timedelta(self): '''test to make sure timedelta type data is accepted''' value = timedelta(weeks=2, days=7, hours=18, minutes=28, seconds=18, milliseconds=28, microseconds=45) self.base.annotate(data=value) result = {'data': value} self.assertEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) class TestBaseNeoContainerTypes(unittest.TestCase): ''' TestCase to make sure annotations are properly checked for data type inside python built-in container types ''' def setUp(self): '''create the instance to be tested, called before every test''' self.base = BaseNeo() def test_python_list(self): '''test to make sure list type data is accepted''' value = [None, 10, 9.2, complex(23, 11), ['this is a test', bytes('1,2,3,4,5')], [Fraction(13, 21), Decimal("3.14")]] self.base.annotate(data=value) result = {'data': value} self.assertListEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_tuple(self): '''test to make sure tuple type data is accepted''' value = (None, 10, 9.2, complex(23, 11), ('this is a test', bytes('1,2,3,4,5')), (Fraction(13, 21), Decimal("3.14"))) self.base.annotate(data=value) result = {'data': value} self.assertTupleEqual(value, self.base.annotations['data']) self.assertDictEqual(result, self.base.annotations) def test_python_dict(self): '''test to make sure dict type data is accepted''' value = {'NoneType': None, 'int': 10, 'float': 9.2, 'complex': complex(23, 11), 'dict1': {'string': 'this is a test', 'bytes': bytes('1,2,3,4,5')}, 'dict2': {'Fraction': Fraction(13, 21), 'Decimal': Decimal("3.14")}} self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_python_set(self): '''test to make sure set type data is rejected''' value = set([None, 10, 9.2, complex(23, 11)]) 
self.assertRaises(ValueError, self.base.annotate, data=value) def test_python_frozenset(self): '''test to make sure frozenset type data is rejected''' value = frozenset([None, 10, 9.2, complex(23, 11)]) self.assertRaises(ValueError, self.base.annotate, data=value) def test_python_iter(self): '''test to make sure iter type data is rejected''' value = iter([None, 10, 9.2, complex(23, 11)]) self.assertRaises(ValueError, self.base.annotate, data=value) class TestBaseNeoNumpyArrayTypes(unittest.TestCase): ''' TestCase to make sure annotations are properly checked for numpy arrays ''' def setUp(self): '''create the instance to be tested, called before every test''' self.base = BaseNeo() def test_numpy_array_int(self): '''test to make sure int type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.int) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_uint(self): '''test to make sure uint type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.uint) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_int0(self): '''test to make sure int0 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.int0) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_uint0(self): '''test to make sure uint0 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.uint0) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_int8(self): '''test to make sure int8 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.int8) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_uint8(self): '''test to make sure uint8 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.uint8) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_int16(self): '''test to make sure int16 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.int16) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_uint16(self): '''test to make sure uint16 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.uint16) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_int32(self): '''test to make sure int32 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.int32) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_uint32(self): '''test to make sure uint32 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.uint32) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_int64(self): '''test to make sure int64 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.int64) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_uint64(self): '''test to make sure uint64 type numpy arrays are 
accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.uint64) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_float(self): '''test to make sure float type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.float) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_floating(self): '''test to make sure floating type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.floating) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_double(self): '''test to make sure double type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.double) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_float16(self): '''test to make sure float16 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.float16) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_float32(self): '''test to make sure float32 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.float32) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_float64(self): '''test to make sure float64 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.float64) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) @unittest.skipUnless(hasattr(np, "float128"), "float128 not available") def test_numpy_array_float128(self): '''test to make sure float128 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.float128) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_complex(self): '''test to make sure complex type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.complex) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_complex64(self): '''test to make sure complex64 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.complex64) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_complex128(self): '''test to make sure complex128 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.complex128) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) @unittest.skipUnless(hasattr(np, "complex256"), "complex256 not available") def test_numpy_scalar_complex256(self): '''test to make sure complex256 type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.complex256) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_bool(self): '''test to make sure bool type numpy arrays are accepted''' value = np.array([1, 2, 3, 4, 5], dtype=np.bool) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_str(self): '''test to make sure str type numpy arrays are accepted''' value = np.array([1, 
2, 3, 4, 5], dtype=np.str) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_array_string0(self): '''test to make sure string0 type numpy arrays are accepted''' if sys.version_info[0] >= 3: dtype = np.str0 else: dtype = np.string0 value = np.array([1, 2, 3, 4, 5], dtype=dtype) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) class TestBaseNeoNumpyScalarTypes(unittest.TestCase): ''' TestCase to make sure annotations are properly checked for numpy scalars ''' def setUp(self): '''create the instance to be tested, called before every test''' self.base = BaseNeo() def test_numpy_scalar_int(self): '''test to make sure int type numpy scalars are accepted''' value = np.array(99, dtype=np.int) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_uint(self): '''test to make sure uint type numpy scalars are accepted''' value = np.array(99, dtype=np.uint) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_int0(self): '''test to make sure int0 type numpy scalars are accepted''' value = np.array(99, dtype=np.int0) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_uint0(self): '''test to make sure uint0 type numpy scalars are accepted''' value = np.array(99, dtype=np.uint0) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_int8(self): '''test to make sure int8 type numpy scalars are accepted''' value = np.array(99, dtype=np.int8) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_uint8(self): '''test to make sure uint8 type numpy scalars are accepted''' value = np.array(99, dtype=np.uint8) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_int16(self): '''test to make sure int16 type numpy scalars are accepted''' value = np.array(99, dtype=np.int16) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_uint16(self): '''test to make sure uint16 type numpy scalars are accepted''' value = np.array(99, dtype=np.uint16) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_int32(self): '''test to make sure int32 type numpy scalars are accepted''' value = np.array(99, dtype=np.int32) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_uint32(self): '''test to make sure uint32 type numpy scalars are accepted''' value = np.array(99, dtype=np.uint32) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_int64(self): '''test to make sure int64 type numpy scalars are accepted''' value = np.array(99, dtype=np.int64) self.base.annotate(data=value) result = {'data': value} self.assertDictEqual(result, self.base.annotations) def test_numpy_scalar_uint64(self): '''test to make sure uint64 type numpy scalars are accepted''' value = np.array(99, dtype=np.uint64) self.base.annotate(data=value) result = {'data': value} 
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_float(self):
        '''test to make sure float type numpy scalars are accepted'''
        value = np.array(99, dtype=np.float)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_floating(self):
        '''test to make sure floating type numpy scalars are accepted'''
        value = np.array(99, dtype=np.floating)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_double(self):
        '''test to make sure double type numpy scalars are accepted'''
        value = np.array(99, dtype=np.double)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_float16(self):
        '''test to make sure float16 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.float16)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_float32(self):
        '''test to make sure float32 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.float32)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_float64(self):
        '''test to make sure float64 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.float64)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    @unittest.skipUnless(hasattr(np, "float128"), "float128 not available")
    def test_numpy_scalar_float128(self):
        '''test to make sure float128 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.float128)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_complex(self):
        '''test to make sure complex type numpy scalars are accepted'''
        value = np.array(99, dtype=np.complex)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_complex64(self):
        '''test to make sure complex64 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.complex64)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_complex128(self):
        '''test to make sure complex128 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.complex128)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    @unittest.skipUnless(hasattr(np, "complex256"), "complex256 not available")
    def test_numpy_scalar_complex256(self):
        '''test to make sure complex256 type numpy scalars are accepted'''
        value = np.array(99, dtype=np.complex256)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_bool(self):
        '''test to make sure bool type numpy scalars are accepted'''
        value = np.array(99, dtype=np.bool)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_str(self):
        '''test to make sure str type numpy scalars are accepted'''
        value = np.array(99, dtype=np.str)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_numpy_scalar_string0(self):
        '''test to make sure string0 type numpy scalars are accepted'''
        if sys.version_info[0] >= 3:
            dtype = np.str0
        else:
            dtype = np.string0
        value = np.array(99, dtype=dtype)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)


class TestBaseNeoQuantitiesArrayTypes(unittest.TestCase):
    '''
    TestCase to make sure annotations are properly checked for
    quantities arrays
    '''
    def setUp(self):
        '''create the instance to be tested, called before every test'''
        self.base = BaseNeo()

    def test_quantities_array_int(self):
        '''test to make sure int type quantities arrays are accepted'''
        value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.int, units=pq.s)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_quantities_array_uint(self):
        '''test to make sure uint type quantities arrays are accepted'''
        value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.uint, units=pq.meter)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_quantities_array_float(self):
        '''test to make sure float type quantities arrays are accepted'''
        value = [1, 2, 3, 4, 5] * pq.kg
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_quantities_array_str(self):
        '''test to make sure str type quantities arrays are accepted'''
        value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.str, units=pq.meter)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)


class TestBaseNeoQuantitiesScalarTypes(unittest.TestCase):
    '''
    TestCase to make sure annotations are properly checked for
    quantities scalars
    '''
    def setUp(self):
        '''create the instance to be tested, called before every test'''
        self.base = BaseNeo()

    def test_quantities_scalar_int(self):
        '''test to make sure int type quantities scalars are accepted'''
        value = pq.Quantity(99, dtype=np.int, units=pq.s)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_quantities_scalar_uint(self):
        '''test to make sure uint type quantities scalars are accepted'''
        value = pq.Quantity(99, dtype=np.uint, units=pq.meter)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_quantities_scalar_float(self):
        '''test to make sure float type quantities scalars are accepted'''
        value = 99 * pq.kg
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)

    def test_quantities_scalar_str(self):
        '''test to make sure str type quantities scalars are accepted'''
        value = pq.Quantity(99, dtype=np.str, units=pq.meter)
        self.base.annotate(data=value)
        result = {'data': value}
        self.assertDictEqual(result, self.base.annotations)


class TestBaseNeoUserDefinedTypes(unittest.TestCase):
    '''
    TestCase to make sure annotations are properly checked for
    arbitrary objects
    '''
    def setUp(self):
        '''create the instance to be tested, called before every test'''
        self.base = BaseNeo()

    def test_my_class(self):
        '''test to make sure user defined class type data is rejected'''
        class Foo(object):
            pass
        value = Foo()
        self.assertRaises(ValueError, self.base.annotate, data=value)

    def test_my_class_list(self):
        '''test to make sure a list of user defined class objects is rejected'''
        class Foo(object):
            pass
        value = [Foo(), Foo(), Foo()]
        self.assertRaises(ValueError, self.base.annotate, data=value)


@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
class Test_pprint(unittest.TestCase):
    def test__pretty(self):
        name = 'an object'
        description = 'this is a test'
        obj = BaseNeo(name=name, description=description)
        res = pretty(obj)
        targ = "BaseNeo name: '%s' description: '%s'" % (name, description)
        self.assertEqual(res, targ)


if __name__ == "__main__":
    unittest.main()
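# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test suite above): what the
# behaviour exercised by these tests looks like in user code. The import path
# is an assumption based on the class under test; adjust it to the actual
# package layout if it differs.
#
#     from neo.core.baseneo import BaseNeo
#
#     base = BaseNeo()
#     base.annotate(rate=42.0)           # plain scalars are accepted
#     base.annotate(channel='ch1')       # strings are accepted
#     base.annotations                   # -> {'rate': 42.0, 'channel': 'ch1'}
#
#     base.annotate(bad=set([1, 2, 3]))  # sets are rejected -> ValueError
# ---------------------------------------------------------------------------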
#!/usr/bin/env python # The contents of this file are subject to the BitTorrent Open Source License # Version 1.1 (the License). You may not copy or use this file, in either # source code or executable form, except in compliance with the License. You # may obtain a copy of the License at http://www.bittorrent.com/license/. # # Software distributed under the License is distributed on an AS IS basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License # for the specific language governing rights and limitations under the # License. # Written by Bram Cohen, Uoti Urpala and John Hoffman from __future__ import division from BitTorrent.platform import install_translation install_translation() import sys import os import threading from time import time, strftime from cStringIO import StringIO from BitTorrent.download import Feedback, Multitorrent from BitTorrent.defaultargs import get_defaults from BitTorrent.parseargs import printHelp from BitTorrent.zurllib import urlopen from BitTorrent.bencode import bdecode from BitTorrent.ConvertedMetainfo import ConvertedMetainfo from BitTorrent.prefs import Preferences from BitTorrent import configfile from BitTorrent import BTFailure from BitTorrent import version from BitTorrent import GetTorrent def fmttime(n): if n == 0: return _("download complete!") try: n = int(n) assert n >= 0 and n < 5184000 # 60 days except: return _("<unknown>") m, s = divmod(n, 60) h, m = divmod(m, 60) return _("finishing in %d:%02d:%02d") % (h, m, s) def fmtsize(n): s = str(n) size = s[-3:] while len(s) > 3: s = s[:-3] size = '%s,%s' % (s[-3:], size) if n > 999: unit = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'] i = 1 while i + 1 < len(unit) and (n >> 10) >= 999: i += 1 n >>= 10 n /= (1 << 10) size = '%s (%.0f %s)' % (size, n, unit[i]) return size class HeadlessDisplayer(object): def __init__(self, doneflag): self.doneflag = doneflag self.done = False self.percentDone = '' self.timeEst = '' self.downRate = '---' self.upRate = '---' self.shareRating = '' self.seedStatus = '' self.peerStatus = '' self.errors = [] self.file = '' self.downloadTo = '' self.fileSize = '' self.numpieces = 0 def set_torrent_values(self, name, path, size, numpieces): self.file = name self.downloadTo = path self.fileSize = fmtsize(size) self.numpieces = numpieces def finished(self): self.done = True self.downRate = '---' self.display({'activity':_("download succeeded"), 'fractionDone':1}) def error(self, errormsg): newerrmsg = strftime('[%H:%M:%S] ') + errormsg self.errors.append(newerrmsg) self.display({}) def display(self, statistics): fractionDone = statistics.get('fractionDone') activity = statistics.get('activity') timeEst = statistics.get('timeEst') downRate = statistics.get('downRate') upRate = statistics.get('upRate') spew = statistics.get('spew') print '\n\n\n\n' if spew is not None: self.print_spew(spew) if timeEst is not None: self.timeEst = fmttime(timeEst) elif activity is not None: self.timeEst = activity if fractionDone is not None: self.percentDone = str(int(fractionDone * 1000) / 10) if downRate is not None: self.downRate = '%.1f KB/s' % (downRate / (1 << 10)) if upRate is not None: self.upRate = '%.1f KB/s' % (upRate / (1 << 10)) downTotal = statistics.get('downTotal') if downTotal is not None: upTotal = statistics['upTotal'] if downTotal <= upTotal / 100: self.shareRating = _("oo (%.1f MB up / %.1f MB down)") % ( upTotal / (1<<20), downTotal / (1<<20)) else: self.shareRating = _("%.3f (%.1f MB up / %.1f MB down)") % ( upTotal / downTotal, 
upTotal / (1<<20), downTotal / (1<<20)) numCopies = statistics['numCopies'] nextCopies = ', '.join(["%d:%.1f%%" % (a,int(b*1000)/10) for a,b in zip(xrange(numCopies+1, 1000), statistics['numCopyList'])]) if not self.done: self.seedStatus = _("%d seen now, plus %d distributed copies " "(%s)") % (statistics['numSeeds' ], statistics['numCopies'], nextCopies) else: self.seedStatus = _("%d distributed copies (next: %s)") % ( statistics['numCopies'], nextCopies) self.peerStatus = _("%d seen now") % statistics['numPeers'] for err in self.errors[-4:]: print _("ERROR:\n") + err + '\n' print _("saving: "), self.file print _("file size: "), self.fileSize print _("percent done: "), self.percentDone print _("time left: "), self.timeEst print _("download to: "), self.downloadTo print _("download rate: "), self.downRate print _("upload rate: "), self.upRate print _("share rating: "), self.shareRating print _("seed status: "), self.seedStatus print _("peer status: "), self.peerStatus def print_spew(self, spew): s = StringIO() s.write('\n\n\n') for c in spew: s.write('%20s ' % c['ip']) if c['initiation'] == 'L': s.write('l') else: s.write('r') total, rate, interested, choked = c['upload'] s.write(' %10s %10s ' % (str(int(total/10485.76)/100), str(int(rate)))) if c['is_optimistic_unchoke']: s.write('*') else: s.write(' ') if interested: s.write('i') else: s.write(' ') if choked: s.write('c') else: s.write(' ') total, rate, interested, choked, snubbed = c['download'] s.write(' %10s %10s ' % (str(int(total/10485.76)/100), str(int(rate)))) if interested: s.write('i') else: s.write(' ') if choked: s.write('c') else: s.write(' ') if snubbed: s.write('s') else: s.write(' ') s.write('\n') print s.getvalue() class DL(Feedback): def __init__(self, metainfo, config): self.doneflag = threading.Event() self.metainfo = metainfo self.config = Preferences().initWithDict(config) def run(self): self.d = HeadlessDisplayer(self.doneflag) try: self.multitorrent = Multitorrent(self.config, self.doneflag, self.global_error) # raises BTFailure if bad metainfo = ConvertedMetainfo(bdecode(self.metainfo)) torrent_name = metainfo.name_fs if config['save_as']: if config['save_in']: raise BTFailure(_("You cannot specify both --save_as and " "--save_in")) saveas = config['save_as'] elif config['save_in']: saveas = os.path.join(config['save_in'], torrent_name) else: saveas = torrent_name self.d.set_torrent_values(metainfo.name, os.path.abspath(saveas), metainfo.total_bytes, len(metainfo.hashes)) self.torrent = self.multitorrent.start_torrent(metainfo, Preferences(self.config), self, saveas) except BTFailure, e: print str(e) return self.get_status() self.multitorrent.rawserver.install_sigint_handler() self.multitorrent.rawserver.listen_forever() self.d.display({'activity':_("shutting down"), 'fractionDone':0}) self.torrent.shutdown() def reread_config(self): try: newvalues = configfile.get_config(self.config, 'bittorrent-console') except Exception, e: self.d.error(_("Error reading config: ") + str(e)) return self.config.update(newvalues) # The set_option call can potentially trigger something that kills # the torrent (when writing this the only possibility is a change in # max_files_open causing an IOError while closing files), and so # the self.failed() callback can run during this loop. 
for option, value in newvalues.iteritems(): self.multitorrent.set_option(option, value) for option, value in newvalues.iteritems(): self.torrent.set_option(option, value) def get_status(self): self.multitorrent.rawserver.add_task(self.get_status, self.config['display_interval']) status = self.torrent.get_status(self.config['spew']) self.d.display(status) def global_error(self, level, text): self.d.error(text) def error(self, torrent, level, text): self.d.error(text) def failed(self, torrent, is_external): self.doneflag.set() def finished(self, torrent): self.d.finished() if __name__ == '__main__': uiname = 'bittorrent-console' defaults = get_defaults(uiname) metainfo = None if len(sys.argv) <= 1: printHelp(uiname, defaults) sys.exit(1) try: config, args = configfile.parse_configuration_and_args(defaults, uiname, sys.argv[1:], 0, 1) torrentfile = None if len(args): torrentfile = args[0] for opt in ('responsefile', 'url'): if config[opt]: print '"--%s"' % opt, _("deprecated, do not use") torrentfile = config[opt] if torrentfile is not None: metainfo, errors = GetTorrent.get(torrentfile) if errors: raise BTFailure(_("Error reading .torrent file: ") + '\n'.join(errors)) else: raise BTFailure(_("you must specify a .torrent file")) except BTFailure, e: print str(e) sys.exit(1) dl = DL(metainfo, config) dl.run()
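# ---------------------------------------------------------------------------
# Illustrative invocation (a sketch; the exact option set comes from
# get_defaults('bittorrent-console') above, and status strings pass through
# the gettext translation installed at the top of the file):
#
#     python bittorrent-console.py example.torrent --save_as example.iso
#
# --save_as and --save_in are mutually exclusive; passing both raises
# BTFailure. For reference, the display helpers above format values roughly as
#
#     fmttime(3725)    -> "finishing in 1:02:05"
#     fmtsize(1234567) -> "1,234,567 (1 MiB)"
# ---------------------------------------------------------------------------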
__author__ = 'jan.zdunek' # Copyright 2015 Jan Zdunek # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import os import unittest from datetime import timedelta import robot import robot_profiler class RobotProfilerUnitTests(unittest.TestCase): def create_test_case_file(self, test_case_content): test_case_file_descriptor, test_case_filename = tempfile.mkstemp(suffix='.txt', text=True) os.write(test_case_file_descriptor, test_case_content) os.close(test_case_file_descriptor) assert isinstance(test_case_filename, str) return test_case_filename def assertTimedeltaAlmostEqual(self, first, second, msg=None, delta=timedelta(milliseconds=100)): if (type(first) is timedelta) and (type(second) is timedelta): if abs(second - first) < delta: return else: raise self.failureException('%s != %s within %s delta.' % (first, second, delta)) else: raise self.failureException('Assertion only takes timedelta objects.') def run_and_analyse_robottest(self, testfilename): outputfile = tempfile.mkstemp(suffix='.xml')[1] robot.run(testfilename, report='NONE', log='NONE', output=outputfile) keywords = robot_profiler.analyse_output_xml([outputfile]) return keywords def run_and_analyse_multiple_robottests(self, list_of_testfilename): list_of_output_xml_files = [] for testfilename in list_of_testfilename: outputfile = tempfile.mkstemp(suffix='.xml')[1] robot.run(testfilename, report='NONE', log='NONE', output=outputfile) list_of_output_xml_files.append(outputfile) keywords = robot_profiler.analyse_output_xml(list_of_output_xml_files) return keywords def test_analyse_output_xml_1(self): test_case = """ *** Test Case *** Test Case 1 [Documentation] Test case for the Robot Profiler: Keyword from the BuiltIn library in different spellings. ... This checks that the profiler is able to read the status messages and to gather the timestamps ... from the output xml file. The Robot Profiler relies on the behaviour of the Robot Framework ... to log the keyword's name as taken from the library despite the spellings used in the test ... files. This is checked as well. Sleep 1s Sleep 2s sleep 3s BuiltIn.Sleep 4s """ test_case_file_name = self.create_test_case_file(test_case) keywords = self.run_and_analyse_robottest(test_case_file_name) self.assertEqual(1, len(keywords), 'Wrong number of keywords found.') self.assertIn('BuiltIn.Sleep', keywords) self.assertEqual(4, len(keywords['BuiltIn.Sleep']), 'Wrong number of durations found.') self.assertTimedeltaAlmostEqual(timedelta(seconds=1), keywords['BuiltIn.Sleep'][0]) self.assertTimedeltaAlmostEqual(timedelta(seconds=2), keywords['BuiltIn.Sleep'][1]) self.assertTimedeltaAlmostEqual(timedelta(seconds=3), keywords['BuiltIn.Sleep'][2]) self.assertTimedeltaAlmostEqual(timedelta(seconds=4), keywords['BuiltIn.Sleep'][3]) def test_analyse_output_xml_2(self): test_case = """ *** Test Case *** Test Case 2 [Documentation] Test case for the Robot Profiler: Multiple keywords from the BuiltIn library. ... This checks that the profiler is able to handle multiple keywords. 
Sleep 1s No Operation """ test_case_file_name = self.create_test_case_file(test_case) keywords = self.run_and_analyse_robottest(test_case_file_name) self.assertEqual(2, len(keywords), 'Wrong number of keywords found.') self.assertIn('BuiltIn.Sleep', keywords) self.assertIn('BuiltIn.No Operation', keywords) def test_analyse_output_xml_3(self): test_case = """ *** Keyword *** User Defined Keyword No Operation *** Test Case *** Test Case 3 [Documentation] Test case for the Robot Profiler: User Defined Keywords. ... This test checks that the profiler is able to handle user defined keywords in different spellings. User Defined Keyword user defined keyword userdefinedkeyword """ test_case_file_name = self.create_test_case_file(test_case) keywords = self.run_and_analyse_robottest(test_case_file_name) self.assertEqual(2, len(keywords), 'Wrong number of keywords found.') self.assertIn('BuiltIn.No Operation', keywords) self.assertIn('User Defined Keyword', keywords) self.assertEqual(3, len(keywords['User Defined Keyword']), 'Wrong number of durations found.') def test_analyse_output_xml_4(self): test_case = """ *** Keyword *** User Defined Keyword No Operation Another User Defined Keyword No Operation *** Test Case *** Test Case 4 [Documentation] Test case for the Robot Profiler: Multiple User Defined Keywords. ... This test checks that the profiler is able to handle multiple user defined keywords. User Defined Keyword Another User Defined Keyword """ test_case_file_name = self.create_test_case_file(test_case) keywords = self.run_and_analyse_robottest(test_case_file_name) self.assertEqual(3, len(keywords), 'Wrong number of keywords found.') self.assertIn('BuiltIn.No Operation', keywords) self.assertIn('User Defined Keyword', keywords) self.assertIn('Another User Defined Keyword', keywords) def test_analyse_output_xml_5(self): test_case = """ *** Settings *** Documentation Test Case for the Robot Profiler: Multiple Test Cases ... Checks that the profiler is able to gather data from multiple test cases. *** Testcase *** Test Case 5 A Sleep 1s Test Case 5 B Sleep 2s """ test_case_file_name = self.create_test_case_file(test_case) keywords = self.run_and_analyse_robottest(test_case_file_name) self.assertEqual(1, len(keywords), 'Wrong number of keywords found.') self.assertIn('BuiltIn.Sleep', keywords) self.assertEqual(2, len(keywords['BuiltIn.Sleep']), 'Wrong number of durations found.') self.assertTimedeltaAlmostEqual(timedelta(seconds=1), keywords['BuiltIn.Sleep'][0]) self.assertTimedeltaAlmostEqual(timedelta(seconds=2), keywords['BuiltIn.Sleep'][1]) def test_analyse_output_xml_6(self): test_case = """ *** Settings *** Documentation Test Case for the Robot Profiler: Multiple Test Cases Files - File A ... Checks that the profiler is able to gather data from multiple output xml files. *** Testcase *** Test Case 6 A Sleep 1s """ test_case_file_name_6a = self.create_test_case_file(test_case) test_case = """ *** Settings *** Documentation Test Case for the Robot Profiler: Multiple Test Cases Files - File B ... Checks that the profiler is able to gather data from multiple output xml files. 
*** Testcase *** Test Case 6 B Sleep 2s """ test_case_file_name_6b = self.create_test_case_file(test_case) keywords = self.run_and_analyse_multiple_robottests([test_case_file_name_6a, test_case_file_name_6b]) self.assertEqual(1, len(keywords), 'Wrong number of keywords found.') self.assertIn('BuiltIn.Sleep', keywords) self.assertEqual(2, len(keywords['BuiltIn.Sleep']), 'Wrong number of durations found.') self.assertTimedeltaAlmostEqual(timedelta(seconds=1), keywords['BuiltIn.Sleep'][0]) self.assertTimedeltaAlmostEqual(timedelta(seconds=2), keywords['BuiltIn.Sleep'][1]) def test_evaluate_durations(self): keyword_information = robot_profiler.evaluate_durations( {'BuiltIn.Sleep': [timedelta(seconds=1), timedelta(seconds=1)], 'BuiltIn.No Operation': [timedelta(seconds=0)]}) self.assertEqual(2, len(keyword_information), 'Wrong number of evaluated keywords.') self.assertIn('BuiltIn.Sleep', keyword_information) self.assertListEqual([2, timedelta(seconds=2), timedelta(seconds=1)], keyword_information['BuiltIn.Sleep']) self.assertIn('BuiltIn.No Operation', keyword_information) self.assertListEqual([1, timedelta(seconds=0), timedelta(seconds=0)], keyword_information['BuiltIn.No Operation']) def test_create_output_line_1(self): line = robot_profiler.create_output_line("BuiltIn.Sleep", 2, timedelta(seconds=2), timedelta(seconds=1), ';') fields = line.split(';') self.assertEqual(4, len(fields)) self.assertEqual('BuiltIn.Sleep', fields[0]) self.assertEqual('2', fields[1]) self.assertEqual('2', fields[2]) self.assertEqual('1', fields[3]) def test_create_output_line_2(self): line = robot_profiler.create_output_line("BuiltIn.Sleep", 2, timedelta(seconds=2, milliseconds=200), timedelta(seconds=1, milliseconds=100), ';') fields = line.split(';') self.assertEqual(4, len(fields)) self.assertEqual('BuiltIn.Sleep', fields[0]) self.assertEqual('2', fields[1]) self.assertEqual('2.2', fields[2]) self.assertEqual('1.1', fields[3]) def test_parse_file_name_list_1(self): file_name_list = ['output_1.xml', 'output_2.xml', 'output_3.xml'] list_of_input_names, output_name = robot_profiler.parse_file_name_list(file_name_list) self.assertListEqual(file_name_list, list_of_input_names) self.assertEqual('output_1.csv', output_name) def test_parse_file_name_list_2(self): file_name_list = ['output_1.xml', 'output_2.xml', 'my_analysis.csv'] list_of_input_names, output_name = robot_profiler.parse_file_name_list(file_name_list) self.assertListEqual(file_name_list[0:-1], list_of_input_names) self.assertEqual('my_analysis.csv', output_name) def test_parse_file_name_list_3(self): with self.assertRaises(AssertionError): robot_profiler.parse_file_name_list('output.xml') def test_parse_file_name_list_4(self): file_name_list = ['output.xml'] list_of_input_names, output_name = robot_profiler.parse_file_name_list(file_name_list) self.assertListEqual(file_name_list, list_of_input_names) self.assertEqual('output.csv', output_name) if __name__ == '__main__': unittest.main()
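# ---------------------------------------------------------------------------
# Sketch of the robot_profiler API exercised above. Return values are inferred
# from the assertions in this file, not from separate documentation:
#
#     keywords = robot_profiler.analyse_output_xml(['output.xml'])
#     # -> {'BuiltIn.Sleep': [timedelta(seconds=1), timedelta(seconds=2)], ...}
#
#     summary = robot_profiler.evaluate_durations(keywords)
#     # -> {'BuiltIn.Sleep': [2, timedelta(seconds=3), <per-call statistic>], ...}
#
#     robot_profiler.create_output_line('BuiltIn.Sleep', 2,
#                                       timedelta(seconds=2),
#                                       timedelta(seconds=1), ';')
#     # -> 'BuiltIn.Sleep;2;2;1'   (durations rendered in seconds)
# ---------------------------------------------------------------------------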
""" Tools for doing common subexpression elimination. """ from collections import OrderedDict from sympy.core import Basic, Mul, Add, sympify from sympy.core.basic import preorder_traversal from sympy.core.function import _coeff_isneg from sympy.core.compatibility import iterable from sympy.utilities.iterables import numbered_symbols, \ sift, topological_sort, ordered from sympy.simplify import cse_opts # (preprocessor, postprocessor) pairs which are commonly useful. They should # each take a sympy expression and return a possibly transformed expression. # When used in the function ``cse()``, the target expressions will be transformed # by each of the preprocessor functions in order. After the common # subexpressions are eliminated, each resulting expression will have the # postprocessor functions transform them in *reverse* order in order to undo the # transformation if necessary. This allows the algorithm to operate on # a representation of the expressions that allows for more optimization # opportunities. # ``None`` can be used to specify no transformation for either the preprocessor or # postprocessor. cse_optimizations = list(cse_opts.default_optimizations) # sometimes we want the output in a different format; non-trivial # transformations can be put here for users # =============================================================== def reps_toposort(r): """Sort replacements `r` so (k1, v1) appears before (k2, v2) if k2 is in v1's free symbols. This orders items in the way that cse returns its results (hence, in order to use the replacements in a substitution option it would make sense to reverse the order). Examples ======== >>> from sympy.simplify.cse_main import reps_toposort >>> from sympy.abc import x, y >>> from sympy import Eq >>> for l, r in reps_toposort([(x, y + 1), (y, 2)]): ... print Eq(l, r) ... y == 2 x == y + 1 """ r = sympify(r) E = [] for c1, (k1, v1) in enumerate(r): for c2, (k2, v2) in enumerate(r): if k1 in v2.free_symbols: E.append((c1, c2)) return [r[i] for i in topological_sort((range(len(r)), E))] def cse_separate(r, e): """Move expressions that are in the form (symbol, expr) out of the expressions and sort them into the replacements using the reps_toposort. Examples ======== >>> from sympy.simplify.cse_main import cse_separate >>> from sympy.abc import x, y, z >>> from sympy import cos, exp, cse, Eq, symbols >>> x0, x1 = symbols('x:2') >>> eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1)) >>> cse([eq, Eq(x, z + 1), z - 2], postprocess=cse_separate) in [ ... [[(x0, y + 1), (x, z + 1), (x1, x + 1)], ... [x1 + exp(x1/x0) + cos(x0), z - 2]], ... [[(x1, y + 1), (x, z + 1), (x0, x + 1)], ... [x0 + exp(x0/x1) + cos(x1), z - 2]]] ... True """ d = sift(e, lambda w: w.is_Equality and w.lhs.is_Symbol) r = r + [w.args for w in d[True]] e = d[False] return [reps_toposort(r), e] # ====end of cse postprocess idioms=========================== def preprocess_for_cse(expr, optimizations): """ Preprocess an expression to optimize for common subexpression elimination. Parameters ---------- expr : sympy expression The target expression to optimize. optimizations : list of (callable, callable) pairs The (preprocessor, postprocessor) pairs. Returns ------- expr : sympy expression The transformed expression. """ for pre, post in optimizations: if pre is not None: expr = pre(expr) return expr def postprocess_for_cse(expr, optimizations): """ Postprocess an expression after common subexpression elimination to return the expression to canonical sympy form. 
Parameters ---------- expr : sympy expression The target expression to transform. optimizations : list of (callable, callable) pairs, optional The (preprocessor, postprocessor) pairs. The postprocessors will be applied in reversed order to undo the effects of the preprocessors correctly. Returns ------- expr : sympy expression The transformed expression. """ if optimizations is None: optimizations = cse_optimizations for pre, post in reversed(optimizations): if post is not None: expr = post(expr) return expr def _remove_singletons(reps, exprs): """ Helper function for cse that will remove expressions that weren't used more than once. """ u_reps = [] # the useful reps that are used more than once for i, ui in enumerate(reps): used = [] # where it was used ri, ei = ui # keep track of whether the substitution was used more # than once. If used is None, it was never used (yet); # if used is an int, that is the last place where it was # used (>=0 in the reps, <0 in the expressions) and if # it is True, it was used more than once. used = None tot = 0 # total times used so far # search through the reps for j in range(i + 1, len(reps)): c = reps[j][1].count(ri) if c: tot += c if tot > 1: u_reps.append(ui) used = True break else: used = j if used is not True: # then search through the expressions for j, rj in enumerate(exprs): c = rj.count(ri) if c: # append a negative so we know that it was in the # expression that used it tot += c if tot > 1: u_reps.append(ui) used = True break else: used = j - len(exprs) if type(used) is int: # undo the change rep = {ri: ei} j = used if j < 0: exprs[j] = exprs[j].subs(rep) else: reps[j] = reps[j][0], reps[j][1].subs(rep) # reuse unused symbols so a contiguous range of symbols is returned if len(u_reps) != len(reps): for i, ri in enumerate(u_reps): if u_reps[i][0] != reps[i][0]: rep = (u_reps[i][0], reps[i][0]) u_reps[i] = rep[1], u_reps[i][1].subs(*rep) for j in range(i + 1, len(u_reps)): u_reps[j] = u_reps[j][0], u_reps[j][1].subs(*rep) for j, rj in enumerate(exprs): exprs[j] = exprs[j].subs(*rep) reps[:] = u_reps # change happens in-place def cse(exprs, symbols=None, optimizations=None, postprocess=None): """ Perform common subexpression elimination on an expression. Parameters ========== exprs : list of sympy expressions, or a single sympy expression The expressions to reduce. symbols : infinite iterator yielding unique Symbols The symbols used to label the common subexpressions which are pulled out. The ``numbered_symbols`` generator is useful. The default is a stream of symbols of the form "x0", "x1", etc. This must be an infinite iterator. optimizations : list of (callable, callable) pairs, optional The (preprocessor, postprocessor) pairs. If not provided, ``sympy.simplify.cse.cse_optimizations`` is used. postprocess : a function which accepts the two return values of cse and returns the desired form of output from cse, e.g. if you want the replacements reversed the function might be the following lambda: lambda r, e: return reversed(r), e Returns ======= replacements : list of (Symbol, expression) pairs All of the common subexpressions that were replaced. Subexpressions earlier in this list might show up in subexpressions later in this list. reduced_exprs : list of sympy expressions The reduced expressions with all of the replacements above. """ from sympy.matrices import Matrix if symbols is None: symbols = numbered_symbols() else: # In case we get passed an iterable with an __iter__ method instead of # an actual iterator. 
symbols = iter(symbols) tmp_symbols = numbered_symbols('_csetmp') subexp_iv = dict() muls = set() adds = set() if optimizations is None: # Pull out the default here just in case there are some weird # manipulations of the module-level list in some other thread. optimizations = list(cse_optimizations) # Handle the case if just one expression was passed. if isinstance(exprs, Basic): exprs = [exprs] # Preprocess the expressions to give us better optimization opportunities. prep_exprs = [preprocess_for_cse(e, optimizations) for e in exprs] # Find all subexpressions. def _parse(expr): if expr.is_Atom: # Exclude atoms, since there is no point in renaming them. return expr if iterable(expr): return expr subexpr = type(expr)(*map(_parse, expr.args)) if subexpr in subexp_iv: return subexp_iv[subexpr] if subexpr.is_Mul: muls.add(subexpr) elif subexpr.is_Add: adds.add(subexpr) ivar = next(tmp_symbols) subexp_iv[subexpr] = ivar return ivar tmp_exprs = list() for expr in prep_exprs: if isinstance(expr, Basic): tmp_exprs.append(_parse(expr)) else: tmp_exprs.append(expr) # process adds - any adds that weren't repeated might contain # subpatterns that are repeated, e.g. x+y+z and x+y have x+y in common adds = list(ordered(adds)) addargs = [set(a.args) for a in adds] for i in xrange(len(addargs)): for j in xrange(i + 1, len(addargs)): com = addargs[i].intersection(addargs[j]) if len(com) > 1: add_subexp = Add(*com) diff_add_i = addargs[i].difference(com) diff_add_j = addargs[j].difference(com) if add_subexp in subexp_iv: ivar = subexp_iv[add_subexp] else: ivar = next(tmp_symbols) subexp_iv[add_subexp] = ivar if diff_add_i: newadd = Add(ivar,*diff_add_i) subexp_iv[newadd] = subexp_iv.pop(adds[i]) adds[i] = newadd #else add_i is itself subexp_iv[add_subexp] -> ivar if diff_add_j: newadd = Add(ivar,*diff_add_j) subexp_iv[newadd] = subexp_iv.pop(adds[j]) adds[j] = newadd #else add_j is itself subexp_iv[add_subexp] -> ivar addargs[i] = diff_add_i addargs[j] = diff_add_j for k in xrange(j + 1, len(addargs)): if com.issubset(addargs[k]): diff_add_k = addargs[k].difference(com) if diff_add_k: newadd = Add(ivar,*diff_add_k) subexp_iv[newadd] = subexp_iv.pop(adds[k]) adds[k] = newadd #else add_k is itself subexp_iv[add_subexp] -> ivar addargs[k] = diff_add_k # process muls - any muls that weren't repeated might contain # subpatterns that are repeated, e.g. 
x*y*z and x*y have x*y in common # *assumes that there are no non-commutative parts* muls = list(ordered(muls)) mulargs = [set(a.args) for a in muls] for i in xrange(len(mulargs)): for j in xrange(i + 1, len(mulargs)): com = mulargs[i].intersection(mulargs[j]) if len(com) > 1: mul_subexp = Mul(*com) diff_mul_i = mulargs[i].difference(com) diff_mul_j = mulargs[j].difference(com) if mul_subexp in subexp_iv: ivar = subexp_iv[mul_subexp] else: ivar = next(tmp_symbols) subexp_iv[mul_subexp] = ivar if diff_mul_i: newmul = Mul(ivar,*diff_mul_i) subexp_iv[newmul] = subexp_iv.pop(muls[i]) muls[i] = newmul #else mul_i is itself subexp_iv[mul_subexp] -> ivar if diff_mul_j: newmul = Mul(ivar,*diff_mul_j) subexp_iv[newmul] = subexp_iv.pop(muls[j]) muls[j] = newmul #else mul_j is itself subexp_iv[mul_subexp] -> ivar mulargs[i] = diff_mul_i mulargs[j] = diff_mul_j for k in xrange(j + 1, len(mulargs)): if com.issubset(mulargs[k]): diff_mul_k = mulargs[k].difference(com) if diff_mul_k: newmul = Mul(ivar,*diff_mul_k) subexp_iv[newmul] = subexp_iv.pop(muls[k]) muls[k] = newmul #else mul_k is itself subexp_iv[mul_subexp] -> ivar mulargs[k] = diff_mul_k # Find all of the repeated subexpressions. ivar_se = {iv:se for se,iv in subexp_iv.iteritems()} used_ivs = set() repeated = set() def _find_repeated_subexprs(subexpr): if subexpr.is_Atom: symbs = [subexpr] else: symbs = subexpr.args for symb in symbs: if symb in ivar_se: if symb not in used_ivs: _find_repeated_subexprs(ivar_se[symb]) used_ivs.add(symb) else: repeated.add(symb) for expr in tmp_exprs: _find_repeated_subexprs(expr) # Substitute symbols for all of the repeated subexpressions. # remove temporary replacements that weren't used more than once tmpivs_ivs = dict() ordered_iv_se = OrderedDict() def _get_subexprs(args): args = list(args) for i,symb in enumerate(args): if symb in ivar_se: if symb in tmpivs_ivs: args[i] = tmpivs_ivs[symb] else: subexpr = ivar_se[symb] subexpr = type(subexpr)(*_get_subexprs(subexpr.args)) if symb in repeated: ivar = next(symbols) ordered_iv_se[ivar] = subexpr tmpivs_ivs[symb] = ivar args[i] = ivar else: args[i] = subexpr return args out_exprs = _get_subexprs(tmp_exprs) # Postprocess the expressions to return the expressions to canonical form. ordered_iv_se_notopt = ordered_iv_se ordered_iv_se = OrderedDict() for i, (ivar, subexpr) in enumerate(ordered_iv_se_notopt.items()): subexpr = postprocess_for_cse(subexpr, optimizations) ordered_iv_se[ivar] = subexpr out_exprs = [postprocess_for_cse(e, optimizations) for e in out_exprs] if isinstance(exprs, Matrix): out_exprs = Matrix(exprs.rows, exprs.cols, out_exprs) if postprocess is None: return ordered_iv_se.items(), out_exprs return postprocess(ordered_iv_se.items(), out_exprs)
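# ---------------------------------------------------------------------------
# Minimal usage sketch for cse() as defined above. Symbol names come from the
# default numbered_symbols() stream; exact ordering may differ between
# versions:
#
#     >>> from sympy import symbols, cos
#     >>> x, y = symbols('x y')
#     >>> replacements, reduced = cse([(x + y)**2 + cos(x + y)])
#     >>> replacements
#     [(x0, x + y)]
#     >>> reduced
#     [x0**2 + cos(x0)]
# ---------------------------------------------------------------------------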
# Copyright (c) 2012-2013 ARM Limited # All rights reserved # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Copyright (c) 2006-2008 The Regents of The University of Michigan # Copyright (c) 2010 Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Lisa Hsu import sys from os import getcwd from os.path import join as joinpath from common import CpuConfig from common import MemConfig import m5 from m5.defines import buildEnv from m5.objects import * from m5.util import * addToPath('../common') def getCPUClass(cpu_type): """Returns the required cpu class and the mode of operation.""" cls = CpuConfig.get(cpu_type) return cls, cls.memory_mode() def setCPUClass(options): """Returns two cpu classes and the initial mode of operation. Restoring from a checkpoint or fast forwarding through a benchmark can be done using one type of cpu, and then the actual simulation can be carried out using another type. This function returns these two types of cpus and the initial mode of operation depending on the options provided. 
""" TmpClass, test_mem_mode = getCPUClass(options.cpu_type) CPUClass = None if TmpClass.require_caches() and \ not options.caches and not options.ruby: fatal("%s must be used with caches" % options.cpu_type) if options.checkpoint_restore != None: if options.restore_with_cpu != options.cpu_type: CPUClass = TmpClass TmpClass, test_mem_mode = getCPUClass(options.restore_with_cpu) elif options.fast_forward: CPUClass = TmpClass TmpClass = AtomicSimpleCPU test_mem_mode = 'atomic' return (TmpClass, test_mem_mode, CPUClass) def setMemClass(options): """Returns a memory controller class.""" return MemConfig.get(options.mem_type) def setWorkCountOptions(system, options): if options.work_item_id != None: system.work_item_id = options.work_item_id if options.num_work_ids != None: system.num_work_ids = options.num_work_ids if options.work_begin_cpu_id_exit != None: system.work_begin_cpu_id_exit = options.work_begin_cpu_id_exit if options.work_end_exit_count != None: system.work_end_exit_count = options.work_end_exit_count if options.work_end_checkpoint_count != None: system.work_end_ckpt_count = options.work_end_checkpoint_count if options.work_begin_exit_count != None: system.work_begin_exit_count = options.work_begin_exit_count if options.work_begin_checkpoint_count != None: system.work_begin_ckpt_count = options.work_begin_checkpoint_count if options.work_cpus_checkpoint_count != None: system.work_cpus_ckpt_count = options.work_cpus_checkpoint_count def findCptDir(options, cptdir, testsys): """Figures out the directory from which the checkpointed state is read. There are two different ways in which the directories holding checkpoints can be named -- 1. cpt.<benchmark name>.<instruction count when the checkpoint was taken> 2. cpt.<some number, usually the tick value when the checkpoint was taken> This function parses through the options to figure out which one of the above should be used for selecting the checkpoint, and then figures out the appropriate directory. 
""" from os.path import isdir, exists from os import listdir import re if not isdir(cptdir): fatal("checkpoint dir %s does not exist!", cptdir) cpt_starttick = 0 if options.at_instruction or options.simpoint: inst = options.checkpoint_restore if options.simpoint: # assume workload 0 has the simpoint if testsys.cpu[0].workload[0].simpoint == 0: fatal('Unable to find simpoint') inst += int(testsys.cpu[0].workload[0].simpoint) checkpoint_dir = joinpath(cptdir, "cpt.%s.%s" % (options.bench, inst)) if not exists(checkpoint_dir): fatal("Unable to find checkpoint directory %s", checkpoint_dir) elif options.restore_simpoint_checkpoint: # Restore from SimPoint checkpoints # Assumes that the checkpoint dir names are formatted as follows: dirs = listdir(cptdir) expr = re.compile('cpt\.simpoint_(\d+)_inst_(\d+)' + '_weight_([\d\.e\-]+)_interval_(\d+)_warmup_(\d+)') cpts = [] for dir in dirs: match = expr.match(dir) if match: cpts.append(dir) cpts.sort() cpt_num = options.checkpoint_restore if cpt_num > len(cpts): fatal('Checkpoint %d not found', cpt_num) checkpoint_dir = joinpath(cptdir, cpts[cpt_num - 1]) match = expr.match(cpts[cpt_num - 1]) if match: index = int(match.group(1)) start_inst = int(match.group(2)) weight_inst = float(match.group(3)) interval_length = int(match.group(4)) warmup_length = int(match.group(5)) print "Resuming from", checkpoint_dir simpoint_start_insts = [] simpoint_start_insts.append(warmup_length) simpoint_start_insts.append(warmup_length + interval_length) testsys.cpu[0].simpoint_start_insts = simpoint_start_insts if testsys.switch_cpus != None: testsys.switch_cpus[0].simpoint_start_insts = simpoint_start_insts print "Resuming from SimPoint", print "#%d, start_inst:%d, weight:%f, interval:%d, warmup:%d" % \ (index, start_inst, weight_inst, interval_length, warmup_length) else: dirs = listdir(cptdir) expr = re.compile('cpt\.([0-9]+)') cpts = [] for dir in dirs: match = expr.match(dir) if match: cpts.append(match.group(1)) cpts.sort(lambda a,b: cmp(long(a), long(b))) cpt_num = options.checkpoint_restore if cpt_num > len(cpts): fatal('Checkpoint %d not found', cpt_num) cpt_starttick = int(cpts[cpt_num - 1]) checkpoint_dir = joinpath(cptdir, "cpt.%s" % cpts[cpt_num - 1]) return cpt_starttick, checkpoint_dir def scriptCheckpoints(options, maxtick, cptdir): if options.at_instruction or options.simpoint: checkpoint_inst = int(options.take_checkpoints) # maintain correct offset if we restored from some instruction if options.checkpoint_restore != None: checkpoint_inst += options.checkpoint_restore print "Creating checkpoint at inst:%d" % (checkpoint_inst) exit_event = m5.simulate() exit_cause = exit_event.getCause() print "exit cause = %s" % exit_cause # skip checkpoint instructions should they exist while exit_cause == "checkpoint": exit_event = m5.simulate() exit_cause = exit_event.getCause() if exit_cause == "a thread reached the max instruction count": m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \ (options.bench, checkpoint_inst))) print "Checkpoint written." 
else: when, period = options.take_checkpoints.split(",", 1) when = int(when) period = int(period) num_checkpoints = 0 exit_event = m5.simulate(when - m5.curTick()) exit_cause = exit_event.getCause() while exit_cause == "checkpoint": exit_event = m5.simulate(when - m5.curTick()) exit_cause = exit_event.getCause() if exit_cause == "simulate() limit reached": m5.checkpoint(joinpath(cptdir, "cpt.%d")) num_checkpoints += 1 sim_ticks = when max_checkpoints = options.max_checkpoints while num_checkpoints < max_checkpoints and \ exit_cause == "simulate() limit reached": if (sim_ticks + period) > maxtick: exit_event = m5.simulate(maxtick - sim_ticks) exit_cause = exit_event.getCause() break else: exit_event = m5.simulate(period) exit_cause = exit_event.getCause() sim_ticks += period while exit_event.getCause() == "checkpoint": exit_event = m5.simulate(sim_ticks - m5.curTick()) if exit_event.getCause() == "simulate() limit reached": m5.checkpoint(joinpath(cptdir, "cpt.%d")) num_checkpoints += 1 return exit_event def benchCheckpoints(options, maxtick, cptdir): exit_event = m5.simulate(maxtick - m5.curTick()) exit_cause = exit_event.getCause() num_checkpoints = 0 max_checkpoints = options.max_checkpoints while exit_cause == "checkpoint": m5.checkpoint(joinpath(cptdir, "cpt.%d")) num_checkpoints += 1 if num_checkpoints == max_checkpoints: exit_cause = "maximum %d checkpoints dropped" % max_checkpoints break exit_event = m5.simulate(maxtick - m5.curTick()) exit_cause = exit_event.getCause() return exit_event # Set up environment for taking SimPoint checkpoints # Expecting SimPoint files generated by SimPoint 3.2 def parseSimpointAnalysisFile(options, testsys): import re simpoint_filename, weight_filename, interval_length, warmup_length = \ options.take_simpoint_checkpoints.split(",", 3) print "simpoint analysis file:", simpoint_filename print "simpoint weight file:", weight_filename print "interval length:", interval_length print "warmup length:", warmup_length interval_length = int(interval_length) warmup_length = int(warmup_length) # Simpoint analysis output starts interval counts with 0. 
simpoints = [] simpoint_start_insts = [] # Read in SimPoint analysis files simpoint_file = open(simpoint_filename) weight_file = open(weight_filename) while True: line = simpoint_file.readline() if not line: break m = re.match("(\d+)\s+(\d+)", line) if m: interval = int(m.group(1)) else: fatal('unrecognized line in simpoint file!') line = weight_file.readline() if not line: fatal('not enough lines in simpoint weight file!') m = re.match("([0-9\.e\-]+)\s+(\d+)", line) if m: weight = float(m.group(1)) else: fatal('unrecognized line in simpoint weight file!') if (interval * interval_length - warmup_length > 0): starting_inst_count = \ interval * interval_length - warmup_length actual_warmup_length = warmup_length else: # Not enough room for proper warmup # Just starting from the beginning starting_inst_count = 0 actual_warmup_length = interval * interval_length simpoints.append((interval, weight, starting_inst_count, actual_warmup_length)) # Sort SimPoints by starting inst count simpoints.sort(key=lambda obj: obj[2]) for s in simpoints: interval, weight, starting_inst_count, actual_warmup_length = s print str(interval), str(weight), starting_inst_count, \ actual_warmup_length simpoint_start_insts.append(starting_inst_count) print "Total # of simpoints:", len(simpoints) testsys.cpu[0].simpoint_start_insts = simpoint_start_insts return (simpoints, interval_length) def takeSimpointCheckpoints(simpoints, interval_length, cptdir): num_checkpoints = 0 index = 0 last_chkpnt_inst_count = -1 for simpoint in simpoints: interval, weight, starting_inst_count, actual_warmup_length = simpoint if starting_inst_count == last_chkpnt_inst_count: # checkpoint starting point same as last time # (when warmup period longer than starting point) exit_cause = "simpoint starting point found" code = 0 else: exit_event = m5.simulate() # skip checkpoint instructions should they exist while exit_event.getCause() == "checkpoint": print "Found 'checkpoint' exit event...ignoring..." exit_event = m5.simulate() exit_cause = exit_event.getCause() code = exit_event.getCode() if exit_cause == "simpoint starting point found": m5.checkpoint(joinpath(cptdir, "cpt.simpoint_%02d_inst_%d_weight_%f_interval_%d_warmup_%d" % (index, starting_inst_count, weight, interval_length, actual_warmup_length))) print "Checkpoint #%d written. start inst:%d weight:%f" % \ (num_checkpoints, starting_inst_count, weight) num_checkpoints += 1 last_chkpnt_inst_count = starting_inst_count else: break index += 1 print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_cause) print "%d checkpoints taken" % num_checkpoints sys.exit(code) def restoreSimpointCheckpoint(): exit_event = m5.simulate() exit_cause = exit_event.getCause() if exit_cause == "simpoint starting point found": print "Warmed up! Dumping and resetting stats!" m5.stats.dump() m5.stats.reset() exit_event = m5.simulate() exit_cause = exit_event.getCause() if exit_cause == "simpoint starting point found": print "Done running SimPoint!" 
sys.exit(exit_event.getCode()) print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_cause) sys.exit(exit_event.getCode()) def repeatSwitch(testsys, repeat_switch_cpu_list, maxtick, switch_freq): print "starting switch loop" while True: exit_event = m5.simulate(switch_freq) exit_cause = exit_event.getCause() if exit_cause != "simulate() limit reached": return exit_event m5.switchCpus(testsys, repeat_switch_cpu_list) tmp_cpu_list = [] for old_cpu, new_cpu in repeat_switch_cpu_list: tmp_cpu_list.append((new_cpu, old_cpu)) repeat_switch_cpu_list = tmp_cpu_list if (maxtick - m5.curTick()) <= switch_freq: exit_event = m5.simulate(maxtick - m5.curTick()) return exit_event def run(options, root, testsys, cpu_class): if options.checkpoint_dir: cptdir = options.checkpoint_dir elif m5.options.outdir: cptdir = m5.options.outdir else: cptdir = getcwd() if options.fast_forward and options.checkpoint_restore != None: fatal("Can't specify both --fast-forward and --checkpoint-restore") if options.standard_switch and not options.caches: fatal("Must specify --caches when using --standard-switch") if options.standard_switch and options.repeat_switch: fatal("Can't specify both --standard-switch and --repeat-switch") if options.repeat_switch and options.take_checkpoints: fatal("Can't specify both --repeat-switch and --take-checkpoints") np = options.num_cpus switch_cpus = None if options.prog_interval: for i in xrange(np): testsys.cpu[i].progress_interval = options.prog_interval if options.maxinsts: for i in xrange(np): testsys.cpu[i].max_insts_any_thread = options.maxinsts if cpu_class: switch_cpus = [cpu_class(switched_out=True, cpu_id=(i)) for i in xrange(np)] for i in xrange(np): if options.fast_forward: testsys.cpu[i].max_insts_any_thread = int(options.fast_forward) switch_cpus[i].system = testsys switch_cpus[i].workload = testsys.cpu[i].workload switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain switch_cpus[i].progress_interval = \ testsys.cpu[i].progress_interval # simulation period if options.maxinsts: switch_cpus[i].max_insts_any_thread = options.maxinsts # Add checker cpu if selected if options.checker: switch_cpus[i].addCheckerCpu() # If elastic tracing is enabled attach the elastic trace probe # to the switch CPUs if options.elastic_trace_en: CpuConfig.config_etrace(cpu_class, switch_cpus, options) testsys.switch_cpus = switch_cpus switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)] if options.repeat_switch: switch_class = getCPUClass(options.cpu_type)[0] if switch_class.require_caches() and \ not options.caches: print "%s: Must be used with caches" % str(switch_class) sys.exit(1) if not switch_class.support_take_over(): print "%s: CPU switching not supported" % str(switch_class) sys.exit(1) repeat_switch_cpus = [switch_class(switched_out=True, \ cpu_id=(i)) for i in xrange(np)] for i in xrange(np): repeat_switch_cpus[i].system = testsys repeat_switch_cpus[i].workload = testsys.cpu[i].workload repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain if options.maxinsts: repeat_switch_cpus[i].max_insts_any_thread = options.maxinsts if options.checker: repeat_switch_cpus[i].addCheckerCpu() testsys.repeat_switch_cpus = repeat_switch_cpus if cpu_class: repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i]) for i in xrange(np)] else: repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i]) for i in xrange(np)] if options.standard_switch: switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i)) for i in xrange(np)] switch_cpus_1 = 
[DerivO3CPU(switched_out=True, cpu_id=(i)) for i in xrange(np)] for i in xrange(np): switch_cpus[i].system = testsys switch_cpus_1[i].system = testsys switch_cpus[i].workload = testsys.cpu[i].workload switch_cpus_1[i].workload = testsys.cpu[i].workload switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain switch_cpus_1[i].clk_domain = testsys.cpu[i].clk_domain # if restoring, make atomic cpu simulate only a few instructions if options.checkpoint_restore != None: testsys.cpu[i].max_insts_any_thread = 1 # Fast forward to specified location if we are not restoring elif options.fast_forward: testsys.cpu[i].max_insts_any_thread = int(options.fast_forward) # Fast forward to a simpoint (warning: time consuming) elif options.simpoint: if testsys.cpu[i].workload[0].simpoint == 0: fatal('simpoint not found') testsys.cpu[i].max_insts_any_thread = \ testsys.cpu[i].workload[0].simpoint # No distance specified, just switch else: testsys.cpu[i].max_insts_any_thread = 1 # warmup period if options.warmup_insts: switch_cpus[i].max_insts_any_thread = options.warmup_insts # simulation period if options.maxinsts: switch_cpus_1[i].max_insts_any_thread = options.maxinsts # attach the checker cpu if selected if options.checker: switch_cpus[i].addCheckerCpu() switch_cpus_1[i].addCheckerCpu() testsys.switch_cpus = switch_cpus testsys.switch_cpus_1 = switch_cpus_1 switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)] switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)] # set the checkpoint in the cpu before m5.instantiate is called if options.take_checkpoints != None and \ (options.simpoint or options.at_instruction): offset = int(options.take_checkpoints) # Set an instruction break point if options.simpoint: for i in xrange(np): if testsys.cpu[i].workload[0].simpoint == 0: fatal('no simpoint for testsys.cpu[%d].workload[0]', i) checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset testsys.cpu[i].max_insts_any_thread = checkpoint_inst # used for output below options.take_checkpoints = checkpoint_inst else: options.take_checkpoints = offset # Set all test cpus with the right number of instructions # for the upcoming simulation for i in xrange(np): testsys.cpu[i].max_insts_any_thread = offset if options.take_simpoint_checkpoints != None: simpoints, interval_length = parseSimpointAnalysisFile(options, testsys) checkpoint_dir = None if options.checkpoint_restore: cpt_starttick, checkpoint_dir = findCptDir(options, cptdir, testsys) m5.instantiate(checkpoint_dir) # Initialization is complete. If we're not in control of simulation # (that is, if we're a slave simulator acting as a component in another # 'master' simulator) then we're done here. The other simulator will # call simulate() directly. --initialize-only is used to indicate this. 
if options.initialize_only: return # Handle the max tick settings now that tick frequency was resolved # during system instantiation # NOTE: the maxtick variable here is in absolute ticks, so it must # include any simulated ticks before a checkpoint explicit_maxticks = 0 maxtick_from_abs = m5.MaxTick maxtick_from_rel = m5.MaxTick maxtick_from_maxtime = m5.MaxTick if options.abs_max_tick: maxtick_from_abs = options.abs_max_tick explicit_maxticks += 1 if options.rel_max_tick: maxtick_from_rel = options.rel_max_tick if options.checkpoint_restore: # NOTE: this may need to be updated if checkpoints ever store # the ticks per simulated second maxtick_from_rel += cpt_starttick if options.at_instruction or options.simpoint: warn("Relative max tick specified with --at-instruction or" \ " --simpoint\n These options don't specify the " \ "checkpoint start tick, so assuming\n you mean " \ "absolute max tick") explicit_maxticks += 1 if options.maxtime: maxtick_from_maxtime = m5.ticks.fromSeconds(options.maxtime) explicit_maxticks += 1 if explicit_maxticks > 1: warn("Specified multiple of --abs-max-tick, --rel-max-tick, --maxtime."\ " Using least") maxtick = min([maxtick_from_abs, maxtick_from_rel, maxtick_from_maxtime]) if options.checkpoint_restore != None and maxtick < cpt_starttick: fatal("Bad maxtick (%d) specified: " \ "Checkpoint starts starts from tick: %d", maxtick, cpt_starttick) if options.standard_switch or cpu_class: if options.standard_switch: print "Switch at instruction count:%s" % \ str(testsys.cpu[0].max_insts_any_thread) exit_event = m5.simulate() elif cpu_class and options.fast_forward: print "Switch at instruction count:%s" % \ str(testsys.cpu[0].max_insts_any_thread) exit_event = m5.simulate() else: print "Switch at curTick count:%s" % str(10000) exit_event = m5.simulate(10000) print "Switched CPUS @ tick %s" % (m5.curTick()) m5.switchCpus(testsys, switch_cpu_list) if options.standard_switch: print "Switch at instruction count:%d" % \ (testsys.switch_cpus[0].max_insts_any_thread) #warmup instruction count may have already been set if options.warmup_insts: exit_event = m5.simulate() else: exit_event = m5.simulate(options.standard_switch) print "Switching CPUS @ tick %s" % (m5.curTick()) print "Simulation ends instruction count:%d" % \ (testsys.switch_cpus_1[0].max_insts_any_thread) m5.switchCpus(testsys, switch_cpu_list1) # If we're taking and restoring checkpoints, use checkpoint_dir # option only for finding the checkpoints to restore from. This # lets us test checkpointing by restoring from one set of # checkpoints, generating a second set, and then comparing them. if (options.take_checkpoints or options.take_simpoint_checkpoints) \ and options.checkpoint_restore: if m5.options.outdir: cptdir = m5.options.outdir else: cptdir = getcwd() if options.take_checkpoints != None : # Checkpoints being taken via the command line at <when> and at # subsequent periods of <period>. Checkpoint instructions # received from the benchmark running are ignored and skipped in # favor of command line checkpoint instructions. 
        exit_event = scriptCheckpoints(options, maxtick, cptdir)

    # Take SimPoint checkpoints
    elif options.take_simpoint_checkpoints != None:
        takeSimpointCheckpoints(simpoints, interval_length, cptdir)

    # Restore from SimPoint checkpoints
    elif options.restore_simpoint_checkpoint != None:
        restoreSimpointCheckpoint()

    else:
        if options.fast_forward:
            m5.stats.reset()
        print "**** REAL SIMULATION ****"

        # If checkpoints are being taken, then the checkpoint instruction
        # will occur in the benchmark code itself.
        if options.repeat_switch and maxtick > options.repeat_switch:
            exit_event = repeatSwitch(testsys, repeat_switch_cpu_list,
                                      maxtick, options.repeat_switch)
        else:
            exit_event = benchCheckpoints(options, maxtick, cptdir)

    print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())

    if options.checkpoint_at_end:
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))

    if not m5.options.interactive:
        sys.exit(exit_event.getCode())
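
# --- Hedged illustration (not part of the script above) ---------------------
# A minimal, self-contained sketch of the "use the most restrictive limit"
# rule that run() applies when --abs-max-tick, --rel-max-tick and --maxtime
# are combined.  MAX_TICK and ticks_per_second are stand-ins for m5.MaxTick
# and m5.ticks.fromSeconds(); the real script can resolve both only after the
# system has been instantiated.

MAX_TICK = 2**63 - 1  # stand-in for m5.MaxTick


def resolve_maxtick(abs_max_tick=None, rel_max_tick=None, maxtime=None,
                    cpt_starttick=0, ticks_per_second=10**12):
    candidates = [MAX_TICK]
    if abs_max_tick:
        candidates.append(abs_max_tick)
    if rel_max_tick:
        # Relative ticks count from the restored checkpoint, so shift them
        # into absolute ticks before comparing.
        candidates.append(rel_max_tick + cpt_starttick)
    if maxtime:
        candidates.append(int(maxtime * ticks_per_second))
    return min(candidates)

# Example: resolve_maxtick(abs_max_tick=10**9, maxtime=0.0005) == 500000000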
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'PyConTutorialProposal.cte_tutorial_id' db.add_column(u'pycon_pycontutorialproposal', 'cte_tutorial_id', self.gf('django.db.models.fields.CharField')(default='', max_length=150, blank=True), keep_default=False) # Adding field 'PyConTutorialProposal.max_attendees' db.add_column(u'pycon_pycontutorialproposal', 'max_attendees', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'PyConTutorialProposal.cte_tutorial_id' db.delete_column(u'pycon_pycontutorialproposal', 'cte_tutorial_id') # Deleting field 'PyConTutorialProposal.max_attendees' db.delete_column(u'pycon_pycontutorialproposal', 'max_attendees') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'conference.conference': { 'Meta': {'object_name': 'Conference'}, 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 
'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'conference.section': { 'Meta': {'object_name': 'Section'}, 'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'proposals.additionalspeaker': { 'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}), 'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}) }, u'proposals.proposalbase': { 'Meta': {'object_name': 'ProposalBase'}, 'abstract': ('django.db.models.fields.TextField', [], {}), 'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}), 'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}), 'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}), 'submitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'proposals.proposalkind': { 'Meta': {'object_name': 'ProposalKind'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposal_kinds'", 'to': u"orm['conference.Section']"}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, u'pycon.pyconlightningtalkproposal': { 'Meta': {'object_name': 'PyConLightningTalkProposal'}, 'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'audience_level': ('django.db.models.fields.IntegerField', [], {}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': 
u"orm['pycon.PyConProposalCategory']"}), 'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}), 'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, u'pycon.pyconposterproposal': { 'Meta': {'object_name': 'PyConPosterProposal'}, 'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'audience_level': ('django.db.models.fields.IntegerField', [], {}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}), 'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}), 'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, u'pycon.pyconproposalcategory': { 'Meta': {'object_name': 'PyConProposalCategory'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, u'pycon.pyconsponsortutorialproposal': { 'Meta': {'object_name': 'PyConSponsorTutorialProposal', '_ormbases': [u'proposals.ProposalBase']}, u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}) }, u'pycon.pycontalkproposal': { 'Meta': {'object_name': 'PyConTalkProposal'}, 'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'audience_level': ('django.db.models.fields.IntegerField', [], {}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}), 'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'duration': ('django.db.models.fields.IntegerField', [], {}), 'outline': ('django.db.models.fields.TextField', [], {}), 'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '400'}), u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}), 'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, u'pycon.pycontutorialproposal': { 'Meta': {'object_name': 'PyConTutorialProposal'}, 'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'audience_level': ('django.db.models.fields.IntegerField', [], {}), 'category': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}), 'cte_tutorial_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}), 'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'domain_level': ('django.db.models.fields.IntegerField', [], {}), 'handout': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'max_attendees': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'more_info': ('django.db.models.fields.TextField', [], {}), 'outline': ('django.db.models.fields.TextField', [], {}), 'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}), u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}), 'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'registrants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, u'speakers.speaker': { 'Meta': {'object_name': 'Speaker'}, 'annotation': ('django.db.models.fields.TextField', [], {}), 'biography': ('django.db.models.fields.TextField', [], {}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invite_email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'db_index': 'True'}), 'invite_token': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'sessions_preference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}) }, u'taggit.tag': { 'Meta': {'object_name': 'Tag'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, u'taggit.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"}) } } complete_apps = ['pycon']
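
# --- Hedged illustration (not an actual migration in this app) --------------
# A later South migration that added another optional column to
# PyConTutorialProposal would follow the same add_column/delete_column pattern
# as the migration above.  The field name 'example_notes' and the class name
# are hypothetical and used purely for illustration.
from south.db import db
from south.v2 import SchemaMigration


class ExampleFollowUpMigration(SchemaMigration):

    def forwards(self, orm):
        db.add_column(u'pycon_pycontutorialproposal', 'example_notes',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

    def backwards(self, orm):
        db.delete_column(u'pycon_pycontutorialproposal', 'example_notes')

# Applied, as with any South migration, via:  python manage.py migrate pycon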
import os, time import json from datetime import datetime, tzinfo from django.core.exceptions import MultipleObjectsReturned from django.conf import settings from django.contrib.auth.models import User, Group from rest_framework.decorators import detail_route from rest_framework import viewsets from rest_framework import permissions from rest_framework import status from rest_framework.response import Response from rest_framework.parsers import FormParser from rest_framework.parsers import MultiPartParser from .serializers import * from .models import * from allauth.account.adapter import get_adapter from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.oauth2.client import OAuth2Client from rest_auth.registration.views import SocialLoginView from rest_framework.views import APIView from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.views import LoginView from rest_auth.social_serializers import TwitterLoginSerializer from django.http import HttpResponse, HttpResponseRedirect import requests from requests_oauthlib import OAuth1 from urlparse import parse_qs from requests_oauthlib import OAuth2Session from requests_oauthlib.compliance_fixes import facebook_compliance_fix from oauthlib.oauth2 import BackendApplicationClient from oauthlib.oauth2 import WebApplicationClient from janitor import * from buckets import facebook from buckets import twitter class UpdatePassword(APIView): """ An endpoint for changing password. """ permission_classes = (permissions.IsAuthenticated, ) def get_object(self, queryset=None): return self.request.user def put(self, request, *args, **kwargs): self.object = self.get_object() serializer = ChangePasswordSerializer(data=request.data) if serializer.is_valid(): # Check old password old_password = serializer.data.get("old_password") if not self.object.check_password(old_password): return Response({"old_password": ["Wrong password."]}, status=status.HTTP_400_BAD_REQUEST) # set_password also hashes the password that the user will get self.object.set_password(serializer.data.get("new_password")) self.object.save() return Response({'message': 'success'}, status=status.HTTP_200_OK) #Response(status=status.HTTP_204_NO_CONTENT) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class FacebookLogin(SocialLoginView): """ Facebook login for system user accounts """ adapter_class = FacebookOAuth2Adapter client_class = OAuth2Client callback_url = os.environ["OAUTH2_REDIRECT_URI"] + "?chan=facebook" serializer_class = SocialLoginSerializer def process_login(self): get_adapter(self.request).login(self.request, self.user) class TwitterLogin(LoginView): adapter_class = TwitterOAuthAdapter #client_class = OAuth2Client #callback_url = os.environ["OAUTH2_REDIRECT_URI"] + "?chan=facebook" serializer_class = TwitterLoginSerializer def process_login(self): get_adapter(self.request).login(self.request, self.user) class UserViewSet(viewsets.ModelViewSet): """ Allows system users to be viewed or edited. 
""" queryset = User.objects.all().order_by('-date_joined') serializer_class = UserSerializer lookup_field = 'username' permission_classes = (permissions.IsAuthenticatedOrReadOnly,) class PostViewSet(viewsets.ModelViewSet): """ Scheduled posts """ permission_classes = (permissions.IsAuthenticatedOrReadOnly,) queryset = Post.objects.all() serializer_class = PostSerializer class PhaseViewSet(viewsets.ModelViewSet): """ Topics, these belong to one or many projects """ queryset = Phase.objects.all() serializer_class = PhaseSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) # def update(self, request, *args, **kwargs): # #def update(self, request, pk=None): # pk=kwargs['pk'] # print('Update phase id ='+pk) # #print('ob j =',getattr(self.get_object(),'description')) # instance = self.get_object() # serializer = self.get_serializer(instance, data=request.data, partial=False) # serializer.is_valid(raise_exception=True) # serializer.save() # return Response(status=status.HTTP_200_OK, statusText='success') class CampaignViewSet(viewsets.ModelViewSet): """ Marketing projects or campaignsi, these hold many topics """ queryset = Campaign.objects.all() serializer_class = CampaignSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) class SocialAccountViewSet(viewsets.ModelViewSet): """ Managed social accounts """ queryset = SocialAccount.objects.all() serializer_class = SocialAccountSerializer permission_classes = (permissions.AllowAny,) class SocialGroupViewSet(viewsets.ModelViewSet): """ Managed social accounts """ queryset = SocialGroup.objects.all() serializer_class = SocialGroupSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) class SocialAccountGroupViewSet(viewsets.ModelViewSet): """ Groups of social accounts """ queryset = SocialAccountGroup.objects.all() serializer_class = SocialAccountGroupSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) @detail_route() def search(self, request, *args, **kwargs): ag = self.filter(name="algo") class BucketViewSet(viewsets.ModelViewSet): """ Marketing channels registry """ queryset = Bucket.objects.all() serializer_class = BucketSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) class StaticPageViewSet(viewsets.ModelViewSet): """ Customizable system static pages """ queryset = StaticPage.objects.all() serializer_class = StaticPageSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) class ImageStoreViewSet(viewsets.ModelViewSet): """ Image store vireset """ queryset = ImageStore.objects.all() serializer_class = ImageStoreSerializer parser_classes = (FormParser, MultiPartParser) permission_classes = (permissions.IsAuthenticatedOrReadOnly,) def perform_create(self, serializer): serializer.save(image = self.request.data.get('image')) class PagesTokenViewSet(viewsets.ModelViewSet): """ Customizable system static pages """ queryset = PagesToken.objects.all() serializer_class = PagesTokenSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) def twitter_auth(request): """ Twitter login of Follower accounts only """ bucket = twitter.Twitter() oauth = bucket.get_oauthsession() url = oauth.get_authorization_url() request.session['request_token'] = oauth.request_token return HttpResponseRedirect(url) def callback(request): """ Generic (perhaps) callback endpoint for any social network when registering Follower accounts. 
""" # Set to 0 for production, 1 is for development only os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' # Only process if a chan is present if 'chan' in request.GET: user = None bucket = None ## Facebook callback handling if (request.GET.get("chan") == "facebook"): bucket = facebook.Facebook() elif (request.GET.get("chan") == "twitter"): bucket = twitter.Twitter() request_token = request.session['request_token'] del request.session['request_token'] bucket.request_token = request_token bucket.verifier = request.GET.get('oauth_verifier') oauth = bucket.get_oauth2session() token = bucket.get_token(request.get_full_path()) user = bucket.get_user_detail() if user is not None: # Check for existing social account before save try: obj, created = SocialAccount.objects.get_or_create( bucket_id = user["id"], bucket = bucket.tagname, defaults = { 'username': user["name"], 'email': user["email"], 'image_link': user["image"], 'access_token': token, #json.JSONEncoder().encode(token), 'token_expiration': datetime.fromtimestamp(token["expires_in"]) if "expires_in" in token else None, } ) if(created is False): obj.access_token = token obj.token_expiration = datetime.fromtimestamp(token["expires_in"]) if "expires_in" in token else None parameter="?action=success" except MultipleObjectsReturned: # TODO do the logging thing parameter="?action=error" # social_account = SocialAccount( # username = user["name"], # email = user["email"], # bucket_id = user["id"], # image_link = user["image"], # access_token = token, #json.JSONEncoder().encode(token), # token_expiration = datetime.fromtimestamp(token["expires_in"]) # if "expires_in" in token else None, # bucket = bucket.tagname) # social_account.save() elif 'login' in request.GET: response = HttpResponse('<script type="text/javascript">window.close(); </script>') if 'denied' in request.GET: response.set_cookie('tw_denied', 'Access Denied', domain=settings.SESSION_COOKIE_DOMAIN) else: request_key = request.session['tw_request_token_key'] request_secret = request.session['tw_request_token_secret'] del request.session['tw_request_token_key'] del request.session['tw_request_token_secret'] token_verifier = request.GET.get('oauth_verifier') oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request_key, resource_owner_secret=request_secret, verifier=token_verifier, ) r = requests.post(url=settings.TWITTER_ACCESS_TOKEN_URL, auth=oauth) credentials = parse_qs(r.content) #response.set_cookie('tw_response', r.text, domain=settings.SESSION_COOKIE_DOMAIN) response.set_cookie('tw_access_token', credentials.get('oauth_token')[0], domain=settings.SESSION_COOKIE_DOMAIN) response.set_cookie('tw_access_token_secret', credentials.get('oauth_token_secret')[0], domain=settings.SESSION_COOKIE_DOMAIN) response.set_cookie('tw_bucket_id', credentials.get('user_id')[0], domain=settings.SESSION_COOKIE_DOMAIN) return response else: parameter="?action=error" return HttpResponseRedirect(os.environ["SEMITKI_LANDING"]+parameter) def publish_now(request, pk): """ Publish a new post """ staff = False page = False if ("staff" in request.GET): staff = True if ("page" in request.GET): page = True return HttpResponse(stuff_it(pk, staff, page)) def fb_exchange_token(request): """ DEPRECATED Exchange a short lived facebook token for a long lived one """ fb = facebook.Facebook() r = requests.get(fb.token_url + '?grant_type=fb_exchange_token' + '&client_id=' + settings.SOCIAL_AUTH_FACEBOOK_KEY + '&client_secret=' + 
settings.SOCIAL_AUTH_FACEBOOK_SECRET + '&fb_exchange_token=' + request.GET['access_token']) return HttpResponse(r.text) def tw_request_token(request): ts = str(int(time.time())) oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, timestamp = ts) payload = {'oauth_callback': os.environ["OAUTH2_REDIRECT_URI"]+"?login=twitter"} r = requests.post(url=settings.TWITTER_REQUEST_TOKEN_URL, auth=oauth, params=payload) h = r.headers out = r.text credentials = parse_qs(r.content) request.session['tw_request_token_key'] = credentials.get('oauth_token')[0] request.session['tw_request_token_secret'] = credentials.get('oauth_token_secret')[0] authorize_url = settings.TWITTER_AUTHENTICATE_URL + credentials.get('oauth_token')[0] return HttpResponseRedirect(authorize_url)
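
# --- Hedged illustration (not wired into any URLconf) -----------------------
# The three-legged OAuth1 exchange that tw_request_token() and callback()
# implement above, condensed into a single helper.  The consumer key/secret
# and access-token URL are plain parameters here; in the real views they come
# from settings.SOCIAL_AUTH_TWITTER_* and settings.TWITTER_ACCESS_TOKEN_URL.
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs


def exchange_twitter_tokens(consumer_key, consumer_secret, access_token_url,
                            request_key, request_secret, verifier):
    # Final leg of the flow: trade the (request token, verifier) pair
    # collected in the callback for the long-lived access-token credentials.
    oauth = OAuth1(consumer_key,
                   client_secret=consumer_secret,
                   resource_owner_key=request_key,
                   resource_owner_secret=request_secret,
                   verifier=verifier)
    response = requests.post(url=access_token_url, auth=oauth)
    credentials = parse_qs(response.content)
    return (credentials.get('oauth_token')[0],
            credentials.get('oauth_token_secret')[0])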
#!/usr/bin/python3 # Copyright 2014 ETH Zurich # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :mod:`base` --- SCION certificate server ======================================== """ # Stdlib import datetime import logging import threading # External packages from nacl.exceptions import CryptoError from prometheus_client import Counter, Gauge # SCION from external.expiring_dict import ExpiringDict from lib.crypto.asymcrypto import get_enc_key, get_sig_key from lib.crypto.certificate_chain import CertificateChain, verify_sig_chain_trc from lib.crypto.trc import TRC from lib.crypto.symcrypto import crypto_hash from lib.crypto.symcrypto import kdf from lib.defines import CERTIFICATE_SERVICE, GEN_CACHE_PATH from lib.drkey.drkey_mgmt import ( DRKeyMgmt, DRKeyReply, DRKeyRequest, ) from lib.drkey.suite import ( decrypt_drkey, drkey_signing_input_req, get_drkey_reply, get_drkey_request, get_signing_input_rep, ) from lib.drkey.types import DRKeySecretValue, FirstOrderDRKey from lib.drkey.util import drkey_time, get_drkey_exp_time from lib.errors import SCIONVerificationError from lib.main import main_default, main_wrapper from lib.packet.cert_mgmt import ( CertMgmt, CertChainReply, CertChainRequest, TRCReply, TRCRequest, ) from lib.packet.ctrl_pld import CtrlPayload, mk_ctrl_req_id from lib.packet.svc import SVCType from lib.requests import RequestHandler from lib.thread import thread_safety_net from lib.types import ( CertMgmtType, DRKeyMgmtType, PayloadClass, ) from lib.util import ( SCIONTime, sleep_interval, ) from lib.zk.cache import ZkSharedCache from lib.zk.errors import ZkNoConnection from lib.zk.id import ZkID from lib.zk.zk import ZK_LOCK_SUCCESS, Zookeeper from scion_elem.scion_elem import SCIONElement # Exported metrics. REQS_TOTAL = Counter("cs_requests_total", "# of total requests", ["server_id", "isd_as", "type"]) IS_MASTER = Gauge("cs_is_master", "true if this process is the replication master", ["server_id", "isd_as"]) # Max amount of DRKey secret values. 1 current, 1 prefetch, 1 buffer. DRKEY_MAX_SV = 3 # Max TTL of first order DRKey. 1 Day prefetch, 1 Day current. DRKEY_MAX_TTL = datetime.timedelta(days=2).total_seconds() # Max number of stored first order DRKeys DRKEY_MAX_KEYS = 10**6 # Timeout for first order DRKey requests DRKEY_REQUEST_TIMEOUT = 5 class CertServer(SCIONElement): """ The SCION Certificate Server. """ SERVICE_TYPE = CERTIFICATE_SERVICE # ZK path for incoming cert chains ZK_CC_CACHE_PATH = "cert_chain_cache" # ZK path for incoming TRCs ZK_TRC_CACHE_PATH = "trc_cache" ZK_DRKEY_PATH = "drkey_cache" def __init__(self, server_id, conf_dir, spki_cache_dir=GEN_CACHE_PATH, prom_export=None): """ :param str server_id: server identifier. :param str conf_dir: configuration directory. :param str prom_export: prometheus export address. 
""" super().__init__(server_id, conf_dir, spki_cache_dir=spki_cache_dir, prom_export=prom_export) self.config = self._load_as_conf() cc_labels = {**self._labels, "type": "cc"} if self._labels else None trc_labels = {**self._labels, "type": "trc"} if self._labels else None drkey_labels = {**self._labels, "type": "drkey"} if self._labels else None self.cc_requests = RequestHandler.start( "CC Requests", self._check_cc, self._fetch_cc, self._reply_cc, labels=cc_labels, ) self.trc_requests = RequestHandler.start( "TRC Requests", self._check_trc, self._fetch_trc, self._reply_trc, labels=trc_labels, ) self.drkey_protocol_requests = RequestHandler.start( "DRKey Requests", self._check_drkey, self._fetch_drkey, self._reply_proto_drkey, labels=drkey_labels, ) self.CTRL_PLD_CLASS_MAP = { PayloadClass.CERT: { CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request, CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply, CertMgmtType.TRC_REQ: self.process_trc_request, CertMgmtType.TRC_REPLY: self.process_trc_reply, }, PayloadClass.DRKEY: { DRKeyMgmtType.FIRST_ORDER_REQUEST: self.process_drkey_request, DRKeyMgmtType.FIRST_ORDER_REPLY: self.process_drkey_reply, }, } zkid = ZkID.from_values(self.addr.isd_as, self.id, [(self.addr.host, self._port)]).pack() self.zk = Zookeeper(self.topology.isd_as, CERTIFICATE_SERVICE, zkid, self.topology.zookeepers) self.zk.retry("Joining party", self.zk.party_setup) self.trc_cache = ZkSharedCache(self.zk, self.ZK_TRC_CACHE_PATH, self._cached_trcs_handler) self.cc_cache = ZkSharedCache(self.zk, self.ZK_CC_CACHE_PATH, self._cached_certs_handler) self.drkey_cache = ZkSharedCache(self.zk, self.ZK_DRKEY_PATH, self._cached_drkeys_handler) self.signing_key = get_sig_key(self.conf_dir) self.private_key = get_enc_key(self.conf_dir) self.drkey_secrets = ExpiringDict(DRKEY_MAX_SV, DRKEY_MAX_TTL) self.first_order_drkeys = ExpiringDict(DRKEY_MAX_KEYS, DRKEY_MAX_TTL) def worker(self): """ Worker thread that takes care of reading shared entries from ZK, and handling master election. """ worker_cycle = 1.0 start = SCIONTime.get_time() while self.run_flag.is_set(): sleep_interval(start, worker_cycle, "CS.worker cycle", self._quiet_startup()) start = SCIONTime.get_time() # Update IS_MASTER metric. if self._labels: IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock())) try: self.zk.wait_connected() self.trc_cache.process() self.cc_cache.process() self.drkey_cache.process() # Try to become a master. ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0) if ret: # Either got the lock, or already had it. if ret == ZK_LOCK_SUCCESS: logging.info("Became master") self.trc_cache.expire(worker_cycle * 10) self.cc_cache.expire(worker_cycle * 10) self.drkey_cache.expire(worker_cycle * 10) except ZkNoConnection: logging.warning('worker(): ZkNoConnection') pass def _cached_trcs_handler(self, raw_entries): """ Handles cached (through ZK) TRCs, passed as a list. """ for raw in raw_entries: trc = TRC.from_raw(raw.decode('utf-8')) rep = CtrlPayload(CertMgmt(TRCReply.from_values(trc))) self.process_trc_reply(rep, None, from_zk=True) if len(raw_entries) > 0: logging.debug("Processed %s trcs from ZK", len(raw_entries)) def _cached_certs_handler(self, raw_entries): """ Handles cached (through ZK) chains, passed as a list. 
""" for raw in raw_entries: cert = CertificateChain.from_raw(raw.decode('utf-8')) rep = CtrlPayload(CertMgmt(CertChainReply.from_values(cert))) self.process_cert_chain_reply(rep, None, from_zk=True) if len(raw_entries) > 0: logging.debug("Processed %s certs from ZK", len(raw_entries)) def _cached_drkeys_handler(self, raw_entries): for raw in raw_entries: msg = CtrlPayload(DRKeyMgmt(DRKeyReply.from_raw(raw))) self.process_drkey_reply(msg, None, from_zk=True) def _share_object(self, pld, is_trc): """ Share path segments (via ZK) with other path servers. """ pld_packed = pld.pack() pld_hash = crypto_hash(pld_packed).hex() try: if is_trc: self.trc_cache.store("%s-%s" % (pld_hash, SCIONTime.get_time()), pld_packed) else: self.cc_cache.store("%s-%s" % (pld_hash, SCIONTime.get_time()), pld_packed) except ZkNoConnection: logging.warning("Unable to store %s in shared path: " "no connection to ZK" % "TRC" if is_trc else "CC") return logging.debug("%s stored in ZK: %s" % ("TRC" if is_trc else "CC", pld_hash)) def process_cert_chain_request(self, cpld, meta): """Process a certificate chain request.""" cmgt = cpld.union req = cmgt.union assert isinstance(req, CertChainRequest), type(req) key = req.isd_as(), req.p.version logging.info("Cert chain request received for %sv%s from %s", *key, meta) REQS_TOTAL.labels(**self._labels, type="cc").inc() local = meta.ia == self.addr.isd_as if not self._check_cc(key): if not local: logging.warning( "Dropping CC request from %s for %sv%s: " "CC not found && requester is not local)", meta, *key) else: self.cc_requests.put((key, (meta, req, cpld.req_id))) return self._reply_cc(key, (meta, req, cpld.req_id)) def process_cert_chain_reply(self, cpld, meta, from_zk=False): """Process a certificate chain reply.""" cmgt = cpld.union rep = cmgt.union assert isinstance(rep, CertChainReply), type(rep) ia_ver = rep.chain.get_leaf_isd_as_ver() logging.info("Cert chain reply received for %sv%s (ZK: %s)" % (ia_ver[0], ia_ver[1], from_zk)) self.trust_store.add_cert(rep.chain) if not from_zk: self._share_object(rep.chain, is_trc=False) # Reply to all requests for this certificate chain self.cc_requests.put((ia_ver, None)) def _check_cc(self, key): isd_as, ver = key ver = None if ver == CertChainRequest.NEWEST_VERSION else ver cert_chain = self.trust_store.get_cert(isd_as, ver) if cert_chain: return True logging.debug('Cert chain not found for %sv%s', *key) return False def _fetch_cc(self, key, req_info): # Do not attempt to fetch the CertChain from a remote AS if the cacheOnly flag is set. 
_, orig_req, _ = req_info if orig_req.p.cacheOnly: return self._send_cc_request(*key) def _send_cc_request(self, isd_as, ver): req = CertChainRequest.from_values(isd_as, ver, cache_only=True) path_meta = self._get_path_via_sciond(isd_as) if path_meta: meta = self._build_meta(isd_as, host=SVCType.CS_A, path=path_meta.fwd_path()) req_id = mk_ctrl_req_id() self.send_meta(CtrlPayload(CertMgmt(req), req_id=req_id), meta) logging.info("Cert chain request sent to %s via [%s]: %s [id: %016x]", meta, path_meta.short_desc(), req.short_desc(), req_id) else: logging.warning("Cert chain request (for %s) not sent: " "no path found", req.short_desc()) def _reply_cc(self, key, req_info): isd_as, ver = key ver = None if ver == CertChainRequest.NEWEST_VERSION else ver meta = req_info[0] req_id = req_info[2] cert_chain = self.trust_store.get_cert(isd_as, ver) self.send_meta( CtrlPayload(CertMgmt(CertChainReply.from_values(cert_chain)), req_id=req_id), meta) logging.info("Cert chain for %sv%s sent to %s [id: %016x]", isd_as, ver, meta, req_id) def process_trc_request(self, cpld, meta): """Process a TRC request.""" cmgt = cpld.union req = cmgt.union assert isinstance(req, TRCRequest), type(req) key = req.isd_as()[0], req.p.version logging.info("TRC request received for %sv%s from %s [id: %s]", *key, meta, cpld.req_id_str()) REQS_TOTAL.labels(**self._labels, type="trc").inc() local = meta.ia == self.addr.isd_as if not self._check_trc(key): if not local: logging.warning( "Dropping TRC request from %s for %sv%s: " "TRC not found && requester is not local)", meta, *key) else: self.trc_requests.put((key, (meta, req, cpld.req_id))) return self._reply_trc(key, (meta, req, cpld.req_id)) def process_trc_reply(self, cpld, meta, from_zk=False): """ Process a TRC reply. :param trc_rep: TRC reply. :type trc_rep: TRCReply """ cmgt = cpld.union trc_rep = cmgt.union assert isinstance(trc_rep, TRCReply), type(trc_rep) isd, ver = trc_rep.trc.get_isd_ver() logging.info("TRCReply received for ISD %sv%s, ZK: %s [id: %s]", isd, ver, from_zk, cpld.req_id_str()) self.trust_store.add_trc(trc_rep.trc) if not from_zk: self._share_object(trc_rep.trc, is_trc=True) # Reply to all requests for this TRC self.trc_requests.put(((isd, ver), None)) def _check_trc(self, key): isd, ver = key ver = None if ver == TRCRequest.NEWEST_VERSION else ver trc = self.trust_store.get_trc(isd, ver) if trc: return True logging.debug('TRC not found for %sv%s', *key) return False def _fetch_trc(self, key, req_info): # Do not attempt to fetch the TRC from a remote AS if the cacheOnly flag is set. 
_, orig_req, _ = req_info if orig_req.p.cacheOnly: return self._send_trc_request(*key) def _send_trc_request(self, isd, ver): trc_req = TRCRequest.from_values(isd, ver, cache_only=True) path_meta = self._get_path_via_sciond(trc_req.isd_as()) if path_meta: meta = self._build_meta( path_meta.dst_ia(), host=SVCType.CS_A, path=path_meta.fwd_path()) req_id = mk_ctrl_req_id() self.send_meta(CtrlPayload(CertMgmt(trc_req), req_id=req_id), meta) logging.info("TRC request sent to %s via [%s]: %s [id: %016x]", meta, path_meta.short_desc(), trc_req.short_desc(), req_id) else: logging.warning("TRC request not sent for %s: no path found.", trc_req.short_desc()) def _reply_trc(self, key, req_info): isd, ver = key ver = None if ver == TRCRequest.NEWEST_VERSION else ver meta = req_info[0] req_id = req_info[2] trc = self.trust_store.get_trc(isd, ver) self.send_meta(CtrlPayload(CertMgmt(TRCReply.from_values(trc)), req_id=req_id), meta) logging.info("TRC for %sv%s sent to %s [id: %016x]", isd, ver, meta, req_id) def process_drkey_request(self, cpld, meta): """ Process first order DRKey requests from other ASes. :param DRKeyRequest req: the DRKey request :param UDPMetadata meta: the metadata """ dpld = cpld.union req = dpld.union assert isinstance(req, DRKeyRequest), type(req) logging.info("DRKeyRequest received from %s: %s [id: %s]", meta, req.short_desc(), cpld.req_id_str()) REQS_TOTAL.labels(**self._labels, type="drkey").inc() try: cert = self._verify_drkey_request(req, meta) except SCIONVerificationError as e: logging.warning("Invalid DRKeyRequest from %s. Reason %s: %s", meta, e, req.short_desc()) return sv = self._get_drkey_secret(get_drkey_exp_time(req.p.flags.prefetch)) cert_version = self.trust_store.get_cert(self.addr.isd_as).certs[0].version trc_version = self.trust_store.get_trc(self.addr.isd_as[0]).version rep = get_drkey_reply(sv, self.addr.isd_as, meta.ia, self.private_key, self.signing_key, cert_version, cert, trc_version) self.send_meta(CtrlPayload(DRKeyMgmt(rep), req_id=cpld.req_id), meta) logging.info("DRKeyReply sent to %s: %s [id: %s]", meta, req.short_desc(), cpld.req_id_str()) def _verify_drkey_request(self, req, meta): """ Verify that the first order DRKey request is legit. I.e. the signature is valid, the correct ISD AS is queried, timestamp is recent. :param DRKeyRequest req: the first order DRKey request. :param UDPMetadata meta: the metadata. :returns Certificate of the requester. :rtype: Certificate :raises: SCIONVerificationError """ if self.addr.isd_as != req.isd_as: raise SCIONVerificationError("Request for other ISD-AS: %s" % req.isd_as) if drkey_time() - req.p.timestamp > DRKEY_REQUEST_TIMEOUT: raise SCIONVerificationError("Expired request from %s. %ss old. 
Max %ss" % ( meta.ia, drkey_time() - req.p.timestamp, DRKEY_REQUEST_TIMEOUT)) trc = self.trust_store.get_trc(meta.ia[0]) chain = self.trust_store.get_cert(meta.ia, req.p.certVer) err = [] if not chain: self._send_cc_request(meta.ia, req.p.certVer) err.append("Certificate not present for %s(v: %s)" % (meta.ia, req.p.certVer)) if not trc: self._send_trc_request(meta.ia[0], req.p.trcVer) err.append("TRC not present for %s(v: %s)" % (meta.ia[0], req.p.trcVer)) if err: raise SCIONVerificationError(", ".join(err)) raw = drkey_signing_input_req(req.isd_as, req.p.flags.prefetch, req.p.timestamp) try: verify_sig_chain_trc(raw, req.p.signature, meta.ia, chain, trc) except SCIONVerificationError as e: raise SCIONVerificationError(str(e)) return chain.certs[0] def process_drkey_reply(self, cpld, meta, from_zk=False): """ Process first order DRKey reply from other ASes. :param DRKeyReply rep: the received DRKey reply :param UDPMetadata meta: the metadata :param Bool from_zk: if the reply has been received from Zookeeper """ dpld = cpld.union rep = dpld.union assert isinstance(rep, DRKeyReply), type(rep) logging.info("DRKeyReply received from %s: %s [id: %s]", meta, rep.short_desc(), cpld.req_id_str()) src = meta or "ZK" try: cert = self._verify_drkey_reply(rep, meta) raw = decrypt_drkey(rep.p.cipher, self.private_key, cert.subject_enc_key_raw) except SCIONVerificationError as e: logging.info("Invalid DRKeyReply from %s. Reason %s: %s", src, e, rep.short_desc()) return except CryptoError as e: logging.info("Unable to decrypt DRKeyReply from %s. Reason %s: %s", src, e, rep.short_desc()) return drkey = FirstOrderDRKey(rep.isd_as, self.addr.isd_as, rep.p.expTime, raw) self.first_order_drkeys[drkey] = drkey if not from_zk: pld_packed = rep.copy().pack() try: self.drkey_cache.store("%s-%s" % (rep.isd_as, rep.p.expTime), pld_packed) except ZkNoConnection: logging.warning("Unable to store DRKey for %s in shared path: " "no connection to ZK" % rep.isd_as) return self.drkey_protocol_requests.put((drkey, None)) def _verify_drkey_reply(self, rep, meta): """ Verify that the first order DRKey reply is legit. I.e. the signature matches, timestamp is recent. :param DRKeyReply rep: the first order DRKey reply. :param UDPMetadata meta: the metadata. :returns Certificate of the responder. :rtype: Certificate :raises: SCIONVerificationError """ if meta and meta.ia != rep.isd_as: raise SCIONVerificationError("Response from other ISD-AS: %s" % rep.isd_as) if drkey_time() - rep.p.timestamp > DRKEY_REQUEST_TIMEOUT: raise SCIONVerificationError("Expired reply from %s. %ss old. Max %ss" % ( rep.isd_as, drkey_time() - rep.p.timestamp, DRKEY_REQUEST_TIMEOUT)) trc = self.trust_store.get_trc(rep.isd_as[0]) chain = self.trust_store.get_cert(rep.isd_as, rep.p.certVerSrc) err = [] if not chain: self._send_cc_request(rep.isd_as, rep.p.certVerSrc) err.append("Certificate not present for %s(v: %s)" % (rep.isd_as, rep.p.certVerSrc)) if not trc: self._send_trc_request(rep.isd_as[0], rep.p.trcVer) err.append("TRC not present for %s(v: %s)" % (rep.isd_as[0], rep.p.trcVer)) if err: raise SCIONVerificationError(", ".join(err)) raw = get_signing_input_rep(rep.isd_as, rep.p.timestamp, rep.p.expTime, rep.p.cipher) try: verify_sig_chain_trc(raw, rep.p.signature, rep.isd_as, chain, trc) except SCIONVerificationError as e: raise SCIONVerificationError(str(e)) return chain.certs[0] def _check_drkey(self, drkey): """ Check if first order DRKey with the same (SrcIA, DstIA, expTime) is available. :param FirstOrderDRKey drkey: the searched DRKey. 
:returns: if the the first order DRKey is available. :rtype: Bool """ if drkey in self.first_order_drkeys: return True return False def _fetch_drkey(self, drkey, _): """ Fetch missing first order DRKey with the same (SrcIA, DstIA, expTime). :param FirstOrderDRKey drkey: The missing DRKey. """ cert = self.trust_store.get_cert(self.addr.isd_as) trc = self.trust_store.get_trc(self.addr.isd_as[0]) if not cert or not trc: logging.warning("DRKeyRequest for %s not sent. Own CertChain/TRC not present.", drkey.src_ia) return req = get_drkey_request(drkey.src_ia, False, self.signing_key, cert.certs[0].version, trc.version) path_meta = self._get_path_via_sciond(drkey.src_ia) if path_meta: meta = self._build_meta(drkey.src_ia, host=SVCType.CS_A, path=path_meta.fwd_path()) req_id = mk_ctrl_req_id() self.send_meta(CtrlPayload(DRKeyMgmt(req)), meta) logging.info("DRKeyRequest (%s) sent to %s via %s [id: %016x]", req.short_desc(), meta, path_meta, req_id) else: logging.warning("DRKeyRequest (for %s) not sent", req.short_desc()) def _reply_proto_drkey(self, drkey, meta): pass # TODO(roosd): implement in future PR def _get_drkey_secret(self, exp_time): """ Get the drkey secret. A new secret is initialized if no secret is found. :param int exp_time: expiration time of the drkey secret :return: the according drkey secret :rtype: DRKeySecretValue """ sv = self.drkey_secrets.get(exp_time) if not sv: sv = DRKeySecretValue(kdf(self.config.master_as_key, b"Derive DRKey Key"), exp_time) self.drkey_secrets[sv.exp_time] = sv return sv def _init_metrics(self): super()._init_metrics() for type_ in ("trc", "cc", "drkey"): REQS_TOTAL.labels(**self._labels, type=type_).inc(0) IS_MASTER.labels(**self._labels).set(0) def run(self): """ Run an instance of the Cert Server. """ threading.Thread( target=thread_safety_net, args=(self.worker,), name="CS.worker", daemon=True).start() super().run() if __name__ == "__main__": main_wrapper(main_default, CertServer)
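
# --- Hedged illustration (standalone sketch, not used by CertServer) --------
# The secret-value rotation pattern behind CertServer._get_drkey_secret():
# secrets are keyed by their expiration time and created lazily on first use.
# A plain dict stands in for ExpiringDict and HMAC-SHA256 stands in for
# lib.crypto.symcrypto.kdf; both substitutions are assumptions made only for
# this illustration.
import hashlib
import hmac


class SecretValueCache:
    def __init__(self, master_key):
        self._master_key = master_key
        self._secrets = {}  # exp_time -> derived secret value

    def get(self, exp_time):
        sv = self._secrets.get(exp_time)
        if sv is None:
            msg = b"Derive DRKey Key" + str(exp_time).encode("ascii")
            sv = hmac.new(self._master_key, msg, hashlib.sha256).digest()
            self._secrets[exp_time] = sv
        return sv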
import copy import warnings from django.conf import compat_patch_logging_config from django.core import mail from django.test import TestCase, RequestFactory from django.test.utils import override_settings from django.utils.log import CallbackFilter, RequireDebugFalse, getLogger # logging config prior to using filter with mail_admins OLD_LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } class PatchLoggingConfigTest(TestCase): """ Tests for backward-compat shim for #16288. These tests should be removed in Django 1.6 when that shim and DeprecationWarning are removed. """ def test_filter_added(self): """ Test that debug-false filter is added to mail_admins handler if it has no filters. """ config = copy.deepcopy(OLD_LOGGING) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") compat_patch_logging_config(config) self.assertEqual(len(w), 1) self.assertEqual( config["handlers"]["mail_admins"]["filters"], ['require_debug_false']) def test_filter_configuration(self): """ Test that the auto-added require_debug_false filter is an instance of `RequireDebugFalse` filter class. """ config = copy.deepcopy(OLD_LOGGING) with warnings.catch_warnings(record=True): compat_patch_logging_config(config) flt = config["filters"]["require_debug_false"] self.assertEqual(flt["()"], "django.utils.log.RequireDebugFalse") def test_require_debug_false_filter(self): """ Test the RequireDebugFalse filter class. """ filter_ = RequireDebugFalse() with self.settings(DEBUG=True): self.assertEqual(filter_.filter("record is not used"), False) with self.settings(DEBUG=False): self.assertEqual(filter_.filter("record is not used"), True) def test_no_patch_if_filters_key_exists(self): """ Test that the logging configuration is not modified if the mail_admins handler already has a "filters" key. """ config = copy.deepcopy(OLD_LOGGING) config["handlers"]["mail_admins"]["filters"] = [] new_config = copy.deepcopy(config) compat_patch_logging_config(new_config) self.assertEqual(config, new_config) def test_no_patch_if_no_mail_admins_handler(self): """ Test that the logging configuration is not modified if the mail_admins handler is not present. """ config = copy.deepcopy(OLD_LOGGING) config["handlers"].pop("mail_admins") new_config = copy.deepcopy(config) compat_patch_logging_config(new_config) self.assertEqual(config, new_config) class CallbackFilterTest(TestCase): def test_sense(self): f_false = CallbackFilter(lambda r: False) f_true = CallbackFilter(lambda r: True) self.assertEqual(f_false.filter("record"), False) self.assertEqual(f_true.filter("record"), True) def test_passes_on_record(self): collector = [] def _callback(record): collector.append(record) return True f = CallbackFilter(_callback) f.filter("a record") self.assertEqual(collector, ["a record"]) class AdminEmailHandlerTest(TestCase): def get_admin_email_handler(self, logger): # Inspired from regressiontests/views/views.py: send_log() # ensuring the AdminEmailHandler does not get filtered out # even with DEBUG=True. 
admin_email_handler = [ h for h in logger.handlers if h.__class__.__name__ == "AdminEmailHandler" ][0] return admin_email_handler @override_settings( ADMINS=(('whatever admin', 'admin@example.com'),), EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-' ) def test_accepts_args(self): """ Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX setting are used to compose the email subject. Refs #16736. """ message = "Custom message that says '%s' and '%s'" token1 = 'ping' token2 = 'pong' logger = getLogger('django.request') admin_email_handler = self.get_admin_email_handler(logger) # Backup then override original filters orig_filters = admin_email_handler.filters try: admin_email_handler.filters = [] logger.error(message, token1, token2) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['admin@example.com']) self.assertEqual(mail.outbox[0].subject, "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'") finally: # Restore original filters admin_email_handler.filters = orig_filters @override_settings( ADMINS=(('whatever admin', 'admin@example.com'),), EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-', INTERNAL_IPS=('127.0.0.1',), ) def test_accepts_args_and_request(self): """ Ensure that the subject is also handled if being passed a request object. """ message = "Custom message that says '%s' and '%s'" token1 = 'ping' token2 = 'pong' logger = getLogger('django.request') admin_email_handler = self.get_admin_email_handler(logger) # Backup then override original filters orig_filters = admin_email_handler.filters try: admin_email_handler.filters = [] rf = RequestFactory() request = rf.get('/') logger.error(message, token1, token2, extra={ 'status_code': 403, 'request': request, } ) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['admin@example.com']) self.assertEqual(mail.outbox[0].subject, "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'") finally: # Restore original filters admin_email_handler.filters = orig_filters @override_settings( ADMINS=(('admin', 'admin@example.com'),), EMAIL_SUBJECT_PREFIX='', DEBUG=False, ) def test_subject_accepts_newlines(self): """ Ensure that newlines in email reports' subjects are escaped to avoid AdminErrorHandler to fail. Refs #17281. """ message = u'Message \r\n with newlines' expected_subject = u'ERROR: Message \\r\\n with newlines' self.assertEqual(len(mail.outbox), 0) logger = getLogger('django.request') logger.error(message) self.assertEqual(len(mail.outbox), 1) self.assertFalse('\n' in mail.outbox[0].subject) self.assertFalse('\r' in mail.outbox[0].subject) self.assertEqual(mail.outbox[0].subject, expected_subject) @override_settings( ADMINS=(('admin', 'admin@example.com'),), EMAIL_SUBJECT_PREFIX='', DEBUG=False, ) def test_truncate_subject(self): """ RFC 2822's hard limit is 998 characters per line. So, minus "Subject: ", the actual subject must be no longer than 989 characters. Refs #17281. """ message = 'a' * 1000 expected_subject = 'ERROR: aa' + 'a' * 980 self.assertEqual(len(mail.outbox), 0) logger = getLogger('django.request') logger.error(message) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, expected_subject)
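# --- Illustrative helper (not used by the tests above) ---
# The last two tests pin down that AdminEmailHandler escapes newlines in the
# subject and truncates it to 989 characters (998 per RFC 2822 minus the
# "Subject: " prefix). This stand-alone sketch reproduces only that expected
# behaviour; the real formatting lives in django.utils.log.AdminEmailHandler
# and is merely approximated here.
def _format_subject(message, prefix='', level='ERROR', max_len=989):
    subject = '%s%s: %s' % (prefix, level, message)
    subject = subject.replace('\n', '\\n').replace('\r', '\\r')
    return subject[:max_len]

assert _format_subject(u'Message \r\n with newlines') == u'ERROR: Message \\r\\n with newlines'
assert len(_format_subject('a' * 1000)) == 989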
#!/usr/bin/env python # # Copyright 2015-2015 breakwa11 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import os import sys import hashlib import logging import binascii import struct import base64 import datetime import random from shadowsocks import common from shadowsocks.obfsplugin import plain from shadowsocks.common import to_bytes, to_str, ord, chr def create_http_simple_obfs(method): return http_simple(method) def create_http_post_obfs(method): return http_post(method) def create_random_head_obfs(method): return random_head(method) obfs_map = { 'http_simple': (create_http_simple_obfs,), 'http_simple_compatible': (create_http_simple_obfs,), 'http_post': (create_http_post_obfs,), 'http_post_compatible': (create_http_post_obfs,), 'random_head': (create_random_head_obfs,), 'random_head_compatible': (create_random_head_obfs,), } def match_begin(str1, str2): if len(str1) >= len(str2): if str1[:len(str2)] == str2: return True return False class http_simple(plain.plain): def __init__(self, method): self.method = method self.has_sent_header = False self.has_recv_header = False self.host = None self.port = 0 self.recv_buffer = b'' self.user_agent = [b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0", b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/44.0", b"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36", b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36", b"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0", b"Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)", b"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", b"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)", b"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko", b"Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/BuildID) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36", b"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3", b"Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3"] def encode_head(self, buf): hexstr = binascii.hexlify(buf) chs = [] for i in range(0, len(hexstr), 2): chs.append(b"%" + hexstr[i:i+2]) return b''.join(chs) def client_encode(self, buf): if self.has_sent_header: return buf head_size = len(self.server_info.iv) + self.server_info.head_len if len(buf) - head_size > 64: headlen = head_size + random.randint(0, 64) else: headlen = len(buf) headdata = buf[:headlen] buf = buf[headlen:] port = b'' if self.server_info.port != 80: port = b':' + to_bytes(str(self.server_info.port)) body = None hosts = 
(self.server_info.obfs_param or self.server_info.host) pos = hosts.find("#") if pos >= 0: body = hosts[pos + 1:].replace("\\n", "\r\n") hosts = hosts[:pos] hosts = hosts.split(',') host = random.choice(hosts) http_head = b"GET /" + self.encode_head(headdata) + b" HTTP/1.1\r\n" http_head += b"Host: " + to_bytes(host) + port + b"\r\n" if body: http_head += body + "\r\n\r\n" else: http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n" http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nDNT: 1\r\nConnection: keep-alive\r\n\r\n" self.has_sent_header = True return http_head + buf def client_decode(self, buf): if self.has_recv_header: return (buf, False) pos = buf.find(b'\r\n\r\n') if pos >= 0: self.has_recv_header = True return (buf[pos + 4:], False) else: return (b'', False) def server_encode(self, buf): if self.has_sent_header: return buf header = b'HTTP/1.1 200 OK\r\nConnection: keep-alive\r\nContent-Encoding: gzip\r\nContent-Type: text/html\r\nDate: ' header += to_bytes(datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT')) header += b'\r\nServer: nginx\r\nVary: Accept-Encoding\r\n\r\n' self.has_sent_header = True return header + buf def get_data_from_http_header(self, buf): ret_buf = b'' lines = buf.split(b'\r\n') if lines and len(lines) > 4: hex_items = lines[0].split(b'%') if hex_items and len(hex_items) > 1: for index in range(1, len(hex_items)): if len(hex_items[index]) < 2: ret_buf += binascii.unhexlify('0' + hex_items[index]) break elif len(hex_items[index]) > 2: ret_buf += binascii.unhexlify(hex_items[index][:2]) break else: ret_buf += binascii.unhexlify(hex_items[index]) return ret_buf return b'' def not_match_return(self, buf): self.has_sent_header = True self.has_recv_header = True if self.method == 'http_simple': return (b'E'*64, False, False) return (buf, True, False) def server_decode(self, buf): if self.has_recv_header: return (buf, True, False) self.recv_buffer += buf buf = self.recv_buffer if len(buf) > 10: if match_begin(buf, b'GET /') or match_begin(buf, b'POST /'): if len(buf) > 65536: self.recv_buffer = None logging.warn('http_simple: over size') return self.not_match_return(buf) else: #not http header, run on original protocol self.recv_buffer = None logging.debug('http_simple: not match begin') return self.not_match_return(buf) else: return (b'', True, False) if b'\r\n\r\n' in buf: datas = buf.split(b'\r\n\r\n', 1) ret_buf = self.get_data_from_http_header(buf) if len(datas) > 1: ret_buf += datas[1] if len(ret_buf) >= 7: self.has_recv_header = True return (ret_buf, True, False) return self.not_match_return(buf) else: return (b'', True, False) class http_post(http_simple): def __init__(self, method): super(http_post, self).__init__(method) def boundary(self): return b''.join([random.choice(b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") for i in range(32)]) def client_encode(self, buf): if self.has_sent_header: return buf head_size = len(self.server_info.iv) + self.server_info.head_len if len(buf) - head_size > 64: headlen = head_size + random.randint(0, 64) else: headlen = len(buf) headdata = buf[:headlen] buf = buf[headlen:] port = b'' if self.server_info.port != 80: port = b':' + to_bytes(str(self.server_info.port)) body = None hosts = (self.server_info.obfs_param or self.server_info.host) pos = hosts.find("#") if pos >= 0: body = hosts[pos + 1:].replace("\\n", "\r\n") hosts = hosts[:pos] hosts = hosts.split(',') host = 
random.choice(hosts) http_head = b"POST /" + self.encode_head(headdata) + b" HTTP/1.1\r\n" http_head += b"Host: " + to_bytes(host) + port + b"\r\n" if body: http_head += body + "\r\n\r\n" else: http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n" http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\n" http_head += b"Content-Type: multipart/form-data; boundary=" + self.boundary() + b"\r\nDNT: 1\r\n" http_head += "Connection: keep-alive\r\n\r\n" self.has_sent_header = True return http_head + buf def not_match_return(self, buf): self.has_sent_header = True self.has_recv_header = True if self.method == 'http_post': return (b'E'*64, False, False) return (buf, True, False) def server_decode(self, buf): if self.has_recv_header: return (buf, True, False) self.recv_buffer += buf buf = self.recv_buffer if len(buf) > 10: if match_begin(buf, b'GET ') or match_begin(buf, b'POST '): if len(buf) > 65536: self.recv_buffer = None logging.warn('http_post: over size') return self.not_match_return(buf) else: #not http header, run on original protocol self.recv_buffer = None logging.debug('http_post: not match begin') return self.not_match_return(buf) else: return (b'', True, False) if b'\r\n\r\n' in buf: datas = buf.split(b'\r\n\r\n', 1) ret_buf = self.get_data_from_http_header(buf) if len(datas) > 1: ret_buf += datas[1] if len(ret_buf) >= 7: self.has_recv_header = True return (ret_buf, True, False) return self.not_match_return(buf) else: return (b'', True, False) class random_head(plain.plain): def __init__(self, method): self.method = method self.has_sent_header = False self.has_recv_header = False self.raw_trans_sent = False self.raw_trans_recv = False self.send_buffer = b'' def client_encode(self, buf): if self.raw_trans_sent: return buf self.send_buffer += buf if not self.has_sent_header: self.has_sent_header = True data = os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4) crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff return data + struct.pack('<I', crc) if self.raw_trans_recv: ret = self.send_buffer self.send_buffer = b'' self.raw_trans_sent = True return ret return b'' def client_decode(self, buf): if self.raw_trans_recv: return (buf, False) self.raw_trans_recv = True return (b'', True) def server_encode(self, buf): if self.has_sent_header: return buf self.has_sent_header = True return os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4) def server_decode(self, buf): if self.has_recv_header: return (buf, True, False) self.has_recv_header = True crc = binascii.crc32(buf) & 0xffffffff if crc != 0xffffffff: self.has_sent_header = True if self.method == 'random_head': return (b'E'*64, False, False) return (buf, True, False) # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back) return (b'', False, True)
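# --- Illustrative sketch (not part of the obfs plugin above) ---
# random_head relies on a CRC-32 property: appending the bitwise complement
# of crc32(data) in little-endian order makes crc32() of the whole buffer
# equal 0xffffffff, which is exactly what server_decode() checks. The
# snippet below verifies that round trip with the standard library only;
# the plugin's own framing (server_info, iv handling, buffering) is not
# reproduced here.
import os
import struct
import binascii


def _make_random_head(payload_len=32):
    data = os.urandom(payload_len)
    crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff
    return data + struct.pack('<I', crc)


head = _make_random_head()
assert binascii.crc32(head) & 0xffffffff == 0xffffffff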
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from heatclient import exc as heat_exc from oslo_config import cfg from oslo_log import log as logging from sahara import conductor as c from sahara import context from sahara.i18n import _ from sahara.service import engine as e from sahara.service.heat import commons as heat_common from sahara.service.heat import templates as ht from sahara.service import volumes from sahara.utils import cluster as c_u from sahara.utils import cluster_progress_ops as cpo from sahara.utils.openstack import heat conductor = c.API CONF = cfg.CONF LOG = logging.getLogger(__name__) CREATE_STAGES = [c_u.CLUSTER_STATUS_SPAWNING, c_u.CLUSTER_STATUS_WAITING, c_u.CLUSTER_STATUS_PREPARING] SCALE_STAGES = [c_u.CLUSTER_STATUS_SCALING_SPAWNING, c_u.CLUSTER_STATUS_SCALING_WAITING, c_u.CLUSTER_STATUS_SCALING_PREPARING] ROLLBACK_STAGES = [c_u.CLUSTER_STATUS_ROLLBACK_SPAWNING, c_u.CLUSTER_STATUS_ROLLBACK_WAITING, c_u.CLUSTER_STATUS_ROLLBACK__PREPARING] heat_engine_opts = [ cfg.ListOpt('heat_stack_tags', default=['data-processing-cluster'], help="List of tags to be used during operating with stack.") ] CONF.register_opts(heat_engine_opts) class HeatEngine(e.Engine): def get_type_and_version(self): return heat_common.HEAT_ENGINE_VERSION def create_cluster(self, cluster): self._update_rollback_strategy(cluster, shutdown=True) target_count = self._get_ng_counts(cluster) self._nullify_ng_counts(cluster) cluster = self._generate_heat_stack_name(cluster) self._launch_instances(cluster, target_count, CREATE_STAGES) self._update_rollback_strategy(cluster) @staticmethod def _generate_heat_stack_name(cluster): cluster = conductor.cluster_get(context.ctx(), cluster) hsn = cluster.name + cluster.id[:8] extra = cluster.extra.to_dict() if cluster.extra else {} extra['heat_stack_name'] = hsn conductor.cluster_update(context.ctx(), cluster, {'extra': extra}) return conductor.cluster_get(context.ctx(), cluster) def _get_ng_counts(self, cluster): count = {} for node_group in cluster.node_groups: count[node_group.id] = node_group.count return count def _nullify_ng_counts(self, cluster): ctx = context.ctx() for node_group in cluster.node_groups: conductor.node_group_update(ctx, node_group, {"count": 0}) def scale_cluster(self, cluster, target_count): ctx = context.ctx() rollback_count = self._get_ng_counts(cluster) self._update_rollback_strategy(cluster, rollback_count=rollback_count, target_count=target_count) inst_ids = self._launch_instances( cluster, target_count, SCALE_STAGES, update_stack=True, disable_rollback=False) cluster = conductor.cluster_get(ctx, cluster) c_u.clean_cluster_from_empty_ng(cluster) self._update_rollback_strategy(cluster) return inst_ids def rollback_cluster(self, cluster, reason): rollback_info = cluster.rollback_info or {} self._update_rollback_strategy(cluster) if rollback_info.get('shutdown', False): self._rollback_cluster_creation(cluster, reason) LOG.warning("Cluster creation rollback " "(reason: {reason})".format(reason=reason)) return False 
rollback_count = rollback_info.get('rollback_count', {}).copy() target_count = rollback_info.get('target_count', {}).copy() if rollback_count or target_count: self._rollback_cluster_scaling( cluster, rollback_count, target_count, reason) LOG.warning("Cluster scaling rollback " "(reason: {reason})".format(reason=reason)) return True return False def _update_rollback_strategy(self, cluster, shutdown=False, rollback_count=None, target_count=None): rollback_info = {} if shutdown: rollback_info['shutdown'] = shutdown if rollback_count: rollback_info['rollback_count'] = rollback_count if target_count: rollback_info['target_count'] = target_count cluster = conductor.cluster_update( context.ctx(), cluster, {'rollback_info': rollback_info}) return cluster def _populate_cluster(self, cluster, stack): ctx = context.ctx() old_ids = [i.instance_id for i in c_u.get_instances(cluster)] new_ids = [] for node_group in cluster.node_groups: instances = stack.get_node_group_instances(node_group) for instance in instances: nova_id = instance['physical_id'] if nova_id not in old_ids: name = instance['name'] inst = { "instance_id": nova_id, "instance_name": name } if cluster.use_designate_feature(): inst.update( {"dns_hostname": name + '.' + cluster.domain_name[:-1]}) instance_id = conductor.instance_add(ctx, node_group, inst) new_ids.append(instance_id) return new_ids def _rollback_cluster_creation(self, cluster, ex): """Shutdown all instances and update cluster status.""" self.shutdown_cluster(cluster) def _rollback_cluster_scaling(self, cluster, rollback_count, target_count, ex): """Attempt to rollback cluster scaling. Our rollback policy for scaling is as follows: We shut down nodes created during scaling, but we don't try to to get back decommissioned nodes. I.e. during the rollback we only shut down nodes and not launch them. That approach should maximize the chance of rollback success. """ for ng in rollback_count: if rollback_count[ng] > target_count[ng]: rollback_count[ng] = target_count[ng] self._launch_instances(cluster, rollback_count, ROLLBACK_STAGES, update_stack=True) def shutdown_cluster(self, cluster): """Shutdown specified cluster and all related resources.""" try: heat.delete_stack(cluster) except heat_exc.HTTPNotFound: LOG.warning('Did not find stack for cluster. Trying to delete ' 'cluster manually.') # Stack not found. 
Trying to delete cluster like direct engine # do it self._shutdown_instances(cluster) self._delete_aa_server_groups(cluster) self._clean_job_executions(cluster) self._remove_db_objects(cluster) @cpo.event_wrapper( True, step=_('Create Heat stack'), param=('cluster', 1)) def _create_instances(self, cluster, target_count, update_stack=False, disable_rollback=True): stack = ht.ClusterStack(cluster) self._update_instance_count(stack, cluster, target_count) stack.instantiate(update_existing=update_stack, disable_rollback=disable_rollback) heat.wait_stack_completion( cluster, is_update=update_stack, last_updated_time=stack.last_updated_time) return self._populate_cluster(cluster, stack) def _launch_instances(self, cluster, target_count, stages, update_stack=False, disable_rollback=True): # create all instances cluster = c_u.change_cluster_status(cluster, stages[0]) inst_ids = self._create_instances( cluster, target_count, update_stack, disable_rollback) # wait for all instances are up and networks ready cluster = c_u.change_cluster_status(cluster, stages[1]) instances = c_u.get_instances(cluster, inst_ids) self._await_networks(cluster, instances) # prepare all instances cluster = c_u.change_cluster_status(cluster, stages[2]) instances = c_u.get_instances(cluster, inst_ids) volumes.mount_to_instances(instances) self._configure_instances(cluster) return inst_ids def _update_instance_count(self, stack, cluster, target_count): ctx = context.ctx() for node_group in cluster.node_groups: count = target_count[node_group.id] stack.add_node_group_extra(node_group.id, count, self._generate_user_data_script) # if number of instances decreases, we need to drop # the excessive ones # instances list doesn't order by creating date, so we should # sort it to make sure deleted instances same as heat deleted. insts = sorted( node_group.instances, key=lambda x: int(x['instance_name'].split('-')[-1])) for i in range(count, node_group.count): conductor.instance_remove(ctx, insts[i])
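# --- Illustrative sketch (not part of HeatEngine above) ---
# _update_instance_count() drops the excess instances of a node group by
# sorting them on the numeric suffix of their name, so the instances removed
# from the DB match the ones Heat deletes. The stand-alone snippet below
# shows that selection on plain dicts; node-group and conductor objects are
# Sahara-specific and not reproduced here.
def _excess_instances(instances, target_count):
    """Return the instances that should be dropped to reach target_count."""
    ordered = sorted(instances,
                     key=lambda inst: int(inst['instance_name'].split('-')[-1]))
    return ordered[target_count:]


_insts = [{'instance_name': 'cluster-worker-3'},
          {'instance_name': 'cluster-worker-1'},
          {'instance_name': 'cluster-worker-2'}]
assert [i['instance_name'] for i in _excess_instances(_insts, 2)] == ['cluster-worker-3']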
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Contains the base Layer class, from which all layers inherit.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras import backend from tensorflow.python.keras.engine import base_layer from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as tf_variables from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # Avoid breaking users who directly import this symbol from this file. # TODO(fchollet): remove this. InputSpec = base_layer.InputSpec # pylint: disable=invalid-name _KERAS_STYLE_SCOPE = False @tf_export(v1=['layers.experimental.keras_style_scope']) @tf_contextlib.contextmanager def keras_style_scope(): """Use Keras-style variable management. All tf.layers and tf RNN cells created in this scope use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. The purpose of this scope is to allow users of existing layers to slowly transition to a Keras layers API without breaking existing functionality. One example of this is when using TensorFlow's RNN classes with Keras Models or Networks. Because Keras models do not properly set variable scopes, users of RNNs may either accidentally share scopes between two different models, or get errors about variables that already exist. Example: ```python class RNNModel(tf.keras.Model): def __init__(self, name): super(RNNModel, self).__init__(name=name) self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell( [tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)]) def call(self, input, state): return self.rnn(input, state) model_1 = RNNModel("model_1") model_2 = RNNModel("model_2") # OK output_1, next_state_1 = model_1(input, state) # Raises an error about trying to create an already existing variable. output_2, next_state_2 = model_2(input, state) ``` The solution is to wrap the model construction and execution in a keras-style scope: ```python with keras_style_scope(): model_1 = RNNModel("model_1") model_2 = RNNModel("model_2") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ``` Yields: A keras layer style scope. 
""" global _KERAS_STYLE_SCOPE stack = _KERAS_STYLE_SCOPE _KERAS_STYLE_SCOPE = True try: yield finally: _KERAS_STYLE_SCOPE = stack @tf_export(v1=['layers.experimental.set_keras_style']) def set_keras_style(): """Use Keras-style variable management. All tf.layers and tf RNN cells created after keras style ha been enabled use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. The purpose of this function is to allow users of existing layers to slowly transition to Keras layers API without breaking existing functionality. For more details, see the documentation for `keras_style_scope`. Note, once keras style has been set, it is set globally for the entire program and cannot be unset. Example: ```python set_keras_style() model_1 = RNNModel(name="model_1") model_2 = RNNModel(name="model_2") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ``` """ global _KERAS_STYLE_SCOPE _KERAS_STYLE_SCOPE = True def _is_in_keras_style_scope(): global _KERAS_STYLE_SCOPE return _KERAS_STYLE_SCOPE @tf_export(v1=['layers.Layer']) class Layer(base_layer.Layer): """Base layer class. It is considered legacy, and we recommend the use of `tf.keras.layers.Layer` instead. Arguments: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). Read-only properties: name: The name of the layer (string). dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). trainable_variables: List of trainable variables. non_trainable_variables: List of non-trainable variables. variables: List of all variables of this layer, trainable and non-trainable. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). Mutable properties: trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. """ def __init__(self, trainable=True, name=None, dtype=None, **kwargs): # For backwards compatibility, legacy layers do not use `ResourceVariable` # by default. 
self._use_resource_variables = False scope = kwargs.pop('_scope', None) self._reuse = kwargs.pop('_reuse', None) # Avoid an incorrect lint error self._trainable_weights = [] self.built = False super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype, **kwargs) if _is_in_keras_style_scope(): if scope is not None: raise ValueError( 'scope argument not allowed when keras style layers are enabled, ' 'but saw: {}'.format(scope)) if self._reuse is not None: raise ValueError( 'reuse argument not allowed when keras style layers are enabled, ' 'but saw: {}'.format(self._reuse)) self._keras_style = True else: self._keras_style = False self._call_has_scope_arg = 'scope' in self._call_fn_args if scope: with vs.variable_scope(scope) as captured_scope: self._scope = captured_scope else: self._scope = None self._current_scope = None # We no longer track graph in tf.layers layers. This property is only kept to # maintain API backward compatibility. @property @deprecation.deprecated( date=None, instructions='Stop using this property because tf.layers layers no ' 'longer track their graph.') def graph(self): if context.executing_eagerly(): raise RuntimeError('Layer.graph not supported when executing eagerly.') return None def _init_set_name(self, name): # Determine layer name (non-unique). if isinstance(name, vs.VariableScope): base_name = name.name self._name, _ = self._make_unique_name() else: base_name = name self._name = name if not name: self._name, base_name = self._make_unique_name() self._base_name = base_name def _make_unique_name(self, name_uid_map=None, avoid_names=None, namespace='', zero_based=False): base_name = base_layer.to_snake_case(self.__class__.__name__) name = backend.unique_object_name( base_name, name_uid_map=name_uid_map, avoid_names=avoid_names, namespace=namespace, zero_based=zero_based) return (name, base_name) @property def scope_name(self): if not self._scope: raise ValueError('No name available for layer scope because the layer "' + self._name + '" has not been used yet. The scope name ' + ' is determined the first time the layer instance is ' + 'called. You must therefore call the layer before ' + 'querying `scope_name`.') return self._scope.name def add_loss(self, losses, inputs=None): previous_losses_length = len(self._losses) previous_callable_losses_length = len(self._callable_losses) super(Layer, self).add_loss(losses, inputs=inputs) if not context.executing_eagerly(): # TODO(fchollet): deprecate collection below. new_losses = self._losses[previous_losses_length:] new_callable_losses = self._callable_losses[ previous_callable_losses_length:] for regularizer in new_callable_losses: loss_tensor = regularizer() if loss_tensor is not None: new_losses.append(loss_tensor) _add_elements_to_collection( new_losses, ops.GraphKeys.REGULARIZATION_LOSSES) def _name_scope(self): """Determines op naming for the Layer.""" if self._keras_style: return super(Layer, self)._name_scope() return self._current_scope.original_name_scope def _set_scope(self, scope=None): if self._scope is None: # If constructed with _scope=None, lazy setting of scope. 
if self._reuse: with vs.variable_scope( scope if scope is not None else self._base_name) as captured_scope: self._scope = captured_scope else: with vs.variable_scope( scope, default_name=self._base_name) as captured_scope: self._scope = captured_scope def add_weight(self, name, shape, dtype=None, initializer=None, regularizer=None, trainable=None, constraint=None, use_resource=None, synchronization=vs.VariableSynchronization.AUTO, aggregation=vs.VariableAggregation.NONE, partitioner=None, **kwargs): """Adds a new variable to the layer, or gets an existing one; returns it. Arguments: name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. initializer: initializer instance (callable). regularizer: regularizer instance (callable). trainable: whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean, stddev). Note, if the current variable scope is marked as non-trainable then this parameter is ignored and any added variables are also marked as non-trainable. `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. constraint: constraint instance (callable). use_resource: Whether to use `ResourceVariable`. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. partitioner: (optional) partitioner instance (callable). If provided, when the requested variable is created it will be split into multiple partitions according to `partitioner`. In this case, an instance of `PartitionedVariable` is returned. Available partitioners include `tf.compat.v1.fixed_size_partitioner` and `tf.compat.v1.variable_axis_size_partitioner`. For more details, see the documentation of `tf.compat.v1.get_variable` and the "Variable Partitioners and Sharding" section of the API guide. **kwargs: Additional keyword arguments. Returns: The created variable. Usually either a `Variable` or `ResourceVariable` instance. If `partitioner` is not `None`, a `PartitionedVariable` instance is returned. Raises: RuntimeError: If called with partitioned variable regularization and eager execution is enabled. ValueError: When trainable has been set to True with synchronization set as `ON_READ`. """ for kwarg in kwargs: if kwarg != 'experimental_autocast': raise TypeError('Unknown keyword argument:', kwarg) if self._keras_style: return super(Layer, self).add_weight( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable and self.trainable, constraint=constraint, use_resource=use_resource, synchronization=vs.VariableSynchronization.AUTO, aggregation=vs.VariableAggregation.NONE, partitioner=partitioner, **kwargs) if synchronization == vs.VariableSynchronization.ON_READ: if trainable: raise ValueError( 'Synchronization value can be set to ' 'VariableSynchronization.ON_READ only for non-trainable variables. ' 'You have specified trainable=True and ' 'synchronization=VariableSynchronization.ON_READ.') else: # Set trainable to be false when variable is to be synced on read. 
trainable = False elif trainable is None: trainable = True def _should_add_regularizer(variable, existing_variable_set): if isinstance(variable, tf_variables.PartitionedVariable): for var in variable: if var in existing_variable_set: return False return True else: return variable not in existing_variable_set init_graph = None if not context.executing_eagerly(): default_graph = ops.get_default_graph() if default_graph.building_function: with ops.init_scope(): # Retrieve the variables from the graph into which variables # will be lifted; if initialization ops will be lifted into # the eager context, then there is nothing to retrieve, since variable # collections are not supported when eager execution is enabled. if not context.executing_eagerly(): init_graph = ops.get_default_graph() existing_variables = set(tf_variables.global_variables()) else: # Initialization ops will not be lifted out of the default graph. init_graph = default_graph existing_variables = set(tf_variables.global_variables()) if dtype is None: dtype = self.dtype or dtypes.float32 self._set_scope(None) reuse = self.built or self._reuse prev_len_trainable = len(self._trainable_weights) with vs.variable_scope( self._scope, reuse=reuse, auxiliary_name_scope=False) as scope: self._current_scope = scope with ops.name_scope(self._name_scope()): use_resource = (use_resource or self._use_resource_variables or scope.use_resource) if initializer is None: initializer = scope.initializer variable = super(Layer, self).add_weight( name, shape, dtype=dtypes.as_dtype(dtype), initializer=initializer, trainable=trainable and self.trainable, constraint=constraint, partitioner=partitioner, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, getter=vs.get_variable, **kwargs) if regularizer: if (ops.executing_eagerly_outside_functions() or _should_add_regularizer(variable, existing_variables)): self._handle_weight_regularization(name, variable, regularizer) if init_graph is not None: # Handle edge case where a custom getter has overridden `trainable`. # There is one known occurrence of this, in unit test # testBasicRNNCellNotTrainable in # contrib.rnn.python.kernel_tests.core_rnn_cell_test with init_graph.as_default(): trainable_variables = tf_variables.trainable_variables() if (trainable and self.trainable and variable not in trainable_variables): # A custom getter / variable scope overrode the trainable flag. extra_trainable_vars = self._trainable_weights[prev_len_trainable:] self._trainable_weights = self._trainable_weights[ :prev_len_trainable] self._non_trainable_weights += extra_trainable_vars return variable def __call__(self, inputs, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. Arguments: inputs: input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. **Note**: kwarg `scope` is reserved for use by the layer. Returns: Output tensor(s). Note: - If the layer's `call` method takes a `scope` keyword argument, this argument will be automatically set to the current variable scope. - If the layer's `call` method takes a `mask` argument (as some Keras layers do), its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support. Raises: ValueError: if the layer's `call` method returns None (an invalid value). 
""" scope = kwargs.pop('scope', None) if self._keras_style: if scope is not None: raise ValueError( 'scope argument not allowed when keras style layers are enabled, ' 'but saw: {}'.format(scope)) return super(Layer, self).__call__(inputs, *args, **kwargs) self._set_scope(scope) if self.built: try: # Some classes which inherit from Layer do not use its constructor, so # rather than initializing to None we check for an AttributeError. scope_context_manager = self._always_reuse_variable_scope except AttributeError: # From this point we will always set reuse=True, so create a "final" # variable scope with this setting. We avoid re-creating variable scopes # after this point as an optimization. self._always_reuse_variable_scope = vs.variable_scope( self._scope, reuse=True, auxiliary_name_scope=False) scope_context_manager = self._always_reuse_variable_scope else: scope_context_manager = vs.variable_scope( self._scope, reuse=self._reuse, auxiliary_name_scope=False) with scope_context_manager as scope: self._current_scope = scope try: call_has_scope_arg = self._call_has_scope_arg except AttributeError: self._call_fn_args = function_utils.fn_args(self.call) self._call_has_scope_arg = 'scope' in self._call_fn_args call_has_scope_arg = self._call_has_scope_arg if call_has_scope_arg: kwargs['scope'] = scope # Actually call layer outputs = super(Layer, self).__call__(inputs, *args, **kwargs) if not context.executing_eagerly(): # Update global default collections. _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS) return outputs def __deepcopy__(self, memo): no_copy = set(['_graph']) shallow_copy = set(['_scope', '_always_reuse_variable_scope']) cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if k in no_copy: setattr(result, k, v) elif k in shallow_copy: setattr(result, k, copy.copy(v)) elif base_layer.is_tensor_or_tensor_list(v): setattr(result, k, v) else: setattr(result, k, copy.deepcopy(v, memo)) return result def __setattr__(self, value, name): # By-pass the automatic dependency tracking performed by the parent Layer. super(trackable.Trackable, self).__setattr__(value, name) @property def _is_legacy_layer(self): """Used by keras to check compatibility. This should not be overridden.""" return True def _add_elements_to_collection(elements, collection_list): if context.executing_eagerly(): raise RuntimeError('Using collections from Layers not supported in Eager ' 'mode. Tried to add %s to %s' % (elements, collection_list)) elements = nest.flatten(elements) collection_list = nest.flatten(collection_list) for name in collection_list: collection = ops.get_collection_ref(name) collection_set = set(collection) for element in elements: if element not in collection_set: collection.append(element)
from contextlib import contextmanager import warnings from numba.core import (errors, types, typing, funcdesc, config, pylowering, transforms) from numba.core.compiler_machinery import (FunctionPass, LoweringPass, register_pass) from collections import defaultdict @contextmanager def giveup_context(state, msg): """ Wraps code that would signal a fallback to interpreter mode """ try: yield except Exception as e: if not state.status.can_giveup: raise else: # Clear all references attached to the traceback e = e.with_traceback(None) warnings.warn_explicit('%s: %s' % (msg, e), errors.NumbaWarning, state.func_id.filename, state.func_id.firstlineno) raise @register_pass(mutates_CFG=True, analysis_only=False) class ObjectModeFrontEnd(FunctionPass): _name = "object_mode_front_end" def __init__(self): FunctionPass.__init__(self) def _frontend_looplift(self, state): """ Loop lifting analysis and transformation """ loop_flags = state.flags.copy() outer_flags = state.flags.copy() # Do not recursively loop lift outer_flags.unset('enable_looplift') loop_flags.unset('enable_looplift') if not state.flags.enable_pyobject_looplift: loop_flags.unset('enable_pyobject') loop_flags.unset('enable_ssa') main, loops = transforms.loop_lifting(state.func_ir, typingctx=state.typingctx, targetctx=state.targetctx, locals=state.locals, flags=loop_flags) if loops: # Some loops were extracted if config.DEBUG_FRONTEND or config.DEBUG: for loop in loops: print("Lifting loop", loop.get_source_location()) from numba.core.compiler import compile_ir cres = compile_ir(state.typingctx, state.targetctx, main, state.args, state.return_type, outer_flags, state.locals, lifted=tuple(loops), lifted_from=None, is_lifted_loop=True) return cres def run_pass(self, state): from numba.core.compiler import _EarlyPipelineCompletion # NOTE: That so much stuff, including going back into the compiler, is # captured in a single pass is not ideal. if state.flags.enable_looplift: assert not state.lifted cres = self._frontend_looplift(state) if cres is not None: raise _EarlyPipelineCompletion(cres) # Fallback typing: everything is a python object state.typemap = defaultdict(lambda: types.pyobject) state.calltypes = defaultdict(lambda: types.pyobject) state.return_type = types.pyobject return True @register_pass(mutates_CFG=True, analysis_only=False) class ObjectModeBackEnd(LoweringPass): _name = "object_mode_back_end" def __init__(self): LoweringPass.__init__(self) def _py_lowering_stage(self, targetctx, library, interp, flags): fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function( interp ) with targetctx.push_code_library(library): lower = pylowering.PyLower(targetctx, library, fndesc, interp) lower.lower() if not flags.no_cpython_wrapper: lower.create_cpython_wrapper() env = lower.env call_helper = lower.call_helper del lower from numba.core.compiler import _LowerResult # TODO: move this if flags.no_compile: return _LowerResult(fndesc, call_helper, cfunc=None, env=env) else: # Prepare for execution cfunc = targetctx.get_executable(library, fndesc, env) return _LowerResult(fndesc, call_helper, cfunc=cfunc, env=env) def run_pass(self, state): """ Lowering for object mode """ if state.library is None: codegen = state.targetctx.codegen() state.library = codegen.create_library(state.func_id.func_qualname) # Enable object caching upfront, so that the library can # be later serialized. 
state.library.enable_object_caching() def backend_object_mode(): """ Object mode compilation """ with giveup_context(state, "Function %s failed at object mode lowering" % (state.func_id.func_name,)): if len(state.args) != state.nargs: # append missing # BUG?: What's going on with nargs here? # check state.nargs vs self.nargs on original code state.args = (tuple(state.args) + (types.pyobject,) * (state.nargs - len(state.args))) return self._py_lowering_stage(state.targetctx, state.library, state.func_ir, state.flags) lowered = backend_object_mode() signature = typing.signature(state.return_type, *state.args) from numba.core.compiler import compile_result state.cr = compile_result( typing_context=state.typingctx, target_context=state.targetctx, entry_point=lowered.cfunc, typing_error=state.status.fail_reason, type_annotation=state.type_annotation, library=state.library, call_helper=lowered.call_helper, signature=signature, objectmode=True, interpmode=False, lifted=state.lifted, fndesc=lowered.fndesc, environment=lowered.env, metadata=state.metadata, reload_init=state.reload_init, ) # Warn, deprecated behaviour, code compiled in objmode without # force_pyobject indicates fallback from nopython mode if not state.flags.force_pyobject: # first warn about object mode and yes/no to lifted loops if len(state.lifted) > 0: warn_msg = ('Function "%s" was compiled in object mode without' ' forceobj=True, but has lifted loops.' % (state.func_id.func_name,)) else: warn_msg = ('Function "%s" was compiled in object mode without' ' forceobj=True.' % (state.func_id.func_name,)) warnings.warn(errors.NumbaWarning(warn_msg, state.func_ir.loc)) url = ("https://numba.pydata.org/numba-doc/latest/reference/" "deprecation.html#deprecation-of-object-mode-fall-" "back-behaviour-when-using-jit") msg = ("\nFall-back from the nopython compilation path to the " "object mode compilation path has been detected, this is " "deprecated behaviour.\n\nFor more information visit %s" % url) warnings.warn(errors.NumbaDeprecationWarning(msg, state.func_ir.loc)) if state.flags.release_gil: warn_msg = ("Code running in object mode won't allow parallel" " execution despite nogil=True.") warnings.warn_explicit(warn_msg, errors.NumbaWarning, state.func_id.filename, state.func_id.firstlineno) return True @register_pass(mutates_CFG=True, analysis_only=False) class CompileInterpMode(LoweringPass): _name = "compile_interp_mode" def __init__(self): LoweringPass.__init__(self) def run_pass(self, state): """ Just create a compile result for interpreter mode """ args = [types.pyobject] * len(state.args) signature = typing.signature(types.pyobject, *args) from numba.core.compiler import compile_result state.cr = compile_result(typing_context=state.typingctx, target_context=state.targetctx, entry_point=state.func_id.func, typing_error=state.status.fail_reason, type_annotation="<Interpreter mode function>", signature=signature, objectmode=False, interpmode=True, lifted=(), fndesc=None,) return True
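# --- Illustrative sketch (not part of the Numba passes above) ---
# giveup_context() emits a warning when falling back is allowed and then
# re-raises the original exception either way. The stand-alone context
# manager below mirrors that shape with plain warnings; the Numba state and
# flags objects are only assumed, not reproduced.
import warnings
from contextlib import contextmanager


@contextmanager
def _warn_then_reraise(msg, can_giveup=True):
    """Warn about a failure before letting it propagate."""
    try:
        yield
    except Exception as e:
        if can_giveup:
            warnings.warn('%s: %s' % (msg, e.with_traceback(None)))
        raise


# The exception still propagates, but a warning is emitted first.
try:
    with _warn_then_reraise("object mode lowering failed"):
        raise ValueError("boom")
except ValueError:
    pass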
''' Created on Jan 20, 2016 @author: kashefy ''' from nose.tools import assert_equal, assert_true, assert_false, \ assert_is_not_none, assert_is_instance, assert_greater, assert_list_equal, \ assert_is_not import os import tempfile import shutil from google.protobuf import text_format from caffe.proto.caffe_pb2 import NetParameter from nideep.proto.proto_utils import Parser import nideep.nets.net_merge as mrg import sys CURRENT_MODULE_PATH = os.path.abspath(sys.modules[__name__].__file__) ROOT_PKG_PATH = os.path.dirname(CURRENT_MODULE_PATH) TEST_DATA_DIRNAME = 'test_data' TEST_NET_FILENAME = 'n1.prototxt' TEST_NET_HDF5DATA_FILENAME = 'n1h.prototxt' class TestNetMerge: @classmethod def setup_class(self): self.dir_tmp = tempfile.mkdtemp() @classmethod def teardown_class(self): shutil.rmtree(self.dir_tmp) def test_duplicate(self): fpath = os.path.join(os.path.dirname(ROOT_PKG_PATH), TEST_DATA_DIRNAME, TEST_NET_FILENAME) n1 = Parser().from_net_params_file(fpath) n2 = Parser().from_net_params_file(fpath) n1_tmp = NetParameter(); n1_tmp.CopyFrom(n1) n2_tmp = NetParameter(); n2_tmp.CopyFrom(n2) s = mrg.merge_indep_net_spec([n1_tmp, n2_tmp]) assert_is_not_none(s) assert_is_instance(s, str) assert_greater(len(s), 0) n = NetParameter() text_format.Merge(s, n) assert_is_not_none(n) # Data Layer from first network for l in n.layer: if l.type.lower() == 'data': for l1 in n1.layer: if l1.type.lower() == 'data': dat_phase = [x.phase for x in l.include] # compare test with test and train with train if dat_phase == [x.phase for x in l1.include]: assert_is_not(l.top, l1.top) assert_list_equal(list(l.top), list(l1.top)) assert_equal(l.data_param.source, l1.data_param.source) assert_equal(l.data_param.backend, l1.data_param.backend) assert_equal(l.data_param.batch_size, l1.data_param.batch_size) assert_equal(l.transform_param.scale, l1.transform_param.scale) # For non-data layers # back up merged net for ni in [n1, n2]: for l1 in ni.layer: found = False if l1.type.lower() != 'data': for l in n.layer: if l.type.lower() == l1.type.lower() and \ [t.split('_nidx')[0] for t in l.top] == list(l1.top) and \ [b.split('_nidx')[0] for b in l.bottom] == list(l1.bottom): assert_true(l.name.startswith(l1.name)) fnames1 = [f.name for f in l1.DESCRIPTOR.fields] fnames = [f.name for f in l.DESCRIPTOR.fields] assert_list_equal(fnames, fnames1) l.ClearField('name') l.ClearField('top') l.ClearField('bottom') l1.ClearField('name') l1.ClearField('top') l1.ClearField('bottom') assert_equal(text_format.MessageToString(l), text_format.MessageToString(l1)) found = True else: continue # skip for data layers assert_true(found, "Failed to find %s in merged network!" 
% (l1.name,)) @staticmethod def test_duplicate_hdf5data(): fpath = os.path.join(os.path.dirname(ROOT_PKG_PATH), TEST_DATA_DIRNAME, TEST_NET_HDF5DATA_FILENAME) n1 = Parser().from_net_params_file(fpath) n2 = Parser().from_net_params_file(fpath) n1_tmp = NetParameter(); n1_tmp.CopyFrom(n1) n2_tmp = NetParameter(); n2_tmp.CopyFrom(n2) s = mrg.merge_indep_net_spec([n1_tmp, n2_tmp]) assert_is_not_none(s) assert_is_instance(s, str) assert_greater(len(s), 0) n = NetParameter() text_format.Merge(s, n) assert_is_not_none(n) # Data Layer from first network for l in n.layer: if l.type.lower() == 'hdf5data': for l1 in n1.layer: if l1.type.lower() == 'hdf5data': dat_phase = [x.phase for x in l.include] # compare test with test and train with train if dat_phase == [x.phase for x in l1.include]: assert_is_not(l.top, l1.top) assert_list_equal(list(l.top), list(l1.top)) assert_equal(l.data_param.source, l1.data_param.source) assert_equal(l.data_param.backend, l1.data_param.backend) assert_equal(l.data_param.batch_size, l1.data_param.batch_size) assert_equal(l.transform_param.scale, l1.transform_param.scale) # For non-data layers # back up merged net for ni in [n1, n2]: for l1 in ni.layer: found = False if l1.type.lower() != 'hdf5data': for l in n.layer: if l.type.lower() == l1.type.lower() and \ [t.split('_nidx')[0] for t in l.top] == list(l1.top) and \ [b.split('_nidx')[0] for b in l.bottom] == list(l1.bottom): assert_true(l.name.startswith(l1.name)) fnames1 = [f.name for f in l1.DESCRIPTOR.fields] fnames = [f.name for f in l.DESCRIPTOR.fields] assert_list_equal(fnames, fnames1) l.ClearField('name') l.ClearField('top') l.ClearField('bottom') l1.ClearField('name') l1.ClearField('top') l1.ClearField('bottom') assert_equal(text_format.MessageToString(l), text_format.MessageToString(l1)) found = True else: continue # skip for data layers assert_true(found, "Failed to find %s in merged network!" % (l1.name,)) def test_is_singular_layer_type(): assert_true(mrg.is_singular_layer_type('data')) assert_true(mrg.is_singular_layer_type('Data')) assert_true(mrg.is_singular_layer_type('DATA')) assert_true(mrg.is_singular_layer_type('HDFDATA')) assert_true(mrg.is_singular_layer_type('HDFData')) assert_false(mrg.is_singular_layer_type('HDF5Output')) assert_false(mrg.is_singular_layer_type('InnerProduct')) assert_false(mrg.is_singular_layer_type('innerproduct')) assert_false(mrg.is_singular_layer_type('Convolution')) assert_false(mrg.is_singular_layer_type('convolution')) def test_suffix_fmt_idx(): assert_equal(mrg.suffix_fmt_idx(0), '_nidx_00') assert_equal(mrg.suffix_fmt_idx(00), '_nidx_00') assert_equal(mrg.suffix_fmt_idx(1), '_nidx_01') assert_equal(mrg.suffix_fmt_idx(01), '_nidx_01') assert_equal(mrg.suffix_fmt_idx(10), '_nidx_10') assert_equal(mrg.suffix_fmt_idx(99999990), '_nidx_99999990') def test_suffix_fmt_custom(): def custom_suffix(idx): return mrg.suffix_fmt_idx(idx) + 'custom' assert_equal(custom_suffix(0), '_nidx_00' + 'custom') assert_equal(custom_suffix(1), '_nidx_01' + 'custom') assert_equal(custom_suffix(99999990), '_nidx_99999990' + 'custom')
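# --- Illustrative sketch (not part of the tests above) ---
# The suffix tests pin down the naming convention used when merging
# networks: every layer of network idx gets a "_nidx_NN" suffix, zero-padded
# to at least two digits. A minimal stand-in with the same behaviour (the
# real implementation lives in nideep.nets.net_merge):
def _suffix_fmt_idx(idx):
    return '_nidx_%02d' % idx

assert _suffix_fmt_idx(0) == '_nidx_00'
assert _suffix_fmt_idx(1) == '_nidx_01'
assert _suffix_fmt_idx(10) == '_nidx_10'
assert _suffix_fmt_idx(99999990) == '_nidx_99999990'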
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/messaging.py"""
import datetime
import uuid

import eventlet
import mock
import oslo.messaging
from oslo_config import fixture as fixture_config
from oslo_context import context
from oslo_utils import netutils
import testscenarios.testcase

from ceilometer.event.storage import models as event
from ceilometer import messaging
from ceilometer.publisher import messaging as msg_publisher
from ceilometer import sample
from ceilometer.tests import base as tests_base


class BasePublisherTestCase(tests_base.BaseTestCase):
    test_event_data = [
        event.Event(message_id=uuid.uuid4(),
                    event_type='event_%d' % i,
                    generated=datetime.datetime.utcnow(),
                    traits=[]) for i in range(0, 5)
    ]

    test_sample_data = [
        sample.Sample(
            name='test',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id='test_run_tasks',
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='test',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id='test_run_tasks',
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='test2',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id='test_run_tasks',
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='test2',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id='test_run_tasks',
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='test3',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id='test_run_tasks',
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
    ]

    def setUp(self):
        super(BasePublisherTestCase, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.setup_messaging(self.CONF)


class RpcOnlyPublisherTest(BasePublisherTestCase):
    def test_published_no_mock(self):
        publisher = msg_publisher.RPCPublisher(
            netutils.urlsplit('rpc://'))

        endpoint = mock.MagicMock(['record_metering_data'])
        collector = messaging.get_rpc_server(
            self.transport, self.CONF.publisher_rpc.metering_topic, endpoint)
        endpoint.record_metering_data.side_effect = (lambda *args, **kwds:
                                                     collector.stop())

        collector.start()
        eventlet.sleep()
        publisher.publish_samples(context.RequestContext(),
                                  self.test_sample_data)
        collector.wait()

        class Matcher(object):
            @staticmethod
            def __eq__(data):
                for i, sample_item in enumerate(data):
                    if (sample_item['counter_name'] !=
                            self.test_sample_data[i].name):
                        return False
                return True

        endpoint.record_metering_data.assert_called_once_with(
            mock.ANY, data=Matcher())

    def test_publish_target(self):
        publisher = msg_publisher.RPCPublisher(
            netutils.urlsplit('rpc://?target=custom_procedure_call'))
        cast_context = mock.MagicMock()
        with mock.patch.object(publisher.rpc_client, 'prepare') as prepare:
            prepare.return_value = cast_context
            publisher.publish_samples(mock.MagicMock(),
                                      self.test_sample_data)

            prepare.assert_called_once_with(
                topic=self.CONF.publisher_rpc.metering_topic)
            cast_context.cast.assert_called_once_with(
                mock.ANY, 'custom_procedure_call', data=mock.ANY)

    def test_published_with_per_meter_topic(self):
        publisher = msg_publisher.RPCPublisher(
            netutils.urlsplit('rpc://?per_meter_topic=1'))
        with mock.patch.object(publisher.rpc_client, 'prepare') as prepare:
            publisher.publish_samples(mock.MagicMock(),
                                      self.test_sample_data)

            class MeterGroupMatcher(object):
                def __eq__(self, meters):
                    return len(set(meter['counter_name']
                                   for meter in meters)) == 1

            topic = self.CONF.publisher_rpc.metering_topic
            expected = [mock.call(topic=topic),
                        mock.call().cast(mock.ANY, 'record_metering_data',
                                         data=mock.ANY),
                        mock.call(topic=topic + '.test'),
                        mock.call().cast(mock.ANY, 'record_metering_data',
                                         data=MeterGroupMatcher()),
                        mock.call(topic=topic + '.test2'),
                        mock.call().cast(mock.ANY, 'record_metering_data',
                                         data=MeterGroupMatcher()),
                        mock.call(topic=topic + '.test3'),
                        mock.call().cast(mock.ANY, 'record_metering_data',
                                         data=MeterGroupMatcher())]
            self.assertEqual(expected, prepare.mock_calls)


class TestPublisher(testscenarios.testcase.WithScenarios,
                    BasePublisherTestCase):
    scenarios = [
        ('notifier',
         dict(protocol="notifier",
              publisher_cls=msg_publisher.SampleNotifierPublisher,
              test_data=BasePublisherTestCase.test_sample_data,
              pub_func='publish_samples', attr='source')),
        ('event_notifier',
         dict(protocol="notifier",
              publisher_cls=msg_publisher.EventNotifierPublisher,
              test_data=BasePublisherTestCase.test_event_data,
              pub_func='publish_events', attr='event_type')),
        ('rpc',
         dict(protocol="rpc",
              publisher_cls=msg_publisher.RPCPublisher,
              test_data=BasePublisherTestCase.test_sample_data,
              pub_func='publish_samples', attr='source')),
    ]

    def setUp(self):
        super(TestPublisher, self).setUp()
        self.topic = (self.CONF.publisher_notifier.event_topic
                      if self.pub_func == 'publish_events'
                      else self.CONF.publisher_rpc.metering_topic)

    def test_published_concurrency(self):
        """Test concurrent access to the local queue of the rpc publisher."""

        publisher = self.publisher_cls(
            netutils.urlsplit('%s://' % self.protocol))

        with mock.patch.object(publisher, '_send') as fake_send:
            def fake_send_wait(ctxt, topic, meters):
                fake_send.side_effect = mock.Mock()
                # Sleep to simulate concurrency and allow other threads to
                # work
                eventlet.sleep(0)

            fake_send.side_effect = fake_send_wait

            job1 = eventlet.spawn(getattr(publisher, self.pub_func),
                                  mock.MagicMock(), self.test_data)
            job2 = eventlet.spawn(getattr(publisher, self.pub_func),
                                  mock.MagicMock(), self.test_data)

            job1.wait()
            job2.wait()

        self.assertEqual('default', publisher.policy)
        self.assertEqual(2, len(fake_send.mock_calls))
        self.assertEqual(0, len(publisher.local_queue))

    @mock.patch('ceilometer.publisher.messaging.LOG')
    def test_published_with_no_policy(self, mylog):
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://' % self.protocol))
        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            self.assertRaises(
                oslo.messaging.MessageDeliveryFailure,
                getattr(publisher, self.pub_func),
                mock.MagicMock(), self.test_data)
            self.assertTrue(mylog.info.called)
            self.assertEqual('default', publisher.policy)
            self.assertEqual(0, len(publisher.local_queue))
            fake_send.assert_called_once_with(
                mock.ANY, self.topic, mock.ANY)

    @mock.patch('ceilometer.publisher.messaging.LOG')
    def test_published_with_policy_block(self, mylog):
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://?policy=default' % self.protocol))
        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            self.assertRaises(
                oslo.messaging.MessageDeliveryFailure,
                getattr(publisher, self.pub_func),
                mock.MagicMock(), self.test_data)
            self.assertTrue(mylog.info.called)
            self.assertEqual(0, len(publisher.local_queue))
            fake_send.assert_called_once_with(
                mock.ANY, self.topic, mock.ANY)

    @mock.patch('ceilometer.publisher.messaging.LOG')
    def test_published_with_policy_incorrect(self, mylog):
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://?policy=notexist' % self.protocol))
        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            self.assertRaises(
                oslo.messaging.MessageDeliveryFailure,
                getattr(publisher, self.pub_func),
                mock.MagicMock(), self.test_data)
            self.assertTrue(mylog.warn.called)
            self.assertEqual('default', publisher.policy)
            self.assertEqual(0, len(publisher.local_queue))
            fake_send.assert_called_once_with(
                mock.ANY, self.topic, mock.ANY)

    def test_published_with_policy_drop_and_rpc_down(self):
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://?policy=drop' % self.protocol))
        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            getattr(publisher, self.pub_func)(mock.MagicMock(),
                                              self.test_data)
            self.assertEqual(0, len(publisher.local_queue))
            fake_send.assert_called_once_with(
                mock.ANY, self.topic, mock.ANY)

    def test_published_with_policy_queue_and_rpc_down(self):
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://?policy=queue' % self.protocol))
        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            getattr(publisher, self.pub_func)(mock.MagicMock(),
                                              self.test_data)
            self.assertEqual(1, len(publisher.local_queue))
            fake_send.assert_called_once_with(
                mock.ANY, self.topic, mock.ANY)

    def test_published_with_policy_queue_and_rpc_down_up(self):
        self.rpc_unreachable = True
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://?policy=queue' % self.protocol))

        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            getattr(publisher, self.pub_func)(mock.MagicMock(),
                                              self.test_data)

            self.assertEqual(1, len(publisher.local_queue))

            fake_send.side_effect = mock.MagicMock()
            getattr(publisher, self.pub_func)(mock.MagicMock(),
                                              self.test_data)

            self.assertEqual(0, len(publisher.local_queue))

            topic = self.topic
            expected = [mock.call(mock.ANY, topic, mock.ANY),
                        mock.call(mock.ANY, topic, mock.ANY),
                        mock.call(mock.ANY, topic, mock.ANY)]
            self.assertEqual(expected, fake_send.mock_calls)

    def test_published_with_policy_sized_queue_and_rpc_down(self):
        publisher = self.publisher_cls(netutils.urlsplit(
            '%s://?policy=queue&max_queue_length=3' % self.protocol))

        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            for i in range(0, 5):
                for s in self.test_data:
                    setattr(s, self.attr, 'test-%d' % i)
                getattr(publisher, self.pub_func)(mock.MagicMock(),
                                                  self.test_data)

        self.assertEqual(3, len(publisher.local_queue))
        self.assertEqual(
            'test-2',
            publisher.local_queue[0][2][0][self.attr]
        )
        self.assertEqual(
            'test-3',
            publisher.local_queue[1][2][0][self.attr]
        )
        self.assertEqual(
            'test-4',
            publisher.local_queue[2][2][0][self.attr]
        )

    def test_published_with_policy_default_sized_queue_and_rpc_down(self):
        publisher = self.publisher_cls(
            netutils.urlsplit('%s://?policy=queue' % self.protocol))

        side_effect = oslo.messaging.MessageDeliveryFailure()
        with mock.patch.object(publisher, '_send') as fake_send:
            fake_send.side_effect = side_effect
            for i in range(0, 2000):
                for s in self.test_data:
                    setattr(s, self.attr, 'test-%d' % i)
                getattr(publisher, self.pub_func)(mock.MagicMock(),
                                                  self.test_data)

        self.assertEqual(1024, len(publisher.local_queue))
        self.assertEqual(
            'test-976',
            publisher.local_queue[0][2][0][self.attr]
        )
        self.assertEqual(
            'test-1999',
            publisher.local_queue[1023][2][0][self.attr]
        )
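

# Illustrative sketch, not part of the upstream test suite: the assertions in
# the two queue tests above index ``publisher.local_queue[i][2][0]``, which
# relies on each queued entry being the ``(context, topic, batch)`` tuple the
# publisher would have handed to ``_send``, with ``batch`` holding the
# serialized sample/event dicts. The helper below is hypothetical and only
# spells out that indexing; nothing in this module calls it.
def _example_local_queue_indexing():
    # Fabricated queue entry mirroring the assumed (context, topic, batch)
    # layout; 'metering' and the one-item batch are illustrative values only.
    queue_entry = (None, 'metering', [{'source': 'test-4'}])
    _ctxt, _topic, batch = queue_entry
    # Index 2 selects the batch and index 0 its first serialized item, which
    # is what publisher.local_queue[i][2][0][self.attr] reads in the tests.
    return batch[0]['source']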