code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
########################################################################### # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ########################################################################### import os import re import json import datetime from google.auth.transport.requests import Request from google_auth_oauthlib.flow import Flow, InstalledAppFlow from google.oauth2.credentials import Credentials as CredentialsUser from google.oauth2.service_account import Credentials as CredentialsService from starthinker.config import APPLICATION_NAME, APPLICATION_SCOPES from starthinker.util.auth_storage import credentials_storage_get, credentials_storage_put RE_CREDENTIALS_BUCKET = re.compile(r'[a-z0-9_\-\.]+:.+\.json') RE_CREDENTIALS_JSON = re.compile(r'^\s*\{.*\}\s*$', re.DOTALL) def CredentialsFlowWrapper(client, credentials_only=False, **kwargs): # relax scope comparison, order and default scopes are not critical os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1' # parse credentials from file or json if isinstance(client, dict): client_json = client elif RE_CREDENTIALS_JSON.match(client): client_json = json.loads(client) else: with open(client, 'r') as json_file: client_json = json.load(json_file) if credentials_only: return client_json else: if 'installed' in client_json: flow = InstalledAppFlow.from_client_config(client_json, APPLICATION_SCOPES, **kwargs) else: flow = Flow.from_client_config(client_json, APPLICATION_SCOPES, **kwargs) flow.user_agent = APPLICATION_NAME return flow def CredentialsServiceWrapper(service): if isinstance(service, dict): return CredentialsService.from_service_account_info(service) elif RE_CREDENTIALS_JSON.match(service): return CredentialsService.from_service_account_info(json.loads(service)) else: return CredentialsService.from_service_account_file(service) class CredentialsUserWrapper(CredentialsUser): def __init__(self, user=None, client=None): self.user = user self.client = client super(CredentialsUserWrapper, self).__init__(None) self.load() def from_credentials(self, credentials): self.token = credentials.token self.expiry = credentials.expiry self._refresh_token = credentials.refresh_token self._id_token = credentials.token_uri self._token_uri = credentials.token_uri self._client_id = credentials.client_id self._client_secret = credentials.client_secret self._scopes = credentials.scopes def from_json(self, data): self.token = data['access_token'] self.expiry = datetime.datetime.strptime( data['token_expiry'][:19], '%Y-%m-%dT%H:%M:%S').replace(microsecond=0) # this is always UTC self._refresh_token = data['refresh_token'] self._id_token = data['id_token'] self._token_uri = data['token_uri'] self._client_id = data['client_id'] self._client_secret = data['client_secret'] def to_json(self): return { 'access_token': self.token, 'token_expiry': self.expiry.strftime('%Y-%m-%dT%H:%M:%SZ') if self.expiry else None, 'refresh_token': self._refresh_token, 'id_token': self._id_token, 'token_uri': self._token_uri, 'client_id': self._client_id, 'client_secret': self._client_secret, 'scopes': self._scopes, } def load_file(self): if os.path.exists(self.user): with open(self.user, 'r') as json_file: self.from_json(json.load(json_file)) elif self.client: self.load_flow() def load_storage(self): return self.from_json(credentials_storage_get(self.user)) def load_flow(self): flow = CredentialsFlowWrapper(self.client) flow.run_console() self.from_credentials(flow.credentials) self.save() def load(self): if self.user is None: pass elif isinstance(self.user, dict): self.from_json(self.user) elif RE_CREDENTIALS_JSON.match(self.user): self.from_json(json.loads(self.user)) elif RE_CREDENTIALS_BUCKET.match(self.user): self.load_storage() elif self.user: self.load_file() def save_json(self): self.user = json.dumps(self.to_json()) def save_file(self): with open(self.user, 'w') as json_file: json_file.write(json.dumps(self.to_json())) def save_storage(self): credentials_storage_put(self.user, self.to_json()) def save(self, destination=None): if destination is not None: self.user = destination if self.user is None: pass elif isinstance(self.user, dict): self.user = self.to_json() elif RE_CREDENTIALS_BUCKET.match(self.user): self.save_storage() elif RE_CREDENTIALS_JSON.match(self.user): self.save_json() elif self.user: self.save_file() def refresh(self, request=None): self.load() if not self.valid: if request is None: request = Request() super(CredentialsUserWrapper, self).refresh(request) self.expiry = self.expiry.replace( microsecond=0 ) # make parsing more consistent, microseconds are not essential self.save()
google/starthinker
starthinker/util/auth_wrapper.py
Python
apache-2.0
5,824
# Copyright 2016 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import subprocess import os import typing as T parser = argparse.ArgumentParser() parser.add_argument('input') parser.add_argument('output') parser.add_argument('type') parser.add_argument('podir') parser.add_argument('--msgfmt', default='msgfmt') parser.add_argument('--datadirs', default='') parser.add_argument('args', default=[], metavar='extra msgfmt argument', nargs='*') def run(args: T.List[str]) -> int: options = parser.parse_args(args) env = None if options.datadirs: env = os.environ.copy() env.update({'GETTEXTDATADIRS': options.datadirs}) return subprocess.call([options.msgfmt, '--' + options.type, '-d', options.podir, '--template', options.input, '-o', options.output] + options.args, env=env)
mesonbuild/meson
mesonbuild/scripts/msgfmthelper.py
Python
apache-2.0
1,406
from math import sqrt def pythagorean_triplet(wanted_sum): if wanted_sum <= 0: raise ValueError("Wanted sum must be a positive number") result = [] for c in range(wanted_sum - 3, 2, -1): for b in range(c - 1, 1, -1): a = sqrt(c * c - b * b) if a % 1 == 0 and a + b + c == wanted_sum and a < b: result += [(int(a), b, c)] return result
plilja/project-euler
problem_9/pythagorean_triplet.py
Python
apache-2.0
409
import json import requests import pprint postURL = 'http://localhost:3000/posts' productURL = 'http://localhost:3000/products' dataPost = { 'picture': 'https://s-media-cache-ak0.pinimg.com/474x/1a/eb/2e/1aeb2eff3242f5884a8a23e4bdb7946f.jpg', 'description': 'Marcos favorite outfit', 'tags': [1,2], 'brands': [1,2], 'products': [1,2,3,4], 'UserId': 1 } dataProduct = { 'displayName': 'Nike Shoes', 'picture': 'https://s-media-cache-ak0.pinimg.com/474x/51/80/00/5180009b176132bba9729c0f910b4bd7.jpg', 'BrandId': 1 } headers = {'Content-type': 'application/json'} data_json = json.dumps(dataPost) response = requests.post(postURL, data=data_json, headers=headers) data_json = json.dumps(dataProduct) response = requests.post(productURL, data=data_json, headers=headers)
PolyStyle/server-nodejs
testQueries.py
Python
apache-2.0
793
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import typing import pkg_resources import google.auth # type: ignore from google.api_core import gapic_v1 from google.auth import credentials as ga_credentials # type: ignore from google.ads.googleads.v9.resources.types import conversion_action from google.ads.googleads.v9.services.types import conversion_action_service try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-ads",).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() class ConversionActionServiceTransport(metaclass=abc.ABCMeta): """Abstract transport class for ConversionActionService.""" AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",) def __init__( self, *, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" self._host = host # If no credentials are provided, then determine the appropriate # defaults. if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # Save the credentials. self._credentials = credentials # Lifted into its own function so it can be stubbed out during tests. self._prep_wrapped_messages(client_info) def _prep_wrapped_messages(self, client_info): # Precomputed wrapped methods self._wrapped_methods = { self.get_conversion_action: gapic_v1.method.wrap_method( self.get_conversion_action, default_timeout=None, client_info=client_info, ), self.mutate_conversion_actions: gapic_v1.method.wrap_method( self.mutate_conversion_actions, default_timeout=None, client_info=client_info, ), } def close(self): """Closes resources associated with the transport. .. warning:: Only call this method if the transport is NOT shared with other clients - this may cause errors in other clients! """ raise NotImplementedError() @property def get_conversion_action( self, ) -> typing.Callable[ [conversion_action_service.GetConversionActionRequest], conversion_action.ConversionAction, ]: raise NotImplementedError @property def mutate_conversion_actions( self, ) -> typing.Callable[ [conversion_action_service.MutateConversionActionsRequest], conversion_action_service.MutateConversionActionsResponse, ]: raise NotImplementedError __all__ = ("ConversionActionServiceTransport",)
googleads/google-ads-python
google/ads/googleads/v9/services/services/conversion_action_service/transports/base.py
Python
apache-2.0
4,409
# -*- coding: utf-8 -*- from fixture.variables import UserLogin, Profinity from contract_lib import Contract def test_of_add_new_valid_contact(app): """ Validation of add correct new contact with full data """ app.session.login(UserLogin.name, UserLogin.password) app.add_contract() app.add_full_name(Contract(first_name=Profinity.correct_data, last_name=Profinity.correct_data, middle_name=Profinity.correct_data, nickname=Profinity.correct_data)) app.add_title(Contract(title=Profinity.correct_data)) app.add_company(Contract(company_name=Profinity.correct_data)) app.add_address(Contract(address_name=Profinity.correct_data)) app.add_phone_number(Contract(work=Profinity.correct_phone_number, fax=Profinity.correct_phone_number, home=Profinity.correct_phone_number, mobile=Profinity.correct_phone_number)) app.add_email(Contract(email1=Profinity.correct_email, email2=Profinity.correct_email, email3=Profinity.correct_email)) app.add_homepage(Contract(homepage=Profinity.correct_data)) app.add_year() # secondary data app.add_secondary_adress(Contract(address=Profinity.correct_data)) app.add_secondary_home(Contract(phone=Profinity.correct_data)) app.add_secondary_notes(Contract(notes=Profinity.correct_data)) app.submit_contact() app.session.logout() def test_of_add_new_valid_contact_name_only(app): """ Validation of add correct new contact with only full name """ app.session.login(UserLogin.name, UserLogin.password) app.add_contract() app.add_full_name(Contract(first_name=Profinity.correct_data, last_name=Profinity.correct_data, middle_name=Profinity.correct_data, nickname=Profinity.correct_data)) app.submit_contact() app.session.logout()
werbk/task-2.3
tests_contract/test_contract.py
Python
apache-2.0
1,864
from fixture.orm import ORMFixture from model.group import Group db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="") try: l = db.get_contacts_not_in_group_from_db(Group(id='213')) for item in l: print(item) print(len(l)) finally: pass # orm автоматически закрывает соединение с БД, поэтому db.destroy() не нужно
Dob3r/python_training
check_orm_db_connection.py
Python
apache-2.0
417
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from collections import Counter from flask import request from flask_appbuilder import expose from flask_appbuilder.security.decorators import has_access_api from sqlalchemy.orm.exc import NoResultFound from superset import db from superset.connectors.connector_registry import ConnectorRegistry from superset.models.core import Database from superset.typing import FlaskResponse from .base import api, BaseSupersetView, handle_api_exception, json_error_response class Datasource(BaseSupersetView): """Datasource-related views""" @expose("/save/", methods=["POST"]) @has_access_api @api @handle_api_exception def save(self) -> FlaskResponse: data = request.form.get("data") if not isinstance(data, str): return json_error_response("Request missing data field.", status=500) datasource_dict = json.loads(data) datasource_id = datasource_dict.get("id") datasource_type = datasource_dict.get("type") database_id = datasource_dict["database"].get("id") orm_datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) orm_datasource.database_id = database_id if "owners" in datasource_dict and orm_datasource.owner_class is not None: datasource_dict["owners"] = ( db.session.query(orm_datasource.owner_class) .filter(orm_datasource.owner_class.id.in_(datasource_dict["owners"])) .all() ) duplicates = [ name for name, count in Counter( [col["column_name"] for col in datasource_dict["columns"]] ).items() if count > 1 ] if duplicates: return json_error_response( f"Duplicate column name(s): {','.join(duplicates)}", status=409 ) orm_datasource.update_from_object(datasource_dict) data = orm_datasource.data db.session.commit() return self.json_response(data) @expose("/get/<datasource_type>/<datasource_id>/") @has_access_api @api @handle_api_exception def get(self, datasource_type: str, datasource_id: int) -> FlaskResponse: try: orm_datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) if not orm_datasource.data: return json_error_response( "Error fetching datasource data.", status=500 ) return self.json_response(orm_datasource.data) except NoResultFound: return json_error_response("This datasource does not exist", status=400) @expose("/external_metadata/<datasource_type>/<datasource_id>/") @has_access_api @api @handle_api_exception def external_metadata( self, datasource_type: str, datasource_id: int ) -> FlaskResponse: """Gets column info from the source system""" if datasource_type == "druid": datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session ) elif datasource_type == "table": database = ( db.session.query(Database).filter_by(id=request.args.get("db_id")).one() ) table_class = ConnectorRegistry.sources["table"] datasource = table_class( database=database, table_name=request.args.get("table_name"), schema=request.args.get("schema") or None, ) else: raise Exception(f"Unsupported datasource_type: {datasource_type}") external_metadata = datasource.external_metadata() return self.json_response(external_metadata)
airbnb/superset
superset/views/datasource.py
Python
apache-2.0
4,629
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for Stochastic Polyak solver. Including projections for pytress.""" import jaxopt import optax from stochastic_polyak import spsdam_solver from stochastic_polyak import spsdiag_solver from stochastic_polyak import spsL1_solver from stochastic_polyak import spssqrt_solver from stochastic_polyak import ssps_solver def get_solver(flags, config, loss_fun, losses): """Gets the solver used for training based on FLAGS and config. Args: flags: Flags passed config: Hyperparameter configuration for training and evaluation. loss_fun: A loss function that return a real output for each batch losses: Loss function that return a b-dimensional output, one for each element in the batch Returns: solver: The optimizaiton solver solver_param_name: Name of the solver with parameter choices """ momentum = flags.momentum if flags.momentum == 0: # Try to get default momentum from config momentum = config.get("momentum", flags.momentum) delta = flags.slack_delta if flags.slack_delta == -1: # Try to get default delta from config delta = config.get("delta", delta) lmbda = flags.slack_lmbda if flags.slack_lmbda == -1: # Try to get default lmbda from config lmbda = config.get("lmbda", lmbda) # Initialize solver and parameters. if config.solver == "SGD": opt = optax.sgd(config.learning_rate, momentum) solver = jaxopt.OptaxSolver(opt=opt, fun=loss_fun, has_aux=True) elif config.solver == "SPS": solver = jaxopt.PolyakSGD( fun=loss_fun, maxiter=flags.max_steps_per_epoch, momentum=momentum, delta=delta, max_stepsize=config.max_step_size, has_aux=True) solver_param_name = "m-"+str(momentum)+"-d-"+str(delta) elif config.solver == "SPSDam": solver = spsdam_solver.SPSDam(fun=loss_fun, lmbda=lmbda, lmbda_schedule=config.lmbda_schedule, momentum=momentum, has_aux=True) solver_param_name = "m-"+str(momentum)+"-lmbda-"+str(lmbda) elif config.solver == "SPSL1": solver = spsL1_solver.SPSL1( fun=loss_fun, lmbda=lmbda, delta=delta, momentum=momentum, has_aux=True) solver_param_name = "m-"+str(momentum)+"-lmbda-"+str(lmbda)+"-d-"+str(delta) elif config.solver == "SPSsqrt": solver = spssqrt_solver.SPSsqrt( fun=loss_fun, lmbda=lmbda, momentum=momentum, has_aux=True) solver_param_name = "m-"+str(momentum)+"-lmbda-"+str(lmbda) elif config.solver == "SSPS": solver = ssps_solver.SystemStochasticPolyak( fun=losses, delta=delta, learning_rate=config.learning_rate, choose_update=flags.choose_update, has_aux=True) elif config.solver == "SPSDiag": solver = spsdiag_solver.DiagonalStochasticPolyak( fun=losses, learning_rate=config.learning_rate, delta=delta, momentum=momentum, has_aux=True) solver_param_name = "m-"+str(momentum)+"-delta-"+str(delta) else: raise ValueError("Unknown solver: %s" % config.solver) return solver, solver_param_name
google-research/google-research
stochastic_polyak/get_solver.py
Python
apache-2.0
3,787
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import cors from murano.common.i18n import _ from murano import version paste_deploy_opts = [ cfg.StrOpt('flavor', help='Paste flavor'), cfg.StrOpt('config_file', help='Path to Paste config file'), ] bind_opts = [ cfg.StrOpt('bind-host', default='0.0.0.0', help='Address to bind the Murano API server to.'), cfg.PortOpt('bind-port', default=8082, help='Port the bind the Murano API server to.'), ] rabbit_opts = [ cfg.StrOpt('host', default='localhost', help='The RabbitMQ broker address which used for communication ' 'with Murano guest agents.'), cfg.PortOpt('port', default=5672, help='The RabbitMQ broker port.'), cfg.StrOpt('login', default='guest', help='The RabbitMQ login.'), cfg.StrOpt('password', default='guest', help='The RabbitMQ password.'), cfg.StrOpt('virtual_host', default='/', help='The RabbitMQ virtual host.'), cfg.BoolOpt('ssl', default=False, help='Boolean flag to enable SSL communication through the ' 'RabbitMQ broker between murano-engine and guest agents.'), cfg.StrOpt('ca_certs', default='', help='SSL cert file (valid only if SSL enabled).'), cfg.BoolOpt('insecure', default=False, help='This option explicitly allows Murano to perform ' '"insecure" SSL connections to RabbitMQ'), ] heat_opts = [ cfg.StrOpt('url', help='Optional heat endpoint override'), cfg.BoolOpt('insecure', default=False, help='This option explicitly allows Murano to perform ' '"insecure" SSL connections and transfers with Heat API.'), cfg.StrOpt('ca_file', help='(SSL) Tells Murano to use the specified certificate file ' 'to verify the peer running Heat API.'), cfg.StrOpt('cert_file', help='(SSL) Tells Murano to use the specified client ' 'certificate file when communicating with Heat.'), cfg.StrOpt('key_file', help='(SSL/SSH) Private key file name to ' 'communicate with Heat API.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Heat endpoint type.'), cfg.ListOpt('stack_tags', default=['murano'], help='List of tags to be assigned to heat stacks created ' 'during environment deployment.') ] mistral_opts = [ cfg.StrOpt('url', help='Optional mistral endpoint override'), cfg.StrOpt('endpoint_type', default='publicURL', help='Mistral endpoint type.'), cfg.StrOpt('service_type', default='workflowv2', help='Mistral service type.'), cfg.BoolOpt('insecure', default=False, help='This option explicitly allows Murano to perform ' '"insecure" SSL connections and transfers with Mistral.'), cfg.StrOpt('ca_cert', help='(SSL) Tells Murano to use the specified client ' 'certificate file when communicating with Mistral.') ] neutron_opts = [ cfg.StrOpt('url', help='Optional neutron endpoint override'), cfg.BoolOpt('insecure', default=False, help='This option explicitly allows Murano to perform ' '"insecure" SSL connections and transfers with Neutron API.'), cfg.StrOpt('ca_cert', help='(SSL) Tells Murano to use the specified client ' 'certificate file when communicating with Neutron.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Neutron endpoint type.') ] murano_opts = [ cfg.StrOpt('url', help='Optional murano url in format ' 'like http://0.0.0.0:8082 used by Murano engine'), cfg.BoolOpt('insecure', default=False, help='This option explicitly allows Murano to perform ' '"insecure" SSL connections and transfers used by ' 'Murano engine.'), cfg.StrOpt('cacert', help='(SSL) Tells Murano to use the specified client ' 'certificate file when communicating with Murano API ' 'used by Murano engine.'), cfg.StrOpt('cert_file', help='(SSL) Tells Murano to use the specified client ' 'certificate file when communicating with Murano ' 'used by Murano engine.'), cfg.StrOpt('key_file', help='(SSL/SSH) Private key file name ' 'to communicate with Murano API used by ' 'Murano engine.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Murano endpoint type used by Murano engine.'), cfg.ListOpt('enabled_plugins', help="List of enabled Extension Plugins. " "Remove or leave commented to enable all installed " "plugins."), cfg.IntOpt('package_size_limit', default=5, help='Maximum application package size, Mb', deprecated_group='packages_opts'), cfg.IntOpt('limit_param_default', default=20, help='Default value for package pagination in API.', deprecated_group='packages_opts'), cfg.IntOpt('api_limit_max', default=100, help='Maximum number of packages to be returned in a single ' 'pagination request', deprecated_group='packages_opts'), cfg.IntOpt('api_workers', help=_('Number of API workers')), ] networking_opts = [ cfg.IntOpt('max_environments', default=250, help='Maximum number of environments that use a single router ' 'per tenant'), cfg.IntOpt('max_hosts', default=250, help='Maximum number of VMs per environment'), cfg.StrOpt('env_ip_template', default='10.0.0.0', help='Template IP address for generating environment ' 'subnet cidrs'), cfg.ListOpt('default_dns', default=[], help='List of default DNS nameservers to be assigned to ' 'created Networks'), cfg.StrOpt('external_network', default='ext-net', help='ID or name of the external network for routers ' 'to connect to'), cfg.StrOpt('router_name', default='murano-default-router', help='Name of the router that going to be used in order to ' 'join all networks created by Murano'), cfg.BoolOpt('create_router', default=True, help='This option will create a router when one with ' '"router_name" does not exist'), cfg.StrOpt('network_config_file', default='netconfig.yaml', help='If provided networking configuration will be taken ' 'from this file'), cfg.StrOpt('driver', default=None, choices=['neutron', 'nova'], help='Network driver to use. Options are neutron or nova.' 'If not provided, the driver will be detected.'), ] stats_opts = [ cfg.IntOpt('period', default=5, help=_('Statistics collection interval in minutes.' 'Default value is 5 minutes.')) ] engine_opts = [ cfg.BoolOpt('disable_murano_agent', default=False, help=_('Disallow the use of murano-agent')), cfg.StrOpt('class_configs', default='/etc/murano/class-configs', help=_('Path to class configuration files')), cfg.BoolOpt('use_trusts', default=True, help=_("Create resources using trust token rather " "than user's token")), cfg.BoolOpt('enable_model_policy_enforcer', default=False, help=_('Enable model policy enforcer using Congress')), cfg.IntOpt('agent_timeout', default=3600, help=_('Time for waiting for a response from murano agent ' 'during the deployment')), cfg.IntOpt('engine_workers', deprecated_opts=[cfg.DeprecatedOpt('workers', group='engine')], help=_('Number of engine workers')), cfg.ListOpt('load_packages_from', default=[], help=_('List of directories to load local packages from. ' 'If not provided, packages will be loaded only API'), deprecated_group='packages_opts'), cfg.StrOpt('packages_cache', help='Location (directory) for Murano package cache.', deprecated_group='packages_opts'), cfg.BoolOpt('enable_packages_cache', default=True, help=_('Enables murano-engine to persist on disk ' 'packages downloaded during deployments. ' 'The packages would be re-used for consequent ' 'deployments.'), deprecated_group='packages_opts'), cfg.StrOpt('packages_service', default='murano', help=_('The service to store murano packages: murano (stands ' 'for legacy behavior using murano-api) or glance ' '(stands for glance-glare artifact service)'), deprecated_group='packages_opts'), ] # TODO(sjmc7): move into engine opts? metadata_dir = [ cfg.StrOpt('metadata-dir', default='./meta', help='Metadata dir') ] glare_opts = [ cfg.StrOpt('url', help='Optional glare url in format ' 'like http://0.0.0.0:9494 used by Glare API', deprecated_group='glance'), cfg.BoolOpt('insecure', default=False, help='This option explicitly allows Murano to perform ' '"insecure" SSL connections and transfers with Glare API.', deprecated_group='glance'), cfg.StrOpt('ca_file', help='(SSL) Tells Murano to use the specified certificate file ' 'to verify the peer running Glare API.', deprecated_group='glance'), cfg.StrOpt('cert_file', help='(SSL) Tells Murano to use the specified client ' 'certificate file when communicating with Glare.', deprecated_group='glance'), cfg.StrOpt('key_file', help='(SSL/SSH) Private key file name to ' 'communicate with Glare API.', deprecated_group='glance'), cfg.StrOpt('endpoint_type', default='publicURL', help='Glare endpoint type.', deprecated_group='glance') ] file_server = [ cfg.StrOpt('file_server', default='', help='Set a file server.') ] home_region = cfg.StrOpt( 'home_region', default=None, help="Default region name used to get services endpoints.") CONF = cfg.CONF CONF.register_opts(paste_deploy_opts, group='paste_deploy') CONF.register_cli_opts(bind_opts) CONF.register_opts(rabbit_opts, group='rabbitmq') CONF.register_opts(heat_opts, group='heat') CONF.register_opts(mistral_opts, group='mistral') CONF.register_opts(neutron_opts, group='neutron') CONF.register_opts(murano_opts, group='murano') CONF.register_opts(engine_opts, group='engine') CONF.register_opts(file_server) CONF.register_opt(home_region) CONF.register_cli_opts(metadata_dir) CONF.register_opts(stats_opts, group='stats') CONF.register_opts(networking_opts, group='networking') CONF.register_opts(glare_opts, group='glare') def parse_args(args=None, usage=None, default_config_files=None): logging.register_options(CONF) CONF(args=args, project='murano', version=version.version_string, usage=usage, default_config_files=default_config_files) def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Configuration-Session', 'X-Roles', 'X-User-Id', 'X-Tenant-Id'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Configuration-Session', 'X-Roles', 'X-User-Id', 'X-Tenant-Id'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] )
DavidPurcell/murano_temp
murano/common/config.py
Python
apache-2.0
13,696
from panda3d.core import NodePath, Vec4, TextNode, CardMaker, TransparencyAttrib from direct.interval.IntervalGlobal import LerpColorScaleInterval, Sequence, Func, Wait, Parallel from toontown.achievements import Achievements from toontown.toonbase import ToontownGlobals from toontown.toonbase import TTLocalizer achievementModel = loader.loadModel('phase_3.5/models/gui/achievement_set.bam') achievementSfx = base.loadSfx('phase_3.5/audio/sfx/poof_in.ogg') lockTexture = loader.loadTexture('phase_3.5/maps/achievement_lock.png') CategoryModels = { Achievements.BRONZE: achievementModel.find('**/achievement1'), Achievements.SILVER: achievementModel.find('**/achievement2'), Achievements.GOLD: achievementModel.find('**/achievement3'), Achievements.PLATINUM: achievementModel.find('**/achievement4') } class AchievementNode(NodePath): def __init__(self, achievementId, faded=False, locked=False): NodePath.__init__(self, hidden.attachNewNode('achievement-%s-%s' % (achievementId, id(self)))) self.achievementId = achievementId self.category = Achievements.getAchievementCategory(self.achievementId) CategoryModels[self.category].copyTo(self) if not faded: self.generateAchievementInfo() if locked: cm = CardMaker('lock') lock = self.attachNewNode(cm.generate()) lock.setTransparency(TransparencyAttrib.MAlpha) lock.setTexture(lockTexture) lock.setScale(0.35) lock.setPos(1.5, 0, -0.025) lock.setColorScale(0, 0, 0, 0.6) if faded: self.setColorScale(0, 0, 0, 0.1) self.flattenStrong() def generateAchievementInfo(self): acievementInfo = TTLocalizer.getAchievementInfo(self.achievementId) title = TextNode('title') title.setText(acievementInfo[0]) title.setFont(ToontownGlobals.getSignFont()) title.setTextColor(1, 1, 1, 1) title.setAlign(TextNode.ACenter) titleNode = self.attachNewNode(title) titleNode.setScale(0.2) titleNode.setZ(0.2) description = TextNode('description') description.setText(acievementInfo[1]) description.setFont(ToontownGlobals.getSignFont()) description.setTextColor(1, 1, 1, 1) description.setAlign(TextNode.ACenter) descriptionNode = self.attachNewNode(description) descriptionNode.setScale(0.15) descriptionNode.setZ(-0.14) class AchievementPopup(NodePath): def __init__(self, achievementId): NodePath.__init__(self, hidden.attachNewNode('achievement-popup-%s' % id(self))) AchievementNode(achievementId).reparentTo(self) self.reparentTo(base.a2dTopCenter, 4000) self.stash() self.setScale(0.3) self.setZ(-0.18) self.callback = None def setCallback(self, callback): self.callback = callback def doCallback(self): if self.callback is not None: self.callback() def cleanup(self): self.removeNode() def play(self): Sequence( Parallel( Sequence( Func(self.unstash), Func(self.setTransparency, 1), LerpColorScaleInterval(self, 1.2, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0)), Func(self.clearColorScale), Func(self.clearTransparency) ), Func(base.playSfx, achievementSfx) ), Wait(2.5), Sequence( Func(self.setTransparency, 1), LerpColorScaleInterval(self, 0.4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1)), Func(self.clearColorScale), Func(self.clearTransparency), Func(self.stash) ), Func(self.cleanup), Func(self.doCallback) ).start() class AchievementsGUI: def __init__(self): self.queue = [] self.playing = False def showAchievement(self, achievementId): popup = AchievementPopup(achievementId) self.queue.append(popup) if self.playing is False: self.playing = True self.showNext() def showNext(self): if len(self.queue) == 0: self.playing = False return popup = self.queue.pop(0) popup.setCallback(self.showNext) popup.play()
linktlh/Toontown-journey
toontown/achievements/AchievementsGUI.py
Python
apache-2.0
4,538
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import Timeout import swift.common.utils class MessageTimeout(Timeout): def __init__(self, seconds=None, msg=None): Timeout.__init__(self, seconds=seconds) self.msg = msg def __str__(self): return '%s: %s' % (Timeout.__str__(self), self.msg) class SwiftException(Exception): pass class PutterConnectError(Exception): def __init__(self, status=None): self.status = status class InvalidTimestamp(SwiftException): pass class InsufficientStorage(SwiftException): pass class FooterNotSupported(SwiftException): pass class MultiphasePUTNotSupported(SwiftException): pass class SuffixSyncError(SwiftException): pass class RangeAlreadyComplete(SwiftException): pass class DiskFileError(SwiftException): pass class DiskFileNotOpen(DiskFileError): pass class DiskFileQuarantined(DiskFileError): pass class DiskFileCollision(DiskFileError): pass class DiskFileNotExist(DiskFileError): pass class DiskFileDeleted(DiskFileNotExist): def __init__(self, metadata=None): self.metadata = metadata or {} self.timestamp = swift.common.utils.Timestamp( self.metadata.get('X-Timestamp', 0)) class DiskFileExpired(DiskFileDeleted): pass class DiskFileNoSpace(DiskFileError): pass class DiskFileDeviceUnavailable(DiskFileError): pass class DiskFileXattrNotSupported(DiskFileError): pass class DiskFileBadMetadataChecksum(DiskFileError): pass class DeviceUnavailable(SwiftException): pass class InvalidAccountInfo(SwiftException): pass class PathNotDir(OSError): pass class ChunkReadError(SwiftException): pass class ShortReadError(SwiftException): pass class ChunkReadTimeout(Timeout): pass class ChunkWriteTimeout(Timeout): pass class ConnectionTimeout(Timeout): pass class ResponseTimeout(Timeout): pass class DriveNotMounted(SwiftException): pass class LockTimeout(MessageTimeout): pass class RingLoadError(SwiftException): pass class RingBuilderError(SwiftException): pass class RingValidationError(RingBuilderError): pass class EmptyRingError(RingBuilderError): pass class DuplicateDeviceError(RingBuilderError): pass class UnPicklingError(SwiftException): pass class FileNotFoundError(SwiftException): pass class PermissionError(SwiftException): pass class ListingIterError(SwiftException): pass class ListingIterNotFound(ListingIterError): pass class ListingIterNotAuthorized(ListingIterError): def __init__(self, aresp): self.aresp = aresp class SegmentError(SwiftException): pass class LinkIterError(SwiftException): pass class ReplicationException(Exception): pass class ReplicationLockTimeout(LockTimeout): pass class PartitionLockTimeout(LockTimeout): pass class MimeInvalid(SwiftException): pass class APIVersionError(SwiftException): pass class EncryptionException(SwiftException): pass class UnknownSecretIdError(EncryptionException): pass class QuarantineRequest(SwiftException): pass class ClientException(Exception): def __init__(self, msg, http_scheme='', http_host='', http_port='', http_path='', http_query='', http_status=None, http_reason='', http_device='', http_response_content='', http_headers=None): super(ClientException, self).__init__(msg) self.msg = msg self.http_scheme = http_scheme self.http_host = http_host self.http_port = http_port self.http_path = http_path self.http_query = http_query self.http_status = http_status self.http_reason = http_reason self.http_device = http_device self.http_response_content = http_response_content self.http_headers = http_headers or {} def __str__(self): a = self.msg b = '' if self.http_scheme: b += '%s://' % self.http_scheme if self.http_host: b += self.http_host if self.http_port: b += ':%s' % self.http_port if self.http_path: b += self.http_path if self.http_query: b += '?%s' % self.http_query if self.http_status: if b: b = '%s %s' % (b, self.http_status) else: b = str(self.http_status) if self.http_reason: if b: b = '%s %s' % (b, self.http_reason) else: b = '- %s' % self.http_reason if self.http_device: if b: b = '%s: device %s' % (b, self.http_device) else: b = 'device %s' % self.http_device if self.http_response_content: if len(self.http_response_content) <= 60: b += ' %s' % self.http_response_content else: b += ' [first 60 chars of response] %s' \ % self.http_response_content[:60] return b and '%s: %s' % (a, b) or a class InvalidPidFileException(Exception): pass
swiftstack/swift
swift/common/exceptions.py
Python
apache-2.0
5,786
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, softwar # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from extensions.interactions import base class SetInput(base.BaseInteraction): """Interaction for input of an unordered set of strings.""" name = 'Set Input' description = 'Allows learners to enter an unordered set of strings.' display_mode = base.DISPLAY_MODE_INLINE _dependency_ids = [] answer_type = 'SetOfUnicodeString' instructions = None narrow_instructions = None needs_summary = False # NB: There used to be a UnicodeString-typed parameter here called # 'element_type'. This has since been removed. _customization_arg_specs = [] _answer_visualization_specs = [{ # Table with answer counts for top N answers. 'id': 'FrequencyTable', 'options': { 'column_headers': ['Answer', 'Count'], 'title': 'Top 10 answers' }, 'calculation_id': 'Top10AnswerFrequencies', }, { # Table with most commonly submitted elements of set. 'id': 'FrequencyTable', 'options': { 'column_headers': ['Element', 'Count'], 'title': 'Commonly submitted elements' }, 'calculation_id': 'FrequencyCommonlySubmittedElements', }]
shaz13/oppia
extensions/interactions/SetInput/SetInput.py
Python
apache-2.0
1,817
import hashlib import re import logging from datetime import datetime from django.conf import settings # Avoid shadowing the login() view below. from django.views.decorators.csrf import csrf_protect from django.core.cache import cache from django.core.urlresolvers import reverse from django.contrib import messages from django.shortcuts import render_to_response from django.contrib.sites.models import Site, RequestSite from django.http import HttpResponseRedirect, Http404 from django.template import RequestContext from django.utils.http import urlquote, base36_to_int from django.utils.translation import ugettext as _ from django.views.decorators.cache import never_cache from seahub.auth import REDIRECT_FIELD_NAME, get_backends from seahub.auth import login as auth_login from seahub.auth.decorators import login_required from seahub.auth.forms import AuthenticationForm, CaptchaAuthenticationForm from seahub.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm from seahub.auth.tokens import default_token_generator from seahub.base.accounts import User from seahub.utils import is_ldap_user from seahub.utils.http import is_safe_url from seahub.utils.ip import get_remote_ip from seahub.settings import USER_PASSWORD_MIN_LENGTH, \ USER_STRONG_PASSWORD_REQUIRED, USER_PASSWORD_STRENGTH_LEVEL # Get an instance of a logger logger = logging.getLogger(__name__) LOGIN_ATTEMPT_PREFIX = 'UserLoginAttempt_' def log_user_in(request, user, redirect_to): # Ensure the user-originating redirection url is safe. if not is_safe_url(url=redirect_to, host=request.get_host()): redirect_to = settings.LOGIN_REDIRECT_URL # Okay, security checks complete. Log the user in. auth_login(request, user) if request.session.test_cookie_worked(): request.session.delete_test_cookie() _clear_login_failed_attempts(request) return HttpResponseRedirect(redirect_to) def _get_login_failed_attempts(username=None, ip=None): """Get login failed attempts base on username and ip. If both username and ip are provided, return the max value. Arguments: - `username`: - `ip`: """ if username is None and ip is None: return 0 username_attempts = ip_attempts = 0 if username: username_attempts = cache.get(LOGIN_ATTEMPT_PREFIX + username, 0) if ip: ip_attempts = cache.get(LOGIN_ATTEMPT_PREFIX + ip, 0) return max(username_attempts, ip_attempts) def _incr_login_faied_attempts(username=None, ip=None): """Increase login failed attempts by 1 for both username and ip. Arguments: - `username`: - `ip`: Returns new value of failed attempts. """ timeout = settings.LOGIN_ATTEMPT_TIMEOUT username_attempts = 0 ip_attempts = 0 if username: try: username_attempts = cache.incr(LOGIN_ATTEMPT_PREFIX + username) except ValueError: cache.set(LOGIN_ATTEMPT_PREFIX + username, 0, timeout) if ip: try: ip_attempts = cache.incr(LOGIN_ATTEMPT_PREFIX + ip) except ValueError: cache.set(LOGIN_ATTEMPT_PREFIX + ip, 0, timeout) return max(username_attempts, ip_attempts) def _clear_login_failed_attempts(request): """Clear login failed attempts records. Arguments: - `request`: """ username = request.user.username ip = get_remote_ip(request) cache.delete(LOGIN_ATTEMPT_PREFIX + username) cache.delete(LOGIN_ATTEMPT_PREFIX + ip) @csrf_protect @never_cache def login(request, template_name='registration/login.html', redirect_if_logged_in=None, redirect_field_name=REDIRECT_FIELD_NAME, authentication_form=AuthenticationForm): """Displays the login form and handles the login action.""" if request.user.is_authenticated() and redirect_if_logged_in: return HttpResponseRedirect(reverse(redirect_if_logged_in)) redirect_to = request.REQUEST.get(redirect_field_name, '') ip = get_remote_ip(request) if request.method == "POST": if request.REQUEST.get('captcha_0', '') != '': # have captcha form = CaptchaAuthenticationForm(data=request.POST) if form.is_valid(): # captcha & passwod is valid, log user in remember_me = True if request.REQUEST.get( 'remember_me', '') == 'on' else False request.session['remember_me'] = remember_me return log_user_in(request, form.get_user(), redirect_to) else: # show page with captcha and increase failed login attempts _incr_login_faied_attempts(ip=ip) else: form = authentication_form(data=request.POST) if form.is_valid(): # password is valid, log user in remember_me = True if request.REQUEST.get( 'remember_me', '') == 'on' else False request.session['remember_me'] = remember_me return log_user_in(request, form.get_user(), redirect_to) else: username = urlquote(request.REQUEST.get('username', '').strip()) failed_attempt = _incr_login_faied_attempts(username=username, ip=ip) if failed_attempt >= settings.LOGIN_ATTEMPT_LIMIT: logger.warn('Login attempt limit reached, username: %s, ip: %s, attemps: %d' % (username, ip, failed_attempt)) form = CaptchaAuthenticationForm() else: form = authentication_form(data=request.POST) else: ### GET failed_attempt = _get_login_failed_attempts(ip=ip) if failed_attempt >= settings.LOGIN_ATTEMPT_LIMIT: logger.warn('Login attempt limit reached, ip: %s, attempts: %d' % (ip, failed_attempt)) form = CaptchaAuthenticationForm(request) else: form = authentication_form(request) request.session.set_test_cookie() if Site._meta.installed: current_site = Site.objects.get_current() else: current_site = RequestSite(request) enable_signup = getattr(settings, 'ENABLE_SIGNUP', False) multi_tenancy = getattr(settings, 'MULTI_TENANCY', False) if enable_signup: if multi_tenancy: org_account_only = getattr(settings, 'FORCE_ORG_REGISTER', False) if org_account_only: signup_url = reverse('org_register') else: signup_url = reverse('choose_register') else: signup_url = reverse('registration_register') else: signup_url = '' enable_shib_login = getattr(settings, 'ENABLE_SHIB_LOGIN', False) return render_to_response(template_name, { 'form': form, redirect_field_name: redirect_to, 'site': current_site, 'site_name': current_site.name, 'remember_days': settings.LOGIN_REMEMBER_DAYS, 'signup_url': signup_url, 'enable_shib_login': enable_shib_login, }, context_instance=RequestContext(request)) def login_simple_check(request): """A simple check for login called by thirdpart systems(OA, etc). Token generation: MD5(secret_key + foo@foo.com + 2014-1-1).hexdigest() Token length: 32 hexadecimal digits. """ username = request.REQUEST.get('user', '') random_key = request.REQUEST.get('token', '') if not username or not random_key: raise Http404 today = datetime.now().strftime('%Y-%m-%d') expect = hashlib.md5(settings.SECRET_KEY+username+today).hexdigest() if expect == random_key: try: user = User.objects.get(email=username) except User.DoesNotExist: raise Http404 for backend in get_backends(): user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__) auth_login(request, user) return HttpResponseRedirect(settings.SITE_ROOT) else: raise Http404 def logout(request, next_page=None, template_name='registration/logged_out.html', redirect_field_name=REDIRECT_FIELD_NAME): "Logs out the user and displays 'You are logged out' message." from seahub.auth import logout logout(request) if next_page is None: redirect_to = request.REQUEST.get(redirect_field_name, '') if redirect_to: return HttpResponseRedirect(redirect_to) else: return render_to_response(template_name, { 'title': _('Logged out') }, context_instance=RequestContext(request)) else: # Redirect to this page until the session has been cleared. return HttpResponseRedirect(next_page or request.path) def logout_then_login(request, login_url=None): "Logs out the user if he is logged in. Then redirects to the log-in page." if not login_url: login_url = settings.LOGIN_URL return logout(request, login_url) def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME): "Redirects the user to the login page, passing the given 'next' page" if not login_url: login_url = settings.LOGIN_URL return HttpResponseRedirect('%s?%s=%s' % (login_url, urlquote(redirect_field_name), urlquote(next))) # 4 views for password reset: # - password_reset sends the mail # - password_reset_done shows a success message for the above # - password_reset_confirm checks the link the user clicked and # prompts for a new password # - password_reset_complete shows a success message for the above @csrf_protect def password_reset(request, is_admin_site=False, template_name='registration/password_reset_form.html', email_template_name='registration/password_reset_email.html', password_reset_form=PasswordResetForm, token_generator=default_token_generator, post_reset_redirect=None): if post_reset_redirect is None: post_reset_redirect = reverse('auth_password_reset_done') if request.method == "POST": form = password_reset_form(request.POST) if form.is_valid(): opts = {} opts['use_https'] = request.is_secure() opts['token_generator'] = token_generator if is_admin_site: opts['domain_override'] = request.META['HTTP_HOST'] else: opts['email_template_name'] = email_template_name if not Site._meta.installed: opts['domain_override'] = RequestSite(request).domain try: form.save(**opts) except Exception, e: logger.error(str(e)) messages.error(request, _(u'Failed to send email, please contact administrator.')) return render_to_response(template_name, { 'form': form, }, context_instance=RequestContext(request)) else: return HttpResponseRedirect(post_reset_redirect) else: form = password_reset_form() return render_to_response(template_name, { 'form': form, }, context_instance=RequestContext(request)) def password_reset_done(request, template_name='registration/password_reset_done.html'): return render_to_response(template_name, context_instance=RequestContext(request)) # Doesn't need csrf_protect since no-one can guess the URL def password_reset_confirm(request, uidb36=None, token=None, template_name='registration/password_reset_confirm.html', token_generator=default_token_generator, set_password_form=SetPasswordForm, post_reset_redirect=None): """ View that checks the hash in a password reset link and presents a form for entering a new password. """ assert uidb36 is not None and token is not None # checked by URLconf if post_reset_redirect is None: post_reset_redirect = reverse('auth_password_reset_complete') try: uid_int = base36_to_int(uidb36) user = User.objects.get(id=uid_int) except (ValueError, User.DoesNotExist): user = None context_instance = RequestContext(request) if token_generator.check_token(user, token): context_instance['validlink'] = True if request.method == 'POST': form = set_password_form(user, request.POST) if form.is_valid(): form.save() return HttpResponseRedirect(post_reset_redirect) else: form = set_password_form(None) else: context_instance['validlink'] = False form = None context_instance['form'] = form return render_to_response(template_name, context_instance=context_instance) def password_reset_complete(request, template_name='registration/password_reset_complete.html'): return render_to_response(template_name, context_instance=RequestContext(request, {'login_url': settings.LOGIN_URL})) @csrf_protect @login_required def password_change(request, template_name='registration/password_change_form.html', post_change_redirect=None, password_change_form=PasswordChangeForm): if post_change_redirect is None: post_change_redirect = reverse('auth_password_change_done') if is_ldap_user(request.user): messages.error(request, _("Can not update password, please contact LDAP admin.")) if request.method == "POST": form = password_change_form(user=request.user, data=request.POST) if form.is_valid(): form.save() return HttpResponseRedirect(post_change_redirect) else: form = password_change_form(user=request.user) return render_to_response(template_name, { 'form': form, 'min_len': USER_PASSWORD_MIN_LENGTH, 'strong_pwd_required': USER_STRONG_PASSWORD_REQUIRED, 'level': USER_PASSWORD_STRENGTH_LEVEL, }, context_instance=RequestContext(request)) def password_change_done(request, template_name='registration/password_change_done.html'): return render_to_response(template_name, context_instance=RequestContext(request))
Chilledheart/seahub
seahub/auth/views.py
Python
apache-2.0
14,354
# -*- coding: utf-8 -*- __author__ = """Chris Tabor (dxdstudio@gmail.com)""" if __name__ == '__main__': from os import getcwd from os import sys sys.path.append(getcwd()) from random import choice from MOAL.helpers.text import gibberish from MOAL.helpers.display import Section from MOAL.data_structures.abstract.stack import Stack class Queue(Stack): def __init__(self, *args, **kwargs): super(Queue, self).__init__(*args, **kwargs) def __len__(self): return len(self.items) def enqueue(self, item): return self.items.insert(0, item) def dequeue(self): return super(Queue, self).pop() def move_to_end(self): self.enqueue(self.dequeue()) def out_of_range(self): return len(self.items) - 1 < self.num class Dequeue(Queue): """Represents a 'double ended' queue -- a queue that can use either direction as the head/tail, but works the same way.""" def __init__(self, direction): self.direction = direction super(Dequeue, self).__init__() def enqueue(self, item): if self.direction == 'backwards': self.items.insert(len(self.items) - 1, item) else: super(Dequeue, self).enqueue(item) def dequeue(self): if self.direction == 'backwards': self.items.remove() else: super(Dequeue, self).enqueue() class HotPotatoSimulator(Queue): def __init__(self, names, num): super(HotPotatoSimulator, self).__init__() self.num = num self.items = names def adjust_position(self): print('Out of range... adjusting position to a valid index.') # If `self.num` is greater than the length of the list, # keep adjusting until it's in range. while self.out_of_range(): self.num -= 1 self.adjust_position() def move(self): if self.out_of_range(): print('No items in the queue to move!') return self.adjust_position() person = self.items[self.num] print('Moving around circle {} times to: {}'.format(self.num, person)) print(self.items) self.move_to_end() class PrinterQueue(Queue): def add_job(self, name, doc): self.push({'name': name, 'doc': doc}) print('Adding {} to queue for printing...'.format(name)) def print_job(self): print('Printing... {}'.format(self.head()['name'])) if __name__ == '__main__': with Section('Queues'): q = Queue() for _ in range(5): print('en-queuing new item...') q.enqueue(gibberish()) with Section('Double ended queue'): dq = Dequeue('backwards') for _ in range(5): print('en-queuing (dequeue) new item...') dq.enqueue(gibberish()) with Section('Queue rotation example'): hps = HotPotatoSimulator( ['Tuvok', 'Neelix', 'Kim', 'Paris', 'Seven', 'Chakotay'], 20) for _ in range(7): hps.move() with Section('Printer queue example'): pq = PrinterQueue() for _ in range(10): pq.add_job('My_doc_{}.{}'.format( gibberish(), choice( ['doc', 'docx', 'rtf', 'pdf', 'jpg', '.png'])), '<DOC CONTENTS...>') for _ in range(10): pq.print_job()
christabor/MoAL
MOAL/data_structures/abstract/queues.py
Python
apache-2.0
3,389
# Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from security_monkey.views import AuthenticatedService from security_monkey.views import __check_auth__ from security_monkey.views import ITEM_FIELDS from security_monkey.views import ITEM_COMMENT_FIELDS from security_monkey.views import AUDIT_FIELDS from security_monkey.views import REVISION_FIELDS from security_monkey.datastore import Item from security_monkey.datastore import Account from security_monkey.datastore import Technology from security_monkey.datastore import ItemRevision from security_monkey import db from security_monkey import api from flask.ext.restful import marshal, reqparse from sqlalchemy.sql.expression import cast from sqlalchemy import String class ItemGet(AuthenticatedService): def __init__(self): super(ItemGet, self).__init__() def get(self, item_id): """ .. http:get:: /api/1/item/1234 Get a specific item **Example Request**: .. sourcecode:: http GET /api/1/item/1234 HTTP/1.1 Host: example.com Accept: application/json **Example Response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json { "item": { "account": "example_account", "region": "us-east-1", "technology": "elb", "id": 1234, "name": "example_name" }, "revisions": [ { "active": false, "date_created": "2014-04-11 17:05:06.701936", "config": {}, "item_id": 1234, "id": 213784 } ], "auth": { "authenticated": true, "user": "user@example.com" }, "issues": [], "comments": [] } :statuscode 200: no error :statuscode 401: Authenticaiton Error Please login. """ auth, retval = __check_auth__(self.auth_dict) if auth: return retval query = Item.query.filter(Item.id == item_id) result = query.first() # result should be an Item with a list of audit thingers and a list of # revisions retval = {} item_marshaled = marshal(result.__dict__, ITEM_FIELDS) item_marshaled = dict( item_marshaled.items() + {'account': result.account.name}.items() + {'technology': result.technology.name}.items() ) retval['item'] = item_marshaled retval['issues'] = [] retval['auth'] = self.auth_dict comments_marshaled = [] for comment in result.comments: comment_marshaled = marshal(comment, ITEM_COMMENT_FIELDS) comment_marshaled = dict( comment_marshaled.items() + {'user': comment.user.email}.items() ) comments_marshaled.append(comment_marshaled) retval['comments'] = comments_marshaled for issue in result.issues: issue_marshaled = marshal(issue.__dict__, AUDIT_FIELDS) if issue.user is not None: issue_marshaled = dict(issue_marshaled.items() + {'justified_user': issue.user.email}.items() ) retval['issues'].append(issue_marshaled) retval['revisions'] = [] for revision in result.revisions: revision_marshaled = marshal(revision.__dict__, REVISION_FIELDS) revision_marshaled = dict( revision_marshaled.items() + {'config': revision.config}.items() ) retval['revisions'].append(revision_marshaled) return retval, 200 # Returns a list of items optionally filtered by # account, region, name, ctype or id. class ItemList(AuthenticatedService): def __init__(self): super(ItemList, self).__init__() def get(self): """ .. http:get:: /api/1/items Get a list of items matching the given criteria. **Example Request**: .. sourcecode:: http GET /api/1/items HTTP/1.1 Host: example.com Accept: application/json **Example Response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json { "items": [ { "account": "example_account", "region": "us-east-1", "technology": "sqs", "id": 14414, "name": "example_name", "num_issues": 3, "issue_score": 9, "unjustified_issue_score": 3, "active" true, "first_seen": "2014-06-17 19:47:07.299760", "last_seen": "2014-06-18 11:53:16.467709" } ], "total": 144, "page": 1, "auth": { "authenticated": true, "user": "user@example.com" } } :statuscode 200: no error :statuscode 401: Authenciation Error. Please Login. """ (auth, retval) = __check_auth__(self.auth_dict) if auth: return retval self.reqparse.add_argument('count', type=int, default=30, location='args') self.reqparse.add_argument('page', type=int, default=1, location='args') self.reqparse.add_argument('regions', type=str, default=None, location='args') self.reqparse.add_argument('accounts', type=str, default=None, location='args') self.reqparse.add_argument('active', type=str, default=None, location='args') self.reqparse.add_argument('names', type=str, default=None, location='args') self.reqparse.add_argument('technologies', type=str, default=None, location='args') self.reqparse.add_argument('searchconfig', type=str, default=None, location='args') self.reqparse.add_argument('ids', type=int, default=None, location='args') args = self.reqparse.parse_args() page = args.pop('page', None) count = args.pop('count', None) for k, v in args.items(): if not v: del args[k] # Read more about filtering: # http://docs.sqlalchemy.org/en/rel_0_7/orm/query.html query = Item.query.join((ItemRevision, Item.latest_revision_id == ItemRevision.id)) if 'regions' in args: regions = args['regions'].split(',') query = query.filter(Item.region.in_(regions)) if 'accounts' in args: accounts = args['accounts'].split(',') query = query.join((Account, Account.id == Item.account_id)) query = query.filter(Account.name.in_(accounts)) if 'technologies' in args: technologies = args['technologies'].split(',') query = query.join((Technology, Technology.id == Item.tech_id)) query = query.filter(Technology.name.in_(technologies)) if 'names' in args: names = args['names'].split(',') query = query.filter(Item.name.in_(names)) if 'ids' in args: ids = args['ids'].split(',') query = query.filter(Item.id.in_(ids)) if 'active' in args: active = args['active'].lower() == "true" query = query.filter(ItemRevision.active == active) if 'searchconfig' in args: searchconfig = args['searchconfig'] query = query.filter(cast(ItemRevision.config, String).ilike('%{}%'.format(searchconfig))) query = query.order_by(ItemRevision.date_created.desc()) items = query.paginate(page, count) marshaled_dict = {} marshaled_dict['page'] = items.page marshaled_dict['total'] = items.total marshaled_dict['auth'] = self.auth_dict marshaled_items = [] for item in items.items: num_issues = len(item.issues) issue_score = 0 unjustified_issue_score = 0 for issue in item.issues: issue_score = issue_score + issue.score if not issue.justified: unjustified_issue_score += issue.score first_seen = str(item.revisions[-1].date_created) last_seen = str(item.revisions[0].date_created) active = item.revisions[0].active item_marshaled = {} item_marshaled = marshal(item.__dict__, ITEM_FIELDS) item_marshaled = dict(item_marshaled.items() + { 'account': item.account.name, 'technology': item.technology.name, 'num_issues': num_issues, 'issue_score': issue_score, 'unjustified_issue_score': unjustified_issue_score, 'active': active, 'first_seen': first_seen, 'last_seen': last_seen #'last_rev': item.revisions[0].config, }.items()) marshaled_items.append(item_marshaled) marshaled_dict['items'] = marshaled_items marshaled_dict['count'] = len(marshaled_items) return marshaled_dict, 200
lucab/security_monkey
security_monkey/views/item.py
Python
apache-2.0
10,797
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'NtpAccessAfEnum' : _MetaInfoEnum('NtpAccessAfEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', { 'ipv4':'IPV4', 'ipv6':'IPV6', }, 'Cisco-IOS-XR-ip-ntp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg']), 'NtpPeerEnum' : _MetaInfoEnum('NtpPeerEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', { 'peer':'PEER', 'server':'SERVER', }, 'Cisco-IOS-XR-ip-ntp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg']), 'NtpdscpEnum' : _MetaInfoEnum('NtpdscpEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', { 'ntp-precedence':'NTP_PRECEDENCE', 'ntpdscp':'NTPDSCP', }, 'Cisco-IOS-XR-ip-ntp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg']), 'NtpAccessEnum' : _MetaInfoEnum('NtpAccessEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', { 'peer':'PEER', 'serve':'SERVE', 'serve-only':'SERVE_ONLY', 'query-only':'QUERY_ONLY', }, 'Cisco-IOS-XR-ip-ntp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg']), 'Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4.PeerTypeIpv4' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4.PeerTypeIpv4', False, [ _MetaInfoClassMember('peer-type', REFERENCE_ENUM_CLASS, 'NtpPeerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'NtpPeerEnum', [], [], ''' Peer or Server ''', 'peer_type', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('authentication-key', ATTRIBUTE, 'int' , None, None, [(1, 65535)], [], ''' Authentication Key ''', 'authentication_key', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('burst', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Use burst mode ''', 'burst', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('iburst', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Use iburst mode ''', 'iburst', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('max-poll', ATTRIBUTE, 'int' , None, None, [(4, 17)], [], ''' Maxinum poll interval ''', 'max_poll', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('min-poll', ATTRIBUTE, 'int' , None, None, [(4, 17)], [], ''' Minimum poll interval ''', 'min_poll', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('ntp-version', ATTRIBUTE, 'int' , None, None, [(2, 4)], [], ''' NTP version ''', 'ntp_version', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('preferred-peer', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Preferred peer ''', 'preferred_peer', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None, [], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'], ''' Source interface of this peer ''', 'source_interface', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-type-ipv4', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4', False, [ _MetaInfoClassMember('address-ipv4', ATTRIBUTE, 'str' , None, None, [], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'], ''' IPv4 Address of a peer ''', 'address_ipv4', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('peer-type-ipv4', REFERENCE_LIST, 'PeerTypeIpv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4.PeerTypeIpv4', [], [], ''' Configure an IPv4 NTP server or peer ''', 'peer_type_ipv4', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-ipv4', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs.PeerVrf.PeerIpv4S' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf.PeerIpv4S', False, [ _MetaInfoClassMember('peer-ipv4', REFERENCE_LIST, 'PeerIpv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4', [], [], ''' Configure an IPv4 NTP server or peer ''', 'peer_ipv4', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-ipv4s', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6.PeerTypeIpv6' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6.PeerTypeIpv6', False, [ _MetaInfoClassMember('peer-type', REFERENCE_ENUM_CLASS, 'NtpPeerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'NtpPeerEnum', [], [], ''' Peer or Server ''', 'peer_type', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('address-ipv6', ATTRIBUTE, 'str' , None, None, [], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'], ''' IPv6 address ''', 'address_ipv6', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('authentication-key', ATTRIBUTE, 'int' , None, None, [(1, 65535)], [], ''' Authentication Key ''', 'authentication_key', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('burst', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Use burst mode ''', 'burst', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('iburst', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Use iburst mode ''', 'iburst', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('max-poll', ATTRIBUTE, 'int' , None, None, [(4, 17)], [], ''' Maxinum poll interval ''', 'max_poll', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('min-poll', ATTRIBUTE, 'int' , None, None, [(4, 17)], [], ''' Minimum poll interval ''', 'min_poll', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('ntp-version', ATTRIBUTE, 'int' , None, None, [(2, 4)], [], ''' NTP version ''', 'ntp_version', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('preferred-peer', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Preferred peer ''', 'preferred_peer', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None, [], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'], ''' Source interface of this peer ''', 'source_interface', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-type-ipv6', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6', False, [ _MetaInfoClassMember('address-ipv6', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Address of a peer ''', 'address_ipv6', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('peer-type-ipv6', REFERENCE_LIST, 'PeerTypeIpv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6.PeerTypeIpv6', [], [], ''' Configure a NTP server or peer ''', 'peer_type_ipv6', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-ipv6', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs.PeerVrf.PeerIpv6S' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf.PeerIpv6S', False, [ _MetaInfoClassMember('peer-ipv6', REFERENCE_LIST, 'PeerIpv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6', [], [], ''' Configure a NTP server or peer ''', 'peer_ipv6', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-ipv6s', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs.PeerVrf' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs.PeerVrf', False, [ _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('peer-ipv4s', REFERENCE_CLASS, 'PeerIpv4S' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf.PeerIpv4S', [], [], ''' Configures IPv4 NTP Peers or Servers ''', 'peer_ipv4s', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('peer-ipv6s', REFERENCE_CLASS, 'PeerIpv6S' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf.PeerIpv6S', [], [], ''' Configuration NTP Peers or Servers of IPV6 ''', 'peer_ipv6s', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-vrf', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.PeerVrfs' : { 'meta_info' : _MetaInfoClass('Ntp.PeerVrfs', False, [ _MetaInfoClassMember('peer-vrf', REFERENCE_LIST, 'PeerVrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs.PeerVrf', [], [], ''' Configures NTP Peers or Servers for a single VRF. The 'default' must also be specified for default VRF ''', 'peer_vrf', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'peer-vrfs', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.DscpIpv4' : { 'meta_info' : _MetaInfoClass('Ntp.DscpIpv4', False, [ _MetaInfoClassMember('dscp-or-precedence-value', ATTRIBUTE, 'int' , None, None, [(0, 63)], [], ''' If Mode is set to 'NTPPRECEDENCE(0)' specify Precedence value , if Mode is set to 'NTPDSCP(1)' specify DSCP ''', 'dscp_or_precedence_value', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'NtpdscpEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'NtpdscpEnum', [], [], ''' NTPPRECEDENCE (0) to specify Precedence value NTPDSCP (1) to specify DSCP value ''', 'mode', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'dscp-ipv4', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.DscpIpv6' : { 'meta_info' : _MetaInfoClass('Ntp.DscpIpv6', False, [ _MetaInfoClassMember('dscp-or-precedence-value', ATTRIBUTE, 'int' , None, None, [(0, 63)], [], ''' If Mode is set to 'NTPPRECEDENCE(0)' specify Precedence value , if Mode is set to 'NTPDSCP(1)' specify DSCP ''', 'dscp_or_precedence_value', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'NtpdscpEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'NtpdscpEnum', [], [], ''' NTPPRECEDENCE(0) to specify Precedence value NTPDSCP(1) to specify DSCP value ''', 'mode', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'dscp-ipv6', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Sources.Source' : { 'meta_info' : _MetaInfoClass('Ntp.Sources.Source', False, [ _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None, [], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'], ''' Source Interface for NTP ''', 'source_interface', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'source', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Sources' : { 'meta_info' : _MetaInfoClass('Ntp.Sources', False, [ _MetaInfoClassMember('source', REFERENCE_LIST, 'Source' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Sources.Source', [], [], ''' Configure NTP source interface ''', 'source', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'sources', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Authentication.Keies.Key' : { 'meta_info' : _MetaInfoClass('Ntp.Authentication.Keies.Key', False, [ _MetaInfoClassMember('key-number', ATTRIBUTE, 'int' , None, None, [(1, 65535)], [], ''' Authentication Key number ''', 'key_number', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('authentication-key', ATTRIBUTE, 'str' , None, None, [], [], ''' Authentication key - maximum 32 characters ''', 'authentication_key', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'key', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Authentication.Keies' : { 'meta_info' : _MetaInfoClass('Ntp.Authentication.Keies', False, [ _MetaInfoClassMember('key', REFERENCE_LIST, 'Key' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Authentication.Keies.Key', [], [], ''' Authentication key for trusted time sources ''', 'key', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'keies', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Authentication.TrustedKeies.TrustedKey' : { 'meta_info' : _MetaInfoClass('Ntp.Authentication.TrustedKeies.TrustedKey', False, [ _MetaInfoClassMember('key-number', ATTRIBUTE, 'int' , None, None, [(1, 65535)], [], ''' Key number ''', 'key_number', 'Cisco-IOS-XR-ip-ntp-cfg', True), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'trusted-key', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Authentication.TrustedKeies' : { 'meta_info' : _MetaInfoClass('Ntp.Authentication.TrustedKeies', False, [ _MetaInfoClassMember('trusted-key', REFERENCE_LIST, 'TrustedKey' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Authentication.TrustedKeies.TrustedKey', [], [], ''' Configure NTP trusted key ''', 'trusted_key', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'trusted-keies', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.Authentication' : { 'meta_info' : _MetaInfoClass('Ntp.Authentication', False, [ _MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Enable NTP authentication keys ''', 'enable', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('keies', REFERENCE_CLASS, 'Keies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Authentication.Keies', [], [], ''' Authentication Key Table ''', 'keies', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('trusted-keies', REFERENCE_CLASS, 'TrustedKeies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Authentication.TrustedKeies', [], [], ''' Key numbers for trusted time sources ''', 'trusted_keies', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'authentication', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients.MulticastClient' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients.MulticastClient', False, [ _MetaInfoClassMember('ip-address', REFERENCE_UNION, 'str' , None, None, [], [], ''' IP address of a multicast group ''', 'ip_address', 'Cisco-IOS-XR-ip-ntp-cfg', True, [ _MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None, [], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'], ''' IP address of a multicast group ''', 'ip_address', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None, [], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'], ''' IP address of a multicast group ''', 'ip_address', 'Cisco-IOS-XR-ip-ntp-cfg', True), ]), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'multicast-client', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients', False, [ _MetaInfoClassMember('multicast-client', REFERENCE_LIST, 'MulticastClient' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients.MulticastClient', [], [], ''' Listen to NTP multicasts ''', 'multicast_client', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'multicast-clients', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers.MulticastServer' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers.MulticastServer', False, [ _MetaInfoClassMember('ip-address', REFERENCE_UNION, 'str' , None, None, [], [], ''' IP address of a multicast group ''', 'ip_address', 'Cisco-IOS-XR-ip-ntp-cfg', True, [ _MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None, [], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'], ''' IP address of a multicast group ''', 'ip_address', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None, [], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'], ''' IP address of a multicast group ''', 'ip_address', 'Cisco-IOS-XR-ip-ntp-cfg', True), ]), _MetaInfoClassMember('authentication-key', ATTRIBUTE, 'int' , None, None, [(1, 65535)], [], ''' Authentication key ''', 'authentication_key', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('ttl', ATTRIBUTE, 'int' , None, None, [(1, 255)], [], ''' TTL ''', 'ttl', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('version', ATTRIBUTE, 'int' , None, None, [(2, 4)], [], ''' NTP version ''', 'version', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'multicast-server', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers', False, [ _MetaInfoClassMember('multicast-server', REFERENCE_LIST, 'MulticastServer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers.MulticastServer', [], [], ''' Configure NTP multicast group server peer ''', 'multicast_server', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'multicast-servers', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast', False, [ _MetaInfoClassMember('multicast-clients', REFERENCE_CLASS, 'MulticastClients' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients', [], [], ''' Configures multicast client peers ''', 'multicast_clients', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('multicast-servers', REFERENCE_CLASS, 'MulticastServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers', [], [], ''' Configures multicast server peers ''', 'multicast_servers', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'interface-multicast', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast.Broadcast' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast.Broadcast', False, [ _MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None, [], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'], ''' Destination broadcast IPv4 address ''', 'address', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('authentication-key', ATTRIBUTE, 'int' , None, None, [(1, 65535)], [], ''' Authentication key ''', 'authentication_key', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('ntp-version', ATTRIBUTE, 'int' , None, None, [(2, 4)], [], ''' NTP version ''', 'ntp_version', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'broadcast', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast', False, [ _MetaInfoClassMember('broadcast', REFERENCE_CLASS, 'Broadcast' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast.Broadcast', [], [], ''' Configure NTP broadcast ''', 'broadcast', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('broadcast-client', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Listen to NTP broadcasts ''', 'broadcast_client', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'interface-broadcast', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable.Interface' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable.Interface', False, [ _MetaInfoClassMember('interface', ATTRIBUTE, 'str' , None, None, [], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'], ''' interface ''', 'interface', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None, [], [], ''' Disable NTP ''', 'disable', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('interface-broadcast', REFERENCE_CLASS, 'InterfaceBroadcast' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast', [], [], ''' Configure NTP broadcast service ''', 'interface_broadcast', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('interface-multicast', REFERENCE_CLASS, 'InterfaceMulticast' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast', [], [], ''' Configure NTP multicast service ''', 'interface_multicast', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'interface', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables.InterfaceTable' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables.InterfaceTable', False, [ _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable.Interface', [], [], ''' Name of the interface ''', 'interface', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'interface-table', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.InterfaceTables' : { 'meta_info' : _MetaInfoClass('Ntp.InterfaceTables', False, [ _MetaInfoClassMember('interface-table', REFERENCE_LIST, 'InterfaceTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables.InterfaceTable', [], [], ''' NTP per interface configuration ''', 'interface_table', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'interface-tables', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable.AccessGroup' : { 'meta_info' : _MetaInfoClass('Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable.AccessGroup', False, [ _MetaInfoClassMember('access-group-type', REFERENCE_ENUM_CLASS, 'NtpAccessEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'NtpAccessEnum', [], [], ''' Access group type ''', 'access_group_type', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Access list name - maximum 32 characters ''', 'access_list_name', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'access-group', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable' : { 'meta_info' : _MetaInfoClass('Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable', False, [ _MetaInfoClassMember('af', REFERENCE_ENUM_CLASS, 'NtpAccessAfEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'NtpAccessAfEnum', [], [], ''' Address family ''', 'af', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('access-group', REFERENCE_LIST, 'AccessGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable.AccessGroup', [], [], ''' Configure NTP access group ''', 'access_group', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'access-group-af-table', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.AccessGroupTables.AccessGroupTable' : { 'meta_info' : _MetaInfoClass('Ntp.AccessGroupTables.AccessGroupTable', False, [ _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-ip-ntp-cfg', True), _MetaInfoClassMember('access-group-af-table', REFERENCE_LIST, 'AccessGroupAfTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable', [], [], ''' Configure NTP access address family ''', 'access_group_af_table', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'access-group-table', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp.AccessGroupTables' : { 'meta_info' : _MetaInfoClass('Ntp.AccessGroupTables', False, [ _MetaInfoClassMember('access-group-table', REFERENCE_LIST, 'AccessGroupTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.AccessGroupTables.AccessGroupTable', [], [], ''' Control NTP access ''', 'access_group_table', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'access-group-tables', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, 'Ntp' : { 'meta_info' : _MetaInfoClass('Ntp', False, [ _MetaInfoClassMember('access-group-tables', REFERENCE_CLASS, 'AccessGroupTables' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.AccessGroupTables', [], [], ''' Control NTP access ''', 'access_group_tables', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Authentication', [], [], ''' Configure NTP Authentication keys ''', 'authentication', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('broadcast-delay', ATTRIBUTE, 'int' , None, None, [(1, 999999)], [], ''' Estimated round-trip delay ''', 'broadcast_delay', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('dscp-ipv4', REFERENCE_CLASS, 'DscpIpv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.DscpIpv4', [], [], ''' Set IP DSCP value for outgoing NTP IPV4 packets ''', 'dscp_ipv4', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('dscp-ipv6', REFERENCE_CLASS, 'DscpIpv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.DscpIpv6', [], [], ''' Set IP DSCP value for outgoing NTP IPV6 packets ''', 'dscp_ipv6', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('interface-tables', REFERENCE_CLASS, 'InterfaceTables' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.InterfaceTables', [], [], ''' NTP per interface configuration ''', 'interface_tables', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('log-internal-sync', ATTRIBUTE, 'Empty' , None, None, [], [], ''' To enable logging internal sync conflicts ''', 'log_internal_sync', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('master', ATTRIBUTE, 'int' , None, None, [(1, 15)], [], ''' Act as NTP master clock ''', 'master', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('max-associations', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' Set maximum number of associations ''', 'max_associations', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('peer-vrfs', REFERENCE_CLASS, 'PeerVrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.PeerVrfs', [], [], ''' Configures NTP Peers or Servers ''', 'peer_vrfs', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('sources', REFERENCE_CLASS, 'Sources' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg', 'Ntp.Sources', [], [], ''' Configure NTP source interface ''', 'sources', 'Cisco-IOS-XR-ip-ntp-cfg', False), _MetaInfoClassMember('update-calendar', ATTRIBUTE, 'Empty' , None, None, [], [], ''' To enable calendar update with NTP time ''', 'update_calendar', 'Cisco-IOS-XR-ip-ntp-cfg', False), ], 'Cisco-IOS-XR-ip-ntp-cfg', 'ntp', _yang_ns._namespaces['Cisco-IOS-XR-ip-ntp-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_ntp_cfg' ), }, } _meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4.PeerTypeIpv4']['meta_info'].parent =_meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4']['meta_info'] _meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv4S.PeerIpv4']['meta_info'].parent =_meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv4S']['meta_info'] _meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6.PeerTypeIpv6']['meta_info'].parent =_meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6']['meta_info'] _meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv6S.PeerIpv6']['meta_info'].parent =_meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv6S']['meta_info'] _meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv4S']['meta_info'].parent =_meta_table['Ntp.PeerVrfs.PeerVrf']['meta_info'] _meta_table['Ntp.PeerVrfs.PeerVrf.PeerIpv6S']['meta_info'].parent =_meta_table['Ntp.PeerVrfs.PeerVrf']['meta_info'] _meta_table['Ntp.PeerVrfs.PeerVrf']['meta_info'].parent =_meta_table['Ntp.PeerVrfs']['meta_info'] _meta_table['Ntp.Sources.Source']['meta_info'].parent =_meta_table['Ntp.Sources']['meta_info'] _meta_table['Ntp.Authentication.Keies.Key']['meta_info'].parent =_meta_table['Ntp.Authentication.Keies']['meta_info'] _meta_table['Ntp.Authentication.TrustedKeies.TrustedKey']['meta_info'].parent =_meta_table['Ntp.Authentication.TrustedKeies']['meta_info'] _meta_table['Ntp.Authentication.Keies']['meta_info'].parent =_meta_table['Ntp.Authentication']['meta_info'] _meta_table['Ntp.Authentication.TrustedKeies']['meta_info'].parent =_meta_table['Ntp.Authentication']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients.MulticastClient']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers.MulticastServer']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastClients']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast.MulticastServers']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast.Broadcast']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceMulticast']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface.InterfaceBroadcast']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable.Interface']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable.Interface']['meta_info'].parent =_meta_table['Ntp.InterfaceTables.InterfaceTable']['meta_info'] _meta_table['Ntp.InterfaceTables.InterfaceTable']['meta_info'].parent =_meta_table['Ntp.InterfaceTables']['meta_info'] _meta_table['Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable.AccessGroup']['meta_info'].parent =_meta_table['Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable']['meta_info'] _meta_table['Ntp.AccessGroupTables.AccessGroupTable.AccessGroupAfTable']['meta_info'].parent =_meta_table['Ntp.AccessGroupTables.AccessGroupTable']['meta_info'] _meta_table['Ntp.AccessGroupTables.AccessGroupTable']['meta_info'].parent =_meta_table['Ntp.AccessGroupTables']['meta_info'] _meta_table['Ntp.PeerVrfs']['meta_info'].parent =_meta_table['Ntp']['meta_info'] _meta_table['Ntp.DscpIpv4']['meta_info'].parent =_meta_table['Ntp']['meta_info'] _meta_table['Ntp.DscpIpv6']['meta_info'].parent =_meta_table['Ntp']['meta_info'] _meta_table['Ntp.Sources']['meta_info'].parent =_meta_table['Ntp']['meta_info'] _meta_table['Ntp.Authentication']['meta_info'].parent =_meta_table['Ntp']['meta_info'] _meta_table['Ntp.InterfaceTables']['meta_info'].parent =_meta_table['Ntp']['meta_info'] _meta_table['Ntp.AccessGroupTables']['meta_info'].parent =_meta_table['Ntp']['meta_info']
abhikeshav/ydk-py
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ip_ntp_cfg.py
Python
apache-2.0
48,727
import paho.mqtt.client as MQTT from enum import Enum from cow_mq.util import Util, DataType from datetime import datetime import time import copy import logging import json import threading class SendType(Enum): NONE = 0 SYNC = 1 ASYNC = 2 class ResponseStatus(Enum): NONE = 0 SUCCESS = 1 TIMEOUT = 2 class ResponseData: status = ResponseStatus.NONE data_bytes = None def __repr__(self): return '<ResponseData status:{}, data_bytes:{}>'.format( self.status, self.data_bytes) class Client: class SendData: domain = None rule = None completed = False send_type = SendType.NONE rsp_data = None rsp_callback = None timeout_timer = None def __init__(self, domain, rule, completed=False, send_type=SendType.NONE, rsp_data=ResponseData(), rsp_callback=None, timeout_timer=None): self.domain = domain self.rule = rule self.completed = completed self.send_type = send_type self.rsp_data = rsp_data self.rsp_callback = rsp_callback self.timeout_timer = timeout_timer mqtt_client = None mqtt_ip = None mqtt_port = None mqtt_username = None mqtt_password = None mqtt_tls_ca_certs = None mqtt_tls_certfile = None mqtt_tls_keyfile = None on_connect = None on_disconnect = None on_subscribe = None on_message = None on_server_connect = None on_server_disconnect = None topic_domain_rule_dic = {} registered_server_data = [] def __init__(self, config, logging_level=logging.WARNING): self.logger = logging.getLogger('CowMQ Client') self.logger.setLevel(logging_level) ch = logging.StreamHandler() ch.setLevel(logging_level) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) self.logger.addHandler(ch) mqtt_ip = Util.get_cow_mq_ip_from_config(config) mqtt_port = Util.get_cow_mq_port_from_config(config) mqtt_username = Util.get_cow_mq_username_from_config(config) mqtt_password = Util.get_cow_mq_password_from_config(config) mqtt_tls_ca_certs = Util.get_cow_mq_tls_ca_certs_from_config( config) mqtt_tls_certfile = Util.get_cow_mq_tls_certfile_from_config( config) mqtt_tls_keyfile = Util.get_cow_mq_tls_keyfile_from_config( config) self.mqtt_ip = mqtt_ip self.mqtt_port = mqtt_port self.mqtt_username = mqtt_username self.mqtt_password = mqtt_password # self.mqtt_client = MQTT.Client(transport='websockets') self.mqtt_client = MQTT.Client() self.mqtt_client.on_connect = self.on_connect_mqtt self.mqtt_client.on_disconnect = self.on_disconnect_mqtt self.mqtt_client.on_subscribe = self.on_subscribe_mqtt self.mqtt_client.on_message = self.on_message_mqtt if (mqtt_username is not None and mqtt_password is not None): self.mqtt_client.username_pw_set( self.mqtt_username, self.mqtt_password) if mqtt_tls_ca_certs is not None: self.mqtt_client.tls_set(mqtt_tls_ca_certs, mqtt_tls_certfile, mqtt_tls_keyfile) self.mqtt_client.connect(self.mqtt_ip, self.mqtt_port) self.mqtt_client.loop_start() def on_connect_mqtt(self, client, userdata, flags, rc): self.logger.debug("connect rc:{}".format(rc)) if self.on_connect: self.on_connect(self, client, userdata, flags, rc) def on_disconnect_mqtt(self, client, userdata, rc): self.logger.debug("disconnect rc:{}".format(rc)) self.mqtt_client.loop_stop() if self.on_disconnect: self.on_disconnect(self, client, userdata, rc) def on_subscribe_mqtt(self, client, userdata, mid, granted_qos): # self.logger.debug("subscribe mid:{}, granted_qos:{}".format( # mid, granted_qos)) if self.on_subscribe: self.on_subscribe(self, client, userdata, mid, granted_qos) def on_message_mqtt(self, client, userdata, msg): topic = msg.topic payload = msg.payload data_type, data_bytes, rsp_topic = Util.decode(payload) if data_type == DataType.NONE: self.logger.debug("on_message topic:{}, payload:{}".format( topic, payload)) if self.on_message: self.on_message(self, client, userdata, msg) return if data_type == DataType.INFO: if Util.is_connected_topic(topic): domain = Util.get_domain_from_connected_topic(topic) data_str = data_bytes.decode('utf-8') data = json.loads(data_str) connected = data['connected'] if connected: self.logger.debug('server({}) connected'.format(domain)) if self.on_server_connect: self.on_server_connect(domain) else: self.logger.debug('server({}) disconnected'.format(domain)) if self.on_server_disconnect: self.on_server_disconnect(domain) return if topic not in self.topic_domain_rule_dic: return send_data = self.topic_domain_rule_dic[topic] send_data.rsp_data.status = ResponseStatus.SUCCESS send_data.rsp_data.data_bytes = data_bytes send_data.completed = True # self.logger.debug('''message: # domain:{}, # rule:{}, # data_type:{}, # rsp_topic:{}, # data_bytes:{} # '''.format(send_data.domain, send_data.rule, # data_type, rsp_topic, # send_data.rsp_data)) if send_data.send_type == SendType.ASYNC: send_data.timeout_timer.cancel() del self.topic_domain_rule_dic[topic] self.logger.debug( 'async_send receive domain:{}, rule:{}, payload:{}'. format(send_data.domain, send_data.rule, send_data.rsp_data)) if send_data.rsp_callback: send_data.rsp_callback( send_data.domain, send_data.rule, send_data.rsp_data) def subscribe(self, topic, qos=0): self.mqtt_client.subscribe(topic, qos) def unsubscribe(self, topic): self.mqtt_client.unsubscribe(topic) def publish(self, topic, payload=None, qos=0, retain=False): self.mqtt_client.publish(topic, payload, qos, retain) def register_server_connected(self, domain): if not Util.can_use_domain(domain): self.logger.error('domain is wrong format: {}'.format(domain)) raise Exception('Server domain is wrong format') connected_topic = Util.generate_connected_topic(domain) self.mqtt_client.subscribe(connected_topic, qos=1) self.registered_server_data.append(domain) return True def unregister_server_connected(self, domain): if not Util.can_use_domain(domain): self.logger.error('domain is wrong format: {}'.format(domain)) raise Exception('Server domain is wrong format') connected_topic = Util.generate_connected_topic(domain) self.mqtt_client.unsubscribe(connected_topic, qos=1) self.registered_server_data.remove(domain) return True def registered_server_connected_list(self): return copy.deepcopy(self.registered_server_data) def sync_send(self, domain, rule, payload, timeout=30): if not Util.can_use_domain(domain): self.logger.error('domain({}) is wrong format'.format(domain)) raise Exception('Server domain is wrong format') if rule is not None and not Util.can_use_rule(rule): self.logger.error('Rule({}) can not be use'.format(rule)) raise Exception( 'Rule({}) can not be use'.format(rule)) topic = Util.generate_request_topic(domain, rule) rsp_topic = Util.generate_response_topic(domain) data_bytes = Util.encode(DataType.REQ, payload, rsp_topic) self.topic_domain_rule_dic[rsp_topic] = Client.SendData( domain, rule, send_type=SendType.SYNC) self.mqtt_client.subscribe(rsp_topic, qos=1) self.mqtt_client.publish(topic, payload=data_bytes, qos=1) self.logger.debug('sync_send topic:{}, payload:{}'.format( topic, payload)) start_time = datetime.now() while True: send_data = self.topic_domain_rule_dic[rsp_topic] delta = datetime.now() - start_time if delta.total_seconds() > timeout: send_data.rsp_data.status = ResponseStatus.TIMEOUT send_data.completed = True break if send_data.completed: break time.sleep(0.01) del self.topic_domain_rule_dic[rsp_topic] self.logger.debug('sync_send receive topic:{}, payload:{}'.format( topic, send_data.rsp_data)) return send_data.rsp_data def async_send(self, domain, rule, payload, callback, timeout=30): if not Util.can_use_domain(domain): self.logger.error('domain({}) is wrong format'.format(domain)) raise Exception('Server domain is wrong format') if rule is not None and not Util.can_use_rule(rule): self.logger.error('Rule({}) can not be use'.format(rule)) raise Exception( 'Rule({}) can not be use'.format(rule)) topic = Util.generate_request_topic(domain, rule) rsp_topic = Util.generate_response_topic(domain) data_bytes = Util.encode(DataType.REQ, payload, rsp_topic) self.topic_domain_rule_dic[rsp_topic] = Client.SendData( domain, rule, send_type=SendType.ASYNC, rsp_callback=callback) self.mqtt_client.subscribe(rsp_topic, qos=1) self.mqtt_client.publish(topic, payload=data_bytes, qos=1) self.logger.debug('async_send topic:{}, payload:{}'.format( topic, payload)) send_data = self.topic_domain_rule_dic[rsp_topic] t = threading.Timer(timeout, self.async_timeout, [rsp_topic, send_data]) send_data.timeout_timer = t t.start() return True def async_timeout(self, rsp_topic, send_data): if send_data.completed: return send_data.completed = True send_data.rsp_data.status = ResponseStatus.TIMEOUT del self.topic_domain_rule_dic[rsp_topic] self.logger.debug('async_send receive domain:{}, rule:{}, payload:{}'. format(send_data.domain, send_data.rule, send_data.rsp_data)) if send_data.rsp_callback: send_data.rsp_callback( send_data.domain, send_data.rule, send_data.rsp_data)
duncanHsu/CowMQ-Python
cow_mq/client.py
Python
apache-2.0
11,395
""" Just playing around with clustering. This file is "divisive hierarchical clustering". """ import pandas as pd import numpy as np import matplotlib.pyplot as plt print("Creating some data to work with...") np.random.seed(123) variables = ['X', 'Y', 'Z'] labels = ["ID_0", "ID_1", "ID_2", "ID_3", "ID_4"] X = np.random.random_sample([5, 3]) * 10 df = pd.DataFrame(X, columns=variables, index=labels) print(df) print("Calculating the distance matrix for the given data...") from scipy.spatial.distance import pdist, squareform row_dist = pd.DataFrame( squareform(pdist(df, metric="euclidean")), columns=labels, index=labels ) print(row_dist) print("Creating clusters based on the data...") from scipy.cluster.hierarchy import linkage row_clusters = linkage(pdist(df, metric="euclidean"), method="complete") data_frame_view = pd.DataFrame( row_clusters, columns=["row label 1", "row label 2", "distance", "no. of items in cluster"], index=["cluster %d" % (i + 1) for i in range(row_clusters.shape[0])] ) print(data_frame_view) print("Creating tree diagram of clusters...") from scipy.cluster.hierarchy import dendrogram row_dendr = dendrogram(row_clusters, labels=labels) plt.tight_layout() plt.ylabel("Euclidean distance") plt.show()
MaxStrange/swedish_chef
learning/clustering/div_hi_clust.py
Python
apache-2.0
1,493
# -*- coding: ascii -*- r""" :Copyright: Copyright 2007 - 2015 Andr\xe9 Malo or his licensors, as applicable :License: Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================== HTML forms reloaded ===================== Form helper classes. """ if __doc__: # pylint: disable = redefined-builtin __doc__ = __doc__.encode('ascii').decode('unicode_escape') __author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape') __docformat__ = "restructuredtext en" __all__ = ['normalize_newlines', 'normalize_whitespaces', 'HTMLForm'] import re as _re from ._adapters import NullParameterAdapter from ._input_field_generator import make_input def normalize_newlines(): """ Make newline normalizer """ SUB_U = _re.compile(ur'\r?\n|\r').sub SUB_S = _re.compile(r'\r?\n|\r').sub def normalize_newlines(value): """ Normalize the newlines of a string All newlines are converted to \\n. :Parameters: `value` : ``basestring`` The text to normalize :Return: The normalized value, the type depends on the input type :Rtype: ``basestring`` """ # pylint: disable = redefined-outer-name if isinstance(value, unicode): subber, repl = SUB_U, u"\n" else: subber, repl = SUB_S, "\n" return subber(repl, value) return normalize_newlines normalize_newlines = normalize_newlines() def normalize_whitespaces(): """ Make whitespace normalizer """ SUB_U = _re.compile(ur'\s').sub SUB_S = _re.compile(r'\s').sub def normalize_whitespaces(value): """ Normalize the whitespaces of a string All whitespaces are converted to regular space. :Parameters: `value` : ``basestring`` The text to normalize :Return: The normalized value, the type depends on the input type :Rtype: ``basestring`` """ # pylint: disable = redefined-outer-name if isinstance(value, unicode): subber, repl = SUB_U, u" " else: subber, repl = SUB_S, " " return subber(repl, value) return normalize_whitespaces normalize_whitespaces = normalize_whitespaces() class HTMLForm(object): """ HTML form helper class :IVariables: `_action` : ``basestring`` form action `_method` : ``basestring`` form method `_param` : `ParameterAdapterInterface` Parameter adapter `_upload` : ``bool`` Upload form? `_charset` : ``basestring`` Accepted character set for submission `_xhtml` : ``bool`` Use XHTML attributes (vs. short attributes)? `_pre_proc` : `PreProcInterface` Pre set node processing callable `_post_proc` : `PostProcInterface` Post set node processing callable """ def __init__(self, action=None, method='get', param=None, upload=False, accept_charset='utf-8', xhtml=True, pre_proc=None, post_proc=None): """ Initialization If you set `upload` to ``True``, the method will be ignored and be set to ``post`` automatically. :Parameters: `action` : ``basestring`` Form action URL `method` : ``basestring`` form submission method `param` : `ParameterAdapterInterface` Parameter adapter. If unset or ``None``, no values will be taken out of the request. This is useful for initial requests showing empty forms as there will be no special handling required for this case. `upload` : ``bool`` Is this an upload form? `accept_charset` : ``basestring`` Accepted charset(s) for submission, if there are multiple charsets given, they have to be unique and space separated. `xhtml` : ``bool`` Use XHTML attributes (vs. short attributes)? `pre_proc` : `PreProcInterface` Pre set node processing callable `post_proc` : `PostProcInterface` Post set node processing callable """ self._action = action self._method = upload and 'post' or method if param is None: param = NullParameterAdapter() self._param = param self._upload = upload self._charset = accept_charset self._xhtml = bool(xhtml) if pre_proc is None: pre_proc_ = None else: def pre_proc_(method, node, *args): """ Pre proc wrapper """ node, kwargs = pre_proc(method, node, dict(args)) return (node,) + tuple([ kwargs.get(key, val) for key, val in args ]) self._pre_proc = pre_proc_ self._post_proc = post_proc def param(self): """ Parameter adapter getter """ return self._param param = property(param, doc="Parameter adapter the form is using") def is_xhtml(self): """ XHTML flag getter """ return self._xhtml is_xhtml = property(is_xhtml, doc="XHTML flag setting of the form") def is_upload(self): """ Upload flag getter """ return self._upload is_upload = property(is_upload, doc="Upload flag setting of the form") def accept_charset(self): """ Accept-charset getter """ return self._charset accept_charset = property( accept_charset, doc="Accepted charset of the form" ) def action(self): """ Form action getter """ return self._action action = property(action, doc="Configured form action") def method(self): """ Form method getter """ return self._method method = property(method, doc="Configured form method") normalize_newlines = staticmethod(normalize_newlines) normalize_whitespaces = staticmethod(normalize_whitespaces) def form(self, node, hidden=None, hidden_="hidden", autocomplete=None, novalidate=None, raw=False): """ Fill in the form starttag The following attributes are possibly set: - ``action`` (only if it's not ``None``) - ``method`` - ``accept-charset`` (only if it's not ``None``) - ``enctype`` (only on upload forms) - ``autocomplete`` - ``novalidate`` Rendering hidden fields ~~~~~~~~~~~~~~~~~~~~~~~ You can use this method to set a list of hidden fields at once. It iterates over `hidden` and multiplies the node named by `hidden_` accordingly. The `hidden` iterable contains tuples of variable length, namely from 1 to 3, like:: [ ('foo', 'bar'), ('zonk', '"plop"', True), ('x',), ] If `hidden` is empty, the hidden node will be deleted. Field item tuples ----------------- The first (and maybe only) item is the name of the field. This is always set unconditionally. The second item is the value of the field. If the field does not have a value at all - the second and third items are left out, leaving the name only. If the value is ``None`` it's taken out of the request and filled into the field. The third parameter is ignored in this case. If the name does not appear in the request, the field is skipped (not rendered). If the request contains more than one value under that name, a hidden field is generated for each of them. In all other cases the value is written into the ``value`` attribute. The third item determines whether the value should be treated as raw or not. If it's unset, the `raw` parameter of the method applies. :Parameters: `node` : `tdi.nodetree.Node` The ``<form>`` node `hidden` : iterable Hidden fields to set. If unset or ``None``, no hidden fields are touched. If it's an empty list, the hidden node is removed. `hidden_` : ``basestring`` Name of the hidden field node, relative to the form `node` (dotted notation) `autocomplete` : ``bool`` Set the default autocomplete state of the form (HTML5). If omitted or ``None``, any autocomplete attribute present won't be touched. `novalidate` : ``bool`` Set the default novalidate attribute of the form (HTML5). If omitted or ``None``, any novalidate attribute present won't be touched. `raw` : ``bool`` Default "rawness" value for the hidden field list """ # pylint: disable = too-many-branches pre_proc = self._pre_proc if pre_proc is not None: node, hidden, hidden_, raw = pre_proc( 'form', node, ('hidden', hidden), ('hidden_', hidden_), ('raw', raw), ) if self._action is not None: node[u'action'] = self._action node[u'method'] = self._method if self._charset is not None: node[u'accept-charset'] = self._charset if autocomplete is not None: node[u'autocomplete'] = autocomplete and u'on' or u'off' if self._upload: node[u'enctype'] = u'multipart/form-data' if novalidate is not None: if novalidate: node[u'novalidate'] = self._xhtml and u'novalidate' or None else: del node[u'novalidate'] post_proc = self._post_proc if post_proc is not None: post_proc('form', node, dict( hidden=hidden, hidden_=hidden_, raw=raw )) if hidden is not None: partnodes = hidden_.split('.') partnodes.reverse() hiddennode = node(partnodes.pop()) while partnodes: hiddennode = hiddennode(partnodes.pop()) # hidden fields param = self._param filtered = [] for field in hidden: name, value, thisraw = field[0], field[1:2], field[2:3] if value: value = value[0] if value is None: rval = param.getlist(name) filtered.extend([(name, val, False) for val in rval]) else: filtered.append((name, value, (thisraw or [raw])[0])) else: filtered.append((name, None, None)) for subnode, param in hiddennode.iterate(filtered): self.hidden(subnode, *param) def hidden(self, node, name, value=None, raw=False): """ Render a hidden field Hidden field values are never taken out of the request. The reason for that seemingly inconsistent behaviour is that hidden fields have no assigned semantics. In other words, the method can't know, *how* to correctly retrieve the value out of the request. :Parameters: `node` : `tdi.nodetree.Node` The hidden field node `name` : ``basestring`` Name of the hidden field `value` : ``basestring`` Optional value of the hidden field - if omitted or ``None``, the value attribute is completey removed `raw` : ``bool`` Is `value` raw (not to be escaped) """ pre_proc = self._pre_proc if pre_proc is not None: node, name, value, raw = pre_proc( 'hidden', node, ('name', name), ('value', value), ('raw', raw), ) node[u'type'] = u'hidden' node[u'name'] = name if value is None: del node[u'value'] elif raw: node.raw[u'value'] = value else: node[u'value'] = value post_proc = self._post_proc if post_proc is not None: post_proc('hidden', node, dict(name=name, value=value, raw=raw)) text = make_input( 'text', '', 'name', 'value', 'maxlength', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'list', 'pattern', 'dirname', 'autofocus', 'raw', ) search = make_input( 'search', '(HTML5)', 'name', 'value', 'maxlength', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'list', 'pattern', 'dirname', 'autofocus', 'raw', ) tel = make_input( 'tel', '(HTML5)', 'name', 'value', 'maxlength', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'list', 'pattern', 'autofocus', 'raw', ) url = make_input( 'url', '(HTML5)', 'name', 'value', 'maxlength', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'list', 'pattern', 'autofocus', 'raw', ) email = make_input( 'email', '(HTML5)', 'name', 'value', 'maxlength', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'list', 'pattern', 'multiple', 'autofocus', 'raw', ) password = make_input( 'password', '', 'name', 'maxlength', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'pattern', 'autofocus', ) datetime = make_input( # pylint: disable = bad-continuation 'datetime', '(HTML5)\n\n ' '(e.g. ``1979-10-14T12:00:00.001-04:00``)', # noqa 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) date = make_input( 'date', '(HTML5)\n\n (e.g. ``1979-10-14``)', 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) month = make_input( 'month', '(HTML5)\n\n (e.g. ``1979-10``)', 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) week = make_input( 'week', '(HTML5)\n\n (e.g. ``1979-W42``)', 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) time = make_input( 'time', '(HTML5)\n\n (e.g. ``12:00:00.001``)', 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) datetime_local = make_input( # pylint: disable = bad-continuation 'datetime-local', '(HTML5)\n\n ' '(e.g. ``1979-10-14T12:00:00.001``)', # noqa 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) number = make_input( 'number', '(HTML5)', 'name', 'value', 'readonly', 'disabled', 'required', 'autocomplete', 'placeholder', 'list', 'max', 'min', 'step', 'autofocus', 'raw', ) range = make_input( 'range', '(HTML5)', 'name', 'value', 'disabled', 'autocomplete', 'list', 'max', 'autofocus', 'min', 'step', 'raw', ) color = make_input( 'color', '(HTML5)\n\n (e.g. ``#D4D0C8``)', 'name', 'value', 'disabled', 'autocomplete', 'list', 'raw', 'autofocus', ) checkbox = make_input( 'checkbox', '', 'name', 'value', 'disabled', 'required', 'selected', 'autofocus', value_default=u'1', multi_selected=True, ) radio = make_input( 'radio', '', 'name', 'value', 'disabled', 'required', 'selected', 'autofocus', value_default=None, multi_selected=False, ) file = make_input( 'file', '', 'name', 'accept', 'disabled', 'required', 'multiple', 'autofocus', assert_upload=True, ) submit = make_input( 'submit', '', 'name', 'value', 'disabled', 'action', 'enctype', 'method', 'novalidate', 'target', 'autofocus', simple_value=True, name_optional=True, ) image = make_input( 'image', '', 'name', 'disabled', 'alt', 'src', 'width', 'height', 'action', 'enctype', 'method', 'novalidate', 'target', 'autofocus', name_optional=True, ) reset = make_input( 'reset', '', 'value', 'disabled', 'autofocus', simple_value=True, ) button = make_input( 'button', '', 'name', 'value', 'disabled', 'autofocus', simple_value=True, name_optional=True, ) def textarea(self, node, name, value=None, maxlength=None, readonly=None, disabled=None, required=None, placeholder=None, dirname=None, autofocus=None, raw=False): """ Render a 'textarea' input control :Parameters: `node` : `tdi.nodetree.Node` The 'textarea' node `name` : ``basestring`` The name of the 'textarea' field `value` : ``basestring`` Optional value. If ``None``, it's taken out of the request. If it does not appear in the request, it's treated like an empty string. The `raw` parameter is ignored in this case. `maxlength` : ``int`` Maximum length. If omitted or ``None``, the attribute is *deleted*. `readonly` : ``bool`` Readonly field? If unset or ``None``, the attribute is left untouched. `disabled` : ``bool`` Disabled field? If unset or ``None``, the attribute is left untouched. `required` : ``bool`` Required field? (HTML5). If omitted or ``None``, the attribute is left untouched. `placeholder` : ``basestring`` Placeholder value (HTML5). If omitted or ``None``, the attribute is left untouched. `dirname` : ``basestring`` Direction submission name (HTML5). If omitted or ``None``, the attribute is left untouched. `autofocus` : ``bool`` Set autofocus? (HTML5). If omitted or ``None``, the attribute is left untouched. `raw` : ``bool`` Is the value to be treated raw? """ # pylint: disable = too-many-arguments, too-many-branches pre_proc = self._pre_proc if pre_proc is not None: ( node, name, value, maxlength, readonly, disabled, required, placeholder, dirname, autofocus, raw ) = pre_proc( 'textarea', node, ('name', name), ('value', value), ('maxlength', maxlength), ('readonly', readonly), ('disabled', disabled), ('required', required), ('placeholder', placeholder), ('dirname', dirname), ('autofocus', autofocus), ('raw', raw), ) if name is not None: node[u'name'] = name if readonly is not None: if readonly: node[u'readonly'] = self._xhtml and u'readonly' or None else: del node[u'readonly'] if disabled is not None: if disabled: node[u'disabled'] = self._xhtml and u'disabled' or None else: del node[u'disabled'] if required is not None: if required: node[u'required'] = self._xhtml and u'required' or None else: del node[u'required'] if autofocus is not None: if autofocus: node[u'autofocus'] = self._xhtml and u'autofocus' or None else: del node[u'autofocus'] if placeholder is not None: node[u'placeholder'] = placeholder if dirname is not None: node[u'dirname'] = dirname if value is None: value, raw = self._param.getfirst(name, u''), False if not raw: value = self.normalize_newlines(value).rstrip() if maxlength is not None: value = value[:int(maxlength)] node[u'maxlength'] = unicode(maxlength) else: del node[u'maxlength'] if raw: node.raw.content = value else: node.content = value post_proc = self._post_proc if post_proc is not None: post_proc('textarea', node, dict( name=name, value=value, maxlength=maxlength, readonly=readonly, disabled=disabled, required=required, placeholder=placeholder, dirname=dirname, autofocus=autofocus, raw=raw )) def select(self, node, name, options=None, selected=None, option="option", disabled=None, required=None, autofocus=None, multiple=False): r""" Render a 'select' input control This method actually renders two nodes, namely the ``select`` element and the ``option`` element:: <select tdi="node"> <option tdi="*option">foo</option> </select> The option node is repeated as necessary (matching the entries of the `options` parameter). If `options` is empty, the whole ``select`` node is emptied. The option is usually flagged with an asterisk, so it doesn't trigger an automatic render-method call. :Parameters: `node` : `tdi.nodetree.Node` The 'select' input node `name` : ``basestring`` The name of the 'select' field `options` : iterable The list of option values. Each item is expected to be a 2-tuple of the option value and its description. The value is what's put into the option's ``value`` attribute and submitted by the browser if the option is selected. The description is the visible part of the option. If the value is ``None``, it's treated unset and the description is submitted as selected value instead. If `options` is ``None``, only the ``select`` element will be touched. `selected` : ``basestring`` or iterable The pre-selected value. If it's unset or ``None``, it's taken out of the request. If it does not appear in the request, there just won't be any pre-selected option. If `multiple` is true, `selected` is expected to be an *iterable* of ``basestring``\s. `option` : ``str`` The node of the ``option`` node, relative to the ``select`` node. The parameter is expected in dotted notation. `disabled` : ``bool`` Disabled field? If unset or ``None``, the attribute is left untouched. `required` : ``bool`` Required field? (HTML5). If omitted or ``None``, the attribute is left untouched. `autofocus` : ``bool`` Set autofocus? (HTML5). If omitted or ``None``, the attribute is left untouched. `multiple` : ``bool`` Is it a multiselect box? `selected` is expected to be an ``iterable`` containing multiple selected values in this case. """ # pylint: disable = too-many-locals, too-many-branches pre_proc = self._pre_proc if pre_proc is not None: ( node, name, options, selected, option, disabled, required, autofocus, multiple ) = pre_proc( 'select', node, ('name', name), ('options', options), ('selected', selected), ('option', option), ('disabled', disabled), ('required', required), ('autofocus', autofocus), ('multiple', multiple), ) if name is not None: node[u'name'] = name if disabled is not None: if disabled: node[u'disabled'] = self._xhtml and u'disabled' or None else: del node[u'disabled'] if required is not None: if required: node[u'required'] = self._xhtml and u'required' or None else: del node[u'required'] if autofocus is not None: if autofocus: node[u'autofocus'] = self._xhtml and u'autofocus' or None else: del node[u'autofocus'] if options is not None: options = list(options) partnodes = option.split('.') partnodes.reverse() optnode = node(partnodes.pop()) while partnodes: optnode = optnode(partnodes.pop()) if multiple: node[u'multiple'] = self._xhtml and u'multiple' or None if options is not None: if selected is None: selected = self._param.getlist(name) selected_ = dict([(item, None) for item in selected]) else: del node[u'multiple'] # just in case if options is not None: if selected is None: selected = self._param.getfirst(name) selected_ = {selected: None} post_proc = self._post_proc if post_proc is not None: post_proc('select', node, dict( name=name, options=options, selected=selected, option=option, disabled=disabled, required=required, autofocus=autofocus, multiple=multiple )) if options is not None: for subnode, tup in optnode.iterate(options): value, desc, disabled = tup[0], tup[1], tup[2:] if value is not None: is_selected = unicode(value) in selected_ else: is_selected = unicode(desc) in selected_ self.option( subnode, value, description=desc, selected=is_selected, disabled=disabled and disabled[0] or None, ) def datalist(self, node, id=None, options=None, option="option"): """ Render a 'datalist' element (especially its options) This method actually renders two nodes, namely the ``datalist`` element and the ``option`` element:: <datalist tdi="node"> <option tdi="*option" /> </datalist> The option node is repeated as necessary (matching the entries of the `options` parameter). If `options` is empty, the whole ``datalist`` node is emptied. The option is usually flagged with an asterisk, so it doesn't trigger an automatic render-method call. :Parameters: `node` : `tdi.nodetree.Node` The 'datalist' node `id` : ``basestring`` The ``id`` attribute of the 'datalist' field. If omitted or ``None``, the attribute is left untouched. `options` : iterable The list of option values. Each item is expected to be a 2-tuple of the option value and its description. The value is what's put into the option's ``value`` attribute. The description is the visible part of the option and put into the 'label' attribute. If the value is ``None``, it's treated as unset. If `options` is ``None``, only the ``datalist`` element will be touched. `option` : ``str`` The node of the ``option`` node, relative to the ``select`` node. The parameter is expected in dotted notation. """ # pylint: disable = invalid-name, redefined-builtin pre_proc = self._pre_proc if pre_proc is not None: ( node, id, options, option ) = pre_proc( 'datalist', node, ('id', id), ('options', options), ('option', option), ) if id is not None: node[u'id'] = id if options is not None: options = list(options) partnodes = option.split('.') partnodes.reverse() optnode = node(partnodes.pop()) while partnodes: optnode = optnode(partnodes.pop()) post_proc = self._post_proc if post_proc is not None: post_proc('datalist', node, dict( id=id, options=options, option=option )) if options is not None: for subnode, tup in optnode.iterate(options): value, desc, disabled = tup[0], tup[1], tup[2:] self.option( subnode, value, label=desc, disabled=disabled and disabled[0] or None, ) def option(self, node, value, description=None, selected=None, disabled=None, label=None): """ Render a single option :Parameters: `node` : `tdi.nodetree.Node` The option node `value` : ``basestring`` The option value, if ``None``, the attribute will be removed. `description` : ``basestring`` The visible part of the option. If omitted or ``None``, the element's content is left untouched. `selected` : ``bool`` Is the option selected? If unset or ``None`` the attribute will be left untouched. `disabled` : ``bool`` Is this option disabled? If unset or ``None``, the attribute will be left untouched. `label` : ``basestring`` Label attribute (HTML5). If omitted or ``None``, any existing attribute is deleted. """ # pylint: disable = too-many-branches pre_proc = self._pre_proc if pre_proc is not None: ( node, value, description, selected, disabled, label ) = pre_proc( 'option', node, ('value', value), ('description', description), ('selected', selected), ('disabled', disabled), ('label', label), ) if value is None: del node[u'value'] else: node[u'value'] = value if label is None: del node[u'label'] else: node[u'label'] = label if selected is not None: if selected: node[u'selected'] = self._xhtml and u'selected' or None else: del node[u'selected'] if disabled is not None: if disabled: node[u'disabled'] = self._xhtml and u'disabled' or None else: del node[u'disabled'] if description is not None: node.content = description post_proc = self._post_proc if post_proc is not None: post_proc('option', node, dict( value=value, description=description, selected=selected, disabled=disabled, label=label, )) def keygen(self, node, name, keytype=None, challenge=None, disabled=None, autofocus=None): """ Render a 'keygen' input control :Parameters: `node` : `tdi.nodetree.Node` The 'keygen' node `name` : ``basestring`` The name of the 'keygen' field `keytype` : ``basestring`` Optional keytype. If omitted or ``None``, the attribute is left untouched. `challenge` : ``basestring`` Optional challenge value. If omitted or ``None``, the attribute is left untouched. `disabled` : ``bool`` Disabled field? If unset or ``None``, the attribute is left untouched. `autofocus` : ``bool`` Set autofocus? (HTML5). If omitted or ``None``, the attribute is left untouched. """ pre_proc = self._pre_proc if pre_proc is not None: ( node, name, keytype, challenge, disabled, autofocus ) = pre_proc( 'keygen', node, ('name', name), ('keytype', keytype), ('challenge', challenge), ('disabled', disabled), ('autofocus', autofocus), ) if name is not None: node[u'name'] = name if disabled is not None: if disabled: node[u'disabled'] = self._xhtml and u'disabled' or None else: del node[u'disabled'] if autofocus is not None: if autofocus: node[u'autofocus'] = self._xhtml and u'autofocus' or None else: del node[u'autofocus'] if keytype is not None: node[u'keytype'] = keytype if challenge is not None: node[u'challenge'] = challenge post_proc = self._post_proc if post_proc is not None: post_proc('keygen', node, dict( name=name, keytype=keytype, challenge=challenge, disabled=disabled, autofocus=autofocus ))
ndparker/tdi
tdi/tools/htmlform/_main.py
Python
apache-2.0
34,348
import json from unittest import TestCase from flask import Flask from flask_controllers.GameServerController import GameServerController from flask_helpers.VersionHelpers import VersionHelpers from python_cowbull_server import app from python_cowbull_server.Configurator import Configurator from flask_helpers.ErrorHandler import ErrorHandler from Persistence.PersistenceEngine import PersistenceEngine class TestGameServerController(TestCase): def setUp(self): self.info = VersionHelpers() app.testing = True self.app = app.test_client() self.c = Configurator() self.c.execute_load(self.app.application) # Force use of File persister p = {"engine_name": "file", "parameters": {}} self.app.application.config["PERSISTER"] = PersistenceEngine(**p) if self.info.major < 3: self.json_raises = ValueError else: self.json_raises = json.JSONDecodeError def test_gsc_init(self): GameServerController() def test_gsc_bad_init(self): self.app.application.config["PERSISTER"] = None try: GameServerController() except ValueError as ve: self.assertIn("No persistence engine is defined", str(ve)) def test_gsc_valid_init(self): gsc = GameServerController() self.assertIsNone(gsc.game_version) self.assertIsInstance(gsc.handler, ErrorHandler) def test_gsc_get_game(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status, '200 OK') def test_gsc_get_game_bad_mode(self): gsc = GameServerController() with self.app as c: response = c.get('/v1/game?mode=reallyreallytough') self.assertEqual(response.status, '400 BAD REQUEST') self.assertIn("Mode reallyreallytough not found", str(response.data)) def test_gsc_get_game_bad_persister(self): p = self.app.application.config["PERSISTER"] with self.app: with self.assertRaises(TypeError): self.app.application.config["PERSISTER"] = PersistenceEngine( engine_name="foobar", parameters={ "host": "foobar", "port": 27017, "db": "cowbull" } ) self.app.application.config["PERSISTER"] = p def test_gsc_get_game_no_persister(self): p = self.app.application.config["PERSISTER"] with self.app as c: with self.assertRaises(KeyError): self.app.application.config["PERSISTER"] = PersistenceEngine( engine_name="redis", parameters={ "host": "local", "port": 6379, "db": "cowbull" } ) c.get('/v1/game') self.app.application.config["PERSISTER"] = p def test_gsc_get_game_badparam_persister(self): p = self.app.application.config["PERSISTER"] with self.app: with self.assertRaises(TypeError): self.app.application.config["PERSISTER"] = PersistenceEngine( engine_name="redis", parameters={ "host": "local", "port": 6379, "db": "cowbull", "foo": "bar" } ) self.app.application.config["PERSISTER"] = p def test_gsc_post_game(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status[0:3], '200') key = json.loads(response.data)["key"] game_data = { "key": key, "digits": [0, 1, 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '200') def test_gsc_post_bad_key(self): with self.app as c: key = '1234' game_data = { "key": key, "digits": [0, 1, 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("The request must contain a valid game key", str(response.data)) def test_gsc_post_bad_digits(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status[0:3], '200') key = json.loads(response.data)["key"] game_data = { "key": key, "digits": ['X', 'Y', 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') def test_gsc_post_no_digits(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status[0:3], '200') key = json.loads(response.data)["key"] game_data = { "key": key } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("The request must contain an array of digits", str(response.data)) def test_gsc_post_num_digits(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status[0:3], '200') key = json.loads(response.data)["key"] game_data = { "key": key, "digits": [0, 1, 2, 3, 4, 5] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("The DigitWord objects are of different lengths", str(response.data)) def test_gsc_post_hilo_digits(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status[0:3], '200') key = json.loads(response.data)["key"] game_data = { "key": key, "digits": [-10, 21, 32, 43] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("A digit must be a string representation or integer of a number", str(response.data)) def test_gsc_post_type_digits(self): with self.app as c: response = c.get('/v1/game') self.assertEqual(response.status[0:3], '200') key = json.loads(response.data)["key"] game_data = { "key": key, "digits": {"foo": "bar"} } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("A digit must be a string representation or integer of a number", str(response.data)) def test_gsc_post_no_json(self): with self.app as c: response = c.post( '/v1/game', content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("For some reason the json_dict is None!", str(response.data)) def test_gsc_post_bad_json(self): with self.app as c: response = c.post( '/v1/game', data=json.dumps({"keys": "1234"}), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("For some reason the json_dict does not contain a key", str(response.data)) def test_gsc_post_bad_gamekey(self): with self.app as c: key = '1234' game_data = { "key": key, "digits": ['X', 'Y', 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("Unable to open the key file", str(response.data)) def test_gsc_post_badtype_gamekey(self): with self.app as c: key = 1234 game_data = { "key": key, "digits": ['X', 'Y', 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("For some reason the json_dict does not contain a key!", str(response.data)) def test_gsc_post_no_gamekey(self): with self.app as c: game_data = { "digits": ['X', 'Y', 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("For some reason the json_dict does not contain a key", str(response.data)) def test_gsc_post_type_gamekey(self): with self.app as c: game_data = { "key": None, "digits": ['X', 'Y', 2, 3] } response = c.post( '/v1/game', data=json.dumps(game_data), content_type="application/json" ) self.assertEqual(response.status[0:3], '400') self.assertIn("For some reason the json_dict does not contain a key!", str(response.data))
dsandersAzure/python_cowbull_server
unittests/TestGameServerController.py
Python
apache-2.0
10,659
# Copyright 2012 OpenStack Foundation. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import argparse import logging from neutronclient.common import exceptions from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronV20 from neutronclient.openstack.common.gettextutils import _ def _format_fixed_ips(port): try: return '\n'.join([utils.dumps(ip) for ip in port['fixed_ips']]) except Exception: return '' class ListPort(neutronV20.ListCommand): """List ports that belong to a given tenant.""" resource = 'port' log = logging.getLogger(__name__ + '.ListPort') _formatters = {'fixed_ips': _format_fixed_ips, } list_columns = ['id', 'name', 'mac_address', 'fixed_ips'] pagination_support = True sorting_support = True class ListRouterPort(neutronV20.ListCommand): """List ports that belong to a given tenant, with specified router.""" resource = 'port' log = logging.getLogger(__name__ + '.ListRouterPort') _formatters = {'fixed_ips': _format_fixed_ips, } list_columns = ['id', 'name', 'mac_address', 'fixed_ips'] pagination_support = True sorting_support = True def get_parser(self, prog_name): parser = super(ListRouterPort, self).get_parser(prog_name) parser.add_argument( 'id', metavar='router', help=_('ID or name of router to look up')) return parser def get_data(self, parsed_args): neutron_client = self.get_client() neutron_client.format = parsed_args.request_format _id = neutronV20.find_resourceid_by_name_or_id( neutron_client, 'router', parsed_args.id) self.values_specs.append('--device_id=%s' % _id) return super(ListRouterPort, self).get_data(parsed_args) class ShowPort(neutronV20.ShowCommand): """Show information of a given port.""" resource = 'port' log = logging.getLogger(__name__ + '.ShowPort') class UpdatePortSecGroupMixin(object): def add_arguments_secgroup(self, parser): group_sg = parser.add_mutually_exclusive_group() group_sg.add_argument( '--security-group', metavar='SECURITY_GROUP', default=[], action='append', dest='security_groups', help=_('Security group associated with the port ' '(This option can be repeated)')) group_sg.add_argument( '--no-security-groups', action='store_true', help=_('Associate no security groups with the port')) def _resolv_sgid(self, secgroup): return neutronV20.find_resourceid_by_name_or_id( self.get_client(), 'security_group', secgroup) def args2body_secgroup(self, parsed_args, port): if parsed_args.security_groups: port['security_groups'] = [self._resolv_sgid(sg) for sg in parsed_args.security_groups] elif parsed_args.no_security_groups: port['security_groups'] = [] class UpdateExtraDhcpOptMixin(object): def add_arguments_extradhcpopt(self, parser): group_sg = parser.add_mutually_exclusive_group() group_sg.add_argument( '--extra-dhcp-opt', default=[], action='append', dest='extra_dhcp_opts', help=_('Extra dhcp options to be assigned to this port: ' 'opt_name=<dhcp_option_name>,opt_value=<value>, ' '(This option can be repeated.)')) def args2body_extradhcpopt(self, parsed_args, port): ops = [] if parsed_args.extra_dhcp_opts: # the extra_dhcp_opt params (opt_name & opt_value) # must come in pairs, if there is a parm error # both must be thrown out. opt_ele = {} edo_err_msg = _("Invalid --extra-dhcp-opt option, can only be: " "opt_name=<dhcp_option_name>,opt_value=<value>, " "(This option can be repeated.") for opt in parsed_args.extra_dhcp_opts: if opt.split('=')[0] in ['opt_value', 'opt_name']: opt_ele.update(utils.str2dict(opt)) if (('opt_name' in opt_ele) and ('opt_value' in opt_ele)): if opt_ele['opt_value'] == 'null': opt_ele['opt_value'] = None ops.append(opt_ele) opt_ele = {} else: raise exceptions.CommandError(edo_err_msg) else: raise exceptions.CommandError(edo_err_msg) if ops: port.update({'extra_dhcp_opts': ops}) class CreatePort(neutronV20.CreateCommand, UpdatePortSecGroupMixin, UpdateExtraDhcpOptMixin): """Create a port for a given tenant.""" resource = 'port' log = logging.getLogger(__name__ + '.CreatePort') def add_known_arguments(self, parser): parser.add_argument( '--name', help=_('Name of this port')) parser.add_argument( '--admin-state-down', dest='admin_state', action='store_false', help=_('Set admin state up to false')) parser.add_argument( '--admin_state_down', dest='admin_state', action='store_false', help=argparse.SUPPRESS) parser.add_argument( '--mac-address', help=_('MAC address of this port')) parser.add_argument( '--mac_address', help=argparse.SUPPRESS) parser.add_argument( '--device-id', help=_('Device id of this port')) parser.add_argument( '--device_id', help=argparse.SUPPRESS) parser.add_argument( '--fixed-ip', metavar='subnet_id=SUBNET,ip_address=IP_ADDR', action='append', help=_('Desired IP and/or subnet for this port: ' 'subnet_id=<name_or_id>,ip_address=<ip>, ' '(This option can be repeated.)')) parser.add_argument( '--fixed_ip', action='append', help=argparse.SUPPRESS) self.add_arguments_secgroup(parser) self.add_arguments_extradhcpopt(parser) parser.add_argument( 'network_id', metavar='NETWORK', help=_('Network id or name this port belongs to')) def args2body(self, parsed_args): _network_id = neutronV20.find_resourceid_by_name_or_id( self.get_client(), 'network', parsed_args.network_id) body = {'port': {'admin_state_up': parsed_args.admin_state, 'network_id': _network_id, }, } if parsed_args.mac_address: body['port'].update({'mac_address': parsed_args.mac_address}) if parsed_args.device_id: body['port'].update({'device_id': parsed_args.device_id}) if parsed_args.tenant_id: body['port'].update({'tenant_id': parsed_args.tenant_id}) if parsed_args.name: body['port'].update({'name': parsed_args.name}) ips = [] if parsed_args.fixed_ip: for ip_spec in parsed_args.fixed_ip: ip_dict = utils.str2dict(ip_spec) if 'subnet_id' in ip_dict: subnet_name_id = ip_dict['subnet_id'] _subnet_id = neutronV20.find_resourceid_by_name_or_id( self.get_client(), 'subnet', subnet_name_id) ip_dict['subnet_id'] = _subnet_id ips.append(ip_dict) if ips: body['port'].update({'fixed_ips': ips}) self.args2body_secgroup(parsed_args, body['port']) self.args2body_extradhcpopt(parsed_args, body['port']) return body class DeletePort(neutronV20.DeleteCommand): """Delete a given port.""" resource = 'port' log = logging.getLogger(__name__ + '.DeletePort') class UpdatePort(neutronV20.UpdateCommand, UpdatePortSecGroupMixin, UpdateExtraDhcpOptMixin): """Update port's information.""" resource = 'port' log = logging.getLogger(__name__ + '.UpdatePort') def add_known_arguments(self, parser): self.add_arguments_secgroup(parser) self.add_arguments_extradhcpopt(parser) def args2body(self, parsed_args): body = {'port': {}} self.args2body_secgroup(parsed_args, body['port']) self.args2body_extradhcpopt(parsed_args, body['port']) return body
vijayendrabvs/ssl-python-neutronclient
neutronclient/neutron/v2_0/port.py
Python
apache-2.0
9,156
""" 1. Napisz funkcje do obliczania mediany, sredniej, wariancji 2. Sparsuj dane z some_data.csv 3. Sprawdz jakie istnieja typy samochodow, jakie roczniki, jakie modele 4. Sprawdz dystrybucje w/w samochodow 5. Kto ma wiecej jakich samochodow? """
andrzejkrawczyk/python-course
part_1/zadania/data_processing/samochody.py
Python
apache-2.0
266
#!/usr/bin/env python # # VM Backup extension # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.7+ # import inspect import os import sys import traceback from time import sleep scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) maindir = os.path.abspath(os.path.join(scriptdir, '../../')) sys.path.append(maindir) transitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions')) sys.path.append(transitionsdir) from oscrypto import * from encryptstates import * from Common import * from CommandExecutor import * from DiskUtil import * from transitions import * class Ubuntu1404EncryptionStateMachine(OSEncryptionStateMachine): states = [ State(name='uninitialized'), State(name='prereq', on_enter='on_enter_state'), State(name='stripdown', on_enter='on_enter_state'), State(name='unmount_oldroot', on_enter='on_enter_state'), State(name='split_root_partition', on_enter='on_enter_state'), State(name='encrypt_block_device', on_enter='on_enter_state'), State(name='patch_boot_system', on_enter='on_enter_state'), State(name='completed'), ] transitions = [ { 'trigger': 'skip_encryption', 'source': 'uninitialized', 'dest': 'completed' }, { 'trigger': 'enter_prereq', 'source': 'uninitialized', 'dest': 'prereq' }, { 'trigger': 'enter_stripdown', 'source': 'prereq', 'dest': 'stripdown', 'before': 'on_enter_state', 'conditions': 'should_exit_previous_state' }, { 'trigger': 'enter_unmount_oldroot', 'source': 'stripdown', 'dest': 'unmount_oldroot', 'before': 'on_enter_state', 'conditions': 'should_exit_previous_state' }, { 'trigger': 'retry_unmount_oldroot', 'source': 'unmount_oldroot', 'dest': 'unmount_oldroot', 'before': 'on_enter_state' }, { 'trigger': 'enter_split_root_partition', 'source': 'unmount_oldroot', 'dest': 'split_root_partition', 'before': 'on_enter_state', 'conditions': 'should_exit_previous_state' }, { 'trigger': 'enter_encrypt_block_device', 'source': 'split_root_partition', 'dest': 'encrypt_block_device', 'before': 'on_enter_state', 'conditions': 'should_exit_previous_state' }, { 'trigger': 'enter_patch_boot_system', 'source': 'encrypt_block_device', 'dest': 'patch_boot_system', 'before': 'on_enter_state', 'conditions': 'should_exit_previous_state' }, { 'trigger': 'stop_machine', 'source': 'patch_boot_system', 'dest': 'completed', 'conditions': 'should_exit_previous_state' }, ] def on_enter_state(self): super(Ubuntu1404EncryptionStateMachine, self).on_enter_state() def should_exit_previous_state(self): # when this is called, self.state is still the "source" state in the transition return super(Ubuntu1404EncryptionStateMachine, self).should_exit_previous_state() def __init__(self, hutil, distro_patcher, logger, encryption_environment): super(Ubuntu1404EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment) self.state_objs = { 'prereq': PrereqState(self.context), 'stripdown': StripdownState(self.context), 'unmount_oldroot': UnmountOldrootState(self.context), 'split_root_partition': SplitRootPartitionState(self.context), 'encrypt_block_device': EncryptBlockDeviceState(self.context), 'patch_boot_system': PatchBootSystemState(self.context), } self.state_machine = Machine(model=self, states=Ubuntu1404EncryptionStateMachine.states, transitions=Ubuntu1404EncryptionStateMachine.transitions, initial='uninitialized') def start_encryption(self): proc_comm = ProcessCommunicator() self.command_executor.Execute(command_to_execute="mount", raise_exception_on_failure=True, communicator=proc_comm) if '/dev/mapper/osencrypt' in proc_comm.stdout: self.logger.log("OS volume is already encrypted") self.skip_encryption() self.log_machine_state() return self.log_machine_state() self.enter_prereq() self.log_machine_state() self.enter_stripdown() self.log_machine_state() oldroot_unmounted_successfully = False attempt = 1 while not oldroot_unmounted_successfully: self.logger.log("Attempt #{0} to unmount /oldroot".format(attempt)) try: if attempt == 1: self.enter_unmount_oldroot() elif attempt > 10: raise Exception("Could not unmount /oldroot in 10 attempts") else: self.retry_unmount_oldroot() self.log_machine_state() except Exception as e: message = "Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}".format(attempt, e, traceback.format_exc()) self.logger.log(msg=message) self.hutil.do_status_report(operation='EnableEncryptionOSVolume', status=CommonVariables.extension_error_status, status_code=str(CommonVariables.unmount_oldroot_error), message=message) sleep(10) raise Exception(message) else: oldroot_unmounted_successfully = True finally: attempt += 1 self.enter_split_root_partition() self.log_machine_state() self.enter_encrypt_block_device() self.log_machine_state() self.enter_patch_boot_system() self.log_machine_state() self.stop_machine() self.log_machine_state()
jasonzio/azure-linux-extensions
VMEncryption/main/oscrypto/ubuntu_1404/Ubuntu1404EncryptionStateMachine.py
Python
apache-2.0
7,320
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains common script setup and teardown code. Note: In this context script is every module which is not long running and can be executed from the command line (e.g. st2-submit-debug-info, st2-register-content, etc.). """ from __future__ import absolute_import import logging as stdlib_logging from oslo_config import cfg from st2common import log as logging from st2common.database_setup import db_setup from st2common.database_setup import db_teardown from st2common.logging.filters import LogLevelFilter from st2common.transport.bootstrap_utils import register_exchanges_with_retry __all__ = [ 'setup', 'teardown', 'db_setup', 'db_teardown' ] LOG = logging.getLogger(__name__) def register_common_cli_options(): """ Register common CLI options. """ cfg.CONF.register_cli_opt(cfg.BoolOpt('verbose', short='v', default=False)) def setup(config, setup_db=True, register_mq_exchanges=True): """ Common setup function. Currently it performs the following operations: 1. Parses config and CLI arguments 2. Establishes DB connection 3. Suppress DEBUG log level if --verbose flag is not used 4. Registers RabbitMQ exchanges :param config: Config object to use to parse args. """ # Register common CLI options register_common_cli_options() # Parse args to setup config config.parse_args() if cfg.CONF.debug: cfg.CONF.verbose = True # Set up logging log_level = stdlib_logging.DEBUG stdlib_logging.basicConfig(format='%(asctime)s %(levelname)s [-] %(message)s', level=log_level) if not cfg.CONF.verbose: # Note: We still want to print things at the following log levels: INFO, ERROR, CRITICAL exclude_log_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG] handlers = stdlib_logging.getLoggerClass().manager.root.handlers for handler in handlers: handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels)) # All other setup code which requires config to be parsed and logging to be correctly setup if setup_db: db_setup() if register_mq_exchanges: register_exchanges_with_retry() def teardown(): """ Common teardown function. """ db_teardown()
tonybaloney/st2
st2common/st2common/script_setup.py
Python
apache-2.0
3,066
#!/usr/bin/env python # # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Restricts products that will be included in a campaign with a ProductScope. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README. """ # Import appropriate modules from the client library. from googleads import adwords CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE' def main(client, campaign_id): campaign_criterion_service = client.GetService( 'CampaignCriterionService', version='v201802') product_scope = { 'xsi_type': 'ProductScope', # This set of dimensions is for demonstration purposes only. It would be # extremely unlikely that you want to include so many dimensions in your # product scope. 'dimensions': [ { 'xsi_type': 'ProductBrand', 'value': 'Nexus' }, { 'xsi_type': 'ProductCanonicalCondition', 'condition': 'NEW' }, { 'xsi_type': 'ProductCustomAttribute', 'type': 'CUSTOM_ATTRIBUTE_0', 'value': 'my attribute value' }, { 'xsi_type': 'ProductOfferId', 'value': 'book1' }, { 'xsi_type': 'ProductType', 'type': 'PRODUCT_TYPE_L1', 'value': 'Media' }, { 'xsi_type': 'ProductType', 'type': 'PRODUCT_TYPE_L2', 'value': 'Books' }, # The value for the bidding category is a fixed ID for the "Luggage # & Bags" category. You can retrieve IDs for categories from the # ConstantDataService. See the "GetProductCategoryTaxonomy" example # for more details. { 'xsi_type': 'ProductBiddingCategory', 'type': 'BIDDING_CATEGORY_L1', 'value': '-5914235892932915235' } ] } campaign_criterion = { 'campaignId': campaign_id, 'criterion': product_scope } operations = [{ 'operator': 'ADD', 'operand': campaign_criterion }] # Make the request result = campaign_criterion_service.mutate(operations) for criterion in result['value']: print ('Created a ProductScope criterion with Id: %s' % criterion['criterion']['id']) if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client, CAMPAIGN_ID)
Aloomaio/googleads-python-lib
examples/adwords/v201802/shopping/add_product_scope.py
Python
apache-2.0
3,218
#!/usr/bin/python # -*- coding: utf-8 -*- # a helper to return a relative filepath to path from start # Usage: python relpath.py path [start] from __future__ import print_function import os import sys argv = sys.argv path = argv[1] start = argv[2] if len(argv) >= 3 else os.curdir print(os.path.relpath(path, start))
joinAero/XCalculator
sample/hellodjinni/tools/relpath.py
Python
apache-2.0
322
import importlib import os from typing import List import pandas as pd import pkg_resources import pytest import ibis import ibis.common.exceptions as com import ibis.util as util from .base import BackendTest def _random_identifier(suffix): return f'__ibis_test_{suffix}_{util.guid()}' def _get_all_backends() -> List[str]: """ Return the list of known backend names. """ return [ entry_point.name for entry_point in pkg_resources.iter_entry_points( group='ibis.backends', name=None ) ] def _backend_name_to_class(backend_str: str): """ Convert a backend string to the test configuration class for the backend. """ try: backend_package = getattr(ibis, backend_str).__module__ except AttributeError: raise ValueError( f'Unknown backend {backend_str}. ' f'Known backends: {_get_all_backends()}' ) conftest = importlib.import_module(f'{backend_package}.tests.conftest') return conftest.TestConf def _get_backends_to_test(): """ Get a list of `TestConf` classes of the backends to test. The list of backends can be specified by the user with the `PYTEST_BACKENDS` environment variable. - If the variable is undefined or empty, then no backends are returned - Otherwise the variable must contain a space-separated list of backends to test """ backends_raw = os.environ.get('PYTEST_BACKENDS') if not backends_raw: return [] backends = backends_raw.split() return [ pytest.param( _backend_name_to_class(backend), marks=[getattr(pytest.mark, backend), pytest.mark.backend], id=backend, ) for backend in sorted(backends) ] def pytest_runtest_call(item): """Dynamically add various custom markers.""" nodeid = item.nodeid backend = item.funcargs["backend"] assert isinstance(backend, BackendTest), "backend has type {!r}".format( type(backend).__name__ ) for marker in item.iter_markers(name="only_on_backends"): if backend.name() not in marker.args[0]: pytest.skip( f"only_on_backends: {backend} is not in {marker.args[0]} " f"{nodeid}" ) for marker in item.iter_markers(name="skip_backends"): (backend_types,) = map(tuple, marker.args) if backend.name() in marker.args[0]: pytest.skip(f"skip_backends: {backend} {nodeid}") for marker in item.iter_markers(name="skip_missing_feature"): features = marker.args[0] missing_features = [ feature for feature in features if not getattr(backend, feature) ] if missing_features: pytest.skip( f'Backend {backend} is missing features {missing_features} ' f'needed to run {nodeid}' ) for marker in item.iter_markers(name="xfail_backends"): if backend.name() in marker.args[0]: item.add_marker( pytest.mark.xfail( reason=f'{backend} in xfail list: {marker.args[0]}', **marker.kwargs, ) ) for marker in item.iter_markers(name="xpass_backends"): if backend.name() not in marker.args[0]: item.add_marker( pytest.mark.xfail( reason=f'{backend} not in xpass list: {marker.args[0]}', **marker.kwargs, ) ) for marker in item.iter_markers(name='min_spark_version'): min_version = marker.args[0] if backend.name() in ['spark', 'pyspark']: from distutils.version import LooseVersion import pyspark if LooseVersion(pyspark.__version__) < LooseVersion(min_version): item.add_marker( pytest.mark.xfail( reason=f'Require minimal spark version {min_version}, ' f'but is {pyspark.__version__}', **marker.kwargs, ) ) @pytest.hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): """Dynamically add an xfail marker for specific backends.""" outcome = yield try: outcome.get_result() except ( com.OperationNotDefinedError, com.UnsupportedOperationError, com.UnsupportedBackendType, NotImplementedError, ) as e: markers = list(pyfuncitem.iter_markers(name="xfail_unsupported")) assert ( len(markers) == 1 ), "More than one xfail_unsupported marker found on test {}".format( pyfuncitem ) (marker,) = markers backend = pyfuncitem.funcargs["backend"] assert isinstance( backend, BackendTest ), f"backend has type {type(backend).__name__!r}" pytest.xfail(reason=f'{type(backend).__name__}: {e}') pytestmark = pytest.mark.backend @pytest.fixture(params=_get_backends_to_test(), scope='session') def backend(request, data_directory): """ Instance of BackendTest. """ # See #3021 # TODO Remove this to backend_test, since now that a `Backend` class exists return request.param(data_directory) @pytest.fixture(scope='session') def con(backend): """ Instance of Client, already connected to the db (if applies). """ # See #3021 # TODO Rename this to `backend` when the existing `backend` is renamed to # `backend_test`, and when `connect` returns `Backend` and not `Client` return backend.connection @pytest.fixture(scope='session') def alltypes(backend): return backend.functional_alltypes @pytest.fixture(scope='session') def sorted_alltypes(backend, alltypes): return alltypes.sort_by('id') @pytest.fixture(scope='session') def batting(backend): return backend.batting @pytest.fixture(scope='session') def awards_players(backend): return backend.awards_players @pytest.fixture(scope='session') def geo(backend): if backend.geo is None: pytest.skip(f'Geo Spatial type not supported for {backend}.') return backend.geo @pytest.fixture def analytic_alltypes(alltypes): return alltypes @pytest.fixture(scope='session') def df(alltypes): return alltypes.execute() @pytest.fixture(scope='session') def sorted_df(backend, df): return df.sort_values('id').reset_index(drop=True) @pytest.fixture(scope='session') def batting_df(batting): return batting.execute(limit=None) @pytest.fixture(scope='session') def awards_players_df(awards_players): return awards_players.execute(limit=None) @pytest.fixture(scope='session') def geo_df(geo): # Currently geo is implemented just for OmniSciDB if geo is not None: return geo.execute(limit=None) return None @pytest.fixture def temp_table(con) -> str: """ Return a temporary table name. Parameters ---------- con : ibis.backends.base.Client Yields ------ name : string Random table name for a temporary usage. """ name = _random_identifier('table') try: yield name finally: try: con.drop_table(name, force=True) except NotImplementedError: pass @pytest.fixture def temp_view(con) -> str: """Return a temporary view name. Parameters ---------- con : ibis.omniscidb.OmniSciDBClient Yields ------ name : string Random view name for a temporary usage. """ name = _random_identifier('view') try: yield name finally: try: con.drop_view(name, force=True) except NotImplementedError: pass @pytest.fixture(scope='session') def current_data_db(con, backend) -> str: """Return current database name.""" try: return con.current_database except NotImplementedError: pytest.skip( f"{backend.name()} backend doesn't have current_database method." ) @pytest.fixture def alternate_current_database(con, backend, current_data_db: str) -> str: """Create a temporary database and yield its name. Drops the created database upon completion. Parameters ---------- con : ibis.backends.base.Client current_data_db : str Yields ------- str """ name = _random_identifier('database') try: con.create_database(name) except NotImplementedError: pytest.skip( f'{backend.name()} backend doesn\'t have create_database method.' ) try: yield name finally: con.set_database(current_data_db) con.drop_database(name, force=True) @pytest.fixture def test_employee_schema() -> ibis.schema: sch = ibis.schema( [ ('first_name', 'string'), ('last_name', 'string'), ('department_name', 'string'), ('salary', 'float64'), ] ) return sch @pytest.fixture def test_employee_data_1(): df = pd.DataFrame( { 'first_name': ['A', 'B', 'C'], 'last_name': ['D', 'E', 'F'], 'department_name': ['AA', 'BB', 'CC'], 'salary': [100.0, 200.0, 300.0], } ) return df @pytest.fixture def test_employee_data_2(): df2 = pd.DataFrame( { 'first_name': ['X', 'Y', 'Z'], 'last_name': ['A', 'B', 'C'], 'department_name': ['XX', 'YY', 'ZZ'], 'salary': [400.0, 500.0, 600.0], } ) return df2
cloudera/ibis
ibis/backends/tests/conftest.py
Python
apache-2.0
9,642
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for TPUs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import tensorflow.compat.v1 as tf from tensorflow.contrib import summary as contrib_summary from tensorflow.contrib import tpu as contrib_tpu gfile = tf.gfile USE_MOVING_AVERAGE = 'USE_MOVING_AVERAGE' def get_lr(curr_step, params): """Compute learning rate at step depends on `params`.""" lr = tf.constant(params.learning_rate, dtype=tf.float32) if 'num_warmup_steps' in params and params.num_warmup_steps > 0: num_warmup_steps = tf.cast(params.num_warmup_steps, dtype=tf.float32) step = tf.cast(curr_step, dtype=tf.float32) warmup_lr = params.learning_rate * step / num_warmup_steps lr = tf.cond(tf.less(step, num_warmup_steps), lambda: warmup_lr, lambda: lr) return lr def strip_var_name(var_name): """Strips variable name of sub-strings blocking variable name matching.""" # Strip trailing number, e.g. convert # 'lstm/W_0:0' to 'lstm/W_0'. var_name = re.sub(r':\d+$', '', var_name) # Strip partitioning info, e.g. convert # 'W_0/part_3/Adagrad' to 'W_0/Adagrad'. var_name = re.sub(r'/part_\d+', '', var_name) return var_name def create_estimator(params, model_dir, model_fn): """Create a `TPUEstimator`.""" tpu_config = contrib_tpu.TPUConfig( iterations_per_loop=params.save_every, num_cores_per_replica=2, per_host_input_for_training=contrib_tpu.InputPipelineConfig.PER_HOST_V2, # pylint: disable=line-too-long input_partition_dims=[{ 'x': [1, 2], 'y': [1, 2] }, None], tpu_job_name=params.tpu_job_name, ) session_config = tf.ConfigProto( operation_timeout_in_ms=int(6e9), allow_soft_placement=True, isolate_session_state=True) run_config = contrib_tpu.RunConfig( tpu_config=tpu_config, master=params.master, session_config=session_config, log_step_count_steps=None, keep_checkpoint_max=5, save_checkpoints_steps=params.save_every) estimator = contrib_tpu.TPUEstimator( model_fn=model_fn, model_dir=model_dir, train_batch_size=params.train_batch_size, eval_batch_size=params.eval_batch_size, config=run_config, params=params, use_tpu=params.use_tpu, eval_on_tpu=True) return estimator def build_host_call_fn(params, names_and_tensors): """Wrapper to build `host_call` for `TPUEstimator`. Args: params: a `tf.contrib.train.HParams` object. names_and_tensors: list of elemens such as `("loss", loss)`. These are the tensors' names and values. Returns: A pair of `(host_call_fn, tensors)` for `TPUEstimatorSpec`. """ names, tensors = zip(*names_and_tensors) def host_call_fn(global_step, *tensors): """Training host call.""" global_step = global_step[0] with contrib_summary.create_file_writer(params.output_dir).as_default(): with contrib_summary.record_summaries_every_n_global_steps( n=params.log_every, global_step=global_step): for i, tensor in enumerate(tensors): if 'images' not in names[i]: contrib_summary.scalar(names[i], tensor[0], step=global_step) return contrib_summary.all_summary_ops() global_step = tf.reshape(tf.train.get_or_create_global_step(), [1]) tensors = [tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0) for t in tensors] return (host_call_fn, [global_step] + tensors)
google-research/google-research
enas_lm/src/tpu/utils.py
Python
apache-2.0
4,102
# Copyright 2013 Rackspace Hosting Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron.extensions import securitygroup as sg_ext from neutron import quota from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from quark.db import api as db_api from quark.drivers import registry from quark.environment import Capabilities from quark import exceptions as q_exc from quark import ipam from quark import network_strategy from quark import plugin_views as v from quark import tags from quark import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) PORT_TAG_REGISTRY = tags.PORT_TAG_REGISTRY STRATEGY = network_strategy.STRATEGY # HACK(amir): RM9305: do not allow a tenant to associate a network to a port # that does not belong to them unless it is publicnet or servicenet # NOTE(blogan): allow advanced services, such as lbaas, the ability # to associate a network to a port that does not belong to them def _raise_if_unauthorized(context, net): if (not STRATEGY.is_provider_network(net["id"]) and net["tenant_id"] != context.tenant_id and not context.is_advsvc): raise n_exc.NotAuthorized() def _get_net_driver(network, port=None): port_driver = None if port and port.get("network_plugin"): port_driver = port.get("network_plugin") try: return registry.DRIVER_REGISTRY.get_driver( network["network_plugin"], port_driver=port_driver) except Exception as e: raise n_exc.BadRequest(resource="ports", msg="invalid network_plugin: %s" % e) def _get_ipam_driver(network, port=None): network_id = network["id"] network_strategy = network["ipam_strategy"] # Ask the net driver for a IPAM strategy to use # with the given network/default strategy. net_driver = _get_net_driver(network, port=port) strategy = net_driver.select_ipam_strategy( network_id, network_strategy) # If the driver has no opinion about which strategy to use, # we use the one specified by the network. if not strategy: strategy = network_strategy try: return ipam.IPAM_REGISTRY.get_strategy(strategy) except Exception as e: raise n_exc.BadRequest(resource="ports", msg="invalid ipam_strategy: %s" % e) # NOTE(morgabra) Backend driver operations return a lot of stuff. We use a # small subset of this data, so we filter out things we don't care about # so we can avoid any collisions with real port data. def _filter_backend_port(backend_port): # Collect a list of allowed keys in the driver response required_keys = ["uuid", "bridge"] tag_keys = [tag for tag in PORT_TAG_REGISTRY.tags.keys()] allowed_keys = required_keys + tag_keys for k in backend_port.keys(): if k not in allowed_keys: del backend_port[k] def split_and_validate_requested_subnets(context, net_id, segment_id, fixed_ips): subnets = [] ip_addresses = {} for fixed_ip in fixed_ips: subnet_id = fixed_ip.get("subnet_id") ip_address = fixed_ip.get("ip_address") if not subnet_id: raise n_exc.BadRequest(resource="fixed_ips", msg="subnet_id required") if ip_address: ip_addresses[ip_address] = subnet_id else: subnets.append(subnet_id) subnets = ip_addresses.values() + subnets sub_models = db_api.subnet_find(context, id=subnets, scope=db_api.ALL) if len(sub_models) == 0: raise n_exc.SubnetNotFound(subnet_id=subnets) for s in sub_models: if s["network_id"] != net_id: raise n_exc.InvalidInput( error_message="Requested subnet doesn't belong to requested " "network") if segment_id and segment_id != s["segment_id"]: raise q_exc.AmbiguousNetworkId(net_id=net_id) return ip_addresses, subnets def create_port(context, port): """Create a port Create a port which is a connection point of a device (e.g., a VM NIC) to attach to a L2 Neutron network. : param context: neutron api request context : param port: dictionary describing the port, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_port for tenant %s" % context.tenant_id) port_attrs = port["port"] admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up", "use_forbidden_mac_range", "network_plugin", "instance_node_id"] utils.filter_body(context, port_attrs, admin_only=admin_only) port_attrs = port["port"] mac_address = utils.pop_param(port_attrs, "mac_address", None) use_forbidden_mac_range = utils.pop_param(port_attrs, "use_forbidden_mac_range", False) segment_id = utils.pop_param(port_attrs, "segment_id") fixed_ips = utils.pop_param(port_attrs, "fixed_ips") if "device_id" not in port_attrs: port_attrs['device_id'] = "" device_id = port_attrs['device_id'] # NOTE(morgabra) This should be instance.node from nova, only needed # for ironic_driver. if "instance_node_id" not in port_attrs: port_attrs['instance_node_id'] = "" instance_node_id = port_attrs['instance_node_id'] net_id = port_attrs["network_id"] port_id = uuidutils.generate_uuid() net = db_api.network_find(context, None, None, None, False, id=net_id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=net_id) _raise_if_unauthorized(context, net) # NOTE (Perkins): If a device_id is given, try to prevent multiple ports # from being created for a device already attached to the network if device_id: existing_ports = db_api.port_find(context, network_id=net_id, device_id=device_id, scope=db_api.ONE) if existing_ports: raise n_exc.BadRequest( resource="port", msg="This device is already connected to the " "requested network via another port") # Try to fail early on quotas and save ourselves some db overhead if fixed_ips: quota.QUOTAS.limit_check(context, context.tenant_id, fixed_ips_per_port=len(fixed_ips)) if not STRATEGY.is_provider_network(net_id): # We don't honor segmented networks when they aren't "shared" segment_id = None port_count = db_api.port_count_all(context, network_id=[net_id], tenant_id=[context.tenant_id]) quota.QUOTAS.limit_check( context, context.tenant_id, ports_per_network=port_count + 1) else: if not segment_id: raise q_exc.AmbiguousNetworkId(net_id=net_id) network_plugin = utils.pop_param(port_attrs, "network_plugin") if not network_plugin: network_plugin = net["network_plugin"] port_attrs["network_plugin"] = network_plugin ipam_driver = _get_ipam_driver(net, port=port_attrs) net_driver = _get_net_driver(net, port=port_attrs) # NOTE(morgabra) It's possible that we select a driver different than # the one specified by the network. However, we still might need to use # this for some operations, so we also fetch it and pass it along to # the backend driver we are actually using. base_net_driver = _get_net_driver(net) # TODO(anyone): security groups are not currently supported on port create. # Please see JIRA:NCP-801 security_groups = utils.pop_param(port_attrs, "security_groups") if security_groups is not None: raise q_exc.SecurityGroupsNotImplemented() group_ids, security_groups = _make_security_group_list(context, security_groups) quota.QUOTAS.limit_check(context, context.tenant_id, security_groups_per_port=len(group_ids)) addresses = [] backend_port = None with utils.CommandManager().execute() as cmd_mgr: @cmd_mgr.do def _allocate_ips(fixed_ips, net, port_id, segment_id, mac): fixed_ip_kwargs = {} if fixed_ips: if (STRATEGY.is_provider_network(net_id) and not context.is_admin): raise n_exc.NotAuthorized() ips, subnets = split_and_validate_requested_subnets(context, net_id, segment_id, fixed_ips) fixed_ip_kwargs["ip_addresses"] = ips fixed_ip_kwargs["subnets"] = subnets ipam_driver.allocate_ip_address( context, addresses, net["id"], port_id, CONF.QUARK.ipam_reuse_after, segment_id=segment_id, mac_address=mac, **fixed_ip_kwargs) @cmd_mgr.undo def _allocate_ips_undo(addr): LOG.info("Rolling back IP addresses...") if addresses: for address in addresses: try: with context.session.begin(): ipam_driver.deallocate_ip_address(context, address) except Exception: LOG.exception("Couldn't release IP %s" % address) @cmd_mgr.do def _allocate_mac(net, port_id, mac_address, use_forbidden_mac_range=False): mac = ipam_driver.allocate_mac_address( context, net["id"], port_id, CONF.QUARK.ipam_reuse_after, mac_address=mac_address, use_forbidden_mac_range=use_forbidden_mac_range) return mac @cmd_mgr.undo def _allocate_mac_undo(mac): LOG.info("Rolling back MAC address...") if mac: try: with context.session.begin(): ipam_driver.deallocate_mac_address(context, mac["address"]) except Exception: LOG.exception("Couldn't release MAC %s" % mac) @cmd_mgr.do def _allocate_backend_port(mac, addresses, net, port_id): backend_port = net_driver.create_port( context, net["id"], port_id=port_id, security_groups=group_ids, device_id=device_id, instance_node_id=instance_node_id, mac_address=mac, addresses=addresses, base_net_driver=base_net_driver) _filter_backend_port(backend_port) return backend_port @cmd_mgr.undo def _allocate_back_port_undo(backend_port): LOG.info("Rolling back backend port...") try: backend_port_uuid = None if backend_port: backend_port_uuid = backend_port.get("uuid") net_driver.delete_port(context, backend_port_uuid) except Exception: LOG.exception( "Couldn't rollback backend port %s" % backend_port) @cmd_mgr.do def _allocate_db_port(port_attrs, backend_port, addresses, mac): port_attrs["network_id"] = net["id"] port_attrs["id"] = port_id port_attrs["security_groups"] = security_groups LOG.info("Including extra plugin attrs: %s" % backend_port) port_attrs.update(backend_port) with context.session.begin(): new_port = db_api.port_create( context, addresses=addresses, mac_address=mac["address"], backend_key=backend_port["uuid"], **port_attrs) return new_port @cmd_mgr.undo def _allocate_db_port_undo(new_port): LOG.info("Rolling back database port...") if not new_port: return try: with context.session.begin(): db_api.port_delete(context, new_port) except Exception: LOG.exception( "Couldn't rollback db port %s" % backend_port) # addresses, mac, backend_port, new_port mac = _allocate_mac(net, port_id, mac_address, use_forbidden_mac_range=use_forbidden_mac_range) _allocate_ips(fixed_ips, net, port_id, segment_id, mac) backend_port = _allocate_backend_port(mac, addresses, net, port_id) new_port = _allocate_db_port(port_attrs, backend_port, addresses, mac) return v._make_port_dict(new_port) def update_port(context, id, port): """Update values of a port. : param context: neutron api request context : param id: UUID representing the port to update. : param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_port %s for tenant %s" % (id, context.tenant_id)) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) if not port_db: raise n_exc.PortNotFound(port_id=id) port_dict = port["port"] fixed_ips = port_dict.pop("fixed_ips", None) admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up", "device_id"] always_filter = ["network_id", "backend_key", "network_plugin"] utils.filter_body(context, port_dict, admin_only=admin_only, always_filter=always_filter) # Pre-check the requested fixed_ips before making too many db trips. # Note that this is the only check we need, since this call replaces # the entirety of the IP addresses document if fixed_ips are provided. if fixed_ips: quota.QUOTAS.limit_check(context, context.tenant_id, fixed_ips_per_port=len(fixed_ips)) new_security_groups = utils.pop_param(port_dict, "security_groups") if new_security_groups is not None: if (Capabilities.TENANT_NETWORK_SG not in CONF.QUARK.environment_capabilities): if not STRATEGY.is_provider_network(port_db["network_id"]): raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled() if new_security_groups is not None and not port_db["device_id"]: raise q_exc.SecurityGroupsRequireDevice() group_ids, security_group_mods = _make_security_group_list( context, new_security_groups) quota.QUOTAS.limit_check(context, context.tenant_id, security_groups_per_port=len(group_ids)) if fixed_ips is not None: # NOTE(mdietz): we want full control over IPAM since # we're allocating by subnet instead of # network. ipam_driver = ipam.IPAM_REGISTRY.get_strategy( ipam.QuarkIpamANY.get_name()) addresses, subnet_ids = [], [] ip_addresses = {} for fixed_ip in fixed_ips: subnet_id = fixed_ip.get("subnet_id") ip_address = fixed_ip.get("ip_address") if not (subnet_id or ip_address): raise n_exc.BadRequest( resource="fixed_ips", msg="subnet_id or ip_address required") if ip_address and not subnet_id: raise n_exc.BadRequest( resource="fixed_ips", msg="subnet_id required for ip_address allocation") if subnet_id and ip_address: ip_netaddr = None try: ip_netaddr = netaddr.IPAddress(ip_address).ipv6() except netaddr.AddrFormatError: raise n_exc.InvalidInput( error_message="Invalid format provided for ip_address") ip_addresses[ip_netaddr] = subnet_id else: subnet_ids.append(subnet_id) port_ips = set([netaddr.IPAddress(int(a["address"])) for a in port_db["ip_addresses"]]) new_ips = set([a for a in ip_addresses.keys()]) ips_to_allocate = list(new_ips - port_ips) ips_to_deallocate = list(port_ips - new_ips) for ip in ips_to_allocate: if ip in ip_addresses: # NOTE: Fix for RM10187 - we were losing the list of IPs if # more than one IP was to be allocated. Track an # aggregate list instead, and add it to the running total # after each allocate allocated = [] ipam_driver.allocate_ip_address( context, allocated, port_db["network_id"], port_db["id"], reuse_after=None, ip_addresses=[ip], subnets=[ip_addresses[ip]]) addresses.extend(allocated) for ip in ips_to_deallocate: ipam_driver.deallocate_ips_by_port( context, port_db, ip_address=ip) for subnet_id in subnet_ids: ipam_driver.allocate_ip_address( context, addresses, port_db["network_id"], port_db["id"], reuse_after=CONF.QUARK.ipam_reuse_after, subnets=[subnet_id]) # Need to return all existing addresses and the new ones if addresses: port_dict["addresses"] = port_db["ip_addresses"] port_dict["addresses"].extend(addresses) # NOTE(morgabra) Updating network_plugin on port objects is explicitly # disallowed in the api, so we use whatever exists in the db. net_driver = _get_net_driver(port_db.network, port=port_db) base_net_driver = _get_net_driver(port_db.network) # TODO(anyone): What do we want to have happen here if this fails? Is it # ok to continue to keep the IPs but fail to apply security # groups? Is there a clean way to have a multi-status? Since # we're in a beta-y status, I'm going to let this sit for # a future patch where we have time to solve it well. kwargs = {} if new_security_groups is not None: kwargs["security_groups"] = security_group_mods net_driver.update_port(context, port_id=port_db["backend_key"], mac_address=port_db["mac_address"], device_id=port_db["device_id"], base_net_driver=base_net_driver, **kwargs) port_dict["security_groups"] = security_group_mods with context.session.begin(): port = db_api.port_update(context, port_db, **port_dict) # NOTE(mdietz): fix for issue 112, we wanted the IPs to be in # allocated_at order, so get a fresh object every time if port_db in context.session: context.session.expunge(port_db) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) return v._make_port_dict(port_db) def get_port(context, id, fields=None): """Retrieve a port. : param context: neutron api request context : param id: UUID representing the port to fetch. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_port %s for tenant %s fields %s" % (id, context.tenant_id, fields)) results = db_api.port_find(context, id=id, fields=fields, scope=db_api.ONE) if not results: raise n_exc.PortNotFound(port_id=id) return v._make_port_dict(results) def get_ports(context, limit=None, sorts=None, marker=None, page_reverse=False, filters=None, fields=None): """Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_ports for tenant %s filters %s fields %s" % (context.tenant_id, filters, fields)) if filters is None: filters = {} if "ip_address" in filters: if not context.is_admin: raise n_exc.NotAuthorized() ips = [] try: ips = [netaddr.IPAddress(ip) for ip in filters.pop("ip_address")] except netaddr.AddrFormatError: raise n_exc.InvalidInput( error_message="Invalid format provided for ip_address") query = db_api.port_find_by_ip_address(context, ip_address=ips, scope=db_api.ALL, **filters) ports = [] for ip in query: ports.extend(ip.ports) else: ports = db_api.port_find(context, limit, sorts, marker, fields=fields, join_security_groups=True, **filters) return v._make_ports_list(ports, fields) def get_ports_count(context, filters=None): """Return the number of ports. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ LOG.info("get_ports_count for tenant %s filters %s" % (context.tenant_id, filters)) return db_api.port_count_all(context, join_security_groups=True, **filters) def delete_port(context, id): """Delete a port. : param context: neutron api request context : param id: UUID representing the port to delete. """ LOG.info("delete_port %s for tenant %s" % (id, context.tenant_id)) port = db_api.port_find(context, id=id, scope=db_api.ONE) if not port: raise n_exc.PortNotFound(port_id=id) if 'device_id' in port: # false is weird, but ignore that LOG.info("delete_port %s for tenant %s has device %s" % (id, context.tenant_id, port['device_id'])) backend_key = port["backend_key"] mac_address = netaddr.EUI(port["mac_address"]).value ipam_driver = _get_ipam_driver(port["network"], port=port) ipam_driver.deallocate_mac_address(context, mac_address) ipam_driver.deallocate_ips_by_port( context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after) net_driver = _get_net_driver(port["network"], port=port) base_net_driver = _get_net_driver(port["network"]) net_driver.delete_port(context, backend_key, device_id=port["device_id"], mac_address=port["mac_address"], base_net_driver=base_net_driver) with context.session.begin(): db_api.port_delete(context, port) def _diag_port(context, port, fields): p = v._make_port_dict(port) net_driver = _get_net_driver(port.network, port=port) if 'config' in fields: p.update(net_driver.diag_port( context, port["backend_key"], get_status='status' in fields)) return p def diagnose_port(context, id, fields): if not context.is_admin: raise n_exc.NotAuthorized() if id == "*": return {'ports': [_diag_port(context, port, fields) for port in db_api.port_find(context).all()]} db_port = db_api.port_find(context, id=id, scope=db_api.ONE) if not db_port: raise n_exc.PortNotFound(port_id=id) port = _diag_port(context, db_port, fields) return {'ports': port} def _make_security_group_list(context, group_ids): if not group_ids or not utils.attr_specified(group_ids): return ([], []) group_ids = list(set(group_ids)) groups = [] for gid in group_ids: group = db_api.security_group_find(context, id=gid, scope=db_api.ONE) if not group: raise sg_ext.SecurityGroupNotFound(id=gid) groups.append(group) return (group_ids, groups)
rackerlabs/quark
quark/plugin_modules/ports.py
Python
apache-2.0
26,372
from .error_info import TypescriptErrorInfo from .error_list import TypescriptProjectErrorList, TypescriptGoToError from .go_to_definition import TypescriptGoToDefinitionCommand from .go_to_type import TypescriptGoToTypeCommand from .nav_to import TypescriptNavToCommand from .quick_info import TypescriptQuickInfo, TypescriptQuickInfoDoc from .save import TypescriptSave from .show_doc import TypescriptShowDoc from .signature import TypescriptSignaturePanel, TypescriptSignaturePopup from .format import ( TypescriptFormatBrackets, TypescriptFormatDocument, TypescriptFormatLine, TypescriptFormatOnKey, TypescriptFormatSelection, TypescriptPasteAndFormat, TypescriptAutoIndentOnEnterBetweenCurlyBrackets ) from .references import ( TypescriptFindReferencesCommand, TypescriptGoToRefCommand, TypescriptNextRefCommand, TypescriptPopulateRefs, TypescriptPrevRefCommand ) from .rename import ( TypescriptDelayedRenameFile, TypescriptFinishRenameCommand, TypescriptRenameCommand ) from .build import TypescriptBuildCommand from .settings import ( TypescriptOpenPluginDefaultSettingFile, TypescriptOpenTsDefaultSettingFile, TypescriptOpenTsreactDefaultSettingFile ) __all__ = [ "TypescriptAutoIndentOnEnterBetweenCurlyBrackets", "TypescriptErrorInfo", "TypescriptProjectErrorList", "TypescriptGoToError", "TypescriptFormatBrackets", "TypescriptFormatDocument", "TypescriptFormatLine", "TypescriptFormatOnKey", "TypescriptFormatSelection", "TypescriptPasteAndFormat", "TypescriptGoToDefinitionCommand", "TypescriptGoToTypeCommand", "TypescriptGoToRefCommand", "TypescriptNavToCommand", "TypescriptQuickInfo", "TypescriptQuickInfoDoc", "TypescriptFindReferencesCommand", "TypescriptGoToDefinitionCommand", "TypescriptNextRefCommand", "TypescriptPopulateRefs", "TypescriptPrevRefCommand", "TypescriptDelayedRenameFile", "TypescriptFinishRenameCommand", "TypescriptRenameCommand", "TypescriptSave", "TypescriptShowDoc", "TypescriptSignaturePanel", "TypescriptSignaturePopup", "TypescriptBuildCommand", "TypescriptOpenPluginDefaultSettingFile", "TypescriptOpenTsDefaultSettingFile", "TypescriptOpenTsreactDefaultSettingFile" ]
zhengbli/TypeScript-Sublime-Plugin
typescript/commands/__init__.py
Python
apache-2.0
2,322
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'GroupEnum' : _MetaInfoEnum('GroupEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'address-family-group':'ADDRESS_FAMILY_GROUP', 'session-group':'SESSION_GROUP', 'neighbor-group':'NEIGHBOR_GROUP', 'neighbor':'NEIGHBOR', 'error-group':'ERROR_GROUP', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'AttachPointDirectionEnum' : _MetaInfoEnum('AttachPointDirectionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'in':'IN', 'out':'OUT', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'SubAddressFamilyEnum' : _MetaInfoEnum('SubAddressFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'unicast':'UNICAST', 'multicast':'MULTICAST', 'label':'LABEL', 'tunnel':'TUNNEL', 'vpn':'VPN', 'mdt':'MDT', 'vpls':'VPLS', 'rt-constraint':'RT_CONSTRAINT', 'mvpn':'MVPN', 'flow':'FLOW', 'vpn-mcast':'VPN_MCAST', 'saf-none':'SAF_NONE', 'saf-unknown':'SAF_UNKNOWN', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'AddressFamilyEnum' : _MetaInfoEnum('AddressFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'ipv4':'IPV4', 'ipv6':'IPV6', 'l2vpn':'L2VPN', 'af-none':'AF_NONE', 'af-unknown':'AF_UNKNOWN', 'ls':'LS', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'ObjectStatusEnum' : _MetaInfoEnum('ObjectStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'active':'ACTIVE', 'inactive':'INACTIVE', 'unused':'UNUSED', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'RoutingPolicy.Limits' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Limits', False, [ _MetaInfoClassMember('compiled-policies-length', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' The total compiled length of all policies ''', 'compiled_policies_length', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-lines-of-policy-limit', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Number of lines of configuration for policies/sets currently allowed ''', 'current_lines_of_policy_limit', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-lines-of-policy-used', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Current number of lines configured for all policies and sets ''', 'current_lines_of_policy_used', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-number-of-policies-limit', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Number of policies currently allowed ''', 'current_number_of_policies_limit', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-number-of-policies-used', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Current number of policies configured ''', 'current_number_of_policies_used', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('maximum-lines-of-policy', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Maximum lines of configuration allowable for all policies and sets ''', 'maximum_lines_of_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('maximum-number-of-policies', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Maximum number of policies allowable ''', 'maximum_number_of_policies', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'limits', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'directly-used-policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets', False, [ _MetaInfoClassMember('set-domain', ATTRIBUTE, 'str' , None, None, [], [], ''' Domain of sets ''', 'set_domain', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('set-name', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Names of sets in this domain ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets', False, [ _MetaInfoClassMember('sets', REFERENCE_LIST, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets', [], [], ''' List of sets in several domains ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'all-used-sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets', False, [ _MetaInfoClassMember('set-domain', ATTRIBUTE, 'str' , None, None, [], [], ''' Domain of sets ''', 'set_domain', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('set-name', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Names of sets in this domain ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets', False, [ _MetaInfoClassMember('sets', REFERENCE_LIST, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets', [], [], ''' List of sets in several domains ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'directly-used-sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'all-used-policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses', False, [ _MetaInfoClassMember('all-used-policies', REFERENCE_CLASS, 'AllUsedPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies', [], [], ''' Policies used by this policy, or by policies that it uses ''', 'all_used_policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('all-used-sets', REFERENCE_CLASS, 'AllUsedSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets', [], [], ''' Sets used by this policy, or by policies that it uses ''', 'all_used_sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('directly-used-policies', REFERENCE_CLASS, 'DirectlyUsedPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies', [], [], ''' Policies that this policy uses directly ''', 'directly_used_policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('directly-used-sets', REFERENCE_CLASS, 'DirectlyUsedSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets', [], [], ''' Sets that this policy uses directly ''', 'directly_used_sets', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'policy-uses', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Route policy name ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('policy-uses', REFERENCE_CLASS, 'PolicyUses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses', [], [], ''' Information about which policies and sets this policy uses ''', 'policy_uses', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'route-policy', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies', False, [ _MetaInfoClassMember('route-policy', REFERENCE_LIST, 'RoutePolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy', [], [], ''' Information about an individual policy ''', 'route_policy', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'route-policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policies', REFERENCE_CLASS, 'RoutePolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies', [], [], ''' Information about individual policies ''', 'route_policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'ospf-area', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-opaque', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-seg-nh', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-soo', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'tag', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'prefix', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'community', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.AsPath', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'as-path', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityBandwidth' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityBandwidth', False, [ _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-bandwidth', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityRt' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityRt', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-rt', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Rd' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Rd', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'rd', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityCost' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityCost', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-cost', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets', False, [ _MetaInfoClassMember('as-path', REFERENCE_CLASS, 'AsPath' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.AsPath', [], [], ''' Information about AS Path sets ''', 'as_path', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('community', REFERENCE_CLASS, 'Community' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community', [], [], ''' Information about Community sets ''', 'community', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('extended-community-bandwidth', REFERENCE_CLASS, 'ExtendedCommunityBandwidth' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityBandwidth', [], [], ''' Information about Extended Community Bandwidth sets ''', 'extended_community_bandwidth', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('extended-community-cost', REFERENCE_CLASS, 'ExtendedCommunityCost' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityCost', [], [], ''' Information about Extended Community Cost sets ''', 'extended_community_cost', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('extended-community-opaque', REFERENCE_CLASS, 'ExtendedCommunityOpaque' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque', [], [], ''' Information about Extended Community Opaque sets ''', 'extended_community_opaque', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('extended-community-rt', REFERENCE_CLASS, 'ExtendedCommunityRt' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityRt', [], [], ''' Information about Extended Community RT sets ''', 'extended_community_rt', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('extended-community-seg-nh', REFERENCE_CLASS, 'ExtendedCommunitySegNh' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh', [], [], ''' Information about Extended Community SegNH sets ''', 'extended_community_seg_nh', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('extended-community-soo', REFERENCE_CLASS, 'ExtendedCommunitySoo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo', [], [], ''' Information about Extended Community SOO sets ''', 'extended_community_soo', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('ospf-area', REFERENCE_CLASS, 'OspfArea' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea', [], [], ''' Information about OSPF Area sets ''', 'ospf_area', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('prefix', REFERENCE_CLASS, 'Prefix' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix', [], [], ''' Information about AS Path sets ''', 'prefix', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('rd', REFERENCE_CLASS, 'Rd' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Rd', [], [], ''' Information about RD sets ''', 'rd', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('tag', REFERENCE_CLASS, 'Tag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag', [], [], ''' Information about Tag sets ''', 'tag', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy', False, [ _MetaInfoClassMember('limits', REFERENCE_CLASS, 'Limits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Limits', [], [], ''' Information about configured limits and the current values ''', 'limits', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('policies', REFERENCE_CLASS, 'Policies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies', [], [], ''' Information about configured route policies ''', 'policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets', [], [], ''' Information about configured sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'routing-policy', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, } _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies.RoutePolicy']['meta_info'].parent =_meta_table['RoutingPolicy.Policies.RoutePolicies']['meta_info'] _meta_table['RoutingPolicy.Policies.RoutePolicies']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info'] _meta_table['RoutingPolicy.Policies.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info'] _meta_table['RoutingPolicy.Policies.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info'] _meta_table['RoutingPolicy.Policies.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Policies']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.OspfArea']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Tag']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Prefix']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info'] _meta_table['RoutingPolicy.Sets.Community.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Community']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.AsPath']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.Rd']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy.Reference']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached.Binding']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.UsedBy']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set.Attached']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets.Set']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Sets']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Unused']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Inactive']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost.Active']['meta_info'].parent =_meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info'] _meta_table['RoutingPolicy.Sets.OspfArea']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityOpaque']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySegNh']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunitySoo']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Tag']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Prefix']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Community']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.AsPath']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityBandwidth']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityRt']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.Rd']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Sets.ExtendedCommunityCost']['meta_info'].parent =_meta_table['RoutingPolicy.Sets']['meta_info'] _meta_table['RoutingPolicy.Limits']['meta_info'].parent =_meta_table['RoutingPolicy']['meta_info'] _meta_table['RoutingPolicy.Policies']['meta_info'].parent =_meta_table['RoutingPolicy']['meta_info'] _meta_table['RoutingPolicy.Sets']['meta_info'].parent =_meta_table['RoutingPolicy']['meta_info']
abhikeshav/ydk-py
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_policy_repository_oper.py
Python
apache-2.0
256,335
""" R.P.D. Ransom's Python Daemon """ import os import pathlib import subprocess import asyncio from subprocess import ( run, ) from typing import ( List, Any, ) from functools import ( partial, ) ##BEGIN aliases # modules aio=asyncio subps=subprocess # classes Ex = Exception Exc = Exception # defns home=pathlib.Path.home user_home_dir=pathlib.Path.home ##END class Svc: repo_name: str build_cmds: List[str] run_cmd: List[str] daemon: Any def __init__(self, repo_name): self.repo_name = repo_name self.build_cmds = [] self.run_cmd = 'false' self.daemon = None def checkout_svc__( svc, repos_cache_dir, github_username, git_tag=None, ): repo_name = svc.repo_name git_url = ( 'https://github.com/' + github_username + '/' + repo_name) res = run([ 'git', 'clone', git_url, ], cwd=repos_cache_dir, stdout=subps.PIPE, stderr=subps.PIPE, check=True, ) if git_tag is None: return repo_cache_dir = os.path.join( repos_cache_dir, repo_name, ) res = run([ 'git', 'checkout', git_tag, ], cwd=repo_cache_dir, stdout=subps.PIPE, stderr=subps.PIPE, check=True, ) def make_shellout( cwd interactive=False, ): async def shellout(cmd): proc = aio.create_subprocess_shell( cmd, cwd=cwd, ) stdout, stderr = await proc.communicate() res = await proc.wait() if res != 0: raise Exception() return { 'res': res, 'stdout': stdout, 'stderr': stderr, } async def interactive_shellout(cmd): proc = aio.create_subprocess_shell( cmd, cwd=cwd, ) return proc if interactive: return interactive_shellout return shellout async def checkout_repo( repo_name, repos_cache_dir, github_username='ransomw', git_tag=None, ): git_url = ( 'https://github.com/' + github_username + '/' + repo_name) repo_cache_dir = os.path.join( repos_cache_dir, repo_name, ) if not os.path.exists(repo_cache_dir): shellout = make_shellout(repos_cache_dir) await shellout( ['git', 'clone', git_url,], ) del shellout shellout = make_shellout(repo_cache_dir) if git_tag is not None: await shellout( ['git', 'checkout', git_tag,]) await shellout(['git', 'pull',]) async def build_and_run__contacts_store( repo_cache_dir, ): shellout = make_shellout(repo_cache_dir) await shellout( ['node', 'bin/initdb',]) shellout = make_shellout( repo_cache_dir, interactive=True,) proc = await shellout( ['node', 'bin/run', '-p', str(port),]) def build_service(svc, repo_cache_dir): build_cmds = svc.build_cmds for build_cmd in build_cmds: if isinstance(build_cmd, (str,)): build_cmd = shlex.split( build_cmd) if not isinstance(build_cmd, (list,)): raise Exception() res = run(build_cmd, cwd=repo_cache_dir, stdout=subps.PIPE, stderr=subps.PIPE, check=True, ) async def _read_stdio(daemon): while True: with daemon['rw_lock']: pass aio.sleep(1.5) def run_service(svc, repo_cache_dir): run_cmd = svc.run_cmd if isinstance(run_cmd, (str,)): run_cmd = shlex.split(run_cmd) res = subps.Popen(run_cmd, cwd=repo_cache_dir, stdout=subps.PIPE, stderr=subps.PIPE, ) daemon = { 'popen': res, 'stdout': [], 'stderr': [], 'rw_lock': aio.Lock(), } reader_coproc = _read_stdio(daemon) svc.dameon = dict( **daemon, reader=reader_coproc, ) return svc class Config: svcs: List[Svc] repos_cache = ( os.path.join( user_home_dir(), '.cache', 'rpd__github_cache', )) ###
ransomw/dotfiles
pyutils/rpd/__init__.py
Python
apache-2.0
4,432
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2019 EMBL - European Bioinformatics Institute # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from django.conf import settings from django.contrib.auth import get_user_model from model_bakery import baker from emgapi import models as emg_models __all__ = ['apiclient', 'api_version', 'biome', 'biome_human', 'super_study', 'studies', 'samples', 'study', 'study_private', 'sample', 'sample_private', 'run_status', 'analysis_status', 'pipeline', 'pipelines', 'experiment_type', 'experiment_type_assembly', 'runs', 'run', 'run_v5', 'runjob_pipeline_v1', 'run_emptyresults', 'run_with_sample', 'analysis_results', 'run_multiple_analysis', 'var_names', 'analysis_metadata_variable_names', 'genome_catalogue', 'genome', 'assemblies', 'legacy_mapping'] @pytest.fixture def apiclient(): from rest_framework.test import APIClient return APIClient() @pytest.fixture def api_version(): return 'v{}'.format(settings.REST_FRAMEWORK['DEFAULT_VERSION']) @pytest.fixture def biome(): return emg_models.Biome.objects.create( biome_id=1, biome_name='bar', lft=0, rgt=1, depth=2, lineage='root:foo:bar', ) @pytest.fixture def biome_human(): return emg_models.Biome.objects.create( biome_id=1, biome_name='Human', lft=0, rgt=1, depth=2, lineage='root:Host-associated:Human', ) @pytest.fixture def genome_catalogue(biome_human): return emg_models.GenomeCatalogue.objects.create( biome=biome_human, name='Mandalorian Genomes v1.0', catalogue_id='mandalor-1-0', version='1.0' ) @pytest.fixture def genome(biome_human, genome_catalogue): return emg_models.Genome.objects.create( accession='MGYG000000001', biome=biome_human, catalogue=genome_catalogue, length=1, num_contigs=1, n_50=1, gc_content=1.0, type=emg_models.Genome.MAG, completeness=1.0, contamination=1.0, rna_5s=1.0, rna_16s=1.0, rna_23s=1.0, trnas=1.0, nc_rnas=1, num_proteins=1, eggnog_coverage=1.0, ipr_coverage=1.0, taxon_lineage='d__Test;', ) @pytest.fixture def super_study(study, study_private, biome): ss = emg_models.SuperStudy.objects.create( super_study_id=1, title='Human Microbiome', description='Just a test description', url_slug='human-microbiome', ) emg_models.SuperStudyBiome.objects.create(biome=biome, super_study=ss) emg_models.SuperStudyStudy.objects.create(study=study, super_study=ss) emg_models.SuperStudyStudy.objects.create(study=study_private, super_study=ss) return ss @pytest.fixture def studies(biome): studies = [] for pk in range(1, 50): studies.append( emg_models.Study( biome=biome, study_id=pk, secondary_accession='SRP0{:0>3}'.format(pk), centre_name='Centre Name', is_public=1, public_release_date=None, study_name='Example study name %i' % pk, study_status='FINISHED', data_origination='HARVESTED', submission_account_id='User-123', result_directory='2017/05/SRP{:0>3}'.format(pk), last_update='1970-01-01 00:00:00', first_created='1970-01-01 00:00:00', project_id='PRJDB0{:0>3}'.format(pk), ) ) return emg_models.Study.objects.bulk_create(studies) @pytest.fixture def samples(biome, studies): samples = [] for s in studies: pk = s.study_id samples.append( emg_models.Sample( biome=biome, sample_id=pk, accession='ERS0{:0>3}'.format(pk), is_public=1, species='homo sapiense', sample_name='Example sample name %i' % pk, latitude=12.3456, longitude=456.456, last_update='1970-01-01 00:00:00', geo_loc_name='INSTITUTE', ) ) samples = emg_models.Sample.objects.bulk_create(samples) rels = list() for st, sm in zip(studies, samples): rels.append(emg_models.StudySample(study=st, sample=sm)) emg_models.StudySample.objects.bulk_create(rels) return samples @pytest.fixture def study(biome): return emg_models.Study.objects.create( biome=biome, study_id=1234, secondary_accession='SRP01234', centre_name='Centre Name', is_public=1, public_release_date=None, study_name='Example study name SRP01234', study_abstract='abcdefghijklmnoprstuvwyz', study_status='FINISHED', data_origination='HARVESTED', submission_account_id='User-123', result_directory='2017/05/SRP01234', last_update='1970-01-01 00:00:00', first_created='1970-01-01 00:00:00', project_id='PRJDB1234', ) @pytest.fixture def study_private(biome): return emg_models.Study.objects.create( biome=biome, study_id=222, secondary_accession='SRP00000', centre_name='Centre Name', is_public=0, public_release_date=None, study_name='Example study name SRP00000', study_abstract='00000', study_status='FINISHED', data_origination='HARVESTED', submission_account_id='User-123', result_directory='2017/05/SRP00000', last_update='1970-01-01 00:00:00', first_created='1970-01-01 00:00:00', project_id='PRJDB0000', ) @pytest.fixture def sample(biome, study): sample = emg_models.Sample( biome=biome, pk=111, accession='ERS01234', primary_accession='SAMS01234', is_public=1, species='homo sapiense', sample_name='Example sample name ERS01234', sample_desc='abcdefghijklmnoprstuvwyz', latitude=12.3456, longitude=456.456, last_update='1970-01-01 00:00:00', analysis_completed='1970-01-01', collection_date='1970-01-01', environment_feature='abcdef', environment_material='abcdef', geo_loc_name='Geo Location', sample_alias='ERS01234', ) sample.save() rel = emg_models.StudySample(study=study, sample=sample) rel.save() return sample @pytest.fixture def sample_private(biome, study): sample = emg_models.Sample( biome=biome, pk=222, accession='ERS00000', primary_accession='SAMS00000', is_public=0, species='homo sapiense', sample_name='Example sample name ERS00000', sample_desc='abcdefghijklmnoprstuvwyz', latitude=12.3456, longitude=456.456, last_update='1970-01-01 00:00:00', analysis_completed='1970-01-01', collection_date='1970-01-01', environment_feature='abcdef', environment_material='abcdef', geo_loc_name='INSTITUTE', sample_alias='ERS00000', ) sample.save() rel = emg_models.StudySample(study=study, sample=sample) rel.save() return sample @pytest.fixture def analysis_status(): return emg_models.AnalysisStatus.objects.create( pk=3, analysis_status='3', ) @pytest.fixture def run_status(): status, _ = emg_models.Status.objects.get_or_create( pk=4, status='public', ) return status @pytest.fixture def pipeline(pipelines): """Return Pipeline Version 4.1 """ return pipelines.filter(release_version='4.1').first() @pytest.fixture def pipelines(): pipeline_version = [1.0, 4.0, 4.1, 5.0] i = 1 for pipe in pipeline_version: p, _ = emg_models.Pipeline.objects.get_or_create( pk=i, release_version=str(pipe), release_date='1970-01-01') i += 1 return emg_models.Pipeline.objects.all() @pytest.fixture def experiment_type(): return emg_models.ExperimentType.objects.create( pk=1, experiment_type='metagenomic' ) @pytest.fixture def experiment_type_assembly(): experiment_type, _ = emg_models.ExperimentType.objects.get_or_create( pk=2, experiment_type='assembly' ) return experiment_type @pytest.fixture def runs(study, samples, run_status, analysis_status, pipeline, experiment_type): jobs = [] for s in samples: pk = s.sample_id run, created = emg_models.Run.objects.get_or_create( sample=s, study=study, accession='ABC_{:0>3}'.format(pk), secondary_accession='DEF_{:0>3}'.format(pk), status_id=run_status, experiment_type=experiment_type, ) _aj = emg_models.AnalysisJob( sample=s, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_1.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00', ) jobs.append(_aj) return emg_models.AnalysisJob.objects.bulk_create(jobs) @pytest.fixture def run(study, sample, run_status, analysis_status, pipeline, experiment_type): run, _ = emg_models.Run.objects.get_or_create( run_id=1234, accession='ABC01234', sample=sample, study=study, status_id=run_status, experiment_type=experiment_type ) emg_models.AnalysisJob.objects.create( job_id=1234, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_1.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00' ) return run @pytest.fixture def run_v5(study, sample, run_status, analysis_status, pipelines, experiment_type): p5 = pipelines.filter(release_version='5.0').first() run, _ = emg_models.Run.objects.get_or_create( run_id=5555, accession='ABC01234', sample=sample, study=study, status_id=run_status, experiment_type=experiment_type ) emg_models.AnalysisJob.objects.create( job_id=1234, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=p5, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_5.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00' ) return run @pytest.fixture def runjob_pipeline_v1(run, sample, study, experiment_type, analysis_status, pipelines): return emg_models.AnalysisJob.objects.create( # NOQA job_id=12345, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipelines.filter(release_version='1.0').first(), analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_1.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00' ) @pytest.fixture def run_multiple_analysis(study, sample, run_status, analysis_status, experiment_type): pipeline, created = emg_models.Pipeline.objects.get_or_create( pk=1, release_version='1.0', release_date='1970-01-01', ) pipeline4, created4 = emg_models.Pipeline.objects.get_or_create( pk=4, release_version='4.0', release_date='1970-01-01', ) pipeline5, created5 = emg_models.Pipeline.objects.get_or_create( pk=5, release_version='5.0', release_date='2020-01-01', ) run = emg_models.Run.objects.create( run_id=1234, accession='ABC01234', sample=sample, study=study, status_id=run_status, experiment_type=experiment_type ) _anl1 = emg_models.AnalysisJob.objects.create( job_id=1234, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_1.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00', ) _anl4 = emg_models.AnalysisJob.objects.create( job_id=5678, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline4, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_4.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00', ) _anl5 = emg_models.AnalysisJob.objects.create( job_id=466090, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline5, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_5.0/ABC_FASTQ', submit_time='2020-01-01 00:00:00', ) return (_anl1, _anl4, _anl5) @pytest.fixture def run_emptyresults(study, sample, run_status, analysis_status, pipeline, experiment_type): run = emg_models.Run.objects.create( run_id=1234, accession='ABC01234', sample=sample, study=study, status_id=run_status, experiment_type=experiment_type ) return emg_models.AnalysisJob.objects.create( job_id=1234, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline, analysis_status=analysis_status, input_file_name='EMPTY_ABC_FASTQ', result_directory='test_data/version_1.0/EMPTY_ABC_FASTQ', submit_time='1970-01-01 00:00:00', ) @pytest.fixture def run_with_sample(study, sample, run_status, analysis_status, pipeline, experiment_type): run = emg_models.Run.objects.create( run_id=1234, accession='ABC01234', status_id=run_status, sample=sample, study=study, experiment_type=experiment_type, ) return emg_models.AnalysisJob.objects.create( job_id=1234, sample=sample, study=study, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipeline, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_1.0/ABC_FASTQ', submit_time='1970-01-01 00:00:00' ) @pytest.fixture def analysis_results(study, sample, run_status, analysis_status, experiment_type, pipelines): run = emg_models.Run.objects.create( run_id=1234, accession='ABC01234', status_id=run_status, sample=sample, study=study, experiment_type=experiment_type, ) res = dict() for pipe in pipelines: res[pipe.release_version] = emg_models.AnalysisJob.objects.create( job_id=pipe.pk, study=study, sample=sample, run=run, run_status_id=4, experiment_type=experiment_type, pipeline=pipe, analysis_status=analysis_status, input_file_name='ABC_FASTQ', result_directory='test_data/version_{}/ABC_FASTQ'.format(pipe.release_version), submit_time='1970-01-01 00:00:00', ) return res @pytest.fixture def var_names(): data = ( 'collection date', 'geographic location (latitude)', 'geographic location (longitude)', 'ENA checklist', 'host taxid', 'host scientific name' ) variable_names = [] for i, name in enumerate(data): variable_names.append(emg_models.VariableNames(var_id=i, var_name=name)) emg_models.VariableNames.objects.bulk_create(variable_names) @pytest.fixture def analysis_metadata_variable_names(): variable_names = ( ("Submitted nucleotide sequences", "n/a"), ("Nucleotide sequences after format-specific filtering", "n/a"), ("Nucleotide sequences after length filtering", "n/a"), ("Nucleotide sequences after undetermined bases filtering", "n/a"), ("Total InterProScan matches", "n/a"), ("Predicted CDS with InterProScan match", "n/a"), ("Contigs with InterProScan match", "n/a"), ("Predicted CDS", "n/a"), ("Contigs with predicted CDS", "n/a"), ("Nucleotide sequences with predicted CDS", "n/a"), ("Contigs with predicted RNA", "n/a"), ("Nucleotide sequences with predicted rRNA", "n/a"), ("Predicted SSU sequences", "n/a"), ("Predicted LSU sequences", "n/a"), ) _variable_names = list() for v in variable_names: _variable_names.append( emg_models.AnalysisMetadataVariableNames( var_name=v[0], description=v[1] ) ) emg_models.AnalysisMetadataVariableNames.objects.bulk_create(_variable_names) @pytest.fixture def assemblies(study, runs, samples, experiment_type_assembly): assemblies = baker.make(emg_models.Assembly, study=study, experiment_type=experiment_type_assembly, _quantity=10) # one with a fixed ERZ9999 to be used with the legacy mapping assemblies.append(baker.make(emg_models.Assembly, accession="ERZ9999", study=study, experiment_type=experiment_type_assembly, _quantity=10)) return assemblies @pytest.fixture def legacy_mapping(assemblies): fake_accession = "ERZ1111" return baker.make(emg_models.LegacyAssembly, legacy_accession=fake_accession, new_accession="ERZ999")
EBI-Metagenomics/emgapi
tests/test_utils/emg_fixtures.py
Python
apache-2.0
19,046
import dataclasses import datetime import logging import re from dataclasses import dataclass, field from functools import partial from multiprocessing.dummy import Pool as ThreadPool from typing import Dict, List, Set import pkg_resources import yaml # pylint: disable=wrong-import-order import dateutil.parser # pylint: disable=wrong-import-order from taskcat._cfn.template import Template as TCTemplate from taskcat._client_factory import Boto3Cache from taskcat._common_utils import deep_get, neglect_submodule_templates from taskcat._dataclasses import RegionObj from taskcat.exceptions import TaskCatException LOG = logging.getLogger(__name__) REGION_REGEX = re.compile( "((eu|ap|us|af|me|ca|cn|sa)-|(us-gov-))" "(north(east|west)?|south(east|west)?|central|east|west)-[0-9]", re.IGNORECASE, ) class Config: raw_dict: dict = {"global": {"AMIs": {}}} codenames: Set[Dict[str, str]] = set() @classmethod def load(cls, file_name, configtype=None): with open(file_name, "r") as _f: try: cls.raw_dict = yaml.safe_load(_f) except yaml.YAMLError as e: LOG.error(f"[{file_name}] - YAML Syntax Error!") # pylint: disable=raise-missing-from raise AMIUpdaterFatalException(str(e)) try: for _x in cls.raw_dict.get("global").get("AMIs").keys(): cls.codenames.add(_x) except Exception as e: LOG.error( f"{configtype} config file [{file_name}]" f"is not structured properly!" ) LOG.error(f"{e}") # pylint: disable=raise-missing-from raise AMIUpdaterFatalException(str(e)) @classmethod def update_filter(cls, code_name): cls.raw_dict["global"]["AMIs"].update(code_name) @classmethod def get_filter(cls, code_name): _x = deep_get(cls.raw_dict, f"global/AMIs/{code_name}", {}) return { str(k): [str(v)] if isinstance(v, str) else list(v) for k, v in _x.items() } @dataclass class EC2FilterValue: # pylint: disable=invalid-name Name: str Values: List[str] @dataclass class APIResultsData: codename: str ami_id: str creation_date: int region: str custom_comparisons: bool = True def __lt__(self, other): # See Codenames.parse_api_results for notes on why this is here. if self.custom_comparisons: return self.creation_date < other.creation_date return object.__lt__(self, other) def __gt__(self, other): # See Codenames.parse_api_results for notes on why this is here. if self.custom_comparisons: return self.creation_date > other.creation_date return object.__gt__(self, other) @dataclass class RegionalCodename: # pylint: disable=invalid-name region: str cn: str new_ami: str = "" filters: list = field(default_factory=list) _creation_dt: datetime.datetime = field(default_factory=datetime.datetime.now) def __hash__(self): return hash(self.region + self.cn + self.new_ami + str(self.filters)) class Template: def __init__(self, underlying: TCTemplate): self.codenames: Set[Dict[str, str]] = set() self.mapping_path: str = "Mappings/AWSAMIRegionMap" self.metadata_path: str = "Metadata/AWSAMIRegionMap/Filters" self.region_codename_lineno: Dict[str, Dict[str, int]] = {} self.region_names: Set[str] = set() self.underlying: TCTemplate = underlying self._ls = self.underlying.linesplit _template_regions = deep_get(self.underlying.template, self.mapping_path, {}) for region_name, region_data in _template_regions.items(): if region_name == "AMI": continue self.region_names.add(region_name) for codename, cnvalue in region_data.items(): key = f"{codename}/{region_name}" line_no = codename.start_mark.line if cnvalue == "": if '""' in self._ls[line_no]: cnvalue = '""' elif "''" in self._ls[line_no]: cnvalue = "''" self.region_codename_lineno[key] = {"line": line_no, "old": cnvalue} def set_codename_ami(self, cname, region, new_ami): if region not in self.region_names: return False key = f"{cname}/{region}" try: line_no = self.region_codename_lineno[key]["line"] old_ami = self.region_codename_lineno[key]["old"] if old_ami == new_ami: return False except KeyError: return False if old_ami == '""': new_ami = f'"{new_ami}"' new_record = re.sub(old_ami, new_ami, self._ls[line_no]) self._ls[line_no] = new_record return True def write(self): self.underlying.raw_template = "\n".join(self._ls) self.underlying.write() class AMIUpdaterFatalException(TaskCatException): """Raised when AMIUpdater experiences a fatal error""" def __init__(self, message=None): # pylint: disable=super-with-arguments super(AMIUpdaterFatalException, self).__init__(message) self.message = message class AMIUpdaterCommitNeededException(TaskCatException): def __init__(self, message=None): # pylint: disable=super-with-arguments super(AMIUpdaterCommitNeededException, self).__init__(message) self.message = message def _construct_filters(cname: str, config: Config) -> List[EC2FilterValue]: formatted_filters: List[EC2FilterValue] = [] fetched_filters = config.get_filter(cname) formatted_filters = [EC2FilterValue(k, v) for k, v in fetched_filters.items()] if formatted_filters: formatted_filters.append(EC2FilterValue("state", ["available"])) return formatted_filters def build_codenames(tobj: Template, config: Config) -> List[RegionalCodename]: """Builds regional codename objects""" built_cn = [] filters = deep_get(tobj.underlying.template, tobj.metadata_path, {}) mappings = deep_get(tobj.underlying.template, tobj.mapping_path, {}) for cname, cfilters in filters.items(): config.update_filter({cname: cfilters}) for region, cndata in mappings.items(): _missing_filters: Set[str] = set() if region == "AMI": continue if not REGION_REGEX.search(region): LOG.error(f"[{region}] is not a valid region. Please check your template!") raise AMIUpdaterFatalException for cnname in cndata.keys(): _filters = _construct_filters(cnname, config) if not _filters: if cnname not in _missing_filters: _missing_filters.add(cnname) LOG.warning( f"No query parameters were found for: {cnname.upper()}.", "(Results for this codename are not possible.", ) continue region_cn = RegionalCodename(region=region, cn=cnname, filters=_filters) built_cn.append(region_cn) return built_cn def _per_codename_amifetch(region_dict, regional_cn): new_filters = [] for _filter in regional_cn.filters: new_filters.append(dataclasses.asdict(_filter)) _r = region_dict.get(regional_cn.region) image_results = [] if _r: image_results = _r.client("ec2").describe_images(Filters=new_filters)["Images"] return { "region": regional_cn.region, "cn": regional_cn.cn, "api_results": image_results, } def query_codenames( codename_list: Set[RegionalCodename], region_dict: Dict[str, RegionObj] ): """Fetches AMI IDs from AWS""" if len(codename_list) == 0: raise AMIUpdaterFatalException( "No AMI filters were found. Nothing to fetch from the EC2 API." ) for region in list(region_dict.keys()): _ = region_dict[region].client("ec2") pool = ThreadPool(len(region_dict)) _p = partial(_per_codename_amifetch, region_dict) response = pool.map(_p, codename_list) return response def _image_timestamp(raw_ts): return int(dateutil.parser.parse(raw_ts).timestamp()) def reduce_api_results(raw_results): unsorted_results = [] missing_results = [] final_results = [] result_state = {} for query_result in raw_results: if query_result["api_results"]: cn_api_results_data = [ APIResultsData( query_result["cn"], x["ImageId"], _image_timestamp(x["CreationDate"]), query_result["region"], ) for x in query_result["api_results"] ] unsorted_results = cn_api_results_data + unsorted_results else: missing_results.append(query_result) if missing_results: LOG.warning( "No results were available for the following CODENAME / Region combination" ) for missing_result in missing_results: LOG.warning(f"- {missing_result['cn']} in {missing_result['region']}") sorted_results = sorted(unsorted_results, reverse=True) for _r in sorted_results: found_key = f"{_r.region}-{_r.codename}" already_found = result_state.get(found_key, False) if already_found: continue result_state[found_key] = True final_results.append(_r) return final_results class AMIUpdater: upstream_config_file = pkg_resources.resource_filename( "taskcat", "/cfg/amiupdater.cfg.yml" ) upstream_config_file_url = ( "https://raw.githubusercontent.com/aws-quickstart/" "taskcat/master/cfg/amiupdater.cfg.yml" ) def __init__(self, config, user_config_file=None, use_upstream_mappings=True): if use_upstream_mappings: Config.load(self.upstream_config_file, configtype="Upstream") if user_config_file: Config.load(user_config_file, configtype="User") # TODO: Needed? self.config = config self.boto3_cache = Boto3Cache() self.template_list = self._determine_templates() self.regions = self._get_regions() def _get_regions(self): profile = ( self.config.config.general.auth.get("default", "default") if self.config.config.general.auth else "default" ) default_region = self.boto3_cache.get_default_region(profile) regions = [ _r["RegionName"] for _r in self.boto3_cache.client( "ec2", profile, default_region ).describe_regions()["Regions"] ] regions = self.get_regions_for_profile(profile, regions) if self.config.config.general.auth: for region, profile in self.config.config.general.auth.items(): regions.update(self.get_regions_for_profile(profile, [region])) return regions def get_regions_for_profile(self, profile, _regions): regions = {} for _r in _regions: regions[_r] = RegionObj( name=_r, account_id=self.boto3_cache.account_id(profile), partition=self.boto3_cache.partition(profile), profile=profile, _boto3_cache=self.boto3_cache, taskcat_id=self.config.uid, ) return regions def _determine_templates(self): _up = self.config.get_templates() unprocessed_templates = list(_up.values()) finalized_templates = neglect_submodule_templates( project_root=self.config.project_root, template_list=unprocessed_templates ) return finalized_templates def _determine_templates_regions(self): templates = [] for tc_template in self.template_list: _t = Template(underlying=tc_template) templates.append(_t) return templates def update_amis(self): codenames = set() LOG.info("Determining templates and supported regions") templates = self._determine_templates_regions() LOG.info("Determining regional search params for each AMI") # Flush out codenames. for template in templates: template_cn = build_codenames(template, Config) for tcn in template_cn: codenames.add(tcn) # Retrieve API Results. LOG.info("Retrieving results from the EC2 API") results = query_codenames(codenames, self.regions) LOG.info("Determining the latest AMI for each Codename/Region") updated_api_results = reduce_api_results(results) # Figure out a way to sort dictionary by key-value (timestmap) _write_template = False for template in templates: for result in updated_api_results: changed = template.set_codename_ami( result.codename, result.region, result.ami_id ) if changed: _write_template = True if _write_template: template.write() if _write_template: LOG.info("Templates updated") raise AMIUpdaterCommitNeededException LOG.info("No AMI's needed updates.")
aws-quickstart/taskcat
taskcat/_amiupdater.py
Python
apache-2.0
13,507
from __future__ import unicode_literals import json import base64 from moto.core.responses import BaseResponse from .models import acm_backends, AWSError, AWSValidationException class AWSCertificateManagerResponse(BaseResponse): @property def acm_backend(self): """ ACM Backend :return: ACM Backend object :rtype: moto.acm.models.AWSCertificateManagerBackend """ return acm_backends[self.region] @property def request_params(self): try: return json.loads(self.body) except ValueError: return {} def _get_param(self, param, default=None): return self.request_params.get(param, default) def add_tags_to_certificate(self): arn = self._get_param("CertificateArn") tags = self._get_param("Tags") if arn is None: msg = "A required parameter for the specified action is not supplied." return ( json.dumps({"__type": "MissingParameter", "message": msg}), dict(status=400), ) try: self.acm_backend.add_tags_to_certificate(arn, tags) except AWSError as err: return err.response() return "" def delete_certificate(self): arn = self._get_param("CertificateArn") if arn is None: msg = "A required parameter for the specified action is not supplied." return ( json.dumps({"__type": "MissingParameter", "message": msg}), dict(status=400), ) try: self.acm_backend.delete_certificate(arn) except AWSError as err: return err.response() return "" def describe_certificate(self): arn = self._get_param("CertificateArn") if arn is None: msg = "A required parameter for the specified action is not supplied." return ( json.dumps({"__type": "MissingParameter", "message": msg}), dict(status=400), ) try: cert_bundle = self.acm_backend.get_certificate(arn) except AWSError as err: return err.response() return json.dumps(cert_bundle.describe()) def get_certificate(self): arn = self._get_param("CertificateArn") if arn is None: msg = "A required parameter for the specified action is not supplied." return ( json.dumps({"__type": "MissingParameter", "message": msg}), dict(status=400), ) try: cert_bundle = self.acm_backend.get_certificate(arn) except AWSError as err: return err.response() result = { "Certificate": cert_bundle.cert.decode(), "CertificateChain": cert_bundle.chain.decode(), } return json.dumps(result) def import_certificate(self): """ Returns errors on: Certificate, PrivateKey or Chain not being properly formatted Arn not existing if its provided PrivateKey size > 2048 Certificate expired or is not yet in effect Does not return errors on: Checking Certificate is legit, or a selfsigned chain is provided :return: str(JSON) for response """ certificate = self._get_param("Certificate") private_key = self._get_param("PrivateKey") chain = self._get_param("CertificateChain") # Optional current_arn = self._get_param("CertificateArn") # Optional tags = self._get_param("Tags") # Optional # Simple parameter decoding. Rather do it here as its a data transport decision not part of the # actual data try: certificate = base64.standard_b64decode(certificate) except Exception: return AWSValidationException( "The certificate is not PEM-encoded or is not valid." ).response() try: private_key = base64.standard_b64decode(private_key) except Exception: return AWSValidationException( "The private key is not PEM-encoded or is not valid." ).response() if chain is not None: try: chain = base64.standard_b64decode(chain) except Exception: return AWSValidationException( "The certificate chain is not PEM-encoded or is not valid." ).response() try: arn = self.acm_backend.import_cert( certificate, private_key, chain=chain, arn=current_arn, tags=tags ) except AWSError as err: return err.response() return json.dumps({"CertificateArn": arn}) def list_certificates(self): certs = [] statuses = self._get_param("CertificateStatuses") for cert_bundle in self.acm_backend.get_certificates_list(statuses): certs.append( { "CertificateArn": cert_bundle.arn, "DomainName": cert_bundle.common_name, } ) result = {"CertificateSummaryList": certs} return json.dumps(result) def list_tags_for_certificate(self): arn = self._get_param("CertificateArn") if arn is None: msg = "A required parameter for the specified action is not supplied." return {"__type": "MissingParameter", "message": msg}, dict(status=400) try: cert_bundle = self.acm_backend.get_certificate(arn) except AWSError as err: return err.response() result = {"Tags": []} # Tag "objects" can not contain the Value part for key, value in cert_bundle.tags.items(): tag_dict = {"Key": key} if value is not None: tag_dict["Value"] = value result["Tags"].append(tag_dict) return json.dumps(result) def remove_tags_from_certificate(self): arn = self._get_param("CertificateArn") tags = self._get_param("Tags") if arn is None: msg = "A required parameter for the specified action is not supplied." return ( json.dumps({"__type": "MissingParameter", "message": msg}), dict(status=400), ) try: self.acm_backend.remove_tags_from_certificate(arn, tags) except AWSError as err: return err.response() return "" def request_certificate(self): domain_name = self._get_param("DomainName") domain_validation_options = self._get_param( "DomainValidationOptions" ) # is ignored atm idempotency_token = self._get_param("IdempotencyToken") subject_alt_names = self._get_param("SubjectAlternativeNames") tags = self._get_param("Tags") # Optional if subject_alt_names is not None and len(subject_alt_names) > 10: # There is initial AWS limit of 10 msg = ( "An ACM limit has been exceeded. Need to request SAN limit to be raised" ) return ( json.dumps({"__type": "LimitExceededException", "message": msg}), dict(status=400), ) try: arn = self.acm_backend.request_certificate( domain_name, domain_validation_options, idempotency_token, subject_alt_names, tags, ) except AWSError as err: return err.response() return json.dumps({"CertificateArn": arn}) def resend_validation_email(self): arn = self._get_param("CertificateArn") domain = self._get_param("Domain") # ValidationDomain not used yet. # Contains domain which is equal to or a subset of Domain # that AWS will send validation emails to # https://docs.aws.amazon.com/acm/latest/APIReference/API_ResendValidationEmail.html # validation_domain = self._get_param('ValidationDomain') if arn is None: msg = "A required parameter for the specified action is not supplied." return ( json.dumps({"__type": "MissingParameter", "message": msg}), dict(status=400), ) try: cert_bundle = self.acm_backend.get_certificate(arn) if cert_bundle.common_name != domain: msg = "Parameter Domain does not match certificate domain" _type = "InvalidDomainValidationOptionsException" return json.dumps({"__type": _type, "message": msg}), dict(status=400) except AWSError as err: return err.response() return ""
william-richard/moto
moto/acm/responses.py
Python
apache-2.0
8,891
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import subprocess from collections import OrderedDict, namedtuple from pants.base.workunit import WorkUnit, WorkUnitLabel from pants.binaries.binary_util import BinaryUtil from pants.fs.archive import TGZ from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import temporary_dir from pants.util.memo import memoized_property class GoDistribution(object): """Represents a self-bootstrapping Go distribution.""" class Factory(Subsystem): options_scope = 'go-distribution' @classmethod def subsystem_dependencies(cls): return (BinaryUtil.Factory,) @classmethod def register_options(cls, register): register('--supportdir', advanced=True, default='bin/go', help='Find the go distributions under this dir. Used as part of the path to lookup ' 'the distribution with --binary-util-baseurls and --pants-bootstrapdir') register('--version', advanced=True, default='1.5.2', help='Go distribution version. Used as part of the path to lookup the distribution ' 'with --binary-util-baseurls and --pants-bootstrapdir') def create(self): # NB: create is an instance method to allow the user to choose global or scoped. # It's not unreasonable to imagine multiple go versions in play; for example: when # transitioning from the 1.x series to the 2.x series. binary_util = BinaryUtil.Factory.create() options = self.get_options() return GoDistribution(binary_util, options.supportdir, options.version) def __init__(self, binary_util, relpath, version): self._binary_util = binary_util self._relpath = relpath self._version = version @property def version(self): """Returns the version of the Go distribution. :returns: The Go distribution version number string. :rtype: string """ return self._version @memoized_property def goroot(self): """Returns the $GOROOT for this go distribution. :returns: The Go distribution $GOROOT. :rtype: string """ go_distribution = self._binary_util.select_binary(self._relpath, self.version, 'go.tar.gz') distribution_workdir = os.path.dirname(go_distribution) outdir = os.path.join(distribution_workdir, 'unpacked') if not os.path.exists(outdir): with temporary_dir(root_dir=distribution_workdir) as tmp_dist: TGZ.extract(go_distribution, tmp_dist) os.rename(tmp_dist, outdir) return os.path.join(outdir, 'go') class GoCommand(namedtuple('GoCommand', ['cmdline', 'env'])): """Encapsulates a go command that can be executed.""" @classmethod def _create(cls, goroot, cmd, gopath=None, args=None): # Forcibly nullify the GOPATH if the command does not need one - this can prevent bad user # GOPATHs from erroring out commands; see: https://github.com/pantsbuild/pants/issues/2321. env = OrderedDict(GOROOT=goroot, GOPATH=gopath or '') return cls([os.path.join(goroot, 'bin', 'go'), cmd] + (args or []), env=env) def spawn(self, env=None, **kwargs): """ :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param **kwargs: Keyword arguments to pass through to `subprocess.Popen`. :returns: A handle to the spawned go command subprocess. :rtype: :class:`subprocess.Popen` """ env = (env or os.environ).copy() env.update(self.env) return subprocess.Popen(self.cmdline, env=env, **kwargs) def check_output(self, env=None, **kwargs): """Returns the output of the executed Go command. :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param **kwargs: Keyword arguments to pass through to `subprocess.check_output`. :return str: Output of Go command. :raises subprocess.CalledProcessError: Raises if Go command fails. """ env = (env or os.environ).copy() env.update(self.env) return subprocess.check_output(self.cmdline, env=env, **kwargs) def __str__(self): return (' '.join('{}={}'.format(k, v) for k, v in self.env.items()) + ' ' + ' '.join(self.cmdline)) def create_go_cmd(self, cmd, gopath=None, args=None): """Creates a Go command that is optionally targeted to a Go workspace. :param string cmd: Go command to execute, e.g. 'test' for `go test` :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run the command. :param list args: A list of arguments and flags to pass to the Go command. :returns: A go command that can be executed later. :rtype: :class:`GoDistribution.GoCommand` """ return self.GoCommand._create(self.goroot, cmd, gopath=gopath, args=args) def execute_go_cmd(self, cmd, gopath=None, args=None, env=None, workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs): """Runs a Go command that is optionally targeted to a Go workspace. If a `workunit_factory` is supplied the command will run in a work unit context. :param string cmd: Go command to execute, e.g. 'test' for `go test` :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run the command. :param list args: An optional list of arguments and flags to pass to the Go command. :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param workunit_factory: An optional callable that can produce a `WorkUnit` context :param string workunit_name: An optional name for the work unit; defaults to the `cmd` :param list workunit_labels: An optional sequence of labels for the work unit. :param **kwargs: Keyword arguments to pass through to `subprocess.Popen`. :returns: A tuple of the exit code and the go command that was run. :rtype: (int, :class:`GoDistribution.GoCommand`) """ go_cmd = self.GoCommand._create(self.goroot, cmd, gopath=gopath, args=args) if workunit_factory is None: return go_cmd.spawn(**kwargs).wait() else: name = workunit_name or cmd labels = [WorkUnitLabel.TOOL] + (workunit_labels or []) with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit: process = go_cmd.spawn(env=env, stdout=workunit.output('stdout'), stderr=workunit.output('stderr'), **kwargs) returncode = process.wait() workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE) return returncode, go_cmd
jtrobec/pants
contrib/go/src/python/pants/contrib/go/subsystems/go_distribution.py
Python
apache-2.0
7,190
# Copyright 2019,2020,2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def greater_backward(inputs): """ Args: inputs (list of nn.Variable): Incomming grads/inputs to/of the forward function. kwargs (dict of arguments): Dictionary of the corresponding function arguments. Return: list of Variable: Return the gradients wrt inputs of the corresponding function. """ return [None] * len(inputs)
sony/nnabla
python/src/nnabla/backward_function/greater.py
Python
apache-2.0
995
import sys from typing import Any, Text, NoReturn import rasa.shared.utils.io def print_color(*args: Any, color: Text) -> None: output = rasa.shared.utils.io.wrap_with_color(*args, color=color) try: # colorama is used to fix a regression where colors can not be printed on # windows. https://github.com/RasaHQ/rasa/issues/7053 from colorama import AnsiToWin32 stream = AnsiToWin32(sys.stdout).stream print(output, file=stream) except ImportError: print(output) def print_success(*args: Any) -> None: print_color(*args, color=rasa.shared.utils.io.bcolors.OKGREEN) def print_info(*args: Any) -> None: print_color(*args, color=rasa.shared.utils.io.bcolors.OKBLUE) def print_warning(*args: Any) -> None: print_color(*args, color=rasa.shared.utils.io.bcolors.WARNING) def print_error(*args: Any) -> None: print_color(*args, color=rasa.shared.utils.io.bcolors.FAIL) def print_error_and_exit(message: Text, exit_code: int = 1) -> NoReturn: """Print error message and exit the application.""" print_error(message) sys.exit(exit_code)
RasaHQ/rasa_nlu
rasa/shared/utils/cli.py
Python
apache-2.0
1,129
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from lib.exceptions import BossManageError from lib.ssh import SSHConnection, SSHTarget from lib import utils from lib import constants as const from lib import zip from lib import console import botocore import configparser import yaml import glob import os import tempfile import subprocess import shlex import shutil import pwd import pathlib # Location of settings files for ndingest. NDINGEST_SETTINGS_FOLDER = const.repo_path('salt_stack', 'salt', 'ndingest', 'files', 'ndingest.git', 'settings') # Template used for ndingest settings.ini generation. NDINGEST_SETTINGS_TEMPLATE = NDINGEST_SETTINGS_FOLDER + '/settings.ini.apl' def load_lambda_config(lambda_dir): """Load the lambda.yml config file Args: lambda_dir (str): Name of the directory under cloud_formation/lambda/ that contains the lambda.yml file to load Returns: dict: Dictionary of configuration file data """ lambda_config = const.repo_path('cloud_formation', 'lambda', lambda_dir, 'lambda.yml') with open(lambda_config, 'r') as fh: return yaml.full_load(fh.read()) def lambda_dirs(bosslet_config): """Create a mapping of lambda name to lambda directory Note: The lambda directory is the directory in cloud_formation/lambdas/ that contains the lambda.yml for building the lambda's code zip Args: bosslet_config: Bosslet configuration object Returns: dict: Mapping of lambda name to lambda directory """ n = bosslet_config.names # DP NOTE: Values must be the name of a directory under cloud_formation/lambdas/ return { n.multi_lambda.lambda_: 'multi_lambda', n.downsample_volume.lambda_: 'multi_lambda', n.delete_tile_objs.lambda_: 'multi_lambda', n.delete_tile_index_entry.lambda_: 'multi_lambda', n.index_s3_writer.lambda_: 'multi_lambda', n.index_fanout_id_writer.lambda_: 'multi_lambda', n.index_write_id.lambda_: 'multi_lambda', n.index_write_failed.lambda_: 'multi_lambda', n.index_find_cuboids.lambda_: 'multi_lambda', n.index_split_cuboids.lambda_: 'multi_lambda', n.index_fanout_enqueue_cuboid_keys.lambda_: 'multi_lambda', n.index_batch_enqueue_cuboids.lambda_: 'multi_lambda', n.index_fanout_dequeue_cuboid_keys.lambda_: 'multi_lambda', n.index_dequeue_cuboid_keys.lambda_: 'multi_lambda', n.index_get_num_cuboid_keys_msgs.lambda_: 'multi_lambda', n.index_check_for_throttling.lambda_: 'multi_lambda', n.index_invoke_index_supervisor.lambda_: 'multi_lambda', n.index_load_ids_from_s3.lambda_: 'multi_lambda', n.start_sfn.lambda_: 'multi_lambda', n.copy_cuboid_lambda.lambda_: 'multi_lambda', n.cuboid_import_lambda.lambda_: 'multi_lambda', n.volumetric_ingest_queue_upload_lambda.lambda_: 'multi_lambda', n.tile_uploaded.lambda_: 'multi_lambda', n.tile_ingest.lambda_: 'multi_lambda', n.delete_tile_index_entry.lambda_: 'multi_lambda', n.index_s3_writer.lambda_: 'multi_lambda', n.index_fanout_id_writer.lambda_: 'multi_lambda', n.index_write_id.lambda_: 'multi_lambda', n.index_write_failed.lambda_: 'multi_lambda', n.index_find_cuboids.lambda_: 'multi_lambda', n.index_fanout_enqueue_cuboid_keys.lambda_: 'multi_lambda', n.index_batch_enqueue_cuboids.lambda_: 'multi_lambda', n.start_sfn.lambda_: 'multi_lambda', n.index_fanout_dequeue_cuboid_keys.lambda_: 'multi_lambda', n.index_dequeue_cuboid_keys.lambda_: 'multi_lambda', n.index_get_num_cuboid_keys_msgs.lambda_: 'multi_lambda', n.index_check_for_throttling.lambda_: 'multi_lambda', n.index_invoke_index_supervisor.lambda_: 'multi_lambda', n.index_split_cuboids.lambda_: 'multi_lambda', n.index_load_ids_from_s3.lambda_: 'multi_lambda', n.downsample_volume.lambda_: 'multi_lambda', n.copy_cuboid_lambda.lambda_: 'multi_lambda', n.dynamo_lambda.lambda_: 'dynamodb-lambda-autoscale' } def code_zip(bosslet_config, lambda_config): """Get the name of the lambda code zip file DP NOTE: Must match what salt_stack/salt/lambda-dev/files/build_lambda.py does when uploading the results to S3 Args: bosslet_config: Bosslet configuration object lambda_config (dict): Lambda configuration data Returns: str: Name of the lambda code zip file """ return lambda_config['name'] + '.' + bosslet_config.INTERNAL_DOMAIN + '.zip' def get_layer_arns(bosslet_config, layer_dirs): """Lookup the latest version ARNs for the given layers DP NOTE: Must match what salt_stack/salt/lambda-dev/files/build_lambda.py does when creating a Lambda Layer Args: bosslet_config: Bosslet configuration object layer_dirs (list[str]): List of layer_dir names Returns: list[str]: List of Lambda Layer Version ARNs """ client = bosslet_config.session.client('lambda') layers = [] for layer_dir in layer_dirs: layer_config = load_lambda_config(layer_dir) layer_name = (layer_config['name'] + '.' + bosslet_config.INTERNAL_DOMAIN).replace('.', '-') resp = client.list_layer_versions(LayerName=layer_name) arn = resp['LayerVersions'][0]['LayerVersionArn'] layers.append(arn) return layers def s3_config(bosslet_config, lambda_name, lambda_handler): """Look up the configuration information needed by CloudFormationTemplate Used by lib.cloudformation.CloudFormationTemplate.add_lambda if only a lambda handler is defined Args: bosslet_config: Bosslet configuration object lambda_name (str): Full name of the lambda lambda_handler (str): Name of the lambda handler Returns: tuple[tuple[str, str, str], str, optional[list[str]]]: Tuple of arguments for CloudFormationTemplate.add_lambda kwargs s3, runtime, and layers """ lambda_dir = lambda_dirs(bosslet_config)[lambda_name] config = load_lambda_config(lambda_dir) layers = None if config.get('layers'): layers = get_layer_arns(bosslet_config, config['layers']) return ((bosslet_config.LAMBDA_BUCKET, code_zip(bosslet_config, config), lambda_handler), config['runtime'], layers) def update_lambda_code(bosslet_config): """Update all lambdas that use the multilambda zip file. Args: bosslet_config: Bosslet configuration object """ names = bosslet_config.names uses_multilambda = [k for k, v in lambda_dirs(bosslet_config).items() if v == 'multi_lambda'] config = load_lambda_config('multi_lambda') client = bosslet_config.session.client('lambda') for lambda_name in uses_multilambda: try: resp = client.update_function_code( FunctionName=lambda_name, S3Bucket=bosslet_config.LAMBDA_BUCKET, S3Key=code_zip(bosslet_config, config), Publish=True) print(resp) except botocore.exceptions.ClientError as ex: print('Error updating {}: {}'.format(lambda_name, ex)) BUILT_ZIPS = [] def load_lambdas_on_s3(bosslet_config, lambda_name = None, lambda_dir = None): """Package up the lambda files and send them through the lambda build process where the lambda code zip is produced and uploaded to S3 NOTE: This function is also used to build lambda layer code zips, the only requirement for a layer is that the files in the resulting zip should be in the correct subdirectory (`python/` for Python libraries) so that when a lambda uses the layer the libraries included in the layer can be correctly loaded NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to 'multi_lambda' for backwards compatibility Args: bosslet_config (BossConfiguration): Configuration object of the stack the lambda will be deployed into lambda_name (str): Name of the lambda, which will be mapped to the name of the lambda directory that contains the lambda's code lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that contains the `lambda.yml` configuration file for the lambda Raises: BossManageError: If there was a problem with building the lambda code zip or uploading it to the given S3 bucket """ # For backwards compatibility build the multi_lambda code zip if lambda_name is None and lambda_dir is None: lambda_dir = 'multi_lambda' # Map from lambda_name to lambda_dir if needed if lambda_dir is None: try: lambda_dir = lambda_dirs(bosslet_config)[lambda_name] except KeyError: console.error("Cannot build a lambda that doesn't use a code zip file") return None # To prevent rubuilding a lambda code zip multiple times during an individual execution memorize what has been built if lambda_dir in BUILT_ZIPS: console.debug('Lambda code {} has already be build recently, skipping...'.format(lambda_dir)) return BUILT_ZIPS.append(lambda_dir) lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir)) lambda_config = lambda_dir / 'lambda.yml' with lambda_config.open() as fh: lambda_config = yaml.full_load(fh.read()) if lambda_config.get('layers'): for layer in lambda_config['layers']: # Layer names should end with `layer` if not layer.endswith('layer'): console.warning("Layer '{}' doesn't conform to naming conventions".format(layer)) load_lambdas_on_s3(bosslet_config, lambda_dir=layer) console.debug("Building {} lambda code zip".format(lambda_dir)) domain = bosslet_config.INTERNAL_DOMAIN tempname = tempfile.NamedTemporaryFile(delete=True) zipname = pathlib.Path(tempname.name + '.zip') tempname.close() console.debug('Using temp zip file: {}'.format(zipname)) cwd = os.getcwd() # Copy the lambda files into the zip for filename in lambda_dir.glob('*'): zip.write_to_zip(str(filename), zipname, arcname=filename.name) # Copy the other files that should be included if lambda_config.get('include'): for src in lambda_config['include']: dst = lambda_config['include'][src] src_path, src_file = src.rsplit('/', 1) os.chdir(const.repo_path(src_path)) # Generate dynamic configuration files, as needed if src_file == 'ndingest.git': with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl: # Generate settings.ini file for ndingest. create_ndingest_settings(bosslet_config, tmpl) zip.write_to_zip(src_file, zipname, arcname=dst) os.chdir(cwd) # Currently any Docker CLI compatible container setup can be used (like podman) CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}' BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}' BUILD_ARGS = { 'DOMAIN': domain, 'BUCKET': bosslet_config.LAMBDA_BUCKET, } # DP NOTE: not sure if this should be in the bosslet_config, as it is more about the local dev # environment instead of the stack's environment. Different maintainer may have different # container commands installed. container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER') lambda_build_server = bosslet_config.LAMBDA_SERVER if lambda_build_server is None: staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging')) if not staging_target.exists(): staging_target.mkdir() console.debug("Copying build zip to {}".format(staging_target)) staging_zip = staging_target / (domain + '.zip') try: zipname.rename(staging_zip) except OSError: # rename only works within the same filesystem # Using the shell version, as using copy + chmod doesn't always work depending on the filesystem utils.run('mv {} {}'.format(zipname, staging_zip), shell=True) # Provide the AWS Region and Credentials (for S3 upload) via environmental variables env_extras = { 'AWS_REGION': bosslet_config.REGION, 'AWS_DEFAULT_REGION': bosslet_config.REGION } if container_executable is None: BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files') CMD = BUILD_CMD.format(**BUILD_ARGS) if bosslet_config.PROFILE is not None: env_extras['AWS_PROFILE'] = bosslet_config.PROFILE console.info("calling build lambda on localhost") else: BUILD_ARGS['PREFIX'] = '/var/task' CMD = BUILD_CMD.format(**BUILD_ARGS) CMD = CONTAINER_CMD.format(EXECUTABLE = container_executable, HOST_DIR = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'), RUNTIME = lambda_config['runtime'], CMD = CMD) if bosslet_config.PROFILE is not None: # Cannot set the profile as the container will not have the credentials file # So extract the underlying keys and provide those instead creds = bosslet_config.session.get_credentials() env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key console.info("calling build lambda in {}".format(container_executable)) try: utils.run(CMD, env_extras=env_extras) except Exception as ex: raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex)) finally: os.remove(staging_zip) else: BUILD_ARGS['PREFIX'] = '~' CMD = BUILD_CMD.format(**BUILD_ARGS) lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key) ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user') bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else [] ssh = SSHConnection(ssh_target, bastions) console.debug("Copying build zip to lambda-build-server") target_file = '~/staging/{}.zip'.format(domain) ret = ssh.scp(zipname, target_file, upload=True) console.debug("scp return code: " + str(ret)) os.remove(zipname) console.info("calling build lambda on lambda-build-server") ret = ssh.cmd(CMD) if ret != 0: raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret)) def create_ndingest_settings(bosslet_config, fp): """Create the settings.ini file for ndingest. The file is placed in ndingest's settings folder. Args: domain (str): The VPC's domain name such as integration.boss. fp (file-like object): File like object to read settings.ini template from. """ names = bosslet_config.names parser = configparser.ConfigParser() parser.read_file(fp) parser['boss']['domain'] = bosslet_config.INTERNAL_DOMAIN parser['aws']['region'] = bosslet_config.REGION parser['aws']['tile_bucket'] = names.tile_bucket.s3 parser['aws']['cuboid_bucket'] = names.cuboid_bucket.s3 parser['aws']['tile_index_table'] = names.tile_index.ddb parser['aws']['cuboid_index_table'] = names.s3_index.ddb parser['aws']['max_task_id_suffix'] = str(const.MAX_TASK_ID_SUFFIX) # parser['spdb']['SUPER_CUBOID_SIZE'] = CUBOIDSIZE[0] # ToDo: find way to always get cuboid size from spdb. parser['spdb']['SUPER_CUBOID_SIZE'] = '512, 512, 16' with open(NDINGEST_SETTINGS_FOLDER + '/settings.ini', 'w') as out: parser.write(out) def freshen_lambda(bosslet_config, lambda_name): """ Tell a lambda to reload its code from S3. Useful when developing and small changes need to be made to a lambda function, but a full rebuild of the entire zip file isn't required. """ lambda_dir = lambda_dirs(bosslet_config)[lambda_name] lambda_config = load_lambda_config(lambda_dir) zip_name = code_zip(bosslet_config, lambda_config) client = bosslet_config.session.client('lambda') resp = client.update_function_code( FunctionName=lambda_name, S3Bucket=bosslet_config.LAMBDA_BUCKET, S3Key=zip_name, Publish=True) console.info("Updated {} function code".format(lambda_name)) if lambda_config.get('layers'): layer_arns = get_layer_arns(bosslet_config, lambda_config['layers']) resp = client.update_function_configuration(FunctionName=full_name, Layers=layer_arns) console.info("Updated {} layer references".format(lambda_name)) def download_lambda_zip(bosslet_config, lambda_name, path): """ Download the existing multilambda.domain.zip from the S3 bucket. Useful when developing and small changes need to be made to a lambda function, but a full rebuild of the entire zip file isn't required. """ lambda_dir = lambda_dirs(bosslet_config)[lambda_name] lambda_config = load_lambda_config(lambda_dir) s3 = bosslet_config.session.client('s3') def download(zip_name): full_path = os.path.join(path, zip_name) resp = s3.get_object(Bucket=bosslet_config.LAMBDA_BUCKET, Key=zip_name) bytes = resp['Body'].read() with open(full_path , 'wb') as out: out.write(bytes) print('Saved zip to {}'.format(full_path)) download(code_zip(bosslet_config, lambda_config)) if lambda_config.get('layers'): for layer in lambda_config['layers']: layer_config = load_lambda_config(layer) download(code_zip(bosslet_config, layer_config)) def upload_lambda_zip(bosslet_config, path): """ Upload a multilambda.domain.zip to the S3 bucket. Useful when developing and small changes need to be made to a lambda function, but a full rebuild of the entire zip file isn't required. """ s3 = bosslet_config.session.client('s3') with open(path, 'rb') as in_file: resp = s3.put_object(Bucket=bosslet_config.LAMBDA_BUCKET, Key=os.path.basename(path), Body=in_file) print(resp)
jhuapl-boss/boss-manage
lib/lambdas.py
Python
apache-2.0
19,677
import time from django.http import * import os, logging import sys from vt_manager_kvm.models import * from vt_manager_kvm.models.Action import Action as ActionModel from vt_manager_kvm.controller import * from vt_manager_kvm.controller.actions.ActionController import ActionController from vt_manager_kvm.controller.dispatchers.xmlrpc.DispatcherLauncher import DispatcherLauncher from vt_manager_kvm.controller.dispatchers.xmlrpc.InformationDispatcher import InformationDispatcher from vt_manager_kvm.communication.utils.XmlHelper import * from vt_manager_kvm.utils.ServiceThread import * from vt_manager_kvm.common.rpc4django import rpcmethod from vt_manager_kvm.common.rpc4django import * from threading import Thread #TODO: Provisional import to make test with VTPlanner. Use SFA API whe stable #from vt_manager_kvm.communication.sfa.managers.AggregateManager import AggregateManager # Used in ListResources for external monitoring (e.g. FELIX monitoring) from vt_manager_kvm.communication.geni.v3.configurators.handlerconfigurator import HandlerConfigurator #XXX: Sync Thread for VTPlanner from vt_manager_kvm.utils.ServiceProcess import ServiceProcess from django.conf import settings from vt_manager_kvm.controller.drivers.VTDriver import VTDriver from threading import Timer from vt_manager_kvm.controller.actions.ActionController import ActionController @rpcmethod(url_name="plugin") def send(callBackUrl, xml): try: print "Recieved RSpec", xml logging.debug("XML RECEIVED: \n%s" % xml) rspec = XmlHelper.parseXmlString(xml) except Exception as e: logging.error("send() could not parse\n") logging.error(e) return ServiceThread.startMethodInNewThread(DispatcherLauncher.processXmlQuery ,rspec, url = callBackUrl) return @rpcmethod(url_name="plugin") def send_sync(callBackUrl=None, xml=None): callBackUrl = "https://" + settings.XMLRPC_USER + ":" + settings.XMLRPC_PASS + "@" + settings.VTAM_IP + ":" + settings.VTAM_PORT + "/xmlrpc/plugin" try: logging.debug("XML RECEIVED: \n%s" % xml) rspec = XmlHelper.parseXmlString(xml) except Exception as e: logging.error("sendSync() could not parse \n") logging.error(e) return #SyncThread.startMethodAndJoin(DispatcherLauncher.processXmlQuerySync, rspec, url = callBackUrl) #ServiceThread.startMethodInNewThread(DispatcherLauncher.processXmlQuery ,rspec, url = callBackUrl) exception = False ErrorMsg = "" count = 0 timeout = 10*60 #timeout set at 10 minutes try: ServiceProcess.startMethodInNewProcess(DispatcherLauncher.processXmlQuerySync ,[rspec, callBackUrl], None) actionModel = None except Exception as e: ErrorMsg = e exception = True while True and not exception: time.sleep(5) try: actionModel = ActionModel.objects.get(uuid=rspec.query.provisioning.action[0].id) print actionModel.getStatus() if actionModel.getStatus() == "SUCCESS": return True elif actionModel.getStatus() in ["FAILED", "UNKNOWN"]: return "The creation of the VM has FAILED" except Exception as e: return "An error has ocurred during the VM creation ", e if count < timeout: count += 5 else: return "TIMEOUT" if exception: return ErrorMsg @rpcmethod(url_name="plugin") def ping(challenge): return challenge @rpcmethod(url_name="plugin") def list_vm_templates(server_uuid): # callback_url = "https://" + settings.XMLRPC_USER + ":" + settings.XMLRPC_PASS + "@" + settings.VTAM_IP + ":" + settings.VTAM_PORT + "/xmlrpc/plugin" # try: # ServiceProcess.startMethodInNewProcess(DispatcherLauncher.processTemplateList, [server_uuid, callback_url], None) # except Exception as e: # logging.error("Could not retrieve VM templates info: " + e) v,s = getattr(DispatcherLauncher,"processVMTemplatesInfo")(server_uuid) return v,s @rpcmethod(url_name="plugin") def listResources(remoteHashValue, projectUUID = 'None', sliceUUID ='None'): v,s = getattr (DispatcherLauncher,"processInformation")(remoteHashValue, projectUUID, sliceUUID) return v,s @rpcmethod(url_name="plugin") def ListResources(): rspec_manager = HandlerConfigurator.get_vt_am_rspec_manager() driver = HandlerConfigurator.get_vt_am_driver() resources_data = driver.get_all_servers() return rspec_manager.compose_advertisement(resources_data) @rpcmethod(url_name="plugin") def ListResourcesAndNodes(slice_urn='None'): am = AggregateManager() options = dict() if not slice_urn == 'None': options = {"geni_slice_urn":slice_urn} print '-----------OPTIONS',options return AggregateManager().ListResources(options) @rpcmethod(url_name="plugin") def force_update_exp_vms(client_id='None', vm_id='None'): if client_id != "None": client_id = VTServer.objects.get(uuid = client_id).id if vm_id != "None": vm_id = VirtualMachine.objects.get(uuid= vm_id).id return InformationDispatcher.forceListActiveVMs(client_id, vm_id) from threading import Timer from vt_manager_kvm.controller.actions.ActionController import ActionController def check(rspec): actionModel = ActionController.ActionToModel(rspec.query.provisioning.action[0],"provisioning") if actionModel.getAction(rspec.query.provisioning.action[0]).status == actionModel.SUCCESS_STATUS or actionModel.getAction(rspec.query.provisioning.action[0]).status == actionModel.FAILED_STATUS: return True @rpcmethod(url_name="plugin") def get_ocf_am_version(): # sv = open('../../../../../.currentVersion','r') import os sv = open(os.path.join(settings.SRC_DIR, "..", ".currentVersion"),"r") software_version = sv.read().strip() sv.close() return software_version @rpcmethod(url_name="plugin") def get_am_info(args=None): # INFO: add as many keys as you wish info = dict() info["version"] = get_ocf_am_version() return info def timeout_handler(signum, frame): raise Exception("TIMEOUT")
ict-felix/stack
vt_manager_kvm/src/python/vt_manager_kvm/communication/northCommInterface.py
Python
apache-2.0
6,020
import json import time import boto3 ddb = boto3.client('dynamodb') class CacheController: def __init__(self, CacheTime): self.CacheTime = CacheTime self.last_update_time = 0.0 self.value = None def is_expired(self): return (time.time() > (self.last_update_time + self.CacheTime)) def update_value(self, value): print "Updating cache from DynamoDB" self.value = value self.last_update_time = time.time() return self.value def get_value(self): return self.value cache = CacheController(5.0) # List all non-default channels (the client knows what the default channel is # called. def handler(event, context): try: print "X-Forwarded-For %s %s" % (repr(time.time()), event['InputParams']['header']['X-Forwarded-For'].replace(',','')) except: pass if 'MessagesTable' not in event or 'ParticipantsTable' not in event: return [] # If it's time to get a new value. if cache.is_expired(): result = ddb.query(TableName=event['MessagesTable'], KeyConditionExpression='contextkey = :k', ExpressionAttributeValues={':k': {'S': 'meta:channels'}}) cache.update_value(result) else: result = cache.get_value() try: return {'channels': [i['channel_name']['S'] for i in result['Items'] if 'channel_name' in i ]} except: return {'channels': []}
Riebart/slot-c
Lambda/GetChannels.py
Python
apache-2.0
1,588
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-06-07 14:57 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('smartshark', '0004_auto_20160603_1549'), ] operations = [ migrations.CreateModel( name='Argument', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('required', models.BooleanField()), ('position', models.IntegerField()), ('type', models.CharField(choices=[('install', 'Installation Argument'), ('execute', 'Execution Argument')], max_length=7)), ], ), migrations.CreateModel( name='Plugin', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('author', models.CharField(max_length=200)), ('version', models.CharField(max_length=50)), ('abstraction_level', models.CharField(choices=[('rev', 'Revision'), ('repo', 'Repository'), ('other', 'Other')], max_length=5)), ('definition', models.FileField(upload_to='uploads/')), ('schema', models.FileField(upload_to='uploads/')), ('active', models.BooleanField()), ('installed', models.BooleanField()), ('arguments', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='smartshark.Argument')), ('requires', models.ManyToManyField(related_name='_plugin_requires_+', to='smartshark.Plugin')), ], ), migrations.CreateModel( name='PluginExecution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('added_at', models.DateTimeField(auto_now_add=True)), ('submission_value', models.CharField(blank=True, max_length=300)), ('status', models.CharField(choices=[('queue', 'In Queue'), ('running', 'Running'), ('finished', 'Finished'), ('error', 'Error')], max_length=8)), ('plugin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='smartshark.Plugin')), ], ), migrations.AlterField( model_name='smartsharkuser', name='roles', field=models.ManyToManyField(blank=True, to='smartshark.MongoRole'), ), ]
smartshark/serverSHARK
smartshark/migrations/0005_auto_20160607_1657.py
Python
apache-2.0
2,747
# Copyright (C) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Policy based configuration of libvirt objects This module provides helper APIs for populating the config.py classes based on common operational needs / policies """ def set_vif_guest_frontend_config(conf, mac, model, driver): """Populate a LibvirtConfigGuestInterface instance with guest frontend details. """ conf.mac_addr = mac if model is not None: conf.model = model if driver is not None: conf.driver_name = driver def set_vif_host_backend_bridge_config(conf, brname, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for a software bridge. """ conf.net_type = "bridge" conf.source_dev = brname if tapname: conf.target_dev = tapname conf.script = "" def set_vif_host_backend_ethernet_config(conf, tapname): """Populate a LibvirtConfigGuestInterface instance with host backend details for an externally configured host device. NB use of this configuration is discouraged by libvirt project and will mark domains as 'tainted'. """ conf.net_type = "ethernet" conf.target_dev = tapname conf.script = "" def set_vif_host_backend_vhostuser_config(conf, mode, path=None): """Populate a LibvirtConfigGuestInterface instance with vhostuser socket details """ conf.net_type = "vhostuser" # unix is the only supported type in libvirt conf.vhostuser_type = "unix" conf.vhostuser_path = path or "/var/lib/libvirt/qemu/vhostuser" conf.vhostuser_mode = mode def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an OpenVSwitch bridge. """ conf.net_type = "bridge" conf.source_dev = brname conf.vporttype = "openvswitch" conf.add_vport_param("interfaceid", interfaceid) if tapname: conf.target_dev = tapname conf.script = "" def set_vif_host_backend_802qbg_config(conf, devname, managerid, typeid, typeidversion, instanceid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an 802.1qbg device. """ conf.net_type = "direct" conf.source_dev = devname conf.source_mode = "vepa" conf.vporttype = "802.1Qbg" conf.add_vport_param("managerid", managerid) conf.add_vport_param("typeid", typeid) conf.add_vport_param("typeidversion", typeidversion) conf.add_vport_param("instanceid", instanceid) if tapname: conf.target_dev = tapname def set_vif_host_backend_802qbh_config(conf, devname, profileid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an 802.1qbh device. """ conf.net_type = "direct" conf.source_dev = devname conf.source_mode = "vepa" conf.vporttype = "802.1Qbh" conf.add_vport_param("profileid", profileid) if tapname: conf.target_dev = tapname def set_vif_host_backend_direct_config(conf, devname): """Populate a LibvirtConfigGuestInterface instance with direct Interface. """ conf.net_type = "direct" conf.source_mode = "passthrough" conf.source_dev = devname conf.model = "virtio" def set_vif_bandwidth_config(conf, inst_type): """Config vif inbound/outbound bandwidth limit. parameters are set in instance_type_extra_specs table, key is in the format quota:vif_inbound_average. """ bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak', 'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak', 'vif_outbound_burst'] for key, value in inst_type.get('extra_specs', {}).iteritems(): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in bandwidth_items: setattr(conf, scope[1], value)
virtualopensystems/nova
nova/virt/libvirt/designer.py
Python
apache-2.0
4,624
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the # License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions # and limitations under the License. """ Unit tests for dcc client. """ import unittest from baidubce.auth.bce_credentials import BceCredentials from baidubce.bce_client_configuration import BceClientConfiguration from baidubce.services.dcc import dcc_client class TestDccClient(unittest.TestCase): """ unit test """ def setUp(self): """ set up """ HOST = 'dcc.api-sandbox.baidu.com' AK = '4f4b13eda66e42e29225bb02d9193a48' SK = '507b4a729f6a44feab398a6a5984304d' config = BceClientConfiguration(credentials=BceCredentials(AK, SK), endpoint=HOST) self.the_client = dcc_client.DccClient(config) def tearDown(self): """ tear down """ self.the_client = None def test_list_dedicated_hosts(self): """ test case for list dedicatedHosts """ print self.the_client.list_dedicated_hosts() def test_get_dedicated_host(self): """ test case for get dedicatedHost """ self.the_client.get_dedicated_host('d-MPgs6jPr') if __name__ == "__main__": suite = unittest.TestSuite() suite.addTest(TestDccClient("test_list_dedicated_hosts")) # suite.addTest(TestDccClient("test_get_dedicated_host")) runner = unittest.TextTestRunner() runner.run(suite)
baidubce/bce-sdk-python
test/dcc/test_dcc_client.py
Python
apache-2.0
1,903
######## # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############ import os import tempfile from shutil import copy, copytree from cloudify._compat import urlparse from . import utils from .exceptions import CloudifyCliError from .constants import DEFAULT_BLUEPRINT_PATH ICON_FILENAME = 'icon.png' def get(source, blueprint_filename=DEFAULT_BLUEPRINT_PATH, icon_path=None, download=False): """Get a source and return a directory containing the blueprint The behavior based on then source argument content is: - local archive: extract it locally and return path blueprint file - local yaml file: return the file - URL: - return it (download=False) - download and get blueprint from downloaded file (download=True) - github repo: - map it to a URL and return it (download=False) - download and get blueprint from downloaded file (download=True) Supported archive types are: zip, tar, tar.gz and tar.bz2 :param source: Path/URL/github repo to archive/blueprint file :type source: str :param blueprint_filename: Path to blueprint (if source is an archive file) :type blueprint_filename: str :param icon_path: Path to blueprint's icon file :type icon_path: str :param download: Download blueprint file if source is URL/github repo :type download: bool :return: Path to file (if archive/blueprint file passsed) or url :rtype: str """ # Cope with windows (where paths always have a scheme) if urlparse(source).scheme and not os.path.exists(source): if download: downloaded_file = utils.download_file(source) return _get_blueprint_file_from_archive( downloaded_file, blueprint_filename, icon_path) return source elif os.path.isfile(source): if utils.is_archive(source): return _get_blueprint_file_from_archive( source, blueprint_filename, icon_path) elif icon_path: return _get_blueprint_file_with_icon(source, icon_path) else: # Maybe check if yaml. If not, verified by dsl parser return source elif len(source.split('/')) == 2: url = _map_to_github_url(source) if download: downloaded_file = utils.download_file(url) return _get_blueprint_file_from_archive( downloaded_file, blueprint_filename, icon_path) return url else: raise CloudifyCliError( 'You must provide either a path to a local file, a remote URL ' 'or a GitHub `organization/repository[:tag/branch]`') def _get_blueprint_file_from_archive(archive, blueprint_filename, icon_path): """Extract archive to temporary location and get path to blueprint file. :param archive: Path to archive file :type archive: str :param blueprint_filename: Path to blueprint file relative to archive :type blueprint_filename: str :param icon_path: Absolute path to blueprint's icon :type icon_path: str :return: Absolute path to blueprint file :rtype: str """ extract_directory = utils.extract_archive(archive) blueprint_directory = os.path.join( extract_directory, os.listdir(extract_directory)[0], ) blueprint_file = os.path.join(blueprint_directory, blueprint_filename) if not os.path.isfile(blueprint_file): raise CloudifyCliError( 'Could not find `{0}`. Please provide the name of the main ' 'blueprint file by using the `-n/--blueprint-filename` flag' .format(blueprint_filename)) if icon_path: icon_file = os.path.join(blueprint_directory, ICON_FILENAME) copy(icon_path, icon_file) return blueprint_file def _get_blueprint_file_with_icon(blueprint_path, icon_path): """Create a temporary directory with a blueprint file and its icon. :param blueprint_path: Absolute path to the blueprint file :type blueprint_path: str :param icon_path: Absolute path to blueprint's icon :type icon_path: str :return: Absolute path to blueprint file :rtype: str """ source, blueprint_filename = os.path.split(blueprint_path) blueprint_directory = os.path.join(tempfile.mkdtemp(), blueprint_filename.rpartition('.')[0]) copytree(source, blueprint_directory) copy(icon_path, os.path.join(blueprint_directory, ICON_FILENAME)) return os.path.join(blueprint_directory, blueprint_filename) def _map_to_github_url(source): """Returns a path to a downloaded github archive. :param source: github repo in the format of `org/repo[:tag/branch]`. :type source: str :return: URL to the archive file for the given repo in github :rtype: str """ source_parts = source.split(':', 1) repo = source_parts[0] tag = source_parts[1] if len(source_parts) == 2 else 'master' url = 'https://github.com/{0}/archive/{1}.tar.gz'.format(repo, tag) return url def generate_id(blueprint_path, blueprint_filename=DEFAULT_BLUEPRINT_PATH): """The name of the blueprint will be the name of the folder. If blueprint_filename is provided, it will be appended to the folder. """ blueprint_id = os.path.split(os.path.dirname(os.path.abspath( blueprint_path)))[-1] if not blueprint_filename == DEFAULT_BLUEPRINT_PATH: filename, _ = os.path.splitext(os.path.basename(blueprint_filename)) blueprint_id = (blueprint_id + '.' + filename) return blueprint_id.replace('_', '-') def get_blueprint_path_and_id(blueprint_path, blueprint_filename, blueprint_id): processed_blueprint_path = get(blueprint_path, blueprint_filename, download=True) blueprint_id = blueprint_id or generate_id(processed_blueprint_path, blueprint_filename) return processed_blueprint_path, blueprint_id
cloudify-cosmo/cloudify-cli
cloudify_cli/blueprint.py
Python
apache-2.0
6,696
from .default import * DEBUG = True ALLOWED_HOSTS = ['*'] SITE_ID = os.environ.get("SITE_ID", 1) SITE_HEADER = 'Example Header' PROJECT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..')) LOGIN_URL = '/admin/login' # TEMPLATES[0].update({'DIRS': [os.path.join(BASE_DIR, 'templates'), ]}) # TEMPLATES[0]['OPTIONS']['context_processors'].append('config.context_processors.version_tag') FIXTURE_DIRS = ['config/fixtures'] STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] FORMAT_MODULE_PATH = 'config.formats' LANGUAGES = ( ('en', 'English'), ('af', 'Afrikaans'), ('zh-hans', '简体中文'), ) STATIC_URL = '/static/' STATIC_ROOT = os.path.join(PROJECT_DIR, 'static') MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media') MIDDLEWARE_CLASSES += [ 'django.contrib.sites.middleware.CurrentSiteMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', # 'axes.middleware.FailedLoginMiddleware', ] # Not recommended at moment: https://github.com/etianen/django-reversion/issues/496 # MIDDLEWARE_CLASSES = ['reversion.middleware.RevisionMiddleware', ] + MIDDLEWARE_CLASSES DJANGO_CONTRIB = [ 'django.contrib.flatpages', 'django.contrib.sites', ] EXTENSIONS = [ 'import_export', 'rest_framework', 'reversion', # 'debug_toolbar', ] PROJECT_APPS = [ 'fact_book.apps.AdminConfig', 'example.apps.ExampleConfig', ] INSTALLED_APPS = INSTALLED_APPS + DJANGO_CONTRIB + EXTENSIONS + PROJECT_APPS # REST_FRAMEWORK = { # # Use Django's standard `django.contrib.auth` permissions, # # or allow read-only access for unauthenticated users. # 'DEFAULT_PERMISSION_CLASSES': [ # 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' # ] # } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.environ.get('POSTGRES_DB', 'postgres'), 'USER': os.environ.get('POSTGRES_USER', 'postgres'), 'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'postgres'), 'HOST': os.environ.get('POSTGRES_HOSTNAME', 'postgres'), 'PORT': os.environ.get('POSTGRES_PORT_PORT', '5432') } }
obitec/django-factbook
tests/config/settings.py
Python
apache-2.0
2,234
# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ from neutron.agent import dhcp_agent from neutron.cmd.sanity import checks from neutron.common import config from neutron.conf.db import l3_hamode_db LOG = logging.getLogger(__name__) def setup_conf(): cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.' 'agent.common.config') cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.' 'agent.common.config') cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.' 'agent.common.config') cfg.CONF.import_group('ml2', 'neutron.conf.plugins.ml2.config') cfg.CONF.import_group('SECURITYGROUP', 'neutron.agent.securitygroups_rpc') dhcp_agent.register_options(cfg.CONF) cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) class BoolOptCallback(cfg.BoolOpt): def __init__(self, name, callback, **kwargs): if 'default' not in kwargs: kwargs['default'] = False self.callback = callback super(BoolOptCallback, self).__init__(name, **kwargs) def check_ovs_vxlan(): result = checks.ovs_vxlan_supported() if not result: LOG.error('Check for Open vSwitch VXLAN support failed. ' 'Please ensure that the version of openvswitch ' 'being used has VXLAN support.') return result def check_ovs_geneve(): result = checks.ovs_geneve_supported() if not result: LOG.error('Check for Open vSwitch Geneve support failed. ' 'Please ensure that the version of openvswitch ' 'and kernel being used has Geneve support.') return result def check_iproute2_vxlan(): result = checks.iproute2_vxlan_supported() if not result: LOG.error('Check for iproute2 VXLAN support failed. Please ensure ' 'that the iproute2 has VXLAN support.') return result def check_ovs_patch(): result = checks.patch_supported() if not result: LOG.error('Check for Open vSwitch patch port support failed. ' 'Please ensure that the version of openvswitch ' 'being used has patch port support or disable features ' 'requiring patch ports (gre/vxlan, etc.).') return result def check_read_netns(): required = checks.netns_read_requires_helper() if not required and cfg.CONF.AGENT.use_helper_for_ns_read: LOG.warning("The user that is executing neutron can read the " "namespaces without using the root_helper. Disable " "the use_helper_for_ns_read option to avoid a " "performance impact.") # Don't fail because nothing is actually broken. Just not optimal. result = True elif required and not cfg.CONF.AGENT.use_helper_for_ns_read: LOG.error("The user that is executing neutron does not have " "permissions to read the namespaces. Enable the " "use_helper_for_ns_read configuration option.") result = False else: # everything is configured appropriately result = True return result # NOTE(ihrachyshka): since the minimal version is currently capped due to # missing hwaddr matching in dnsmasq < 2.67, a better version of the check # would actually start dnsmasq server and issue a DHCP request using a IPv6 # DHCP client. def check_dnsmasq_version(): result = checks.dnsmasq_version_supported() if not result: LOG.error('The installed version of dnsmasq is too old. ' 'Please update to at least version %s.', checks.get_minimal_dnsmasq_version_supported()) return result def check_keepalived_ipv6_support(): result = checks.keepalived_ipv6_supported() if not result: LOG.error('The installed version of keepalived does not support ' 'IPv6. Please update to at least version 1.2.10 for ' 'IPv6 support.') return result def check_dibbler_version(): result = checks.dibbler_version_supported() if not result: LOG.error('The installed version of dibbler-client is too old. ' 'Please update to at least version %s.', checks.get_minimal_dibbler_version_supported()) return result def check_nova_notify(): result = checks.nova_notify_supported() if not result: LOG.error('Nova notifications are enabled, but novaclient is not ' 'installed. Either disable nova notifications or ' 'install python-novaclient.') return result def check_arp_responder(): result = checks.arp_responder_supported() if not result: LOG.error('Check for Open vSwitch ARP responder support failed. ' 'Please ensure that the version of openvswitch ' 'being used has ARP flows support.') return result def check_arp_header_match(): result = checks.arp_header_match_supported() if not result: LOG.error('Check for Open vSwitch support of ARP header matching ' 'failed. ARP spoofing suppression will not work. A ' 'newer version of OVS is required.') return result def check_icmpv6_header_match(): result = checks.icmpv6_header_match_supported() if not result: LOG.error('Check for Open vSwitch support of ICMPv6 header ' 'matching failed. ICMPv6 Neighbor Advt spoofing (part ' 'of arp spoofing) suppression will not work. A newer ' 'version of OVS is required.') return result def check_vf_management(): result = checks.vf_management_supported() if not result: LOG.error('Check for VF management support failed. ' 'Please ensure that the version of ip link ' 'being used has VF support.') return result def check_vf_extended_management(): result = checks.vf_extended_management_supported() if not result: LOG.error('Check for VF extended management support failed. ' 'Please ensure that the version of ip link ' 'being used has VF extended support: version ' '"iproute2-ss140804", git tag "v3.16.0"') return result def check_ovsdb_native(): cfg.CONF.set_override('ovsdb_interface', 'native', group='OVS') result = checks.ovsdb_native_supported() if not result: LOG.error('Check for native OVSDB support failed.') return result def check_ovs_conntrack(): result = checks.ovs_conntrack_supported() if not result: LOG.error('Check for Open vSwitch support of conntrack support ' 'failed. OVS/CT firewall will not work. A newer ' 'version of OVS (2.5+) and linux kernel (4.3+) are ' 'required. See ' 'https://github.com/openvswitch/ovs/blob/master/FAQ.md ' 'for more information.') return result def check_ebtables(): result = checks.ebtables_supported() if not result: LOG.error('Cannot run ebtables. Please ensure that it ' 'is installed.') return result def check_ipset(): result = checks.ipset_supported() if not result: LOG.error('Cannot run ipset. Please ensure that it ' 'is installed.') return result def check_ip6tables(): result = checks.ip6tables_supported() if not result: LOG.error('Cannot run ip6tables. Please ensure that it ' 'is installed.') return result def check_conntrack(): result = checks.conntrack_supported() if not result: LOG.error('Cannot run conntrack. Please ensure that it ' 'is installed.') return result def check_dhcp_release6(): result = checks.dhcp_release6_supported() if not result: LOG.error('No dhcp_release6 tool detected. The installed version ' 'of dnsmasq does not support releasing IPv6 leases. ' 'Please update to at least version %s if you need this ' 'feature. If you do not use IPv6 stateful subnets you ' 'can continue to use this version of dnsmasq, as ' 'other IPv6 address assignment mechanisms besides ' 'stateful DHCPv6 should continue to work without ' 'the dhcp_release6 utility. ' 'Current version of dnsmasq is ok if other checks ' 'pass.', checks.get_dnsmasq_version_with_dhcp_release6()) return result def check_bridge_firewalling_enabled(): result = checks.bridge_firewalling_enabled() if not result: LOG.error('Bridge firewalling is not enabled. It may be the case ' 'that bridge and/or br_netfilter kernel modules are not ' 'loaded. Alternatively, corresponding sysctl settings ' 'may be overridden to disable it by default.') return result def check_ip_nonlocal_bind(): result = checks.ip_nonlocal_bind() if not result: LOG.error('This kernel does not isolate ip_nonlocal_bind kernel ' 'option in namespaces. Please update to kernel ' 'version > 3.19.') return result # Define CLI opts to test specific features, with a callback for the test OPTS = [ BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False, help=_('Check for OVS vxlan support')), BoolOptCallback('ovs_geneve', check_ovs_geneve, default=False, help=_('Check for OVS Geneve support')), BoolOptCallback('iproute2_vxlan', check_iproute2_vxlan, default=False, help=_('Check for iproute2 vxlan support')), BoolOptCallback('ovs_patch', check_ovs_patch, default=False, help=_('Check for patch port support')), BoolOptCallback('nova_notify', check_nova_notify, help=_('Check for nova notification support')), BoolOptCallback('arp_responder', check_arp_responder, help=_('Check for ARP responder support')), BoolOptCallback('arp_header_match', check_arp_header_match, help=_('Check for ARP header match support')), BoolOptCallback('icmpv6_header_match', check_icmpv6_header_match, help=_('Check for ICMPv6 header match support')), BoolOptCallback('vf_management', check_vf_management, help=_('Check for VF management support')), BoolOptCallback('vf_extended_management', check_vf_extended_management, help=_('Check for VF extended management support')), BoolOptCallback('read_netns', check_read_netns, help=_('Check netns permission settings')), BoolOptCallback('dnsmasq_version', check_dnsmasq_version, help=_('Check minimal dnsmasq version'), deprecated_for_removal=True, deprecated_since='Pike'), BoolOptCallback('ovsdb_native', check_ovsdb_native, help=_('Check ovsdb native interface support')), BoolOptCallback('ovs_conntrack', check_ovs_conntrack, help=_('Check ovs conntrack support')), BoolOptCallback('ebtables_installed', check_ebtables, help=_('Check ebtables installation')), BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support, help=_('Check keepalived IPv6 support')), BoolOptCallback('dibbler_version', check_dibbler_version, help=_('Check minimal dibbler version'), deprecated_for_removal=True, deprecated_since='Pike'), BoolOptCallback('ipset_installed', check_ipset, help=_('Check ipset installation')), BoolOptCallback('ip6tables_installed', check_ip6tables, help=_('Check ip6tables installation')), BoolOptCallback('conntrack_installed', check_conntrack, help=_('Check conntrack installation')), BoolOptCallback('dhcp_release6', check_dhcp_release6, help=_('Check dhcp_release6 installation')), BoolOptCallback('bridge_firewalling', check_bridge_firewalling_enabled, help=_('Check bridge firewalling'), default=False), BoolOptCallback('ip_nonlocal_bind', check_ip_nonlocal_bind, help=_('Check ip_nonlocal_bind kernel option works with ' 'network namespaces.'), default=False), ] def enable_tests_from_config(): """If a test can depend on configuration, use this function to set the appropriate CLI option to enable that test. It will then be possible to run all necessary tests, just by passing in the appropriate configs. """ cfg.CONF.set_default('vf_management', True) cfg.CONF.set_default('arp_header_match', True) cfg.CONF.set_default('icmpv6_header_match', True) if 'vxlan' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_vxlan', True) if 'geneve' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_geneve', True) if ('vxlan' in cfg.CONF.ml2.type_drivers or cfg.CONF.VXLAN.enable_vxlan): cfg.CONF.set_default('iproute2_vxlan', True) if cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_patch', True) if not cfg.CONF.OVS.use_veth_interconnection: cfg.CONF.set_default('ovs_patch', True) if (cfg.CONF.notify_nova_on_port_status_changes or cfg.CONF.notify_nova_on_port_data_changes): cfg.CONF.set_default('nova_notify', True) if cfg.CONF.AGENT.arp_responder: cfg.CONF.set_default('arp_responder', True) if not cfg.CONF.AGENT.use_helper_for_ns_read: cfg.CONF.set_default('read_netns', True) if cfg.CONF.OVS.ovsdb_interface == 'native': cfg.CONF.set_default('ovsdb_native', True) if cfg.CONF.l3_ha: cfg.CONF.set_default('keepalived_ipv6_support', True) cfg.CONF.set_default('ip_nonlocal_bind', True) if cfg.CONF.SECURITYGROUP.enable_ipset: cfg.CONF.set_default('ipset_installed', True) if cfg.CONF.SECURITYGROUP.enable_security_group: cfg.CONF.set_default('ip6tables_installed', True) if ('sriovnicswitch' in cfg.CONF.ml2.mechanism_drivers and 'qos' in cfg.CONF.ml2.extension_drivers): cfg.CONF.set_default('vf_extended_management', True) if cfg.CONF.SECURITYGROUP.firewall_driver in ( 'iptables', 'iptables_hybrid', ('neutron.agent.linux.iptables_firewall.' 'IptablesFirewallDriver'), ('neutron.agent.linux.iptables_firewall.' 'OVSHybridIptablesFirewallDriver'), ): cfg.CONF.set_default('bridge_firewalling', True) def all_tests_passed(): return all(opt.callback() for opt in OPTS if cfg.CONF.get(opt.name)) def main(): setup_conf() cfg.CONF.register_cli_opts(OPTS) cfg.CONF.set_override('use_stderr', True) config.setup_logging() config.init(sys.argv[1:], default_config_files=[]) if cfg.CONF.config_file: enable_tests_from_config() return 0 if all_tests_passed() else 1
eayunstack/neutron
neutron/cmd/sanity_check.py
Python
apache-2.0
16,099
from django.db import models class A(models.Model): col = models.CharField(max_length=10, null=False, default="empty")
3YOURMIND/django-migration-linter
tests/test_project/app_make_not_null_with_django_default/models.py
Python
apache-2.0
125
""" Django settings for pyladiesdc project. Generated by 'django-admin startproject' using Django 1.8. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '*o*5^a68*bljy4xxv^)p)-s6d07oviu*kxeun2hfse+yqe+b_v' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'jobtranslations', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'pyladiesdc.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'pyladiesdc.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/New_York' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static')
PyLadiesDC/job-translations
pyladiesdc/settings.py
Python
apache-2.0
2,731
"""Builds the Adience network. Summary of available functions: # Compute input images and labels for training. If you would like to run # evaluations, use input() instead. inputs, labels = distorted_inputs() # Compute inference on the model inputs to make a prediction. predictions = inference(inputs) # Compute the total loss of the prediction with respect to the labels. loss = loss(predictions, labels) # Create a graph to run one step of training with respect to the loss. train_op = train(loss, global_step) """ # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import re import sys import tarfile import tensorflow.python.platform from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf import adience_input from tensorflow.python.platform import gfile ad_input = adience_input.DataInput() ad_input.read_from_txt() FLAGS = tf.app.flags.FLAGS # Basic model parameters. tf.app.flags.DEFINE_integer('batch_size', 32, """Number of images to process in a batch.""") tf.app.flags.DEFINE_string('data_dir', 'data/aligned', """Path to the CIFAR-10 data directory.""") # Process images of this size. Note that this differs from the original CIFAR # image size of 32 x 32. If one alters this number, then the entire model # architecture will change and any model would need to be retrained. IMAGE_SIZE = 64 # Global constants describing the CIFAR-10 data set. NUM_CLASSES = 2 NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 0 #change it when reading input data (in distorded inputs) NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 0 # Constants describing the training process. MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average. NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays. LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor. INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. # If a model is trained with multiple GPU's prefix all Op names with tower_name # to differentiate the operations. Note that this prefix is removed from the # names of the summaries when visualizing a model. TOWER_NAME = 'tower' def _activation_summary(x): """Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measure the sparsity of activations. Args: x: Tensor Returns: nothing """ # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) tf.histogram_summary(tensor_name + '/activations', x) tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) def _variable_on_cpu(name, shape, initializer): """Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor """ with tf.device('/cpu:0'): var = tf.get_variable(name, shape, initializer=initializer) return var def _variable_with_weight_decay(name, shape, stddev, wd): """Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor """ var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev)) if wd: weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var def _generate_image_and_label_batch(image, label, min_queue_examples): """Construct a queued batch of images and labels. Args: image: 3-D Tensor of [IMAGE_SIZE, IMAGE_SIZE, 3] of type.float32. label: 1-D Tensor of type.int32 min_queue_examples: int32, minimum number of samples to retain in the queue that provides of batches of examples. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. """ # Create a queue that shuffles the examples, and then # read 'FLAGS.batch_size' images + labels from the example queue. num_preprocess_threads = 16 images, label_batch = tf.train.shuffle_batch( [image, label], batch_size=FLAGS.batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * FLAGS.batch_size, min_after_dequeue=min_queue_examples) # Display the training images in the visualizer. tf.image_summary('images', images) return images, tf.reshape(label_batch, [FLAGS.batch_size]) def distorted_inputs(): """Construct distorted input for CIFAR training using the Reader ops. Raises: ValueError: if no data_dir Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. """ # filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin', # 'data_batch_%d.bin' % i) # for i in xrange(1, 5)] # for f in filenames: # if not gfile.Exists(f): # raise ValueError('Failed to find file: ' + f) #ad_input.read_adience() #change if you want to go to cross-fold # global NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = len(ad_input.train_string_que) # Create a queue that produces the filenames to read. #filename_queue = tf.train.string_input_producer(filenames) # Read examples from files in the filename queue. read_input = ad_input.read_adience() reshaped_image = tf.cast(read_input.dec_image, tf.float32) height = IMAGE_SIZE width = IMAGE_SIZE # Image processing for training the network. Note the many random # distortions applied to the image. # Randomly crop a [height, width] section of the image. distorted_image = tf.image.random_crop(reshaped_image, [height, width]) # Randomly flip the image horizontally. distorted_image = tf.image.random_flip_left_right(distorted_image) # Because these operations are not commutative, consider randomizing # randomize the order their operation. distorted_image = tf.image.random_brightness(distorted_image, max_delta=63) distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8) # Subtract off the mean and divide by the variance of the pixels. float_image = tf.image.per_image_whitening(distorted_image) # Ensure that the random shuffling has good mixing properties. min_fraction_of_examples_in_queue = 0.4 min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue) print ('Filling queue with %d Adience images before starting to train. ' 'This will take a few minutes.' % min_queue_examples) # Generate a batch of images and labels by building up a queue of examples. return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples) def inputs(eval_data): print("\neval inputs adience called") """Construct input for Adience evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. Raises: ValueError: if no data_dir Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. """ global NUM_EXAMPLES_PER_EPOCH_FOR_EVAL NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = len(ad_input.eval_string_que) #TODO: # if not eval_data: # filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin', # 'data_batch_%d.bin' % i) # for i in xrange(1, 5)] # num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # else: # filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin', # 'test_batch.bin')] # num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL # # for f in filenames: # if not gfile.Exists(f): # raise ValueError('Failed to find file: ' + f) # Create a queue that produces the filenames to read. #filename_queue = tf.train.string_input_producer(filenames) # Read examples from files in the filename queue. read_input = ad_input.read_adience_eval() reshaped_image = tf.cast(read_input.dec_image, tf.float32) print("reshaped image eval") height = IMAGE_SIZE width = IMAGE_SIZE # Image processing for evaluation. # Crop the central [height, width] of the image. resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, width, height) print("image resized") # Subtract off the mean and divide by the variance of the pixels. float_image = tf.image.per_image_whitening(resized_image) # Ensure that the random shuffling has good mixing properties. min_fraction_of_examples_in_queue = 0.4 min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * min_fraction_of_examples_in_queue) print("eval inputs adience done") # Generate a batch of images and labels by building up a queue of examples. return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples) def inference(images): """Build the CIFAR-10 model. Args: images: Images returned from distorted_inputs() or inputs(). Returns: Logits. """ # We instantiate all variables using tf.get_variable() instead of # tf.Variable() in order to share variables across multiple GPU training runs. # If we only ran this model on a single GPU, we could simplify this function # by replacing all instances of tf.get_variable() with tf.Variable(). # # conv1 with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=1e-4, wd=0.0) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) bias = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(bias, name=scope.name) _activation_summary(conv1) # pool1 pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') # norm1 norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') # conv2 with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=1e-4, wd=0.0) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) bias = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(bias, name=scope.name) _activation_summary(conv2) # norm2 norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2') # pool2 pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') # local3 with tf.variable_scope('local3') as scope: # Move everything into depth so we can perform a single matrix multiply. dim = 1 for d in pool2.get_shape()[1:].as_list(): dim *= d reshape = tf.reshape(pool2, [FLAGS.batch_size, dim]) weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name) _activation_summary(local3) # local4 with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu_layer(local3, weights, biases, name=scope.name) _activation_summary(local4) # softmax, i.e. softmax(WX + b) with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.nn.xw_plus_b(local4, weights, biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear def loss(logits, labels): """Add L2Loss to all the trainable variables. Add summary for for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float. """ # Reshape the labels into a dense Tensor of # shape [batch_size, NUM_CLASSES]. sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1]) #(FLAGS.batch_size, 1) if old tensorflow indices = tf.reshape(tf.range(0,FLAGS.batch_size,1), [FLAGS.batch_size, 1]) concated = tf.concat(1, [indices, sparse_labels]) dense_labels = tf.sparse_to_dense(concated, [FLAGS.batch_size, NUM_CLASSES], 1.0, 0.0) # Calculate the average cross entropy loss across the batch. cross_entropy = tf.nn.softmax_cross_entropy_with_logits( logits, dense_labels, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) # The total loss is defined as the cross entropy loss plus all of the weight # decay terms (L2 loss). return tf.add_n(tf.get_collection('losses'), name='total_loss') def _add_loss_summaries(total_loss): """Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses. """ # Compute the moving average of all individual losses and the total loss. loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summmary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.scalar_summary(l.op.name +' (raw)', l) tf.scalar_summary(l.op.name, loss_averages.average(l)) return loss_averages_op def train(total_loss, global_step): """Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Variables that affect learning rate. num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.scalar_summary('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad: tf.histogram_summary(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
NumesSanguis/MLTensor
adience/adience.py
Python
apache-2.0
18,728
# Author: Denys Makogon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glanceclient.v2 import client as glanceclient from keystoneauth1 import loading from keystoneauth1 import session from keystoneclient import client as keystoneclient from novaclient import client as novaclient from neutronclient.v2_0 import client as neutronclient class OpenStackClients(object): __keystone = None __nova = None __neutron = None __glance = None def __password_session_setup(self, node): creds = node.runtime_properties['auth_properties'] if 'region_name' in creds: del creds['region_name'] loader = loading.get_plugin_loader('password') auth = loader.load_from_options(**creds) sess = session.Session(auth=auth) return sess def keystone(self, node): if self.__keystone is None: self.__keystone = keystoneclient.Client(**node.properties) self.__keystone.authenticate() return self.__keystone def nova(self, node): if self.__nova is None: version = node.properties['compute_api_version'] use_connection_pool = node.properties['use_connection_pool'] self.__nova = novaclient.Client( version, session=self.__password_session_setup(node), connection_pool=use_connection_pool) return self.__nova def neutron(self, node): if self.__neutron is None: self.__neutron = neutronclient.Client( session=self.__password_session_setup(node)) return self.__neutron def glance(self, node): if self.__glance is None: self.__glance = glanceclient.Client( session=self.__password_session_setup(node)) return self.__glance openstack = OpenStackClients()
aiorchestra/aiorchestra-openstack-plugin
openstack_plugin/common/clients.py
Python
apache-2.0
2,375
"""Provide Tournament style selection. This implements selection based on a tournament style. In this model of selection, two individuals are randomly chosen from the population, and the organism with the higher fitness is considered the 'winner' and moves to the next generation. """ # standard modules import random # local modules from Abstract import AbstractSelection class TournamentSelection(AbstractSelection): """Implement tournament style selection. """ def __init__(self, mutator, crossover, repairer, num_competitors = 2): """Initialize the tournament selector. Arguments: o num_competitors-- The number of individiuals that should be involved in a selection round. By default we just have two individuals (head to head!). See AbstractSelection for a description of the arguments to the initializer. """ AbstractSelection.__init__(self, mutator, crossover, repairer) if num_competitors < 2: raise ValueError("Must have at least 2 competitors!") self._num_competitors = num_competitors def _fitness_cmp(self, org_1, org_2): """Comparison function for comparing two organisms. This just allows us to easily sort organisms by fitness. """ return cmp(org_1.fitness, org_2.fitness) def select(self, population): """Perform selection on the population using the Tournament model. Arguments: o population -- A population of organisms on which we will perform selection. The individuals are assumed to have fitness values which are due to their current genome (ie. the fitness is up to date). """ # we want to create a new population of the same size as the original new_population = [] while len(new_population) < len(population): # select two individuals using tournament selection new_orgs = [] # select for two individuals for round_num in range(2): competitors = [] while len(competitors) < self._num_competitors: new_org = random.choice(population) if new_org not in competitors: competitors.append(new_org) # sort the competitors by fitness, this will put them # from lowest to highest competitors.sort(self._fitness_cmp) # get the best organism new_orgs.append(competitors[-1]) assert len(new_orgs) == 2, "Expected two organisms to be selected" # do mutation and crossover to get the new organisms new_org_1, new_org_2 = self.mutate_and_crossover(new_orgs[0], new_orgs[1]) new_population.extend([new_org_1, new_org_2]) return new_population
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/GA/Selection/Tournament.py
Python
apache-2.0
2,999
# # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # # # This file is built up from an autogenerated template resource_server.py and # contains code/hooks at different point during processing a request, specific # to type of resource. For eg. allocation of mac/ip-addr for a port during its # creation. import json import re import cfgm_common import netaddr import uuid import vnc_quota from gen.resource_xsd import * from gen.resource_common import * from gen.resource_server import * from pprint import pformat from pysandesh.gen_py.sandesh.ttypes import SandeshLevel class GlobalSystemConfigServer(GlobalSystemConfigServerGen): @classmethod def _check_asn(cls, obj_dict, db_conn): global_asn = obj_dict.get('autonomous_system') if not global_asn: return (True, '') (ok, result) = db_conn.dbe_list('virtual-network') if not ok: return (ok, result) for vn_name, vn_uuid in result: ok, result = db_conn.dbe_read('virtual-network', {'uuid': vn_uuid}) if not ok: return ok, result rt_dict = result.get('route_target_list', {}) for rt in rt_dict.get('route_target', []): (_, asn, target) = rt.split(':') if (int(asn) == global_asn and int(target) >= cfgm_common.BGP_RTGT_MIN_ID): return (False, (400, "Virtual network %s is configured " "with a route target with this ASN and route " "target value in the same range as used by " "automatically allocated route targets" % vn_name)) return (True, '') # end _check_asn @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): ok, result = cls._check_asn(obj_dict, db_conn) if not ok: return ok, result return True, '' # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): ok, result = cls._check_asn(obj_dict, db_conn) if not ok: return ok, result return True, '' # end http_put # end class GlobalSystemConfigServer class FloatingIpServer(FloatingIpServerGen): generate_default_instance = False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): proj_dict = obj_dict['project_refs'][0] if 'uuid' in proj_dict: proj_uuid = proj_dict['uuid'] else: proj_uuid = db_conn.fq_name_to_uuid('project', proj_dict['to']) user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'floating_ip_back_refs', 'obj_type': 'floating-ip', 'user_visibility': user_visibility, 'proj_uuid': proj_uuid} (ok, response) = vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) if not ok: return (ok, response) vn_fq_name = obj_dict['fq_name'][:-2] req_ip = obj_dict.get("floating_ip_address") if req_ip and cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name): return (False, (403, 'Ip address already in use')) try: fip_addr = cls.addr_mgmt.ip_alloc_req(vn_fq_name, asked_ip_addr=req_ip) except Exception as e: return (False, (500, str(e))) obj_dict['floating_ip_address'] = fip_addr db_conn.config_log('AddrMgmt: alloc %s FIP for vn=%s, tenant=%s, askip=%s' \ % (obj_dict['floating_ip_address'], vn_fq_name, tenant_name, req_ip), level=SandeshLevel.SYS_DEBUG) return True, "" # end http_post_collection @classmethod def http_post_collection_fail(cls, tenant_name, obj_dict, db_conn): vn_fq_name = obj_dict['fq_name'][:-2] fip_addr = obj_dict['floating_ip_address'] db_conn.config_log('AddrMgmt: free FIP %s for vn=%s tenant=%s, on post fail' % (fip_addr, vn_fq_name, tenant_name), level=SandeshLevel.SYS_DEBUG) cls.addr_mgmt.ip_free_req(fip_addr, vn_fq_name) return True, "" # end http_post_collection_fail @classmethod def http_delete(cls, id, obj_dict, db_conn): vn_fq_name = obj_dict['fq_name'][:-2] fip_addr = obj_dict['floating_ip_address'] db_conn.config_log('AddrMgmt: free FIP %s for vn=%s' % (fip_addr, vn_fq_name), level=SandeshLevel.SYS_DEBUG) cls.addr_mgmt.ip_free_req(fip_addr, vn_fq_name) return True, "" # end http_delete @classmethod def http_delete_fail(cls, id, obj_dict, db_conn): vn_fq_name = obj_dict['fq_name'][:-2] req_ip = obj_dict.get("floating_ip_address", None) if req_ip == None: return True, "" try: cls.addr_mgmt.ip_alloc_req(vn_fq_name, asked_ip_addr=req_ip) except Exception as e: return (False, (500, str(e))) db_conn.config_log('AddrMgmt: alloc %s FIP for vn=%s to recover DELETE failure' % (obj_dict['floating_ip_address'], vn_fq_name), level=SandeshLevel.SYS_DEBUG) return True, "" # end http_delete_fail @classmethod def dbe_create_notification(cls, obj_ids, obj_dict): fip_addr = obj_dict['floating_ip_address'] vn_fq_name = obj_dict['fq_name'][:-2] cls.addr_mgmt.ip_alloc_notify(fip_addr, vn_fq_name) # end dbe_create_notification @classmethod def dbe_delete_notification(cls, obj_ids, obj_dict): fip_addr = obj_dict['floating_ip_address'] vn_fq_name = obj_dict['fq_name'][:-2] cls.addr_mgmt.ip_free_notify(fip_addr, vn_fq_name) # end dbe_delete_notification # end class FloatingIpServer class InstanceIpServer(InstanceIpServerGen): generate_default_instance = False @classmethod def _get_subnet_name(cls, vn_dict, subnet_uuid): ipam_refs = vn_dict.get('network_ipam_refs', []) subnet_name = None for ipam in ipam_refs: ipam_subnets = ipam['attr'].get('ipam_subnets', []) for subnet in ipam_subnets: if subnet['subnet_uuid'] == subnet_uuid: subnet_dict = subnet['subnet'] subnet_name = subnet_dict['ip_prefix'] + '/' + str( subnet_dict['ip_prefix_len']) return subnet_name @classmethod def _is_gateway_ip(cls, vn_dict, ip_addr): ipam_refs = vn_dict.get('network_ipam_refs', []) for ipam in ipam_refs: ipam_subnets = ipam['attr'].get('ipam_subnets', []) for subnet in ipam_subnets: if subnet['default_gateway'] == ip_addr: return True return False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] if ((vn_fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (vn_fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric and link-local address allocations return True, "" req_ip = obj_dict.get("instance_ip_address", None) req_ip_family = obj_dict.get("instance_ip_family", None) req_ip_version = 4 # default ip v4 if req_ip_family == "v6": req_ip_version = 6 vn_id = {'uuid': db_conn.fq_name_to_uuid('virtual-network', vn_fq_name)} (read_ok, vn_dict) = db_conn.dbe_read('virtual-network', vn_id) if not read_ok: return (False, (500, 'Internal error : ' + pformat(vn_dict))) subnet_uuid = obj_dict.get('subnet_uuid', None) sub = cls._get_subnet_name(vn_dict, subnet_uuid) if subnet_uuid else None if subnet_uuid and not sub: return (False, (404, "Subnet id " + subnet_uuid + " not found")) # If its an external network, check whether floating IP equivalent to # requested fixed-IP is already reserved. router_external = vn_dict.get('router_external', None) if req_ip and router_external and \ not cls._is_gateway_ip(vn_dict, req_ip) and \ cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name): return (False, (403, 'Ip address already in use')) try: ip_addr = cls.addr_mgmt.ip_alloc_req( vn_fq_name, sub=sub, asked_ip_addr=req_ip, asked_ip_version=req_ip_version) except Exception as e: return (False, (500, str(e))) obj_dict['instance_ip_address'] = ip_addr db_conn.config_log('AddrMgmt: alloc %s for vn=%s, tenant=%s, askip=%s' % (obj_dict['instance_ip_address'], vn_fq_name, tenant_name, req_ip), level=SandeshLevel.SYS_DEBUG) return True, "" # end http_post_collection @classmethod def http_post_collection_fail(cls, tenant_name, obj_dict, db_conn): vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] if ((vn_fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (vn_fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric and link-local address allocations return True, "" ip_addr = obj_dict['instance_ip_address'] db_conn.config_log('AddrMgmt: free IP %s, vn=%s tenant=%s on post fail' % (ip_addr, vn_fq_name, tenant_name), level=SandeshLevel.SYS_DEBUG) cls.addr_mgmt.ip_free_req(ip_addr, vn_fq_name) return True, "" # end http_post_collection_fail @classmethod def http_delete(cls, id, obj_dict, db_conn): vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] if ((vn_fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (vn_fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric and link-local address allocations return True, "" ip_addr = obj_dict['instance_ip_address'] db_conn.config_log('AddrMgmt: free IP %s, vn=%s' % (ip_addr, vn_fq_name), level=SandeshLevel.SYS_DEBUG) cls.addr_mgmt.ip_free_req(ip_addr, vn_fq_name) return True, "" # end http_delete @classmethod def http_delete_fail(cls, id, obj_dict, db_conn): vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] if ((vn_fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (vn_fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric and link-local address allocations return True, "" req_ip = obj_dict.get("instance_ip_address", None) if req_ip == None: return True, "" try: cls.addr_mgmt.ip_alloc_req(vn_fq_name, asked_ip_addr=req_ip) except Exception as e: return (False, (500, str(e))) db_conn.config_log('AddrMgmt: alloc %s for vn=%s to recover DELETE failure' % (obj_dict['instance_ip_address'], vn_fq_name), level=SandeshLevel.SYS_DEBUG) return True, "" # end http_delete_fail @classmethod def dbe_create_notification(cls, obj_ids, obj_dict): ip_addr = obj_dict['instance_ip_address'] vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] cls.addr_mgmt.ip_alloc_notify(ip_addr, vn_fq_name) # end dbe_create_notification @classmethod def dbe_delete_notification(cls, obj_ids, obj_dict): ip_addr = obj_dict['instance_ip_address'] vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] cls.addr_mgmt.ip_free_notify(ip_addr, vn_fq_name) # end dbe_delete_notification # end class InstanceIpServer class LogicalRouterServer(LogicalRouterServerGen): generate_default_instance = False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'logical_routers', 'obj_type': 'logical-router', 'user_visibility': user_visibility} return vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) # end http_post_collection # end class LogicalRouterServer class VirtualMachineInterfaceServer(VirtualMachineInterfaceServerGen): generate_default_instance = False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): vn_dict = obj_dict['virtual_network_refs'][0] vn_uuid = vn_dict.get('uuid') if not vn_uuid: vn_fq_name = vn_dict.get('to') if not vn_fq_name: return (False, (500, 'Internal error : ' + pformat(vn_dict))) vn_uuid = db_conn.fq_name_to_uuid('virtual-network', vn_fq_name) (ok, vn_dict) = db_conn.dbe_read('virtual-network', {'uuid':vn_uuid}) if not ok: return (False, (500, 'Internal error : ' + pformat(vn_dict))) proj_uuid = vn_dict['parent_uuid'] user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'virtual_machine_interfaces', 'obj_type': 'virtual-machine-interface', 'user_visibility': user_visibility, 'proj_uuid': proj_uuid} (ok, response) = vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) if not ok: return (ok, response) inmac = None if 'virtual_machine_interface_mac_addresses' in obj_dict: mc = obj_dict['virtual_machine_interface_mac_addresses'] if 'mac_address' in mc: if len(mc['mac_address'])==1: inmac = mc['mac_address'] if inmac!=None: mac_addrs_obj = MacAddressesType(inmac) else: mac_addr = cls.addr_mgmt.mac_alloc(obj_dict) mac_addrs_obj = MacAddressesType([mac_addr]) mac_addrs_json = json.dumps( mac_addrs_obj, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems())) mac_addrs_dict = json.loads(mac_addrs_json) obj_dict['virtual_machine_interface_mac_addresses'] = mac_addrs_dict if 'virtual_machine_interface_allowed_address_pairs' in obj_dict: aap_config = obj_dict['virtual_machine_interface_allowed_address_pairs'] if 'allowed_address_pair' in aap_config: aaps = aap_config['allowed_address_pair'] for aap in aaps or []: if aap['mac'] == "": aap['mac'] = obj_dict['virtual_machine_interface_mac_addresses']['mac_address'] return True, "" # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): if 'virtual_machine_interface_allowed_address_pairs' in obj_dict: vmi_id = {'uuid': id} (read_ok, read_result) = db_conn.dbe_read('virtual-machine-interface', vmi_id) if not read_ok: return (False, (500, read_result)) aap_config = obj_dict['virtual_machine_interface_allowed_address_pairs'] if 'allowed_address_pair' in aap_config: aaps = aap_config['allowed_address_pair'] for aap in aaps or []: if aap['mac'] == "": aap['mac'] = read_result['virtual_machine_interface_mac_addresses']['mac_address'] return True, "" # end http_put # end class VirtualMachineInterfaceServer class VirtualNetworkServer(VirtualNetworkServerGen): @classmethod def _check_route_targets(cls, obj_dict, db_conn): if 'route_target_list' not in obj_dict: return (True, '') config_uuid = db_conn.fq_name_to_uuid('global_system_config', ['default-global-system-config']) config = db_conn.uuid_to_obj_dict(config_uuid) global_asn = config.get('prop:autonomous_system') if not global_asn: return (True, '') global_asn = json.loads(global_asn) rt_dict = obj_dict.get('route_target_list') if not rt_dict: return (True, '') for rt in rt_dict.get('route_target', []): try: (prefix, asn, target) = rt.split(':') if prefix != 'target': raise ValueError() target = int(target) if not asn.isdigit(): netaddr.IPAddress(asn) except (ValueError, netaddr.core.AddrFormatError) as e: return (False, "Route target must be of the format " "'target:<asn>:<number>' or 'target:<ip>:number'") if asn == global_asn and target >= cfgm_common.BGP_RTGT_MIN_ID: return (False, "Configured route target must use ASN that is " "different from global ASN or route target value must" " be less than %d" % cfgm_common.BGP_RTGT_MIN_ID) return (True, '') # end _check_route_targets @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'virtual_networks', 'obj_type': 'virtual-network', 'user_visibility': user_visibility} (ok, response) = vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) if not ok: return (ok, response) db_conn.update_subnet_uuid(obj_dict) (ok, error) = cls._check_route_targets(obj_dict, db_conn) if not ok: return (False, (400, error)) try: cls.addr_mgmt.net_create_req(obj_dict) except Exception as e: return (False, (500, str(e))) return True, "" # end http_post_collection @classmethod def http_post_collection_fail(cls, tenant_name, obj_dict, db_conn): cls.addr_mgmt.net_delete_req(obj_dict) return True, "" # end post_collection_fail @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): if ((fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric subnet updates return True, "" if 'network_ipam_refs' not in obj_dict: # NOP for addr-mgmt module return True, "" vn_id = {'uuid': id} (read_ok, read_result) = db_conn.dbe_read('virtual-network', vn_id) if not read_ok: return (False, (500, read_result)) (ok, result) = cls.addr_mgmt.net_check_subnet(read_result, obj_dict) if not ok: return (ok, (409, result)) (ok, result) = cls.addr_mgmt.net_check_subnet_quota(read_result, obj_dict, db_conn) if not ok: return (ok, (403, result)) (ok, result) = cls.addr_mgmt.net_check_subnet_overlap(read_result, obj_dict) if not ok: return (ok, (409, result)) (ok, result) = cls.addr_mgmt.net_check_subnet_delete(read_result, obj_dict) if not ok: return (ok, (409, result)) try: cls.addr_mgmt.net_update_req(fq_name, read_result, obj_dict, id) except Exception as e: return (False, (500, str(e))) db_conn.update_subnet_uuid(obj_dict) (ok, error) = cls._check_route_targets(obj_dict, db_conn) if not ok: return (False, (400, error)) return True, "" # end http_put @classmethod def http_put_fail(cls, id, fq_name, obj_dict, db_conn): if ((fq_name == cfgm_common.IP_FABRIC_VN_FQ_NAME) or (fq_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME)): # Ignore ip-fabric subnet updates return True, "" ipam_refs = obj_dict.get('network_ipam_refs', None) if not ipam_refs: # NOP for addr-mgmt module return True, "" vn_id = {'uuid': id} (read_ok, read_result) = db_conn.dbe_read('virtual-network', vn_id) if not read_ok: return (False, (500, read_result)) cls.addr_mgmt.net_update_req(fq_name, obj_dict, read_result, id) # end http_put_fail @classmethod def http_delete(cls, id, obj_dict, db_conn): cls.addr_mgmt.net_delete_req(obj_dict) return True, "" # end http_delete @classmethod def http_delete_fail(cls, id, obj_dict, db_conn): cls.addr_mgmt.net_create_req(obj_dict) return True, "" # end http_delete_fail @classmethod def ip_alloc(cls, vn_fq_name, subnet_name, count): ip_list = [cls.addr_mgmt.ip_alloc_req(vn_fq_name, subnet_name) for i in range(count)] msg = 'AddrMgmt: reserve %d IP for vn=%s, subnet=%s - %s' \ % (count, vn_fq_name, subnet_name if subnet_name else '', ip_list) cls.addr_mgmt.config_log(msg, level=SandeshLevel.SYS_DEBUG) return {'ip_addr': ip_list} # end ip_alloc @classmethod def ip_free(cls, vn_fq_name, subnet_name, ip_list): msg = 'AddrMgmt: release IP %s for vn=%s, subnet=%s' \ % (ip_list, vn_fq_name, subnet_name if subnet_name else '') cls.addr_mgmt.config_log(msg, level=SandeshLevel.SYS_DEBUG) for ip_addr in ip_list: cls.addr_mgmt.ip_free_req(ip_addr, vn_fq_name, subnet_name) # end ip_free @classmethod def subnet_ip_count(cls, obj_dict, subnet_list): ip_count_list = [] for item in subnet_list: ip_count_list.append(cls.addr_mgmt.ip_count(obj_dict, item)) return {'ip_count_list': ip_count_list} # end subnet_ip_count @classmethod def dbe_create_notification(cls, obj_ids, obj_dict): cls.addr_mgmt.net_create_notify(obj_ids, obj_dict) # end dbe_create_notification @classmethod def dbe_update_notification(cls, obj_ids): cls.addr_mgmt.net_update_notify(obj_ids) # end dbe_update_notification @classmethod def dbe_delete_notification(cls, obj_ids, obj_dict): cls.addr_mgmt.net_delete_notify(obj_ids, obj_dict) # end dbe_update_notification # end class VirtualNetworkServer class NetworkIpamServer(NetworkIpamServerGen): @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): ipam_uuid = obj_dict['uuid'] ipam_id = {'uuid': ipam_uuid} (read_ok, read_result) = db_conn.dbe_read('network-ipam', ipam_id) if not read_ok: return (False, (500, "Internal error : IPAM is not valid")) old_ipam_mgmt = read_result.get('network_ipam_mgmt') new_ipam_mgmt = obj_dict.get('network_ipam_mgmt') if not old_ipam_mgmt or not new_ipam_mgmt: return True, "" old_dns_method = old_ipam_mgmt.get('ipam_dns_method') new_dns_method = new_ipam_mgmt.get('ipam_dns_method') if not cls.is_change_allowed(old_dns_method, new_dns_method, obj_dict, db_conn): return (False, (409, "Cannot change DNS Method from " + old_dns_method + " to " + new_dns_method + " with active VMs referring to the IPAM")) return True, "" # end http_put @classmethod def http_put_fail(cls, id, fq_name, obj_dict, db_conn): # undo any state change done by http_put function return True, "" # end http_put_fail @classmethod def is_change_allowed(cls, old, new, obj_dict, db_conn): if (old == "default-dns-server" or old == "virtual-dns-server"): if ((new == "tenant-dns-server" or new == "none") and cls.is_active_vm_present(obj_dict, db_conn)): return False if (old == "tenant-dns-server" and new != old and cls.is_active_vm_present(obj_dict, db_conn)): return False if (old == "none" and new != old and cls.is_active_vm_present(obj_dict, db_conn)): return False return True # end is_change_allowed @classmethod def is_active_vm_present(cls, obj_dict, db_conn): if 'virtual_network_back_refs' in obj_dict: vn_backrefs = obj_dict['virtual_network_back_refs'] for vn in vn_backrefs: vn_uuid = vn['uuid'] vn_id = {'uuid': vn_uuid} (read_ok, read_result) = db_conn.dbe_read('virtual-network', vn_id) if not read_ok: continue if 'virtual_machine_interface_back_refs' in read_result: return True return False # end is_active_vm_present # end class NetworkIpamServer class VirtualDnsServer(VirtualDnsServerGen): generate_default_instance = False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): return cls.validate_dns_server(obj_dict, db_conn) # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): return cls.validate_dns_server(obj_dict, db_conn) # end http_put @classmethod def http_put_fail(cls, id, fq_name, obj_dict, db_conn): # undo any state change done by http_put function return True, "" # end http_put_fail @classmethod def http_delete(cls, id, obj_dict, db_conn): vdns_name = ":".join(obj_dict['fq_name']) if 'parent_uuid' in obj_dict: domain_uuid = obj_dict['parent_uuid'] domain_id = {'uuid': domain_uuid} (read_ok, read_result) = db_conn.dbe_read('domain', domain_id) if not read_ok: return ( False, (500, "Internal error : Virtual DNS is not in a domain")) virtual_DNSs = read_result.get('virtual_DNSs', []) for vdns in virtual_DNSs: vdns_uuid = vdns['uuid'] vdns_id = {'uuid': vdns_uuid} (read_ok, read_result) = db_conn.dbe_read('virtual-DNS', vdns_id) if not read_ok: return ( False, (500, "Internal error : Unable to read Virtual DNS data")) vdns_data = read_result['virtual_DNS_data'] if 'next_virtual_DNS' in vdns_data: if vdns_data['next_virtual_DNS'] == vdns_name: return ( False, (403, "Virtual DNS server is referred" " by other virtual DNS servers")) return True, "" # end http_delete @classmethod def http_delete_fail(cls, id, obj_dict, db_conn): # undo any state change done by http_delete function return True, "" # end http_delete_fail @classmethod def is_valid_dns_name(cls, name): if len(name) > 255: return False if name.endswith("."): # A single trailing dot is legal # strip exactly one dot from the right, if present name = name[:-1] disallowed = re.compile("[^A-Z\d-]", re.IGNORECASE) return all( # Split by labels and verify individually (label and len(label) <= 63 # length is within proper range # no bordering hyphens and not label.startswith("-") and not label.endswith("-") and not disallowed.search(label)) # contains only legal char for label in name.split(".")) # end is_valid_dns_name @classmethod def is_valid_ipv4_address(cls, address): parts = address.split(".") if len(parts) != 4: return False for item in parts: try: if not 0 <= int(item) <= 255: return False except ValueError: return False return True # end is_valid_ipv4_address @classmethod def validate_dns_server(cls, obj_dict, db_conn): virtual_dns = obj_dict['fq_name'][1] disallowed = re.compile("[^A-Z\d-]", re.IGNORECASE) if disallowed.search(virtual_dns) or virtual_dns.startswith("-"): return (False, (403, "Special characters are not allowed in " + "Virtual DNS server name")) vdns_data = obj_dict['virtual_DNS_data'] if not cls.is_valid_dns_name(vdns_data['domain_name']): return ( False, (403, "Domain name does not adhere to DNS name requirements")) record_order = ["fixed", "random", "round-robin"] if not str(vdns_data['record_order']).lower() in record_order: return (False, (403, "Invalid value for record order")) ttl = vdns_data['default_ttl_seconds'] if ttl < 0 or ttl > 2147483647: return (False, (403, "Invalid value for TTL")) if 'next_virtual_DNS' in vdns_data: vdns_next = vdns_data['next_virtual_DNS'] if not vdns_next or vdns_next is None: return True, "" next_vdns = vdns_data['next_virtual_DNS'].split(":") # check that next vdns exists try: next_vdns_uuid = db_conn.fq_name_to_uuid( 'virtual_DNS', next_vdns) except Exception as e: if not cls.is_valid_ipv4_address( vdns_data['next_virtual_DNS']): return ( False, (403, "Invalid Virtual Forwarder(next virtual dns server)")) else: return True, "" # check that next virtual dns servers arent referring to each other # above check doesnt allow during create, but entry could be # modified later next_vdns_id = {'uuid': next_vdns_uuid} (read_ok, read_result) = db_conn.dbe_read( 'virtual-DNS', next_vdns_id) if read_ok: next_vdns_data = read_result['virtual_DNS_data'] if 'next_virtual_DNS' in next_vdns_data: vdns_name = ":".join(obj_dict['fq_name']) if next_vdns_data['next_virtual_DNS'] == vdns_name: return ( False, (403, "Cannot have Virtual DNS Servers " "referring to each other")) return True, "" # end validate_dns_server # end class VirtualDnsServer class VirtualDnsRecordServer(VirtualDnsRecordServerGen): generate_default_instance = False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): return cls.validate_dns_record(obj_dict, db_conn) # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): return cls.validate_dns_record(obj_dict, db_conn) # end http_put @classmethod def http_put_fail(cls, id, fq_name, obj_dict, db_conn): # undo any state change done by http_put function return True, "" # end http_put_fail @classmethod def http_delete(cls, id, obj_dict, db_conn): return True, "" # end http_delete @classmethod def http_delete_fail(cls, id, obj_dict, db_conn): # undo any state change done by http_delete function return True, "" # end http_delete_fail @classmethod def validate_dns_record(cls, obj_dict, db_conn): rec_data = obj_dict['virtual_DNS_record_data'] rec_types = ["a", "cname", "ptr", "ns"] rec_type = str(rec_data['record_type']).lower() if not rec_type in rec_types: return (False, (403, "Invalid record type")) if str(rec_data['record_class']).lower() != "in": return (False, (403, "Invalid record class")) rec_name = rec_data['record_name'] rec_value = rec_data['record_data'] # check rec_name validity if rec_type == "ptr": if (not VirtualDnsServer.is_valid_ipv4_address(rec_name) and not "in-addr.arpa" in rec_name.lower()): return ( False, (403, "PTR Record name has to be IP address" " or reverse.ip.in-addr.arpa")) elif not VirtualDnsServer.is_valid_dns_name(rec_name): return ( False, (403, "Record name does not adhere to DNS name requirements")) # check rec_data validity if rec_type == "a": if not VirtualDnsServer.is_valid_ipv4_address(rec_value): return (False, (403, "Invalid IP address")) elif rec_type == "cname" or rec_type == "ptr" or rec_type == "mx": if not VirtualDnsServer.is_valid_dns_name(rec_value): return ( False, (403, "Record data does not adhere to DNS name requirements")) elif rec_type == "ns": try: vdns_name = rec_value.split(":") vdns_uuid = db_conn.fq_name_to_uuid('virtual_DNS', vdns_name) except Exception as e: if (not VirtualDnsServer.is_valid_ipv4_address(rec_value) and not VirtualDnsServer.is_valid_dns_name(rec_value)): return ( False, (403, "Invalid virtual dns server in record data")) ttl = rec_data['record_ttl_seconds'] if ttl < 0 or ttl > 2147483647: return (False, (403, "Invalid value for TTL")) if rec_type == "mx": preference = rec_data['record_mx_preference'] if preference < 0 or preference > 65535: return (False, (403, "Invalid value for MX record preference")) return True, "" # end validate_dns_record # end class VirtualDnsRecordServer def _check_policy_rule_uuid(entries): if not entries: return for rule in entries.get('policy_rule') or []: if not rule.get('rule_uuid'): rule['rule_uuid'] = str(uuid.uuid4()) # end _check_policy_rule_uuid class SecurityGroupServer(SecurityGroupServerGen): generate_default_instance = False @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'security_groups', 'obj_type': 'security-group', 'user_visibility': user_visibility} (ok, response) = vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) if not ok: return (ok, response) _check_policy_rule_uuid(obj_dict.get('security_group_entries')) return True, "" # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): (ok, sec_dict) = db_conn.dbe_read('security-group', {'uuid': id}) if not ok: return (False, (500, 'Bad Security Group error : ' + pformat(sec_dict))) (ok, proj_dict) = vnc_quota.QuotaHelper.get_project_dict( sec_dict['parent_uuid'], db_conn) if not ok: return (False, (500, 'Bad Project error : ' + pformat(proj_dict))) if 'security_group_entries' in obj_dict: rule_count = len(obj_dict['security_group_entries']['policy_rule']) obj_type = 'security-group-rule' for sg in proj_dict.get('security_groups', []): if sg['uuid'] == sec_dict['uuid']: continue ok, sg_dict = db_conn.dbe_read('security-group', sg) if not ok: continue sge = sg_dict.get('security_group_entries', {}) rule_count += len(sge.get('policy_rule', [])) if sec_dict['id_perms'].get('user_visible', True) is not False: (ok, quota_limit) = vnc_quota.QuotaHelper.check_quota_limit( proj_dict, obj_type, rule_count-1) if not ok: return (False, (403, pformat(fq_name) + ' : ' + quota_limit)) _check_policy_rule_uuid(obj_dict.get('security_group_entries')) return True, "" # end http_put # end class SecurityGroupServer class NetworkPolicyServer(NetworkPolicyServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'network_policys', 'obj_type': 'network-policy', 'user_visibility': user_visibility} (ok, response) = vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) if not ok: return (ok, response) _check_policy_rule_uuid(obj_dict.get('network_policy_entries')) try: cls._check_policy(obj_dict) except Exception as e: return (False, (500, str(e))) return True, "" # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): p_id = {'uuid': id} (read_ok, read_result) = db_conn.dbe_read('network-policy', p_id) if not read_ok: return (False, (500, read_result)) _check_policy_rule_uuid(obj_dict.get('network_policy_entries')) return True, "" # end http_put @classmethod def _check_policy(cls, obj_dict): entries = obj_dict.get('network_policy_entries') if not entries: return # end _check_policy # end class NetworkPolicyServer class LogicalInterfaceServer(LogicalInterfaceServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): (ok, msg) = cls._check_vlan(obj_dict, db_conn) if ok == False: return (False, msg) vlan = 0 if 'logical_interface_vlan_tag' in obj_dict: vlan = obj_dict['logical_interface_vlan_tag'] return PhysicalInterfaceServer._check_interface_name(obj_dict, db_conn, vlan) # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): interface = {'uuid': id} (read_ok, read_result) = db_conn.dbe_read('logical-interface', interface) if not read_ok: return (False, (500, read_result)) # do not allow change in display name if 'display_name' in obj_dict: if obj_dict['display_name'] != read_result.get('display_name'): return (False, (403, "Cannot change display name !")) vlan = None if 'logical_interface_vlan_tag' in obj_dict: vlan = obj_dict['logical_interface_vlan_tag'] if 'logical_interface_vlan_tag' in read_result: if int(vlan) != int(read_result.get('logical_interface_vlan_tag')): return (False, (403, "Cannot change Vlan id")) return True, "" # end http_put @classmethod def _check_vlan(cls, obj_dict, db_conn): if 'logical_interface_vlan_tag' in obj_dict: vlan = obj_dict['logical_interface_vlan_tag'] if vlan < 0 or vlan > 4094: return (False, (403, "Invalid Vlan id")) return True, "" # end _check_vlan # end class LogicalInterfaceServer class PhysicalInterfaceServer(PhysicalInterfaceServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): return cls._check_interface_name(obj_dict, db_conn, None) # end http_post_collection @classmethod def http_put(cls, id, fq_name, obj_dict, db_conn): # do not allow change in display name if 'display_name' in obj_dict: interface = {'uuid': id} (read_ok, read_result) = db_conn.dbe_read('physical-interface', interface) if not read_ok: return (False, (500, read_result)) if obj_dict['display_name'] != read_result.get('display_name'): return (False, (403, "Cannot change display name !")) return True, "" # end http_put @classmethod def _check_interface_name(cls, obj_dict, db_conn, vlan_tag): interface_name = obj_dict['display_name'] router = obj_dict['fq_name'][:2] try: router_uuid = db_conn.fq_name_to_uuid('physical-router', router) except cfgm_common.exceptions.NoIdError: return (False, (500, 'Internal error : Physical router ' + ":".join(router) + ' not found')) physical_interface_uuid = "" if obj_dict['parent_type'] == 'physical-interface': try: physical_interface_name = obj_dict['fq_name'][:3] physical_interface_uuid = db_conn.fq_name_to_uuid('physical-interface', physical_interface_name) except cfgm_common.exceptions.NoIdError: return (False, (500, 'Internal error : Physical interface ' + ":".join(physical_interface_name) + ' not found')) (ok, physical_router) = db_conn.dbe_read('physical-router', {'uuid':router_uuid}) if not ok: return (False, (500, 'Internal error : Physical router ' + ":".join(router) + ' not found')) for physical_interface in physical_router.get('physical_interfaces', []): (ok, interface_object) = db_conn.dbe_read('physical-interface', {'uuid':physical_interface['uuid']}) if not ok: return (False, (500, 'Internal error : physical interface ' + physical_interface['uuid'] + ' not found')) if 'display_name' in interface_object: if interface_name == interface_object['display_name']: return (False, (403, "Display name already used in another interface :" + physical_interface['uuid'])) for logical_interface in interface_object.get('logical_interfaces', []): (ok, li_object) = db_conn.dbe_read('logical-interface', {'uuid':logical_interface['uuid']}) if not ok: return (False, (500, 'Internal error : logical interface ' + logical_interface['uuid'] + ' not found')) if 'display_name' in li_object: if interface_name == li_object['display_name']: return (False, (403, "Display name already used in another interface : " + logical_interface['uuid'])) if vlan_tag != None: # check vlan tags on the same physical interface if obj_dict['parent_type'] == 'physical-interface' and \ physical_interface['uuid'] != physical_interface_uuid: continue if 'logical_interface_vlan_tag' in li_object: if vlan_tag == int(li_object['logical_interface_vlan_tag']): return (False, (403, "Vlan tag already used in " + "another interface : " + logical_interface['uuid'])) for logical_interface in physical_router.get('logical_interfaces', []): (ok, li_object) = db_conn.dbe_read('logical-interface', {'uuid':logical_interface['uuid']}) if not ok: return (False, (500, 'Internal error : logical interface ' + logical_interface['uuid'] + ' not found')) if 'display_name' in li_object: if interface_name == li_object['display_name']: return (False, (403, "Display name already used in another interface : " + logical_interface['uuid'])) return True, "" # end _check_interface_name # end class PhysicalInterfaceServer class LoadbalancerMemberServer(LoadbalancerMemberServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) try: fq_name = obj_dict['fq_name'] proj_uuid = db_conn.fq_name_to_uuid('project', fq_name[0:2]) except cfgm_common.exceptions.NoIdError: return (False, (500, 'No Project ID error : ' + proj_uuid)) ok, proj_dict = db_conn.dbe_read('project', {'uuid': proj_uuid}) if not ok: return (False, (500, 'Internal error : ' + pformat(proj_dict))) if not user_visibility: return True, "" lb_pools = proj_dict.get('loadbalancer_pools', []) quota_count = 0 for pool in lb_pools: ok, lb_pool_dict = db_conn.dbe_read('loadbalancer-pool', {'uuid': pool['uuid']}) if not ok: return (False, (500, 'Internal error : ' + pformat(lb_pool_dict))) quota_count += len(lb_pool_dict.get('loadbalancer_members', [])) (ok, quota_limit) = vnc_quota.QuotaHelper.check_quota_limit( proj_dict, 'loadbalancer-member', quota_count) if not ok: return (False, (403, pformat(fq_name) + ' : ' + quota_limit)) return True, "" #end class LoadbalancerMemberServer class LoadbalancerPoolServer(LoadbalancerPoolServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'loadbalancer_pools', 'obj_type': 'loadbalancer-pool', 'user_visibility': user_visibility} return vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) # end class LoadbalancerPoolServer class LoadbalancerHealthmonitorServer(LoadbalancerHealthmonitorServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'loadbalancer_healthmonitors', 'obj_type': 'loadbalancer-healthmonitor', 'user_visibility': user_visibility} return vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) # end class LoadbalancerHealthmonitorServer class VirtualIpServer(VirtualIpServerGen): @classmethod def http_post_collection(cls, tenant_name, obj_dict, db_conn): user_visibility = obj_dict['id_perms'].get('user_visible', True) verify_quota_kwargs = {'db_conn': db_conn, 'fq_name': obj_dict['fq_name'], 'resource': 'virtual_ips', 'obj_type': 'virtual-ip', 'user_visibility': user_visibility} return vnc_quota.QuotaHelper.verify_quota_for_resource( **verify_quota_kwargs) # end class VirtualIpServer
cloudwatt/contrail-controller
src/config/api-server/vnc_cfg_types.py
Python
apache-2.0
49,718
import bz2 from collections import Counter from contextlib import contextmanager from datetime import datetime from functools import wraps import gzip import http.client import os import re from shutil import rmtree import string import tempfile import traceback from typing import Union, cast import warnings import zipfile import numpy as np from numpy.random import rand, randn from pandas._config.localization import ( # noqa:F401 can_set_locale, get_locales, set_locale, ) import pandas._libs.testing as _testing from pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback from pandas.core.dtypes.common import ( is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_interval_dtype, is_list_like, is_number, is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, IntervalIndex, MultiIndex, RangeIndex, Series, bdate_range, ) from pandas.core.algorithms import take_1d from pandas.core.arrays import ( DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray, period_array, ) from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing lzma = _import_lzma() N = 30 K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False # set testing_mode _testing_mode_warnings = (DeprecationWarning, ResourceWarning) def set_testing_mode(): # set the testing mode filters testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None") if "deprecate" in testing_mode: warnings.simplefilter("always", _testing_mode_warnings) def reset_testing_mode(): # reset the testing mode filters testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None") if "deprecate" in testing_mode: warnings.simplefilter("ignore", _testing_mode_warnings) set_testing_mode() def reset_display_options(): """ Reset the display options for printing and representing objects. """ pd.reset_option("^display.", silent=True) def round_trip_pickle(obj, path=None): """ Pickle an object and then read it again. Parameters ---------- obj : pandas object The object to pickle and then re-read. path : str, default None The path where the pickled object is written and then read. Returns ------- round_trip_pickled_object : pandas object The original object that was pickled and then re-read. """ if path is None: path = "__{random_bytes}__.pickle".format(random_bytes=rands(10)) with ensure_clean(path) as path: pd.to_pickle(obj, path) return pd.read_pickle(path) def round_trip_pathlib(writer, reader, path=None): """ Write an object to file specified by a pathlib.Path and read it back Parameters ---------- writer : callable bound to pandas object IO writing function (e.g. DataFrame.to_csv ) reader : callable IO reading function (e.g. pd.read_csv ) path : str, default None The path where the object is written and then read. Returns ------- round_trip_object : pandas object The original object that was serialized and then re-read. """ import pytest Path = pytest.importorskip("pathlib").Path if path is None: path = "___pathlib___" with ensure_clean(path) as path: writer(Path(path)) obj = reader(Path(path)) return obj def round_trip_localpath(writer, reader, path=None): """ Write an object to file specified by a py.path LocalPath and read it back Parameters ---------- writer : callable bound to pandas object IO writing function (e.g. DataFrame.to_csv ) reader : callable IO reading function (e.g. pd.read_csv ) path : str, default None The path where the object is written and then read. Returns ------- round_trip_object : pandas object The original object that was serialized and then re-read. """ import pytest LocalPath = pytest.importorskip("py.path").local if path is None: path = "___localpath___" with ensure_clean(path) as path: writer(LocalPath(path)) obj = reader(LocalPath(path)) return obj @contextmanager def decompress_file(path, compression): """ Open a compressed file and return a file object Parameters ---------- path : str The path where the file is read from compression : {'gzip', 'bz2', 'zip', 'xz', None} Name of the decompression to use Returns ------- f : file object """ if compression is None: f = open(path, "rb") elif compression == "gzip": f = gzip.open(path, "rb") elif compression == "bz2": f = bz2.BZ2File(path, "rb") elif compression == "xz": f = _get_lzma_file(lzma)(path, "rb") elif compression == "zip": zip_file = zipfile.ZipFile(path) zip_names = zip_file.namelist() if len(zip_names) == 1: f = zip_file.open(zip_names.pop()) else: raise ValueError("ZIP file {} error. Only one file per ZIP.".format(path)) else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) try: yield f finally: f.close() if compression == "zip": zip_file.close() def write_to_compressed(compression, path, data, dest="test"): """ Write data to a compressed file. Parameters ---------- compression : {'gzip', 'bz2', 'zip', 'xz'} The compression type to use. path : str The file path to write the data. data : str The data to write. dest : str, default "test" The destination file (for ZIP only) Raises ------ ValueError : An invalid compression value was passed in. """ if compression == "zip": import zipfile compress_method = zipfile.ZipFile elif compression == "gzip": import gzip compress_method = gzip.GzipFile elif compression == "bz2": import bz2 compress_method = bz2.BZ2File elif compression == "xz": compress_method = _get_lzma_file(lzma) else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) if compression == "zip": mode = "w" args = (dest, data) method = "writestr" else: mode = "wb" args = (data,) method = "write" with compress_method(path, mode=mode) as f: getattr(f, method)(*args) def assert_almost_equal( left, right, check_dtype="equiv", check_less_precise=False, **kwargs ): """ Check that the left and right objects are approximately equal. By approximately equal, we refer to objects that are numbers or that contain numbers which may be equivalent to specific levels of precision. Parameters ---------- left : object right : object check_dtype : bool / string {'equiv'}, default 'equiv' Check dtype if both a and b are the same type. If 'equiv' is passed in, then `RangeIndex` and `Int64Index` are also considered equivalent when doing type checking. check_less_precise : bool or int, default False Specify comparison precision. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the number of digits to compare. When comparing two numbers, if the first number has magnitude less than 1e-5, we compare the two numbers directly and check whether they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. """ if isinstance(left, pd.Index): assert_index_equal( left, right, check_exact=False, exact=check_dtype, check_less_precise=check_less_precise, **kwargs ) elif isinstance(left, pd.Series): assert_series_equal( left, right, check_exact=False, check_dtype=check_dtype, check_less_precise=check_less_precise, **kwargs ) elif isinstance(left, pd.DataFrame): assert_frame_equal( left, right, check_exact=False, check_dtype=check_dtype, check_less_precise=check_less_precise, **kwargs ) else: # Other sequences. if check_dtype: if is_number(left) and is_number(right): # Do not compare numeric classes, like np.float64 and float. pass elif is_bool(left) and is_bool(right): # Do not compare bool classes, like np.bool_ and bool. pass else: if isinstance(left, np.ndarray) or isinstance(right, np.ndarray): obj = "numpy array" else: obj = "Input" assert_class_equal(left, right, obj=obj) _testing.assert_almost_equal( left, right, check_dtype=check_dtype, check_less_precise=check_less_precise, **kwargs ) def _check_isinstance(left, right, cls): """ Helper method for our assert_* methods that ensures that the two objects being compared have the right type before proceeding with the comparison. Parameters ---------- left : The first object being compared. right : The second object being compared. cls : The class type to check against. Raises ------ AssertionError : Either `left` or `right` is not an instance of `cls`. """ err_msg = "{name} Expected type {exp_type}, found {act_type} instead" cls_name = cls.__name__ if not isinstance(left, cls): raise AssertionError( err_msg.format(name=cls_name, exp_type=cls, act_type=type(left)) ) if not isinstance(right, cls): raise AssertionError( err_msg.format(name=cls_name, exp_type=cls, act_type=type(right)) ) def assert_dict_equal(left, right, compare_keys=True): _check_isinstance(left, right, dict) _testing.assert_dict_equal(left, right, compare_keys=compare_keys) def randbool(size=(), p=0.5): return rand(*size) <= p RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1)) RANDU_CHARS = np.array( list("".join(map(chr, range(1488, 1488 + 26))) + string.digits), dtype=(np.unicode_, 1), ) def rands_array(nchars, size, dtype="O"): """Generate an array of byte strings.""" retval = ( np.random.choice(RANDS_CHARS, size=nchars * np.prod(size)) .view((np.str_, nchars)) .reshape(size) ) if dtype is None: return retval else: return retval.astype(dtype) def randu_array(nchars, size, dtype="O"): """Generate an array of unicode strings.""" retval = ( np.random.choice(RANDU_CHARS, size=nchars * np.prod(size)) .view((np.unicode_, nchars)) .reshape(size) ) if dtype is None: return retval else: return retval.astype(dtype) def rands(nchars): """ Generate one random byte string. See `rands_array` if you want to create an array of random strings. """ return "".join(np.random.choice(RANDS_CHARS, nchars)) def randu(nchars): """ Generate one random unicode string. See `randu_array` if you want to create an array of random unicode strings. """ return "".join(np.random.choice(RANDU_CHARS, nchars)) def close(fignum=None): from matplotlib.pyplot import get_fignums, close as _close if fignum is None: for fignum in get_fignums(): _close(fignum) else: _close(fignum) # ----------------------------------------------------------------------------- # contextmanager to ensure the file cleanup @contextmanager def ensure_clean(filename=None, return_filelike=False): """Gets a temporary path and agrees to remove on close. Parameters ---------- filename : str (optional) if None, creates a temporary file which is then removed when out of scope. if passed, creates temporary file with filename as ending. return_filelike : bool (default False) if True, returns a file-like which is *always* cleaned. Necessary for savefig and other functions which want to append extensions. """ filename = filename or "" fd = None if return_filelike: f = tempfile.TemporaryFile(suffix=filename) try: yield f finally: f.close() else: # don't generate tempfile if using a path with directory specified if len(os.path.dirname(filename)): raise ValueError("Can't pass a qualified name to ensure_clean()") try: fd, filename = tempfile.mkstemp(suffix=filename) except UnicodeEncodeError: import pytest pytest.skip("no unicode file names on this system") try: yield filename finally: try: os.close(fd) except Exception: print( "Couldn't close file descriptor: {fdesc} (file: {fname})".format( fdesc=fd, fname=filename ) ) try: if os.path.exists(filename): os.remove(filename) except Exception as e: print("Exception on removing file: {error}".format(error=e)) @contextmanager def ensure_clean_dir(): """ Get a temporary directory path and agrees to remove on close. Yields ------ Temporary directory path """ directory_name = tempfile.mkdtemp(suffix="") try: yield directory_name finally: try: rmtree(directory_name) except Exception: pass @contextmanager def ensure_safe_environment_variables(): """ Get a context manager to safely set environment variables All changes will be undone on close, hence environment variables set within this contextmanager will neither persist nor change global state. """ saved_environ = dict(os.environ) try: yield finally: os.environ.clear() os.environ.update(saved_environ) # ----------------------------------------------------------------------------- # Comparators def equalContents(arr1, arr2): """Checks if the set of unique elements of arr1 and arr2 are equivalent. """ return frozenset(arr1) == frozenset(arr2) def assert_index_equal( left: Index, right: Index, exact: Union[bool, str] = "equiv", check_names: bool = True, check_less_precise: Union[bool, int] = False, check_exact: bool = True, check_categorical: bool = True, obj: str = "Index", ) -> None: """Check that left and right Index are equal. Parameters ---------- left : Index right : Index exact : bool / string {'equiv'}, default 'equiv' Whether to check the Index class, dtype and inferred_type are identical. If 'equiv', then RangeIndex can be substituted for Int64Index as well. check_names : bool, default True Whether to check the names attribute. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare check_exact : bool, default True Whether to compare number exactly. check_categorical : bool, default True Whether to compare internal Categorical exactly. obj : str, default 'Index' Specify object name being compared, internally used to show appropriate assertion message """ __tracebackhide__ = True def _check_types(l, r, obj="Index"): if exact: assert_class_equal(l, r, exact=exact, obj=obj) # Skip exact dtype checking when `check_categorical` is False if check_categorical: assert_attr_equal("dtype", l, r, obj=obj) # allow string-like to have different inferred_types if l.inferred_type in ("string", "unicode"): assert r.inferred_type in ("string", "unicode") else: assert_attr_equal("inferred_type", l, r, obj=obj) def _get_ilevel_values(index, level): # accept level number only unique = index.levels[level] labels = index.codes[level] filled = take_1d(unique.values, labels, fill_value=unique._na_value) values = unique._shallow_copy(filled, name=index.names[level]) return values # instance validation _check_isinstance(left, right, Index) # class / dtype comparison _check_types(left, right, obj=obj) # level comparison if left.nlevels != right.nlevels: msg1 = "{obj} levels are different".format(obj=obj) msg2 = "{nlevels}, {left}".format(nlevels=left.nlevels, left=left) msg3 = "{nlevels}, {right}".format(nlevels=right.nlevels, right=right) raise_assert_detail(obj, msg1, msg2, msg3) # length comparison if len(left) != len(right): msg1 = "{obj} length are different".format(obj=obj) msg2 = "{length}, {left}".format(length=len(left), left=left) msg3 = "{length}, {right}".format(length=len(right), right=right) raise_assert_detail(obj, msg1, msg2, msg3) # MultiIndex special comparison for little-friendly error messages if left.nlevels > 1: left = cast(MultiIndex, left) right = cast(MultiIndex, right) for level in range(left.nlevels): # cannot use get_level_values here because it can change dtype llevel = _get_ilevel_values(left, level) rlevel = _get_ilevel_values(right, level) lobj = "MultiIndex level [{level}]".format(level=level) assert_index_equal( llevel, rlevel, exact=exact, check_names=check_names, check_less_precise=check_less_precise, check_exact=check_exact, obj=lobj, ) # get_level_values may change dtype _check_types(left.levels[level], right.levels[level], obj=obj) # skip exact index checking when `check_categorical` is False if check_exact and check_categorical: if not left.equals(right): diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left) msg = "{obj} values are different ({pct} %)".format( obj=obj, pct=np.round(diff, 5) ) raise_assert_detail(obj, msg, left, right) else: _testing.assert_almost_equal( left.values, right.values, check_less_precise=check_less_precise, check_dtype=exact, obj=obj, lobj=left, robj=right, ) # metadata comparison if check_names: assert_attr_equal("names", left, right, obj=obj) if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex): assert_attr_equal("freq", left, right, obj=obj) if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex): assert_interval_array_equal(left.values, right.values) if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): assert_categorical_equal( left.values, right.values, obj="{obj} category".format(obj=obj) ) def assert_class_equal(left, right, exact=True, obj="Input"): """checks classes are equal.""" __tracebackhide__ = True def repr_class(x): if isinstance(x, Index): # return Index as it is to include values in the error message return x try: return x.__class__.__name__ except AttributeError: return repr(type(x)) if exact == "equiv": if type(left) != type(right): # allow equivalence of Int64Index/RangeIndex types = {type(left).__name__, type(right).__name__} if len(types - {"Int64Index", "RangeIndex"}): msg = "{obj} classes are not equivalent".format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) elif exact: if type(left) != type(right): msg = "{obj} classes are different".format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) def assert_attr_equal(attr, left, right, obj="Attributes"): """checks attributes are equal. Both objects must have attribute. Parameters ---------- attr : str Attribute name being compared. left : object right : object obj : str, default 'Attributes' Specify object name being compared, internally used to show appropriate assertion message """ __tracebackhide__ = True left_attr = getattr(left, attr) right_attr = getattr(right, attr) if left_attr is right_attr: return True elif ( is_number(left_attr) and np.isnan(left_attr) and is_number(right_attr) and np.isnan(right_attr) ): # np.nan return True try: result = left_attr == right_attr except TypeError: # datetimetz on rhs may raise TypeError result = False if not isinstance(result, bool): result = result.all() if result: return True else: msg = 'Attribute "{attr}" are different'.format(attr=attr) raise_assert_detail(obj, msg, left_attr, right_attr) def assert_is_valid_plot_return_object(objs): import matplotlib.pyplot as plt if isinstance(objs, (pd.Series, np.ndarray)): for el in objs.ravel(): msg = ( "one of 'objs' is not a matplotlib Axes instance, type " "encountered {name!r}" ).format(name=el.__class__.__name__) assert isinstance(el, (plt.Axes, dict)), msg else: assert isinstance(objs, (plt.Artist, tuple, dict)), ( "objs is neither an ndarray of Artist instances nor a " 'single Artist instance, tuple, or dict, "objs" is a {name!r}'.format( name=objs.__class__.__name__ ) ) def isiterable(obj): return hasattr(obj, "__iter__") def assert_is_sorted(seq): """Assert that the sequence is sorted.""" if isinstance(seq, (Index, Series)): seq = seq.values # sorting does not change precisions assert_numpy_array_equal(seq, np.sort(np.array(seq))) def assert_categorical_equal( left, right, check_dtype=True, check_category_order=True, obj="Categorical" ): """Test that Categoricals are equivalent. Parameters ---------- left : Categorical right : Categorical check_dtype : bool, default True Check that integer dtype of the codes are the same check_category_order : bool, default True Whether the order of the categories should be compared, which implies identical integer codes. If False, only the resulting values are compared. The ordered attribute is checked regardless. obj : str, default 'Categorical' Specify object name being compared, internally used to show appropriate assertion message """ _check_isinstance(left, right, Categorical) if check_category_order: assert_index_equal( left.categories, right.categories, obj="{obj}.categories".format(obj=obj) ) assert_numpy_array_equal( left.codes, right.codes, check_dtype=check_dtype, obj="{obj}.codes".format(obj=obj), ) else: assert_index_equal( left.categories.sort_values(), right.categories.sort_values(), obj="{obj}.categories".format(obj=obj), ) assert_index_equal( left.categories.take(left.codes), right.categories.take(right.codes), obj="{obj}.values".format(obj=obj), ) assert_attr_equal("ordered", left, right, obj=obj) def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"): """Test that two IntervalArrays are equivalent. Parameters ---------- left, right : IntervalArray The IntervalArrays to compare. exact : bool / string {'equiv'}, default 'equiv' Whether to check the Index class, dtype and inferred_type are identical. If 'equiv', then RangeIndex can be substituted for Int64Index as well. obj : str, default 'IntervalArray' Specify object name being compared, internally used to show appropriate assertion message """ _check_isinstance(left, right, IntervalArray) assert_index_equal( left.left, right.left, exact=exact, obj="{obj}.left".format(obj=obj) ) assert_index_equal( left.right, right.right, exact=exact, obj="{obj}.left".format(obj=obj) ) assert_attr_equal("closed", left, right, obj=obj) def assert_period_array_equal(left, right, obj="PeriodArray"): _check_isinstance(left, right, PeriodArray) assert_numpy_array_equal( left._data, right._data, obj="{obj}.values".format(obj=obj) ) assert_attr_equal("freq", left, right, obj=obj) def assert_datetime_array_equal(left, right, obj="DatetimeArray"): __tracebackhide__ = True _check_isinstance(left, right, DatetimeArray) assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj)) assert_attr_equal("freq", left, right, obj=obj) assert_attr_equal("tz", left, right, obj=obj) def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"): __tracebackhide__ = True _check_isinstance(left, right, TimedeltaArray) assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj)) assert_attr_equal("freq", left, right, obj=obj) def raise_assert_detail(obj, message, left, right, diff=None): __tracebackhide__ = True if isinstance(left, np.ndarray): left = pprint_thing(left) elif is_categorical_dtype(left): left = repr(left) if isinstance(right, np.ndarray): right = pprint_thing(right) elif is_categorical_dtype(right): right = repr(right) msg = """{obj} are different {message} [left]: {left} [right]: {right}""".format( obj=obj, message=message, left=left, right=right ) if diff is not None: msg += "\n[diff]: {diff}".format(diff=diff) raise AssertionError(msg) def assert_numpy_array_equal( left, right, strict_nan=False, check_dtype=True, err_msg=None, check_same=None, obj="numpy array", ): """ Checks that 'np.ndarray' is equivalent Parameters ---------- left : np.ndarray or iterable right : np.ndarray or iterable strict_nan : bool, default False If True, consider NaN and None to be different. check_dtype: bool, default True check dtype if both a and b are np.ndarray err_msg : str, default None If provided, used as assertion message check_same : None|'copy'|'same', default None Ensure left and right refer/do not refer to the same memory area obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate assertion message """ __tracebackhide__ = True # instance validation # Show a detailed error message when classes are different assert_class_equal(left, right, obj=obj) # both classes must be an np.ndarray _check_isinstance(left, right, np.ndarray) def _get_base(obj): return obj.base if getattr(obj, "base", None) is not None else obj left_base = _get_base(left) right_base = _get_base(right) if check_same == "same": if left_base is not right_base: msg = "{left!r} is not {right!r}".format(left=left_base, right=right_base) raise AssertionError(msg) elif check_same == "copy": if left_base is right_base: msg = "{left!r} is {right!r}".format(left=left_base, right=right_base) raise AssertionError(msg) def _raise(left, right, err_msg): if err_msg is None: if left.shape != right.shape: raise_assert_detail( obj, "{obj} shapes are different".format(obj=obj), left.shape, right.shape, ) diff = 0 for l, r in zip(left, right): # count up differences if not array_equivalent(l, r, strict_nan=strict_nan): diff += 1 diff = diff * 100.0 / left.size msg = "{obj} values are different ({pct} %)".format( obj=obj, pct=np.round(diff, 5) ) raise_assert_detail(obj, msg, left, right) raise AssertionError(err_msg) # compare shape and values if not array_equivalent(left, right, strict_nan=strict_nan): _raise(left, right, err_msg) if check_dtype: if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): assert_attr_equal("dtype", left, right, obj=obj) def assert_extension_array_equal( left, right, check_dtype=True, check_less_precise=False, check_exact=False ): """Check that left and right ExtensionArrays are equal. Parameters ---------- left, right : ExtensionArray The two arrays to compare check_dtype : bool, default True Whether to check if the ExtensionArray dtypes are identical. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare. check_exact : bool, default False Whether to compare number exactly. Notes ----- Missing values are checked separately from valid values. A mask of missing values is computed for each and checked to match. The remaining all-valid values are cast to object dtype and checked. """ assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" if check_dtype: assert_attr_equal("dtype", left, right, obj="ExtensionArray") if hasattr(left, "asi8") and type(right) == type(left): # Avoid slow object-dtype comparisons assert_numpy_array_equal(left.asi8, right.asi8) return left_na = np.asarray(left.isna()) right_na = np.asarray(right.isna()) assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask") left_valid = np.asarray(left[~left_na].astype(object)) right_valid = np.asarray(right[~right_na].astype(object)) if check_exact: assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray") else: _testing.assert_almost_equal( left_valid, right_valid, check_dtype=check_dtype, check_less_precise=check_less_precise, obj="ExtensionArray", ) # This could be refactored to use the NDFrame.equals method def assert_series_equal( left, right, check_dtype=True, check_index_type="equiv", check_series_type=True, check_less_precise=False, check_names=True, check_exact=False, check_datetimelike_compat=False, check_categorical=True, obj="Series", ): """Check that left and right Series are equal. Parameters ---------- left : Series right : Series check_dtype : bool, default True Whether to check the Series dtype is identical. check_index_type : bool / string {'equiv'}, default 'equiv' Whether to check the Index class, dtype and inferred_type are identical. check_series_type : bool, default True Whether to check the Series class is identical. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare. When comparing two numbers, if the first number has magnitude less than 1e-5, we compare the two numbers directly and check whether they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. check_names : bool, default True Whether to check the Series and Index names attribute. check_exact : bool, default False Whether to compare number exactly. check_datetimelike_compat : bool, default False Compare datetime-like which is comparable ignoring dtype. check_categorical : bool, default True Whether to compare internal Categorical exactly. obj : str, default 'Series' Specify object name being compared, internally used to show appropriate assertion message. """ __tracebackhide__ = True # instance validation _check_isinstance(left, right, Series) if check_series_type: # ToDo: There are some tests using rhs is sparse # lhs is dense. Should use assert_class_equal in future assert isinstance(left, type(right)) # assert_class_equal(left, right, obj=obj) # length comparison if len(left) != len(right): msg1 = "{len}, {left}".format(len=len(left), left=left.index) msg2 = "{len}, {right}".format(len=len(right), right=right.index) raise_assert_detail(obj, "Series length are different", msg1, msg2) # index comparison assert_index_equal( left.index, right.index, exact=check_index_type, check_names=check_names, check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, obj="{obj}.index".format(obj=obj), ) if check_dtype: # We want to skip exact dtype checking when `check_categorical` # is False. We'll still raise if only one is a `Categorical`, # regardless of `check_categorical` if ( is_categorical_dtype(left) and is_categorical_dtype(right) and not check_categorical ): pass else: assert_attr_equal("dtype", left, right) if check_exact: assert_numpy_array_equal( left._internal_get_values(), right._internal_get_values(), check_dtype=check_dtype, obj="{obj}".format(obj=obj), ) elif check_datetimelike_compat: # we want to check only if we have compat dtypes # e.g. integer and M|m are NOT compat, but we can simply check # the values in that case if needs_i8_conversion(left) or needs_i8_conversion(right): # datetimelike may have different objects (e.g. datetime.datetime # vs Timestamp) but will compare equal if not Index(left.values).equals(Index(right.values)): msg = ( "[datetimelike_compat=True] {left} is not equal to " "{right}." ).format(left=left.values, right=right.values) raise AssertionError(msg) else: assert_numpy_array_equal( left._internal_get_values(), right._internal_get_values(), check_dtype=check_dtype, ) elif is_interval_dtype(left) or is_interval_dtype(right): assert_interval_array_equal(left.array, right.array) elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype): # .values is an ndarray, but ._values is the ExtensionArray. # TODO: Use .array assert is_extension_array_dtype(right.dtype) assert_extension_array_equal(left._values, right._values) elif ( is_extension_array_dtype(left) and not is_categorical_dtype(left) and is_extension_array_dtype(right) and not is_categorical_dtype(right) ): assert_extension_array_equal(left.array, right.array) else: _testing.assert_almost_equal( left._internal_get_values(), right._internal_get_values(), check_less_precise=check_less_precise, check_dtype=check_dtype, obj="{obj}".format(obj=obj), ) # metadata comparison if check_names: assert_attr_equal("name", left, right, obj=obj) if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): assert_categorical_equal( left.values, right.values, obj="{obj} category".format(obj=obj) ) # This could be refactored to use the NDFrame.equals method def assert_frame_equal( left, right, check_dtype=True, check_index_type="equiv", check_column_type="equiv", check_frame_type=True, check_less_precise=False, check_names=True, by_blocks=False, check_exact=False, check_datetimelike_compat=False, check_categorical=True, check_like=False, obj="DataFrame", ): """ Check that left and right DataFrame are equal. This function is intended to compare two DataFrames and output any differences. Is is mostly intended for use in unit tests. Additional parameters allow varying the strictness of the equality checks performed. Parameters ---------- left : DataFrame First DataFrame to compare. right : DataFrame Second DataFrame to compare. check_dtype : bool, default True Whether to check the DataFrame dtype is identical. check_index_type : bool / string {'equiv'}, default 'equiv' Whether to check the Index class, dtype and inferred_type are identical. check_column_type : bool / string {'equiv'}, default 'equiv' Whether to check the columns class, dtype and inferred_type are identical. Is passed as the ``exact`` argument of :func:`assert_index_equal`. check_frame_type : bool, default True Whether to check the DataFrame class is identical. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare. When comparing two numbers, if the first number has magnitude less than 1e-5, we compare the two numbers directly and check whether they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. check_names : bool, default True Whether to check that the `names` attribute for both the `index` and `column` attributes of the DataFrame is identical, i.e. * left.index.names == right.index.names * left.columns.names == right.columns.names by_blocks : bool, default False Specify how to compare internal data. If False, compare by columns. If True, compare by blocks. check_exact : bool, default False Whether to compare number exactly. check_datetimelike_compat : bool, default False Compare datetime-like which is comparable ignoring dtype. check_categorical : bool, default True Whether to compare internal Categorical exactly. check_like : bool, default False If True, ignore the order of index & columns. Note: index labels must match their respective rows (same as in columns) - same labels must be with the same data. obj : str, default 'DataFrame' Specify object name being compared, internally used to show appropriate assertion message. See Also -------- assert_series_equal : Equivalent method for asserting Series equality. DataFrame.equals : Check DataFrame equality. Examples -------- This example shows comparing two DataFrames that are equal but with columns of differing dtypes. >>> from pandas.util.testing import assert_frame_equal >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}) df1 equals itself. >>> assert_frame_equal(df1, df1) df1 differs from df2 as column 'b' is of a different type. >>> assert_frame_equal(df1, df2) Traceback (most recent call last): AssertionError: Attributes are different ... Attribute "dtype" are different [left]: int64 [right]: float64 Ignore differing dtypes in columns with check_dtype. >>> assert_frame_equal(df1, df2, check_dtype=False) """ __tracebackhide__ = True # instance validation _check_isinstance(left, right, DataFrame) if check_frame_type: # ToDo: There are some tests using rhs is SparseDataFrame # lhs is DataFrame. Should use assert_class_equal in future assert isinstance(left, type(right)) # assert_class_equal(left, right, obj=obj) # shape comparison if left.shape != right.shape: raise_assert_detail( obj, "{obj} shape mismatch".format(obj=obj), "{shape!r}".format(shape=left.shape), "{shape!r}".format(shape=right.shape), ) if check_like: left, right = left.reindex_like(right), right # index comparison assert_index_equal( left.index, right.index, exact=check_index_type, check_names=check_names, check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, obj="{obj}.index".format(obj=obj), ) # column comparison assert_index_equal( left.columns, right.columns, exact=check_column_type, check_names=check_names, check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, obj="{obj}.columns".format(obj=obj), ) # compare by blocks if by_blocks: rblocks = right._to_dict_of_blocks() lblocks = left._to_dict_of_blocks() for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): assert dtype in lblocks assert dtype in rblocks assert_frame_equal( lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj ) # compare by columns else: for i, col in enumerate(left.columns): assert col in right lcol = left.iloc[:, i] rcol = right.iloc[:, i] assert_series_equal( lcol, rcol, check_dtype=check_dtype, check_index_type=check_index_type, check_less_precise=check_less_precise, check_exact=check_exact, check_names=check_names, check_datetimelike_compat=check_datetimelike_compat, check_categorical=check_categorical, obj="{obj}.iloc[:, {idx}]".format(obj=obj, idx=i), ) def assert_equal(left, right, **kwargs): """ Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. Parameters ---------- left : Index, Series, DataFrame, ExtensionArray, or np.ndarray right : Index, Series, DataFrame, ExtensionArray, or np.ndarray **kwargs """ __tracebackhide__ = True if isinstance(left, pd.Index): assert_index_equal(left, right, **kwargs) elif isinstance(left, pd.Series): assert_series_equal(left, right, **kwargs) elif isinstance(left, pd.DataFrame): assert_frame_equal(left, right, **kwargs) elif isinstance(left, IntervalArray): assert_interval_array_equal(left, right, **kwargs) elif isinstance(left, PeriodArray): assert_period_array_equal(left, right, **kwargs) elif isinstance(left, DatetimeArray): assert_datetime_array_equal(left, right, **kwargs) elif isinstance(left, TimedeltaArray): assert_timedelta_array_equal(left, right, **kwargs) elif isinstance(left, ExtensionArray): assert_extension_array_equal(left, right, **kwargs) elif isinstance(left, np.ndarray): assert_numpy_array_equal(left, right, **kwargs) else: raise NotImplementedError(type(left)) def box_expected(expected, box_cls, transpose=True): """ Helper function to wrap the expected output of a test in a given box_class. Parameters ---------- expected : np.ndarray, Index, Series box_cls : {Index, Series, DataFrame} Returns ------- subclass of box_cls """ if box_cls is pd.Index: expected = pd.Index(expected) elif box_cls is pd.Series: expected = pd.Series(expected) elif box_cls is pd.DataFrame: expected = pd.Series(expected).to_frame() if transpose: # for vector operations, we we need a DataFrame to be a single-row, # not a single-column, in order to operate against non-DataFrame # vectors of the same length. expected = expected.T elif box_cls is PeriodArray: # the PeriodArray constructor is not as flexible as period_array expected = period_array(expected) elif box_cls is DatetimeArray: expected = DatetimeArray(expected) elif box_cls is TimedeltaArray: expected = TimedeltaArray(expected) elif box_cls is np.ndarray: expected = np.array(expected) elif box_cls is to_array: expected = to_array(expected) else: raise NotImplementedError(box_cls) return expected def to_array(obj): # temporary implementation until we get pd.array in place if is_period_dtype(obj): return period_array(obj) elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj): return DatetimeArray._from_sequence(obj) elif is_timedelta64_dtype(obj): return TimedeltaArray._from_sequence(obj) else: return np.array(obj) # ----------------------------------------------------------------------------- # Sparse def assert_sp_array_equal( left, right, check_dtype=True, check_kind=True, check_fill_value=True, consolidate_block_indices=False, ): """Check that the left and right SparseArray are equal. Parameters ---------- left : SparseArray right : SparseArray check_dtype : bool, default True Whether to check the data dtype is identical. check_kind : bool, default True Whether to just the kind of the sparse index for each column. check_fill_value : bool, default True Whether to check that left.fill_value matches right.fill_value consolidate_block_indices : bool, default False Whether to consolidate contiguous blocks for sparse arrays with a BlockIndex. Some operations, e.g. concat, will end up with block indices that could be consolidated. Setting this to true will create a new BlockIndex for that array, with consolidated block indices. """ _check_isinstance(left, right, pd.SparseArray) assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype) # SparseIndex comparison assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex) assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex) if not check_kind: left_index = left.sp_index.to_block_index() right_index = right.sp_index.to_block_index() else: left_index = left.sp_index right_index = right.sp_index if consolidate_block_indices and left.kind == "block": # we'll probably remove this hack... left_index = left_index.to_int_index().to_block_index() right_index = right_index.to_int_index().to_block_index() if not left_index.equals(right_index): raise_assert_detail( "SparseArray.index", "index are not equal", left_index, right_index ) else: # Just ensure a pass if check_fill_value: assert_attr_equal("fill_value", left, right) if check_dtype: assert_attr_equal("dtype", left, right) assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype) def assert_sp_series_equal( left, right, check_dtype=True, exact_indices=True, check_series_type=True, check_names=True, check_kind=True, check_fill_value=True, consolidate_block_indices=False, obj="SparseSeries", ): """Check that the left and right SparseSeries are equal. Parameters ---------- left : SparseSeries right : SparseSeries check_dtype : bool, default True Whether to check the Series dtype is identical. exact_indices : bool, default True check_series_type : bool, default True Whether to check the SparseSeries class is identical. check_names : bool, default True Whether to check the SparseSeries name attribute. check_kind : bool, default True Whether to just the kind of the sparse index for each column. check_fill_value : bool, default True Whether to check that left.fill_value matches right.fill_value consolidate_block_indices : bool, default False Whether to consolidate contiguous blocks for sparse arrays with a BlockIndex. Some operations, e.g. concat, will end up with block indices that could be consolidated. Setting this to true will create a new BlockIndex for that array, with consolidated block indices. obj : str, default 'SparseSeries' Specify the object name being compared, internally used to show the appropriate assertion message. """ _check_isinstance(left, right, pd.SparseSeries) if check_series_type: assert_class_equal(left, right, obj=obj) assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj)) assert_sp_array_equal( left.values, right.values, check_kind=check_kind, check_fill_value=check_fill_value, consolidate_block_indices=consolidate_block_indices, ) if check_names: assert_attr_equal("name", left, right) if check_dtype: assert_attr_equal("dtype", left, right) assert_numpy_array_equal(np.asarray(left.values), np.asarray(right.values)) def assert_sp_frame_equal( left, right, check_dtype=True, exact_indices=True, check_frame_type=True, check_kind=True, check_fill_value=True, consolidate_block_indices=False, obj="SparseDataFrame", ): """Check that the left and right SparseDataFrame are equal. Parameters ---------- left : SparseDataFrame right : SparseDataFrame check_dtype : bool, default True Whether to check the Series dtype is identical. exact_indices : bool, default True SparseSeries SparseIndex objects must be exactly the same, otherwise just compare dense representations. check_frame_type : bool, default True Whether to check the SparseDataFrame class is identical. check_kind : bool, default True Whether to just the kind of the sparse index for each column. check_fill_value : bool, default True Whether to check that left.fill_value matches right.fill_value consolidate_block_indices : bool, default False Whether to consolidate contiguous blocks for sparse arrays with a BlockIndex. Some operations, e.g. concat, will end up with block indices that could be consolidated. Setting this to true will create a new BlockIndex for that array, with consolidated block indices. obj : str, default 'SparseDataFrame' Specify the object name being compared, internally used to show the appropriate assertion message. """ _check_isinstance(left, right, pd.SparseDataFrame) if check_frame_type: assert_class_equal(left, right, obj=obj) assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj)) assert_index_equal(left.columns, right.columns, obj="{obj}.columns".format(obj=obj)) if check_fill_value: assert_attr_equal("default_fill_value", left, right, obj=obj) for col, series in left.items(): assert col in right # trade-off? if exact_indices: assert_sp_series_equal( series, right[col], check_dtype=check_dtype, check_kind=check_kind, check_fill_value=check_fill_value, consolidate_block_indices=consolidate_block_indices, ) else: assert_series_equal( series.to_dense(), right[col].to_dense(), check_dtype=check_dtype ) # do I care? # assert(left.default_kind == right.default_kind) for col in right: assert col in left # ----------------------------------------------------------------------------- # Others def assert_contains_all(iterable, dic): for k in iterable: assert k in dic, "Did not contain item: '{key!r}'".format(key=k) def assert_copy(iter1, iter2, **eql_kwargs): """ iter1, iter2: iterables that produce elements comparable with assert_almost_equal Checks that the elements are equal, but not the same object. (Does not check that items in sequences are also not the same object) """ for elem1, elem2 in zip(iter1, iter2): assert_almost_equal(elem1, elem2, **eql_kwargs) msg = ( "Expected object {obj1!r} and object {obj2!r} to be " "different objects, but they were the same object." ).format(obj1=type(elem1), obj2=type(elem2)) assert elem1 is not elem2, msg def getCols(k): return string.ascii_uppercase[:k] # make index def makeStringIndex(k=10, name=None): return Index(rands_array(nchars=10, size=k), name=name) def makeUnicodeIndex(k=10, name=None): return Index(randu_array(nchars=10, size=k), name=name) def makeCategoricalIndex(k=10, n=3, name=None, **kwargs): """ make a length k index or n categories """ x = rands_array(nchars=4, size=n) return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs) def makeIntervalIndex(k=10, name=None, **kwargs): """ make a length k IntervalIndex """ x = np.linspace(0, 100, num=(k + 1)) return IntervalIndex.from_breaks(x, name=name, **kwargs) def makeBoolIndex(k=10, name=None): if k == 1: return Index([True], name=name) elif k == 2: return Index([False, True], name=name) return Index([False, True] + [False] * (k - 2), name=name) def makeIntIndex(k=10, name=None): return Index(list(range(k)), name=name) def makeUIntIndex(k=10, name=None): return Index([2 ** 63 + i for i in range(k)], name=name) def makeRangeIndex(k=10, name=None, **kwargs): return RangeIndex(0, k, 1, name=name, **kwargs) def makeFloatIndex(k=10, name=None): values = sorted(np.random.random_sample(k)) - np.random.random_sample(1) return Index(values * (10 ** np.random.randint(0, 9)), name=name) def makeDateIndex(k=10, freq="B", name=None, **kwargs): dt = datetime(2000, 1, 1) dr = bdate_range(dt, periods=k, freq=freq, name=name) return DatetimeIndex(dr, name=name, **kwargs) def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs): return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs) def makePeriodIndex(k=10, name=None, **kwargs): dt = datetime(2000, 1, 1) dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs) return dr def makeMultiIndex(k=10, names=None, **kwargs): return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs) def all_index_generator(k=10): """Generator which can be iterated over to get instances of all the various index classes. Parameters ---------- k: length of each of the index instances """ all_make_index_funcs = [ makeIntIndex, makeFloatIndex, makeStringIndex, makeUnicodeIndex, makeDateIndex, makePeriodIndex, makeTimedeltaIndex, makeBoolIndex, makeRangeIndex, makeIntervalIndex, makeCategoricalIndex, ] for make_index_func in all_make_index_funcs: yield make_index_func(k=k) def index_subclass_makers_generator(): make_index_funcs = [ makeDateIndex, makePeriodIndex, makeTimedeltaIndex, makeRangeIndex, makeIntervalIndex, makeCategoricalIndex, makeMultiIndex, ] for make_index_func in make_index_funcs: yield make_index_func def all_timeseries_index_generator(k=10): """Generator which can be iterated over to get instances of all the classes which represent time-series. Parameters ---------- k: length of each of the index instances """ make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex] for make_index_func in make_index_funcs: yield make_index_func(k=k) # make series def makeFloatSeries(name=None): index = makeStringIndex(N) return Series(randn(N), index=index, name=name) def makeStringSeries(name=None): index = makeStringIndex(N) return Series(randn(N), index=index, name=name) def makeObjectSeries(name=None): dateIndex = makeDateIndex(N) dateIndex = Index(dateIndex, dtype=object) index = makeStringIndex(N) return Series(dateIndex, index=index, name=name) def getSeriesData(): index = makeStringIndex(N) return {c: Series(randn(N), index=index) for c in getCols(K)} def makeTimeSeries(nper=None, freq="B", name=None): if nper is None: nper = N return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name) def makePeriodSeries(nper=None, name=None): if nper is None: nper = N return Series(randn(nper), index=makePeriodIndex(nper), name=name) def getTimeSeriesData(nper=None, freq="B"): return {c: makeTimeSeries(nper, freq) for c in getCols(K)} def getPeriodData(nper=None): return {c: makePeriodSeries(nper) for c in getCols(K)} # make frame def makeTimeDataFrame(nper=None, freq="B"): data = getTimeSeriesData(nper, freq) return DataFrame(data) def makeDataFrame(): data = getSeriesData() return DataFrame(data) def getMixedTypeDict(): index = Index(["a", "b", "c", "d", "e"]) data = { "A": [0.0, 1.0, 2.0, 3.0, 4.0], "B": [0.0, 1.0, 0.0, 1.0, 0.0], "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], "D": bdate_range("1/1/2009", periods=5), } return index, data def makeMixedDataFrame(): return DataFrame(getMixedTypeDict()[1]) def makePeriodFrame(nper=None): data = getPeriodData(nper) return DataFrame(data) def makeCustomIndex( nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None ): """Create an index/multindex with given dimensions, levels, names, etc' nentries - number of entries in index nlevels - number of levels (> 1 produces multindex) prefix - a string prefix for labels names - (Optional), bool or list of strings. if True will use default names, if false will use no names, if a list is given, the name of each level in the index will be taken from the list. ndupe_l - (Optional), list of ints, the number of rows for which the label will repeated at the corresponding level, you can specify just the first few, the rest will use the default ndupe_l of 1. len(ndupe_l) <= nlevels. idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td". If idx_type is not None, `idx_nlevels` must be 1. "i"/"f" creates an integer/float index, "s"/"u" creates a string/unicode index "dt" create a datetime index. "td" create a datetime index. if unspecified, string labels will be generated. """ if ndupe_l is None: ndupe_l = [1] * nlevels assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels assert names is None or names is False or names is True or len(names) is nlevels assert idx_type is None or ( idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1 ) if names is True: # build default names names = [prefix + str(i) for i in range(nlevels)] if names is False: # pass None to index constructor for no name names = None # make singleton case uniform if isinstance(names, str) and nlevels == 1: names = [names] # specific 1D index type requested? idx_func = dict( i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex, u=makeUnicodeIndex, dt=makeDateIndex, td=makeTimedeltaIndex, p=makePeriodIndex, ).get(idx_type) if idx_func: idx = idx_func(nentries) # but we need to fill in the name if names: idx.name = names[0] return idx elif idx_type is not None: raise ValueError( '"{idx_type}" is not a legal value for `idx_type`, ' 'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'.format(idx_type=idx_type) ) if len(ndupe_l) < nlevels: ndupe_l.extend([1] * (nlevels - len(ndupe_l))) assert len(ndupe_l) == nlevels assert all(x > 0 for x in ndupe_l) tuples = [] for i in range(nlevels): def keyfunc(x): import re numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_") return [int(num) for num in numeric_tuple] # build a list of lists to create the index from div_factor = nentries // ndupe_l[i] + 1 cnt = Counter() for j in range(div_factor): label = "{prefix}_l{i}_g{j}".format(prefix=prefix, i=i, j=j) cnt[label] = ndupe_l[i] # cute Counter trick result = list(sorted(cnt.elements(), key=keyfunc))[:nentries] tuples.append(result) tuples = list(zip(*tuples)) # convert tuples to index if nentries == 1: # we have a single level of tuples, i.e. a regular Index index = Index(tuples[0], name=names[0]) elif nlevels == 1: name = None if names is None else names[0] index = Index((x[0] for x in tuples), name=name) else: index = MultiIndex.from_tuples(tuples, names=names) return index def makeCustomDataframe( nrows, ncols, c_idx_names=True, r_idx_names=True, c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None, c_ndupe_l=None, r_ndupe_l=None, dtype=None, c_idx_type=None, r_idx_type=None, ): """ nrows, ncols - number of data rows/cols c_idx_names, idx_names - False/True/list of strings, yields No names , default names or uses the provided names for the levels of the corresponding index. You can provide a single string when c_idx_nlevels ==1. c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex data_gen_f - a function f(row,col) which return the data value at that position, the default generator used yields values of the form "RxCy" based on position. c_ndupe_l, r_ndupe_l - list of integers, determines the number of duplicates for each label at a given level of the corresponding index. The default `None` value produces a multiplicity of 1 across all levels, i.e. a unique index. Will accept a partial list of length N < idx_nlevels, for just the first N levels. If ndupe doesn't divide nrows/ncol, the last label might have lower multiplicity. dtype - passed to the DataFrame constructor as is, in case you wish to have more control in conjunction with a custom `data_gen_f` r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td". If idx_type is not None, `idx_nlevels` must be 1. "i"/"f" creates an integer/float index, "s"/"u" creates a string/unicode index "dt" create a datetime index. "td" create a timedelta index. if unspecified, string labels will be generated. Examples: # 5 row, 3 columns, default names on both, single index on both axis >> makeCustomDataframe(5,3) # make the data a random int between 1 and 100 >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100)) # 2-level multiindex on rows with each label duplicated # twice on first level, default names on both axis, single # index on both axis >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2]) # DatetimeIndex on row, index with unicode labels on columns # no names on either axis >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False, r_idx_type="dt",c_idx_type="u") # 4-level multindex on rows with names provided, 2-level multindex # on columns with default labels and default names. >> a=makeCustomDataframe(5,3,r_idx_nlevels=4, r_idx_names=["FEE","FI","FO","FAM"], c_idx_nlevels=2) >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) """ assert c_idx_nlevels > 0 assert r_idx_nlevels > 0 assert r_idx_type is None or ( r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1 ) assert c_idx_type is None or ( c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1 ) columns = makeCustomIndex( ncols, nlevels=c_idx_nlevels, prefix="C", names=c_idx_names, ndupe_l=c_ndupe_l, idx_type=c_idx_type, ) index = makeCustomIndex( nrows, nlevels=r_idx_nlevels, prefix="R", names=r_idx_names, ndupe_l=r_ndupe_l, idx_type=r_idx_type, ) # by default, generate data based on location if data_gen_f is None: data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c) data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)] return DataFrame(data, index, columns, dtype=dtype) def _create_missing_idx(nrows, ncols, density, random_state=None): if random_state is None: random_state = np.random else: random_state = np.random.RandomState(random_state) # below is cribbed from scipy.sparse size = int(np.round((1 - density) * nrows * ncols)) # generate a few more to ensure unique values min_rows = 5 fac = 1.02 extra_size = min(size + min_rows, fac * size) def _gen_unique_rand(rng, _extra_size): ind = rng.rand(int(_extra_size)) return np.unique(np.floor(ind * nrows * ncols))[:size] ind = _gen_unique_rand(random_state, extra_size) while ind.size < size: extra_size *= 1.05 ind = _gen_unique_rand(random_state, extra_size) j = np.floor(ind * 1.0 / nrows).astype(int) i = (ind - j * nrows).astype(int) return i.tolist(), j.tolist() def makeMissingCustomDataframe( nrows, ncols, density=0.9, random_state=None, c_idx_names=True, r_idx_names=True, c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None, c_ndupe_l=None, r_ndupe_l=None, dtype=None, c_idx_type=None, r_idx_type=None, ): """ Parameters ---------- Density : float, optional Float in (0, 1) that gives the percentage of non-missing numbers in the DataFrame. random_state : {np.random.RandomState, int}, optional Random number generator or random seed. See makeCustomDataframe for descriptions of the rest of the parameters. """ df = makeCustomDataframe( nrows, ncols, c_idx_names=c_idx_names, r_idx_names=r_idx_names, c_idx_nlevels=c_idx_nlevels, r_idx_nlevels=r_idx_nlevels, data_gen_f=data_gen_f, c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l, dtype=dtype, c_idx_type=c_idx_type, r_idx_type=r_idx_type, ) i, j = _create_missing_idx(nrows, ncols, density, random_state) df.values[i, j] = np.nan return df def makeMissingDataframe(density=0.9, random_state=None): df = makeDataFrame() i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state) df.values[i, j] = np.nan return df class TestSubDict(dict): def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, *args, **kwargs)""" @wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and callable(args[0]) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # skip tests on exceptions with this message _network_error_messages = ( # 'urlopen error timed out', # 'timeout: timed out', # 'socket.timeout: timed out', "timed out", "Server Hangup", "HTTP Error 503: Service Unavailable", "502: Proxy Error", "HTTP Error 502: internal error", "HTTP Error 502", "HTTP Error 503", "HTTP Error 403", "HTTP Error 400", "Temporary failure in name resolution", "Name or service not known", "Connection refused", "certificate verify", ) # or this e.errno/e.reason.errno _network_errno_vals = ( 101, # Network is unreachable 111, # Connection refused 110, # Connection timed out 104, # Connection reset Error 54, # Connection reset by peer 60, # urllib.error.URLError: [Errno 60] Connection timed out ) # Both of the above shouldn't mask real issues such as 404's # or refused connections (changed DNS). # But some tests (test_data yahoo) contact incredibly flakey # servers. # and conditionally raise on these exception types _network_error_classes = (IOError, http.client.HTTPException, TimeoutError) def can_connect(url, error_classes=_network_error_classes): """Try to connect to the given url. True if succeeds, False if IOError raised Parameters ---------- url : basestring The URL to try to connect to Returns ------- connectable : bool Return True if no IOError (unable to connect) or URLError (bad url) was raised """ try: with urlopen(url): pass except error_classes: return False else: return True @optional_args def network( t, url="http://www.google.com", raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, check_before_test=False, error_classes=_network_error_classes, skip_errnos=_network_errno_vals, _skip_on_messages=_network_error_messages, ): """ Label a test as requiring network connection and, if an error is encountered, only raise if it does not find a network connection. In comparison to ``network``, this assumes an added contract to your test: you must assert that, under normal conditions, your test will ONLY fail if it does not have network connectivity. You can call this in 3 ways: as a standard decorator, with keyword arguments, or with a positional argument that is the url to check. Parameters ---------- t : callable The test requiring network connectivity. url : path The url to test via ``pandas.io.common.urlopen`` to check for connectivity. Defaults to 'http://www.google.com'. raise_on_error : bool If True, never catches errors. check_before_test : bool If True, checks connectivity before running the test case. error_classes : tuple or Exception error classes to ignore. If not in ``error_classes``, raises the error. defaults to IOError. Be careful about changing the error classes here. skip_errnos : iterable of int Any exception that has .errno or .reason.erno set to one of these values will be skipped with an appropriate message. _skip_on_messages: iterable of string any exception e for which one of the strings is a substring of str(e) will be skipped with an appropriate message. Intended to suppress errors where an errno isn't available. Notes ----- * ``raise_on_error`` supercedes ``check_before_test`` Returns ------- t : callable The decorated test ``t``, with checks for connectivity errors. Example ------- Tests decorated with @network will fail if it's possible to make a network connection to another URL (defaults to google.com):: >>> from pandas.util.testing import network >>> from pandas.io.common import urlopen >>> @network ... def test_network(): ... with urlopen("rabbit://bonanza.com"): ... pass Traceback ... URLError: <urlopen error unknown url type: rabit> You can specify alternative URLs:: >>> @network("http://www.yahoo.com") ... def test_something_with_yahoo(): ... raise IOError("Failure Message") >>> test_something_with_yahoo() Traceback (most recent call last): ... IOError: Failure Message If you set check_before_test, it will check the url first and not run the test on failure:: >>> @network("failing://url.blaher", check_before_test=True) ... def test_something(): ... print("I ran!") ... raise ValueError("Failure") >>> test_something() Traceback (most recent call last): ... Errors not related to networking will always be raised. """ from pytest import skip t.network = True @wraps(t) def wrapper(*args, **kwargs): if check_before_test and not raise_on_error: if not can_connect(url, error_classes): skip() try: return t(*args, **kwargs) except Exception as e: errno = getattr(e, "errno", None) if not errno and hasattr(errno, "reason"): errno = getattr(e.reason, "errno", None) if errno in skip_errnos: skip( "Skipping test due to known errno" " and error {error}".format(error=e) ) try: e_str = traceback.format_exc(e) except Exception: e_str = str(e) if any(m.lower() in e_str.lower() for m in _skip_on_messages): skip( "Skipping test because exception " "message is known and error {error}".format(error=e) ) if not isinstance(e, error_classes): raise if raise_on_error or can_connect(url, error_classes): raise else: skip( "Skipping test due to lack of connectivity" " and error {error}".format(error=e) ) return wrapper with_connectivity_check = network def assert_raises_regex(_exception, _regexp, _callable=None, *args, **kwargs): r""" Check that the specified Exception is raised and that the error message matches a given regular expression pattern. This may be a regular expression object or a string containing a regular expression suitable for use by `re.search()`. This is a port of the `assertRaisesRegexp` function from unittest in Python 2.7. .. deprecated:: 0.24.0 Use `pytest.raises` instead. Examples -------- >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ') >>> import re >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ') If an exception of a different type is raised, it bubbles up. >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ') Traceback (most recent call last): ... ValueError: invalid literal for int() with base 10: 'XYZ' >>> dct = dict() >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple') Traceback (most recent call last): ... AssertionError: "pear" does not match "'apple'" You can also use this in a with statement. >>> with assert_raises_regex(TypeError, r'unsupported operand type\(s\)'): ... 1 + {} >>> with assert_raises_regex(TypeError, 'banana'): ... 'apple'[0] = 'b' Traceback (most recent call last): ... AssertionError: "banana" does not match "'str' object does not support \ item assignment" """ warnings.warn( ( "assert_raises_regex has been deprecated and will " "be removed in the next release. Please use " "`pytest.raises` instead." ), FutureWarning, stacklevel=2, ) manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp) if _callable is not None: with manager: _callable(*args, **kwargs) else: return manager class _AssertRaisesContextmanager: """ Context manager behind `assert_raises_regex`. """ def __init__(self, exception, regexp=None): """ Initialize an _AssertRaisesContextManager instance. Parameters ---------- exception : class The expected Exception class. regexp : str, default None The regex to compare against the Exception message. """ self.exception = exception if regexp is not None and not hasattr(regexp, "search"): regexp = re.compile(regexp, re.DOTALL) self.regexp = regexp def __enter__(self): return self def __exit__(self, exc_type, exc_value, trace_back): expected = self.exception if not exc_type: exp_name = getattr(expected, "__name__", str(expected)) raise AssertionError("{name} not raised.".format(name=exp_name)) return self.exception_matches(exc_type, exc_value, trace_back) def exception_matches(self, exc_type, exc_value, trace_back): """ Check that the Exception raised matches the expected Exception and expected error message regular expression. Parameters ---------- exc_type : class The type of Exception raised. exc_value : Exception The instance of `exc_type` raised. trace_back : stack trace object The traceback object associated with `exc_value`. Returns ------- is_matched : bool Whether or not the Exception raised matches the expected Exception class and expected error message regular expression. Raises ------ AssertionError : The error message provided does not match the expected error message regular expression. """ if issubclass(exc_type, self.exception): if self.regexp is not None: val = str(exc_value) if not self.regexp.search(val): msg = '"{pat}" does not match "{val}"'.format( pat=self.regexp.pattern, val=val ) e = AssertionError(msg) raise_with_traceback(e, trace_back) return True else: # Failed, so allow Exception to bubble up. return False @contextmanager def assert_produces_warning( expected_warning=Warning, filter_level="always", clear=None, check_stacklevel=True, raise_on_extra_warnings=True, ): """ Context manager for running code expected to either raise a specific warning, or not raise any warnings. Verifies that the code raises the expected warning, and that it does not raise any other unexpected warnings. It is basically a wrapper around ``warnings.catch_warnings``. Parameters ---------- expected_warning : {Warning, False, None}, default Warning The type of Exception raised. ``exception.Warning`` is the base class for all warnings. To check that no warning is returned, specify ``False`` or ``None``. filter_level : str or None, default "always" Specifies whether warnings are ignored, displayed, or turned into errors. Valid values are: * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated from each location * "module" - print the warning the first time it is generated from each module * "once" - print the warning the first time it is generated clear : str, default None If not ``None`` then remove any previously raised warnings from the ``__warningsregistry__`` to ensure that no warning messages are suppressed by this context manager. If ``None`` is specified, the ``__warningsregistry__`` keeps track of which warnings have been shown, and does not show them again. check_stacklevel : bool, default True If True, displays the line that called the function containing the warning to show were the function is called. Otherwise, the line that implements the function is displayed. raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. Examples -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning()) ... >>> with assert_produces_warning(False): ... warnings.warn(RuntimeWarning()) ... Traceback (most recent call last): ... AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. >>> with assert_produces_warning(UserWarning): ... warnings.warn(RuntimeWarning()) Traceback (most recent call last): ... AssertionError: Did not see expected warning of class 'UserWarning'. ..warn:: This is *not* thread-safe. """ __tracebackhide__ = True with warnings.catch_warnings(record=True) as w: if clear is not None: # make sure that we are clearing these warnings # if they have happened before # to guarantee that we will catch them if not is_list_like(clear): clear = [clear] for m in clear: try: m.__warningregistry__.clear() except Exception: pass saw_warning = False warnings.simplefilter(filter_level) yield w extra_warnings = [] for actual_warning in w: if expected_warning and issubclass( actual_warning.category, expected_warning ): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): from inspect import getframeinfo, stack caller = getframeinfo(stack()[2][0]) msg = ( "Warning not set with correct stacklevel. " "File where warning is raised: {actual} != " "{caller}. Warning message: {message}" ).format( actual=actual_warning.filename, caller=caller.filename, message=actual_warning.message, ) assert actual_warning.filename == caller.filename, msg else: extra_warnings.append( ( actual_warning.category.__name__, actual_warning.message, actual_warning.filename, actual_warning.lineno, ) ) if expected_warning: msg = "Did not see expected warning of class {name!r}.".format( name=expected_warning.__name__ ) assert saw_warning, msg if raise_on_extra_warnings and extra_warnings: raise AssertionError( "Caused unexpected warning(s): {!r}.".format(extra_warnings) ) class RNGContext: """ Context manager to set the numpy random number generator speed. Returns to the original value upon exiting the context manager. Parameters ---------- seed : int Seed for numpy.random.seed Examples -------- with RNGContext(42): np.random.randn() """ def __init__(self, seed): self.seed = seed def __enter__(self): self.start_state = np.random.get_state() np.random.seed(self.seed) def __exit__(self, exc_type, exc_value, traceback): np.random.set_state(self.start_state) @contextmanager def with_csv_dialect(name, **kwargs): """ Context manager to temporarily register a CSV dialect for parsing CSV. Parameters ---------- name : str The name of the dialect. kwargs : mapping The parameters for the dialect. Raises ------ ValueError : the name of the dialect conflicts with a builtin one. See Also -------- csv : Python's CSV library. """ import csv _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} if name in _BUILTIN_DIALECTS: raise ValueError("Cannot override builtin dialect.") csv.register_dialect(name, **kwargs) yield csv.unregister_dialect(name) @contextmanager def use_numexpr(use, min_elements=None): from pandas.core.computation import expressions as expr if min_elements is None: min_elements = expr._MIN_ELEMENTS olduse = expr._USE_NUMEXPR oldmin = expr._MIN_ELEMENTS expr.set_use_numexpr(use) expr._MIN_ELEMENTS = min_elements yield expr._MIN_ELEMENTS = oldmin expr.set_use_numexpr(olduse) def test_parallel(num_threads=2, kwargs_list=None): """Decorator to run the same function multiple times in parallel. Parameters ---------- num_threads : int, optional The number of times the function is run in parallel. kwargs_list : list of dicts, optional The list of kwargs to update original function kwargs on different threads. Notes ----- This decorator does not pass the return value of the decorated function. Original from scikit-image: https://github.com/scikit-image/scikit-image/pull/1519 """ assert num_threads > 0 has_kwargs_list = kwargs_list is not None if has_kwargs_list: assert len(kwargs_list) == num_threads import threading def wrapper(func): @wraps(func) def inner(*args, **kwargs): if has_kwargs_list: update_kwargs = lambda i: dict(kwargs, **kwargs_list[i]) else: update_kwargs = lambda i: kwargs threads = [] for i in range(num_threads): updated_kwargs = update_kwargs(i) thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs) threads.append(thread) for thread in threads: thread.start() for thread in threads: thread.join() return inner return wrapper class SubclassedSeries(Series): _metadata = ["testattr", "name"] @property def _constructor(self): return SubclassedSeries @property def _constructor_expanddim(self): return SubclassedDataFrame class SubclassedDataFrame(DataFrame): _metadata = ["testattr"] @property def _constructor(self): return SubclassedDataFrame @property def _constructor_sliced(self): return SubclassedSeries class SubclassedSparseSeries(pd.SparseSeries): _metadata = ["testattr"] @property def _constructor(self): return SubclassedSparseSeries @property def _constructor_expanddim(self): return SubclassedSparseDataFrame class SubclassedSparseDataFrame(pd.SparseDataFrame): _metadata = ["testattr"] @property def _constructor(self): return SubclassedSparseDataFrame @property def _constructor_sliced(self): return SubclassedSparseSeries class SubclassedCategorical(Categorical): @property def _constructor(self): return SubclassedCategorical @contextmanager def set_timezone(tz): """Context manager for temporarily setting a timezone. Parameters ---------- tz : str A string representing a valid timezone. Examples -------- >>> from datetime import datetime >>> from dateutil.tz import tzlocal >>> tzlocal().tzname(datetime.now()) 'IST' >>> with set_timezone('US/Eastern'): ... tzlocal().tzname(datetime.now()) ... 'EDT' """ import os import time def setTZ(tz): if tz is None: try: del os.environ["TZ"] except KeyError: pass else: os.environ["TZ"] = tz time.tzset() orig_tz = os.environ.get("TZ") setTZ(tz) try: yield finally: setTZ(orig_tz) def _make_skipna_wrapper(alternative, skipna_alternative=None): """Create a function for calling on an array. Parameters ---------- alternative : function The function to be called on the array with no NaNs. Only used when 'skipna_alternative' is None. skipna_alternative : function The function to be called on the original array Returns ------- skipna_wrapper : function """ if skipna_alternative: def skipna_wrapper(x): return skipna_alternative(x.values) else: def skipna_wrapper(x): nona = x.dropna() if len(nona) == 0: return np.nan return alternative(nona) return skipna_wrapper def convert_rows_list_to_csv_str(rows_list): """ Convert list of CSV rows to single CSV-formatted string for current OS. This method is used for creating expected value of to_csv() method. Parameters ---------- rows_list : list The list of string. Each element represents the row of csv. Returns ------- expected : string Expected output of to_csv() in current OS """ sep = os.linesep expected = sep.join(rows_list) + sep return expected
kushalbhola/MyStuff
Practice/PythonApplication/env/Lib/site-packages/pandas/util/testing.py
Python
apache-2.0
92,720
"""Template helper methods for rendering strings with Home Assistant data.""" from datetime import datetime import json import logging import re import jinja2 from jinja2.sandbox import ImmutableSandboxedEnvironment from homeassistant.const import ( STATE_UNKNOWN, ATTR_LATITUDE, ATTR_LONGITUDE, MATCH_ALL) from homeassistant.core import State from homeassistant.exceptions import TemplateError from homeassistant.helpers import location as loc_helper from homeassistant.loader import get_component from homeassistant.util import convert, dt as dt_util, location as loc_util from homeassistant.util.async import run_callback_threadsafe _LOGGER = logging.getLogger(__name__) _SENTINEL = object() DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S" _RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M) _RE_GET_ENTITIES = re.compile( r"(?:(?:states\.|(?:is_state|is_state_attr|states)\(.)([\w]+\.[\w]+))", re.I | re.M ) def attach(hass, obj): """Recursively attach hass to all template instances in list and dict.""" if isinstance(obj, list): for child in obj: attach(hass, child) elif isinstance(obj, dict): for child in obj.values(): attach(hass, child) elif isinstance(obj, Template): obj.hass = hass def extract_entities(template): """Extract all entities for state_changed listener from template string.""" if template is None or _RE_NONE_ENTITIES.search(template): return MATCH_ALL extraction = _RE_GET_ENTITIES.findall(template) if len(extraction) > 0: return list(set(extraction)) return MATCH_ALL class Template(object): """Class to hold a template and manage caching and rendering.""" def __init__(self, template, hass=None): """Instantiate a Template.""" if not isinstance(template, str): raise TypeError('Expected template to be a string') self.template = template self._compiled_code = None self._compiled = None self.hass = hass def ensure_valid(self): """Return if template is valid.""" if self._compiled_code is not None: return try: self._compiled_code = ENV.compile(self.template) except jinja2.exceptions.TemplateSyntaxError as err: raise TemplateError(err) def extract_entities(self): """Extract all entities for state_changed listener.""" return extract_entities(self.template) def render(self, variables=None, **kwargs): """Render given template.""" if variables is not None: kwargs.update(variables) return run_callback_threadsafe( self.hass.loop, self.async_render, kwargs).result() def async_render(self, variables=None, **kwargs): """Render given template. This method must be run in the event loop. """ self._ensure_compiled() if variables is not None: kwargs.update(variables) try: return self._compiled.render(kwargs).strip() except jinja2.TemplateError as err: raise TemplateError(err) def render_with_possible_json_value(self, value, error_value=_SENTINEL): """Render template with value exposed. If valid JSON will expose value_json too. """ return run_callback_threadsafe( self.hass.loop, self.async_render_with_possible_json_value, value, error_value).result() # pylint: disable=invalid-name def async_render_with_possible_json_value(self, value, error_value=_SENTINEL): """Render template with value exposed. If valid JSON will expose value_json too. This method must be run in the event loop. """ self._ensure_compiled() variables = { 'value': value } try: variables['value_json'] = json.loads(value) except ValueError: pass try: return self._compiled.render(variables).strip() except jinja2.TemplateError as ex: _LOGGER.error("Error parsing value: %s (value: %s, template: %s)", ex, value, self.template) return value if error_value is _SENTINEL else error_value def _ensure_compiled(self): """Bind a template to a specific hass instance.""" if self._compiled is not None: return self.ensure_valid() assert self.hass is not None, 'hass variable not set on template' location_methods = LocationMethods(self.hass) global_vars = ENV.make_globals({ 'closest': location_methods.closest, 'distance': location_methods.distance, 'is_state': self.hass.states.is_state, 'is_state_attr': self.hass.states.is_state_attr, 'states': AllStates(self.hass), }) self._compiled = jinja2.Template.from_code( ENV, self._compiled_code, global_vars, None) return self._compiled def __eq__(self, other): """Compare template with another.""" return (self.__class__ == other.__class__ and self.template == other.template and self.hass == other.hass) class AllStates(object): """Class to expose all HA states as attributes.""" def __init__(self, hass): """Initialize all states.""" self._hass = hass def __getattr__(self, name): """Return the domain state.""" return DomainStates(self._hass, name) def __iter__(self): """Return all states.""" return iter(sorted(self._hass.states.async_all(), key=lambda state: state.entity_id)) def __call__(self, entity_id): """Return the states.""" state = self._hass.states.get(entity_id) return STATE_UNKNOWN if state is None else state.state class DomainStates(object): """Class to expose a specific HA domain as attributes.""" def __init__(self, hass, domain): """Initialize the domain states.""" self._hass = hass self._domain = domain def __getattr__(self, name): """Return the states.""" return self._hass.states.get('{}.{}'.format(self._domain, name)) def __iter__(self): """Return the iteration over all the states.""" return iter(sorted( (state for state in self._hass.states.async_all() if state.domain == self._domain), key=lambda state: state.entity_id)) class LocationMethods(object): """Class to expose distance helpers to templates.""" def __init__(self, hass): """Initialize the distance helpers.""" self._hass = hass def closest(self, *args): """Find closest entity. Closest to home: closest(states) closest(states.device_tracker) closest('group.children') closest(states.group.children) Closest to a point: closest(23.456, 23.456, 'group.children') closest('zone.school', 'group.children') closest(states.zone.school, 'group.children') """ if len(args) == 1: latitude = self._hass.config.latitude longitude = self._hass.config.longitude entities = args[0] elif len(args) == 2: point_state = self._resolve_state(args[0]) if point_state is None: _LOGGER.warning("Closest:Unable to find state %s", args[0]) return None elif not loc_helper.has_location(point_state): _LOGGER.warning( "Closest:State does not contain valid location: %s", point_state) return None latitude = point_state.attributes.get(ATTR_LATITUDE) longitude = point_state.attributes.get(ATTR_LONGITUDE) entities = args[1] else: latitude = convert(args[0], float) longitude = convert(args[1], float) if latitude is None or longitude is None: _LOGGER.warning( "Closest:Received invalid coordinates: %s, %s", args[0], args[1]) return None entities = args[2] if isinstance(entities, (AllStates, DomainStates)): states = list(entities) else: if isinstance(entities, State): gr_entity_id = entities.entity_id else: gr_entity_id = str(entities) group = get_component('group') states = [self._hass.states.get(entity_id) for entity_id in group.expand_entity_ids(self._hass, [gr_entity_id])] return loc_helper.closest(latitude, longitude, states) def distance(self, *args): """Calculate distance. Will calculate distance from home to a point or between points. Points can be passed in using state objects or lat/lng coordinates. """ locations = [] to_process = list(args) while to_process: value = to_process.pop(0) if isinstance(value, State): latitude = value.attributes.get(ATTR_LATITUDE) longitude = value.attributes.get(ATTR_LONGITUDE) if latitude is None or longitude is None: _LOGGER.warning( "Distance:State does not contains a location: %s", value) return None else: # We expect this and next value to be lat&lng if not to_process: _LOGGER.warning( "Distance:Expected latitude and longitude, got %s", value) return None value_2 = to_process.pop(0) latitude = convert(value, float) longitude = convert(value_2, float) if latitude is None or longitude is None: _LOGGER.warning("Distance:Unable to process latitude and " "longitude: %s, %s", value, value_2) return None locations.append((latitude, longitude)) if len(locations) == 1: return self._hass.config.distance(*locations[0]) return self._hass.config.units.length( loc_util.distance(*locations[0] + locations[1]), 'm') def _resolve_state(self, entity_id_or_state): """Return state or entity_id if given.""" if isinstance(entity_id_or_state, State): return entity_id_or_state elif isinstance(entity_id_or_state, str): return self._hass.states.get(entity_id_or_state) return None def forgiving_round(value, precision=0): """Rounding filter that accepts strings.""" try: value = round(float(value), precision) return int(value) if precision == 0 else value except (ValueError, TypeError): # If value can't be converted to float return value def multiply(value, amount): """Filter to convert value to float and multiply it.""" try: return float(value) * amount except (ValueError, TypeError): # If value can't be converted to float return value def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True): """Filter to convert given timestamp to format.""" try: date = dt_util.utc_from_timestamp(value) if local: date = dt_util.as_local(date) return date.strftime(date_format) except (ValueError, TypeError): # If timestamp can't be converted return value def timestamp_local(value): """Filter to convert given timestamp to local date/time.""" try: return dt_util.as_local( dt_util.utc_from_timestamp(value)).strftime(DATE_STR_FORMAT) except (ValueError, TypeError): # If timestamp can't be converted return value def timestamp_utc(value): """Filter to convert given timestamp to UTC date/time.""" try: return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT) except (ValueError, TypeError): # If timestamp can't be converted return value def forgiving_as_timestamp(value): """Try to convert value to timestamp.""" try: return dt_util.as_timestamp(value) except (ValueError, TypeError): return None def strptime(string, fmt): """Parse a time string to datetime.""" try: return datetime.strptime(string, fmt) except (ValueError, AttributeError): return string def fail_when_undefined(value): """Filter to force a failure when the value is undefined.""" if isinstance(value, jinja2.Undefined): value() return value def forgiving_float(value): """Try to convert value to a float.""" try: return float(value) except (ValueError, TypeError): return value class TemplateEnvironment(ImmutableSandboxedEnvironment): """The Home Assistant template environment.""" def is_safe_callable(self, obj): """Test if callback is safe.""" return isinstance(obj, AllStates) or super().is_safe_callable(obj) ENV = TemplateEnvironment() ENV.filters['round'] = forgiving_round ENV.filters['multiply'] = multiply ENV.filters['timestamp_custom'] = timestamp_custom ENV.filters['timestamp_local'] = timestamp_local ENV.filters['timestamp_utc'] = timestamp_utc ENV.filters['is_defined'] = fail_when_undefined ENV.filters['max'] = max ENV.filters['min'] = min ENV.globals['float'] = forgiving_float ENV.globals['now'] = dt_util.now ENV.globals['utcnow'] = dt_util.utcnow ENV.globals['as_timestamp'] = forgiving_as_timestamp ENV.globals['relative_time'] = dt_util.get_age ENV.globals['strptime'] = strptime
JshWright/home-assistant
homeassistant/helpers/template.py
Python
apache-2.0
14,020
class JobResult(): def __init__(self, job, build_number, status): self.job = job self.build_number = int(build_number) self.status = status def __repr__(self): return "job: %s, build number: %s, status: %s" % (self.job, self.build_number, self.status)
alberto/hudson-notifier
src/job_result.py
Python
apache-2.0
263
#TLE class Solution: def maxProduct(self, nums): """ :type nums: List[int] :rtype: int """ if not nums: return 0 ret = - sys.maxsize for i in range(len(nums)): j = i + 1 temp = nums[i] ret = temp if temp > ret else ret while j < len(nums): temp *= nums[j] ret = temp if temp > ret else ret j += 1 return ret # not TLE ... int maxProduct(int* nums, int numsSize) { if(numsSize == 0){ return 0; } int ret = INT_MIN; int temp; for(int i = 0; i < numsSize; i++){ int j = i + 1; temp = nums[i]; ret = ret > temp ? ret : temp; while(j < numsSize){ temp *= nums[j++]; ret = ret > temp ? ret : temp; } } return ret; }
MingfeiPan/leetcode
array/152.py
Python
apache-2.0
932
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals # Mock test input source import unicodecsv as csv class MockInputSource(): def __init__(self, in_file): self._mock_data = self._read_input(in_file) def fetch_gene_info(self, ac): pass def fetch_gene_transcripts(self, ac): pass def fetch_transcript_exons(self, ac): result = None data = self._mock_data.get(ac) if data: result = {'ord': 1, 't_start_i': 0, 't_end_i': data['cds_end_i'] - data['cds_start_i'], 't_seq_a': data['transcript_sequence'] } return [result] def fetch_transcript_info(self,ac): result = None data = self._mock_data.get(ac) if data: # interbase coordinates result = {'cds_start_i': data['cds_start_i'], 'cds_end_i': data['cds_end_i']} return result def get_tx_identity_info(self, ac): return self.fetch_transcript_info(ac) def get_tx_info(self, ac): return self.fetch_transcript_info(ac) def get_tx_exons(self, ac): return self.fetch_transcript_exons(ac) def get_tx_seq(self, ac): result = None data = self._mock_data.get(ac) if data: result = data['transcript_sequence'] return result # # internal methods # def _read_input(self, in_file): """Dummy file of inputs :param in_file: path to input file of 2 cols (tab-delim); accession_number, sequence :type string :return dictionary of accession_number to sequence tags """ result = {} with open(in_file, 'r') as f: reader = csv.DictReader(f, delimiter=str('\t')) for row in reader: result[row['accession']] = {'transcript_sequence': row['transcript_sequence'], 'cds_start_i': int(row['cds_start_i']), 'cds_end_i': int(row['cds_end_i'])} return result def main(): pass if __name__ == "__main__": main() ## <LICENSE> ## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs) ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## </LICENSE>
jmuhlich/hgvs
tests/framework/mock_input_source.py
Python
apache-2.0
2,910
#!/usr/bin/env python # -*- coding: utf-8 -*- from org.wikiup.database.orm import WikiupEntityManager from org.wikiup.modules.jython.orm import PythonEntity def getEntity(name, ctx=None, **selection): return PythonEntity(_getEntity(name, ctx, selection)) def query(name, relation, ctx=None, **selection): entity = _getEntity(name, ctx, selection) relatives = entity.getRelatives(relation, None) props = [str(i.getName()) for i in relatives.getProperties()] r = [dict([(j, str(i.get(j))) for j in props]) for i in relatives] if len(r) == 0: r = dict([(str(i.getName()), i.getObject()) for i in relatives.getProperties()]) entity.release() return r def _getEntity(name, ctx=None, selection=None): entity = None if ctx is not None: entity = ctx.get(name) if entity is None: entity = (ctx or WikiupEntityManager.getInstance()).getEntity(name) if selection: for k in selection: entity.set(k, selection[k]) return entity
smantinc/wikiup
modules/jython/src/wikiup/webroot/WEB-INF/python/lib/wk/orm.py
Python
apache-2.0
1,017
"""Just a subclass of service_manager.Manager for testing -- the methods don't do anything but print to a logfile. """ import engage.drivers.service_manager as service_manager import engage.utils.log_setup logger = engage.utils.log_setup.setup_script_logger("DummyService") from engage.utils.user_error import UserError, ScriptErrInf import gettext _ = gettext.gettext errors = { } def define_error(error_code, msg): global errors error_info = ScriptErrInf("DummyService", error_code, msg) errors[error_info.error_code] = error_info # error codes ERR_START_ALREADY_CALLED = 1 ERR_STOP_NOT_STARTED = 2 ERR_ALREADY_INSTALLED = 3 define_error(ERR_START_ALREADY_CALLED, _("Package %(pkg)s: start was already called")) define_error(ERR_STOP_NOT_STARTED, _("Package %(pkg)s: attempt to start service that was not started")) define_error(ERR_ALREADY_INSTALLED, _("Package %(pkg)s: install called when package already installed")) class Manager(service_manager.Manager): def __init__(self, metadata): package_name = "%s %s (dummy_service_manager)" % (metadata.key["name"], metadata.key["version"]) service_manager.Manager.__init__(self, metadata, package_name) self.started = False def start(self): if self.started: raise UserError(errors[ERR_START_ALREADY_CALLED], msg_args={"pkg":self.package_name}) logger.info("%s: start() called" % self.package_name) self.started = True def stop(self): if not self.started: raise UserError(errors[ERR_STOP_NOT_STARTED], msg_args={"pkg":self.package_name}) logger.info("%s: stop() called" % self.package_name) self.started = False def is_running(self): return self.started def validate_pre_install(self): logger.info("%s: validate_pre_install() called" % self.package_name) def install(self, library_package): if self.is_installed(): raise UserError(errors[ERR_ALREADY_INSTALLED], msg_args={"pkg":self.package_name}) logger.info("%s: install() called" % self.package_name) def validate_post_install(self): logger.info("%s: validate_post_install() called" % self.package_name) def uninstall(self): logger.info("%: uninstall() called" % self.package_name)
quaddra/engage
python_pkg/engage/drivers/genforma/dummy_service_manager.py
Python
apache-2.0
2,556
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import logging from netaddr import IPAddress from utils import retry_until_success, debug_failures from network import DockerNetwork from exceptions import CommandExecError NET_NONE = "none" logger = logging.getLogger(__name__) class Workload(object): """ A calico workload. These are the end-users containers that will run application-level software. """ def __init__(self, host, name, image="busybox", network="bridge", ip=None): """ Create the workload and detect its IPs. :param host: The host container on which this workload is instantiated. All commands executed by this container will be passed through the host via docker exec. :param name: The name given to the workload container. This name is passed to docker and can be used inside docker commands. :param image: The docker image to be used to instantiate this container. busybox used by default because it is extremely small and has ping. :param network: The DockerNetwork to connect to. Set to None to use default Docker networking. :param ip: The ip address to assign to the container. """ self.host = host self.name = name ip_option = ("--ip %s" % ip) if ip else "" command = "docker run -tid --name %s --net %s %s %s" % \ (name, network, ip_option, image) host.execute(command) self.ip = host.execute("docker inspect --format " "'{{.NetworkSettings.Networks.%s.IPAddress}}' %s" % (network, name)) def execute(self, command): """ Execute arbitrary commands on this workload. """ # Make sure we've been created in the context of a host. Done here # instead of in __init__ as we can't exist in the host until we're # created. assert self in self.host.workloads return self.host.execute("docker exec %s %s" % (self.name, command)) def _get_ping_function(self, ip): """ Return a function to ping the supplied IP address from this workload. :param ip: The IPAddress to ping. :return: A partial function that can be executed to perform the ping. The function raises a CommandExecError exception if the ping fails, or returns the output of the ping. """ # Default to "ping" ping = "ping" try: version = IPAddress(ip).version assert version in [4, 6] if version == 6: ping = "ping6" except BaseException: pass args = [ ping, "-c", "1", # Number of pings "-W", "1", # Timeout for each ping ip, ] command = ' '.join(args) ping = partial(self.execute, command) return ping @debug_failures def assert_can_ping(self, ip, retries=0): """ Execute a ping from this workload to the ip. Assert than a workload can ping an IP. Use retries to allow for convergence. Use of this method assumes the network will be transitioning from a state where the destination is currently unreachable. :param ip: The IP address (str or IPAddress) to ping. :param retries: The number of retries. :return: None. """ try: retry_until_success(self._get_ping_function(ip), retries=retries, ex_class=CommandExecError) except CommandExecError: raise AssertionError("%s cannot ping %s" % (self, ip)) @debug_failures def assert_cant_ping(self, ip, retries=0): """ Execute a ping from this workload to the ip. Assert that the workload cannot ping an IP. Use retries to allow for convergence. Use of this method assumes the network will be transitioning from a state where the destination is currently reachable. :param ip: The IP address (str or IPAddress) to ping. :param retries: The number of retries. :return: None. """ ping = self._get_ping_function(ip) def cant_ping(): try: ping() except CommandExecError: pass else: raise _PingError() try: retry_until_success(cant_ping, retries=retries, ex_class=_PingError) except _PingError: raise AssertionError("%s can ping %s" % (self, ip)) def __str__(self): return self.name class _PingError(Exception): pass
caseydavenport/libcalico
calico_test/tests/st/utils/workload.py
Python
apache-2.0
5,408
# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import hashlib import os import os.path import tempfile import eventlet from keystoneauth1 import adapter as ks_adapter from keystoneauth1 import exceptions as ks_exc from keystoneauth1.identity import base as ks_identity from keystoneauth1 import session as ks_session import mock import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_context import context as common_context from oslo_context import fixture as context_fixture from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import fixture as utils_fixture from oslo_utils import units import six from nova import context from nova import exception from nova.objects import base as obj_base from nova.objects import instance as instance_obj from nova import test from nova.tests.unit.objects import test_objects from nova.tests.unit import utils as test_utils from nova import utils CONF = cfg.CONF class GenericUtilsTestCase(test.NoDBTestCase): def test_parse_server_string(self): result = utils.parse_server_string('::1') self.assertEqual(('::1', ''), result) result = utils.parse_server_string('[::1]:8773') self.assertEqual(('::1', '8773'), result) result = utils.parse_server_string('2001:db8::192.168.1.1') self.assertEqual(('2001:db8::192.168.1.1', ''), result) result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773') self.assertEqual(('2001:db8::192.168.1.1', '8773'), result) result = utils.parse_server_string('192.168.1.1') self.assertEqual(('192.168.1.1', ''), result) result = utils.parse_server_string('192.168.1.2:8773') self.assertEqual(('192.168.1.2', '8773'), result) result = utils.parse_server_string('192.168.1.3') self.assertEqual(('192.168.1.3', ''), result) result = utils.parse_server_string('www.example.com:8443') self.assertEqual(('www.example.com', '8443'), result) result = utils.parse_server_string('www.example.com') self.assertEqual(('www.example.com', ''), result) # error case result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) result = utils.parse_server_string('') self.assertEqual(('', ''), result) def test_hostname_unicode_sanitization(self): hostname = u"\u7684.test.example.com" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_periods(self): hostname = "....test.example.com..." self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_hostname_has_default(self): hostname = u"\u7684hello" defaultname = "Server-1" self.assertEqual("hello", utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_has_default(self): hostname = u"\u7684" defaultname = "Server-1" self.assertEqual(defaultname, utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_has_default_too_long(self): hostname = u"\u7684" defaultname = "a" * 64 self.assertEqual("a" * 63, utils.sanitize_hostname(hostname, defaultname)) def test_hostname_empty_no_default(self): hostname = u"\u7684" self.assertEqual("", utils.sanitize_hostname(hostname)) def test_hostname_empty_minus_period(self): hostname = "---..." self.assertEqual("", utils.sanitize_hostname(hostname)) def test_hostname_with_space(self): hostname = " a b c " self.assertEqual("a-b-c", utils.sanitize_hostname(hostname)) def test_hostname_too_long(self): hostname = "a" * 64 self.assertEqual(63, len(utils.sanitize_hostname(hostname))) def test_hostname_truncated_no_hyphen(self): hostname = "a" * 62 hostname = hostname + '-' + 'a' res = utils.sanitize_hostname(hostname) # we trim to 63 and then trim the trailing dash self.assertEqual(62, len(res)) self.assertFalse(res.endswith('-'), 'The hostname ends with a -') def test_generate_password(self): password = utils.generate_password() self.assertTrue([c for c in password if c in '0123456789']) self.assertTrue([c for c in password if c in 'abcdefghijklmnopqrstuvwxyz']) self.assertTrue([c for c in password if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) @mock.patch('nova.privsep.path.chown') def test_temporary_chown(self, mock_chown): with tempfile.NamedTemporaryFile() as f: with utils.temporary_chown(f.name, owner_uid=2): mock_chown.assert_called_once_with(f.name, uid=2) mock_chown.reset_mock() mock_chown.assert_called_once_with(f.name, uid=os.getuid()) def test_get_shortened_ipv6(self): self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe", utils.get_shortened_ipv6( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254")) self.assertEqual("::1", utils.get_shortened_ipv6( "0000:0000:0000:0000:0000:0000:0000:0001")) self.assertEqual("caca::caca:0:babe:201:102", utils.get_shortened_ipv6( "caca:0000:0000:caca:0000:babe:0201:0102")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "failure") def test_get_shortened_ipv6_cidr(self): self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600:0000:0000:0000:0000:0000:0000:0000/64")) self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600::1/64")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "failure") def test_safe_ip_format(self): self.assertEqual("[::1]", utils.safe_ip_format("::1")) self.assertEqual("127.0.0.1", utils.safe_ip_format("127.0.0.1")) self.assertEqual("[::ffff:127.0.0.1]", utils.safe_ip_format( "::ffff:127.0.0.1")) self.assertEqual("localhost", utils.safe_ip_format("localhost")) def test_format_remote_path(self): self.assertEqual("[::1]:/foo/bar", utils.format_remote_path("::1", "/foo/bar")) self.assertEqual("127.0.0.1:/foo/bar", utils.format_remote_path("127.0.0.1", "/foo/bar")) self.assertEqual("[::ffff:127.0.0.1]:/foo/bar", utils.format_remote_path("::ffff:127.0.0.1", "/foo/bar")) self.assertEqual("localhost:/foo/bar", utils.format_remote_path("localhost", "/foo/bar")) self.assertEqual("/foo/bar", utils.format_remote_path(None, "/foo/bar")) def test_get_hash_str(self): base_str = b"foo" base_unicode = u"foo" value = hashlib.md5(base_str).hexdigest() self.assertEqual( value, utils.get_hash_str(base_str)) self.assertEqual( value, utils.get_hash_str(base_unicode)) def test_get_obj_repr_unicode(self): instance = instance_obj.Instance() instance.display_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9' # should be a bytes string if python2 before conversion self.assertIs(str, type(repr(instance))) self.assertIs(six.text_type, type(utils.get_obj_repr_unicode(instance))) def test_use_rootwrap(self): self.flags(disable_rootwrap=False, group='workarounds') self.flags(rootwrap_config='foo') cmd = utils.get_root_helper() self.assertEqual('sudo nova-rootwrap foo', cmd) def test_use_sudo(self): self.flags(disable_rootwrap=True, group='workarounds') cmd = utils.get_root_helper() self.assertEqual('sudo', cmd) def test_ssh_execute(self): expected_args = ('ssh', '-o', 'BatchMode=yes', 'remotehost', 'ls', '-l') with mock.patch('nova.utils.execute') as mock_method: utils.ssh_execute('remotehost', 'ls', '-l') mock_method.assert_called_once_with(*expected_args) def test_generate_hostid(self): host = 'host' project_id = '9b9e3c847e904b0686e8ffb20e4c6381' hostId = 'fa123c6f74efd4aad95f84096f9e187caa0625925a9e7837b2b46792' self.assertEqual(hostId, utils.generate_hostid(host, project_id)) def test_generate_hostid_with_none_host(self): project_id = '9b9e3c847e904b0686e8ffb20e4c6381' self.assertEqual('', utils.generate_hostid(None, project_id)) class TestCachedFile(test.NoDBTestCase): @mock.patch('os.path.getmtime', return_value=1) def test_read_cached_file(self, getmtime): utils._FILE_CACHE = { '/this/is/a/fake': {"data": 1123, "mtime": 1} } fresh, data = utils.read_cached_file("/this/is/a/fake") fdata = utils._FILE_CACHE['/this/is/a/fake']["data"] self.assertEqual(fdata, data) @mock.patch('os.path.getmtime', return_value=2) def test_read_modified_cached_file(self, getmtime): utils._FILE_CACHE = { '/this/is/a/fake': {"data": 1123, "mtime": 1} } fake_contents = "lorem ipsum" with mock.patch('six.moves.builtins.open', mock.mock_open(read_data=fake_contents)): fresh, data = utils.read_cached_file("/this/is/a/fake") self.assertEqual(data, fake_contents) self.assertTrue(fresh) def test_delete_cached_file(self): filename = '/this/is/a/fake/deletion/of/cached/file' utils._FILE_CACHE = { filename: {"data": 1123, "mtime": 1} } self.assertIn(filename, utils._FILE_CACHE) utils.delete_cached_file(filename) self.assertNotIn(filename, utils._FILE_CACHE) def test_delete_cached_file_not_exist(self): # We expect that if cached file does not exist no Exception raised. filename = '/this/is/a/fake/deletion/attempt/of/not/cached/file' self.assertNotIn(filename, utils._FILE_CACHE) utils.delete_cached_file(filename) self.assertNotIn(filename, utils._FILE_CACHE) class RootwrapDaemonTesetCase(test.NoDBTestCase): @mock.patch('oslo_rootwrap.client.Client') def test_get_client(self, mock_client): mock_conf = mock.MagicMock() utils.RootwrapDaemonHelper(mock_conf) mock_client.assert_called_once_with( ["sudo", "nova-rootwrap-daemon", mock_conf]) @mock.patch('nova.utils.LOG.info') def test_execute(self, mock_info): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(0, None, None)) daemon.execute('a', 1, foo='bar', run_as_root=True) daemon.client.execute.assert_called_once_with(['a', '1'], None) mock_info.assert_has_calls([mock.call( u'Executing RootwrapDaemonHelper.execute cmd=[%(cmd)r] ' u'kwargs=[%(kwargs)r]', {'cmd': u'a 1', 'kwargs': {'run_as_root': True, 'foo': 'bar'}})]) def test_execute_with_kwargs(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(0, None, None)) daemon.execute('a', 1, foo='bar', run_as_root=True, process_input=True) daemon.client.execute.assert_called_once_with(['a', '1'], True) def test_execute_fail(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) self.assertRaises(processutils.ProcessExecutionError, daemon.execute, 'b', 2) def test_execute_pass_with_check_exit_code(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) daemon.execute('b', 2, check_exit_code=[-2]) def test_execute_fail_with_retry(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) self.assertRaises(processutils.ProcessExecutionError, daemon.execute, 'b', 2, attempts=2) daemon.client.execute.assert_has_calls( [mock.call(['b', '2'], None), mock.call(['b', '2'], None)]) @mock.patch('nova.utils.LOG.log') def test_execute_fail_and_logging(self, mock_log): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) self.assertRaises(processutils.ProcessExecutionError, daemon.execute, 'b', 2, attempts=2, loglevel=logging.CRITICAL, log_errors=processutils.LOG_ALL_ERRORS) mock_log.assert_has_calls( [ mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s', u'b 2'), mock.call(logging.CRITICAL, 'CMD "%(sanitized_cmd)s" returned: %(return_code)s ' 'in %(end_time)0.3fs', {'sanitized_cmd': u'b 2', 'return_code': -2, 'end_time': mock.ANY}), mock.call(logging.CRITICAL, u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r' u'\nstdout: %(stdout)r\nstderr: %(stderr)r', {'code': -2, 'cmd': u'b 2', 'stdout': u'None', 'stderr': u'None', 'desc': None}), mock.call(logging.CRITICAL, u'%r failed. Retrying.', u'b 2'), mock.call(logging.CRITICAL, u'Running cmd (subprocess): %s', u'b 2'), mock.call(logging.CRITICAL, 'CMD "%(sanitized_cmd)s" returned: %(return_code)s ' 'in %(end_time)0.3fs', {'sanitized_cmd': u'b 2', 'return_code': -2, 'end_time': mock.ANY}), mock.call(logging.CRITICAL, u'%(desc)r\ncommand: %(cmd)r\nexit code: %(code)r' u'\nstdout: %(stdout)r\nstderr: %(stderr)r', {'code': -2, 'cmd': u'b 2', 'stdout': u'None', 'stderr': u'None', 'desc': None}), mock.call(logging.CRITICAL, u'%r failed. Not Retrying.', u'b 2')] ) def test_trycmd(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(0, None, None)) daemon.trycmd('a', 1, foo='bar', run_as_root=True) daemon.client.execute.assert_called_once_with(['a', '1'], None) def test_trycmd_with_kwargs(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.execute = mock.Mock(return_value=('out', 'err')) daemon.trycmd('a', 1, foo='bar', run_as_root=True, loglevel=logging.WARN, log_errors=True, process_input=True, delay_on_retry=False, attempts=5, check_exit_code=[200]) daemon.execute.assert_called_once_with('a', 1, attempts=5, check_exit_code=[200], delay_on_retry=False, foo='bar', log_errors=True, loglevel=30, process_input=True, run_as_root=True) def test_trycmd_fail(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) expected_err = six.text_type('''\ Unexpected error while running command. Command: a 1 Exit code: -2''') out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True) daemon.client.execute.assert_called_once_with(['a', '1'], None) self.assertIn(expected_err, err) def test_trycmd_fail_with_rety(self): mock_conf = mock.MagicMock() daemon = utils.RootwrapDaemonHelper(mock_conf) daemon.client = mock.MagicMock() daemon.client.execute = mock.Mock(return_value=(-2, None, None)) expected_err = six.text_type('''\ Unexpected error while running command. Command: a 1 Exit code: -2''') out, err = daemon.trycmd('a', 1, foo='bar', run_as_root=True, attempts=3) self.assertIn(expected_err, err) daemon.client.execute.assert_has_calls( [mock.call(['a', '1'], None), mock.call(['a', '1'], None), mock.call(['a', '1'], None)]) class AuditPeriodTest(test.NoDBTestCase): def setUp(self): super(AuditPeriodTest, self).setUp() # a fairly random time to test with self.useFixture(utils_fixture.TimeFixture( datetime.datetime(second=23, minute=12, hour=8, day=5, month=3, year=2012))) def test_hour(self): begin, end = utils.last_completed_audit_period(unit='hour') self.assertEqual(begin, datetime.datetime( hour=7, day=5, month=3, year=2012)) self.assertEqual(end, datetime.datetime( hour=8, day=5, month=3, year=2012)) def test_hour_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='hour@10') self.assertEqual(begin, datetime.datetime( minute=10, hour=7, day=5, month=3, year=2012)) self.assertEqual(end, datetime.datetime( minute=10, hour=8, day=5, month=3, year=2012)) def test_hour_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='hour@30') self.assertEqual(begin, datetime.datetime( minute=30, hour=6, day=5, month=3, year=2012)) self.assertEqual(end, datetime.datetime( minute=30, hour=7, day=5, month=3, year=2012)) def test_day(self): begin, end = utils.last_completed_audit_period(unit='day') self.assertEqual(begin, datetime.datetime( day=4, month=3, year=2012)) self.assertEqual(end, datetime.datetime( day=5, month=3, year=2012)) def test_day_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='day@6') self.assertEqual(begin, datetime.datetime( hour=6, day=4, month=3, year=2012)) self.assertEqual(end, datetime.datetime( hour=6, day=5, month=3, year=2012)) def test_day_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='day@10') self.assertEqual(begin, datetime.datetime( hour=10, day=3, month=3, year=2012)) self.assertEqual(end, datetime.datetime( hour=10, day=4, month=3, year=2012)) def test_month(self): begin, end = utils.last_completed_audit_period(unit='month') self.assertEqual(begin, datetime.datetime( day=1, month=2, year=2012)) self.assertEqual(end, datetime.datetime( day=1, month=3, year=2012)) def test_month_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='month@2') self.assertEqual(begin, datetime.datetime( day=2, month=2, year=2012)) self.assertEqual(end, datetime.datetime( day=2, month=3, year=2012)) def test_month_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='month@15') self.assertEqual(begin, datetime.datetime( day=15, month=1, year=2012)) self.assertEqual(end, datetime.datetime( day=15, month=2, year=2012)) def test_year(self): begin, end = utils.last_completed_audit_period(unit='year') self.assertEqual(begin, datetime.datetime( day=1, month=1, year=2011)) self.assertEqual(end, datetime.datetime( day=1, month=1, year=2012)) def test_year_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='year@2') self.assertEqual(begin, datetime.datetime( day=1, month=2, year=2011)) self.assertEqual(end, datetime.datetime( day=1, month=2, year=2012)) def test_year_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='year@6') self.assertEqual(begin, datetime.datetime( day=1, month=6, year=2010)) self.assertEqual(end, datetime.datetime( day=1, month=6, year=2011)) class MetadataToDictTestCase(test.NoDBTestCase): def test_metadata_to_dict(self): self.assertEqual(utils.metadata_to_dict( [{'key': 'foo1', 'value': 'bar'}, {'key': 'foo2', 'value': 'baz'}]), {'foo1': 'bar', 'foo2': 'baz'}) def test_metadata_to_dict_with_include_deleted(self): metadata = [{'key': 'foo1', 'value': 'bar', 'deleted': 1442875429, 'other': 'stuff'}, {'key': 'foo2', 'value': 'baz', 'deleted': 0, 'other': 'stuff2'}] self.assertEqual({'foo1': 'bar', 'foo2': 'baz'}, utils.metadata_to_dict(metadata, include_deleted=True)) self.assertEqual({'foo2': 'baz'}, utils.metadata_to_dict(metadata, include_deleted=False)) # verify correct default behavior self.assertEqual(utils.metadata_to_dict(metadata), utils.metadata_to_dict(metadata, include_deleted=False)) def test_metadata_to_dict_empty(self): self.assertEqual({}, utils.metadata_to_dict([])) self.assertEqual({}, utils.metadata_to_dict([], include_deleted=True)) self.assertEqual({}, utils.metadata_to_dict([], include_deleted=False)) def test_dict_to_metadata(self): def sort_key(adict): return sorted(adict.items()) metadata = utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2')) expected = [{'key': 'foo1', 'value': 'bar1'}, {'key': 'foo2', 'value': 'bar2'}] self.assertEqual(sorted(metadata, key=sort_key), sorted(expected, key=sort_key)) def test_dict_to_metadata_empty(self): self.assertEqual(utils.dict_to_metadata({}), []) class ExpectedArgsTestCase(test.NoDBTestCase): def test_passes(self): @utils.expects_func_args('foo', 'baz') def dec(f): return f @dec def func(foo, bar, baz="lol"): pass # Call to ensure nothing errors func(None, None) def test_raises(self): @utils.expects_func_args('foo', 'baz') def dec(f): return f def func(bar, baz): pass self.assertRaises(TypeError, dec, func) def test_var_no_of_args(self): @utils.expects_func_args('foo') def dec(f): return f @dec def func(bar, *args, **kwargs): pass # Call to ensure nothing errors func(None) def test_more_layers(self): @utils.expects_func_args('foo', 'baz') def dec(f): return f def dec_2(f): def inner_f(*a, **k): return f() return inner_f @dec_2 def func(bar, baz): pass self.assertRaises(TypeError, dec, func) class StringLengthTestCase(test.NoDBTestCase): def test_check_string_length(self): self.assertIsNone(utils.check_string_length( 'test', 'name', max_length=255)) self.assertRaises(exception.InvalidInput, utils.check_string_length, 11, 'name', max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, '', 'name', min_length=1) self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, 'name', max_length=255) def test_check_string_length_noname(self): self.assertIsNone(utils.check_string_length( 'test', max_length=255)) self.assertRaises(exception.InvalidInput, utils.check_string_length, 11, max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, '', min_length=1) self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, max_length=255) class ValidateIntegerTestCase(test.NoDBTestCase): def test_exception_converted(self): self.assertRaises(exception.InvalidInput, utils.validate_integer, "im-not-an-int", "not-an-int") self.assertRaises(exception.InvalidInput, utils.validate_integer, 3.14, "Pie") self.assertRaises(exception.InvalidInput, utils.validate_integer, "299", "Sparta no-show", min_value=300, max_value=300) self.assertRaises(exception.InvalidInput, utils.validate_integer, 55, "doing 55 in a 54", max_value=54) self.assertRaises(exception.InvalidInput, utils.validate_integer, six.unichr(129), "UnicodeError", max_value=1000) class ValidateNeutronConfiguration(test.NoDBTestCase): def test_nova_network(self): self.flags(use_neutron=False) self.assertFalse(utils.is_neutron()) def test_neutron(self): self.flags(use_neutron=True) self.assertTrue(utils.is_neutron()) class AutoDiskConfigUtilTestCase(test.NoDBTestCase): def test_is_auto_disk_config_disabled(self): self.assertTrue(utils.is_auto_disk_config_disabled("Disabled ")) def test_is_auto_disk_config_disabled_none(self): self.assertFalse(utils.is_auto_disk_config_disabled(None)) def test_is_auto_disk_config_disabled_false(self): self.assertFalse(utils.is_auto_disk_config_disabled("false")) class GetSystemMetadataFromImageTestCase(test.NoDBTestCase): def get_image(self): image_meta = { "id": "fake-image", "name": "fake-name", "min_ram": 1, "min_disk": 1, "disk_format": "raw", "container_format": "bare", } return image_meta def get_flavor(self): flavor = { "id": "fake.flavor", "root_gb": 10, } return flavor def test_base_image_properties(self): image = self.get_image() # Verify that we inherit all the needed keys sys_meta = utils.get_system_metadata_from_image(image) for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(image[key], sys_meta.get(sys_key)) # Verify that everything else is ignored self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS)) def test_inherit_image_properties(self): image = self.get_image() image["properties"] = {"foo1": "bar", "foo2": "baz"} sys_meta = utils.get_system_metadata_from_image(image) # Verify that we inherit all the image properties for key, expected in image["properties"].items(): sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(sys_meta[sys_key], expected) def test_skip_image_properties(self): image = self.get_image() image["properties"] = { "foo1": "bar", "foo2": "baz", "mappings": "wizz", "img_block_device_mapping": "eek", } sys_meta = utils.get_system_metadata_from_image(image) # Verify that we inherit all the image properties for key, expected in image["properties"].items(): sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) if key in utils.SM_SKIP_KEYS: self.assertNotIn(sys_key, sys_meta) else: self.assertEqual(sys_meta[sys_key], expected) def test_vhd_min_disk_image(self): image = self.get_image() flavor = self.get_flavor() image["disk_format"] = "vhd" sys_meta = utils.get_system_metadata_from_image(image, flavor) # Verify that the min_disk property is taken from # flavor's root_gb when using vhd disk format sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk") self.assertEqual(sys_meta[sys_key], flavor["root_gb"]) def test_dont_inherit_empty_values(self): image = self.get_image() for key in utils.SM_INHERITABLE_KEYS: image[key] = None sys_meta = utils.get_system_metadata_from_image(image) # Verify that the empty properties have not been inherited for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertNotIn(sys_key, sys_meta) class GetImageFromSystemMetadataTestCase(test.NoDBTestCase): def get_system_metadata(self): sys_meta = { "image_min_ram": 1, "image_min_disk": 1, "image_disk_format": "raw", "image_container_format": "bare", } return sys_meta def test_image_from_system_metadata(self): sys_meta = self.get_system_metadata() sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar" sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz" sys_meta["%simg_block_device_mapping" % utils.SM_IMAGE_PROP_PREFIX] = "eek" image = utils.get_image_from_system_metadata(sys_meta) # Verify that we inherit all the needed keys for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(image[key], sys_meta.get(sys_key)) # Verify that we inherit the rest of metadata as properties self.assertIn("properties", image) for key in image["properties"]: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) self.assertEqual(image["properties"][key], sys_meta[sys_key]) self.assertNotIn("img_block_device_mapping", image["properties"]) def test_dont_inherit_empty_values(self): sys_meta = self.get_system_metadata() for key in utils.SM_INHERITABLE_KEYS: sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key) sys_meta[sys_key] = None image = utils.get_image_from_system_metadata(sys_meta) # Verify that the empty properties have not been inherited for key in utils.SM_INHERITABLE_KEYS: self.assertNotIn(key, image) class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase): def test_inherit_image_properties(self): properties = {"fake_prop": "fake_value"} volume = {"volume_image_metadata": properties} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual(properties, image_meta["properties"]) def test_image_size(self): volume = {"size": 10} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual(10 * units.Gi, image_meta["size"]) def test_image_status(self): volume = {} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual("active", image_meta["status"]) def test_values_conversion(self): properties = {"min_ram": "5", "min_disk": "7"} volume = {"volume_image_metadata": properties} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual(5, image_meta["min_ram"]) self.assertEqual(7, image_meta["min_disk"]) def test_suppress_not_image_properties(self): properties = {"min_ram": "256", "min_disk": "128", "image_id": "fake_id", "image_name": "fake_name", "container_format": "ami", "disk_format": "ami", "size": "1234", "checksum": "fake_checksum"} volume = {"volume_image_metadata": properties} image_meta = utils.get_image_metadata_from_volume(volume) self.assertEqual({}, image_meta["properties"]) self.assertEqual(0, image_meta["size"]) # volume's properties should not be touched self.assertNotEqual({}, properties) class ResourceFilterTestCase(test.NoDBTestCase): def _assert_filtering(self, res_list, filts, expected_tags): actual_tags = utils.filter_and_format_resource_metadata('instance', res_list, filts, 'metadata') self.assertJsonEqual(expected_tags, actual_tags) def test_filter_and_format_resource_metadata(self): # Create some tags # One overlapping pair, and one different key value pair # i1 : foo=bar, bax=wibble # i2 : foo=bar, baz=quux # resources i1 = { 'uuid': '1', 'metadata': {'foo': 'bar', 'bax': 'wibble'}, } i2 = { 'uuid': '2', 'metadata': {'foo': 'bar', 'baz': 'quux'}, } # Resources list rl = [i1, i2] # tags i11 = {'instance_id': '1', 'key': 'foo', 'value': 'bar'} i12 = {'instance_id': '1', 'key': 'bax', 'value': 'wibble'} i21 = {'instance_id': '2', 'key': 'foo', 'value': 'bar'} i22 = {'instance_id': '2', 'key': 'baz', 'value': 'quux'} # No filter self._assert_filtering(rl, [], [i11, i12, i21, i22]) self._assert_filtering(rl, {}, [i11, i12, i21, i22]) # Key search # Both should have tags with key 'foo' and value 'bar' self._assert_filtering(rl, {'key': 'foo', 'value': 'bar'}, [i11, i21]) # Both should have tags with key 'foo' self._assert_filtering(rl, {'key': 'foo'}, [i11, i21]) # Only i2 should have tags with key 'baz' and value 'quux' self._assert_filtering(rl, {'key': 'baz', 'value': 'quux'}, [i22]) # Only i2 should have tags with value 'quux' self._assert_filtering(rl, {'value': 'quux'}, [i22]) # Empty list should be returned when no tags match self._assert_filtering(rl, {'key': 'split', 'value': 'banana'}, []) # Multiple values # Only i2 should have tags with key 'baz' and values in the set # ['quux', 'wibble'] self._assert_filtering(rl, {'key': 'baz', 'value': ['quux', 'wibble']}, [i22]) # But when specified as two different filters, no tags should be # returned. This is because, the filter will mean "return tags which # have (key=baz AND value=quux) AND (key=baz AND value=wibble) self._assert_filtering(rl, [{'key': 'baz', 'value': 'quux'}, {'key': 'baz', 'value': 'wibble'}], []) # Test for regex self._assert_filtering(rl, {'value': '\\Aqu..*\\Z(?s)'}, [i22]) # Make sure bug #1365887 is fixed i1['metadata']['key3'] = 'a' self._assert_filtering(rl, {'value': 'banana'}, []) class SafeTruncateTestCase(test.NoDBTestCase): def test_exception_to_dict_with_long_message_3_bytes(self): # Generate Chinese byte string whose length is 300. This Chinese UTF-8 # character occupies 3 bytes. After truncating, the byte string length # should be 255. msg = u'\u8d75' * 100 truncated_msg = utils.safe_truncate(msg, 255) byte_message = encodeutils.safe_encode(truncated_msg) self.assertEqual(255, len(byte_message)) def test_exception_to_dict_with_long_message_2_bytes(self): # Generate Russian byte string whose length is 300. This Russian UTF-8 # character occupies 2 bytes. After truncating, the byte string length # should be 254. msg = encodeutils.safe_decode('\xd0\x92' * 150) truncated_msg = utils.safe_truncate(msg, 255) byte_message = encodeutils.safe_encode(truncated_msg) self.assertEqual(254, len(byte_message)) class SpawnNTestCase(test.NoDBTestCase): def setUp(self): super(SpawnNTestCase, self).setUp() self.useFixture(context_fixture.ClearRequestContext()) self.spawn_name = 'spawn_n' def test_spawn_n_no_context(self): self.assertIsNone(common_context.get_current()) def _fake_spawn(func, *args, **kwargs): # call the method to ensure no error is raised func(*args, **kwargs) self.assertEqual('test', args[0]) def fake(arg): pass with mock.patch.object(eventlet, self.spawn_name, _fake_spawn): getattr(utils, self.spawn_name)(fake, 'test') self.assertIsNone(common_context.get_current()) def test_spawn_n_context(self): self.assertIsNone(common_context.get_current()) ctxt = context.RequestContext('user', 'project') def _fake_spawn(func, *args, **kwargs): # call the method to ensure no error is raised func(*args, **kwargs) self.assertEqual(ctxt, args[0]) self.assertEqual('test', kwargs['kwarg1']) def fake(context, kwarg1=None): pass with mock.patch.object(eventlet, self.spawn_name, _fake_spawn): getattr(utils, self.spawn_name)(fake, ctxt, kwarg1='test') self.assertEqual(ctxt, common_context.get_current()) def test_spawn_n_context_different_from_passed(self): self.assertIsNone(common_context.get_current()) ctxt = context.RequestContext('user', 'project') ctxt_passed = context.RequestContext('user', 'project', overwrite=False) self.assertEqual(ctxt, common_context.get_current()) def _fake_spawn(func, *args, **kwargs): # call the method to ensure no error is raised func(*args, **kwargs) self.assertEqual(ctxt_passed, args[0]) self.assertEqual('test', kwargs['kwarg1']) def fake(context, kwarg1=None): pass with mock.patch.object(eventlet, self.spawn_name, _fake_spawn): getattr(utils, self.spawn_name)(fake, ctxt_passed, kwarg1='test') self.assertEqual(ctxt, common_context.get_current()) class SpawnTestCase(SpawnNTestCase): def setUp(self): super(SpawnTestCase, self).setUp() self.spawn_name = 'spawn' class UT8TestCase(test.NoDBTestCase): def test_none_value(self): self.assertIsInstance(utils.utf8(None), type(None)) def test_bytes_value(self): some_value = b"fake data" return_value = utils.utf8(some_value) # check that type of returned value doesn't changed self.assertIsInstance(return_value, type(some_value)) self.assertEqual(some_value, return_value) def test_not_text_type(self): return_value = utils.utf8(1) self.assertEqual(b"1", return_value) self.assertIsInstance(return_value, six.binary_type) def test_text_type_with_encoding(self): some_value = 'test\u2026config' self.assertEqual(some_value, utils.utf8(some_value).decode("utf-8")) class TestObjectCallHelpers(test.NoDBTestCase): def test_with_primitives(self): tester = mock.Mock() tester.foo(1, 'two', three='four') self.assertTrue( test_utils.obj_called_with(tester.foo, 1, 'two', three='four')) self.assertFalse( test_utils.obj_called_with(tester.foo, 42, 'two', three='four')) def test_with_object(self): obj_base.NovaObjectRegistry.register(test_objects.MyObj) obj = test_objects.MyObj(foo=1, bar='baz') tester = mock.Mock() tester.foo(1, obj) self.assertTrue( test_utils.obj_called_with( tester.foo, 1, test_objects.MyObj(foo=1, bar='baz'))) self.assertFalse( test_utils.obj_called_with( tester.foo, 1, test_objects.MyObj(foo=2, bar='baz'))) def test_with_object_multiple(self): obj_base.NovaObjectRegistry.register(test_objects.MyObj) obj1 = test_objects.MyObj(foo=1, bar='baz') obj2 = test_objects.MyObj(foo=3, bar='baz') tester = mock.Mock() tester.foo(1, obj1) tester.foo(1, obj1) tester.foo(3, obj2) # Called at all self.assertTrue( test_utils.obj_called_with( tester.foo, 1, test_objects.MyObj(foo=1, bar='baz'))) # Called once (not true) self.assertFalse( test_utils.obj_called_once_with( tester.foo, 1, test_objects.MyObj(foo=1, bar='baz'))) # Not called with obj.foo=2 self.assertFalse( test_utils.obj_called_with( tester.foo, 1, test_objects.MyObj(foo=2, bar='baz'))) # Called with obj.foo.3 self.assertTrue( test_utils.obj_called_with( tester.foo, 3, test_objects.MyObj(foo=3, bar='baz'))) # Called once with obj.foo.3 self.assertTrue( test_utils.obj_called_once_with( tester.foo, 3, test_objects.MyObj(foo=3, bar='baz'))) class GetKSAAdapterTestCase(test.NoDBTestCase): """Tests for nova.utils.get_endpoint_data().""" def setUp(self): super(GetKSAAdapterTestCase, self).setUp() self.sess = mock.create_autospec(ks_session.Session, instance=True) self.auth = mock.create_autospec(ks_identity.BaseIdentityPlugin, instance=True) load_sess_p = mock.patch( 'keystoneauth1.loading.load_session_from_conf_options') self.addCleanup(load_sess_p.stop) self.load_sess = load_sess_p.start() self.load_sess.return_value = self.sess load_adap_p = mock.patch( 'keystoneauth1.loading.load_adapter_from_conf_options') self.addCleanup(load_adap_p.stop) self.load_adap = load_adap_p.start() load_auth_p = mock.patch( 'keystoneauth1.loading.load_auth_from_conf_options') self.addCleanup(load_auth_p.stop) self.load_auth = load_auth_p.start() self.load_auth.return_value = self.auth def test_bogus_service_type(self): self.assertRaises(exception.ConfGroupForServiceTypeNotFound, utils.get_ksa_adapter, 'bogus') self.load_auth.assert_not_called() self.load_sess.assert_not_called() self.load_adap.assert_not_called() def test_all_params(self): ret = utils.get_ksa_adapter( 'image', ksa_auth='auth', ksa_session='sess', min_version='min', max_version='max') # Returned the result of load_adapter_from_conf_options self.assertEqual(self.load_adap.return_value, ret) # Because we supplied ksa_auth, load_auth* not called self.load_auth.assert_not_called() # Ditto ksa_session/load_session* self.load_sess.assert_not_called() # load_adapter* called with what we passed in (and the right group) self.load_adap.assert_called_once_with( utils.CONF, 'glance', session='sess', auth='auth', min_version='min', max_version='max', raise_exc=False) def test_auth_from_session(self): self.sess.auth = 'auth' ret = utils.get_ksa_adapter('baremetal', ksa_session=self.sess) # Returned the result of load_adapter_from_conf_options self.assertEqual(self.load_adap.return_value, ret) # Because ksa_auth found in ksa_session, load_auth* not called self.load_auth.assert_not_called() # Because we supplied ksa_session, load_session* not called self.load_sess.assert_not_called() # load_adapter* called with the auth from the session self.load_adap.assert_called_once_with( utils.CONF, 'ironic', session=self.sess, auth='auth', min_version=None, max_version=None, raise_exc=False) def test_load_auth_and_session(self): ret = utils.get_ksa_adapter('volumev3') # Returned the result of load_adapter_from_conf_options self.assertEqual(self.load_adap.return_value, ret) # Had to load the auth self.load_auth.assert_called_once_with(utils.CONF, 'cinder') # Had to load the session, passed in the loaded auth self.load_sess.assert_called_once_with(utils.CONF, 'cinder', auth=self.auth) # load_adapter* called with the loaded auth & session self.load_adap.assert_called_once_with( utils.CONF, 'cinder', session=self.sess, auth=self.auth, min_version=None, max_version=None, raise_exc=False) class GetEndpointTestCase(test.NoDBTestCase): def setUp(self): super(GetEndpointTestCase, self).setUp() self.adap = mock.create_autospec(ks_adapter.Adapter, instance=True) self.adap.endpoint_override = None self.adap.service_type = 'stype' self.adap.interface = ['admin', 'public'] def test_endpoint_override(self): self.adap.endpoint_override = 'foo' self.assertEqual('foo', utils.get_endpoint(self.adap)) self.adap.get_endpoint_data.assert_not_called() self.adap.get_endpoint.assert_not_called() def test_image_good(self): self.adap.service_type = 'image' self.adap.get_endpoint_data.return_value.catalog_url = 'url' self.assertEqual('url', utils.get_endpoint(self.adap)) self.adap.get_endpoint_data.assert_called_once_with() self.adap.get_endpoint.assert_not_called() def test_image_bad(self): self.adap.service_type = 'image' self.adap.get_endpoint_data.side_effect = AttributeError self.adap.get_endpoint.return_value = 'url' self.assertEqual('url', utils.get_endpoint(self.adap)) self.adap.get_endpoint_data.assert_called_once_with() self.adap.get_endpoint.assert_called_once_with() def test_nonimage_good(self): self.adap.get_endpoint.return_value = 'url' self.assertEqual('url', utils.get_endpoint(self.adap)) self.adap.get_endpoint_data.assert_not_called() self.adap.get_endpoint.assert_called_once_with() def test_nonimage_try_interfaces(self): self.adap.get_endpoint.side_effect = (ks_exc.EndpointNotFound, 'url') self.assertEqual('url', utils.get_endpoint(self.adap)) self.adap.get_endpoint_data.assert_not_called() self.assertEqual(2, self.adap.get_endpoint.call_count) self.assertEqual('admin', self.adap.interface) def test_nonimage_try_interfaces_fail(self): self.adap.get_endpoint.side_effect = ks_exc.EndpointNotFound self.assertRaises(ks_exc.EndpointNotFound, utils.get_endpoint, self.adap) self.adap.get_endpoint_data.assert_not_called() self.assertEqual(3, self.adap.get_endpoint.call_count) self.assertEqual('public', self.adap.interface) class RunOnceTests(test.NoDBTestCase): fake_logger = mock.MagicMock() @utils.run_once("already ran once", fake_logger) def dummy_test_func(self, fail=False): if fail: raise ValueError() return True def setUp(self): super(RunOnceTests, self).setUp() self.dummy_test_func.reset() RunOnceTests.fake_logger.reset_mock() def test_wrapped_funtions_called_once(self): self.assertFalse(self.dummy_test_func.called) result = self.dummy_test_func() self.assertTrue(result) self.assertTrue(self.dummy_test_func.called) # assert that on second invocation no result # is returned and that the logger is invoked. result = self.dummy_test_func() RunOnceTests.fake_logger.assert_called_once() self.assertIsNone(result) def test_wrapped_funtions_called_once_raises(self): self.assertFalse(self.dummy_test_func.called) self.assertRaises(ValueError, self.dummy_test_func, fail=True) self.assertTrue(self.dummy_test_func.called) # assert that on second invocation no result # is returned and that the logger is invoked. result = self.dummy_test_func() RunOnceTests.fake_logger.assert_called_once() self.assertIsNone(result) def test_wrapped_funtions_can_be_reset(self): # assert we start with a clean state self.assertFalse(self.dummy_test_func.called) result = self.dummy_test_func() self.assertTrue(result) self.dummy_test_func.reset() # assert we restored a clean state self.assertFalse(self.dummy_test_func.called) result = self.dummy_test_func() self.assertTrue(result) # assert that we never called the logger RunOnceTests.fake_logger.assert_not_called() def test_reset_calls_cleanup(self): mock_clean = mock.Mock() @utils.run_once("already ran once", self.fake_logger, cleanup=mock_clean) def f(): pass f() self.assertTrue(f.called) f.reset() self.assertFalse(f.called) mock_clean.assert_called_once_with() def test_clean_is_not_called_at_reset_if_wrapped_not_called(self): mock_clean = mock.Mock() @utils.run_once("already ran once", self.fake_logger, cleanup=mock_clean) def f(): pass self.assertFalse(f.called) f.reset() self.assertFalse(f.called) self.assertFalse(mock_clean.called) def test_reset_works_even_if_cleanup_raises(self): mock_clean = mock.Mock(side_effect=ValueError()) @utils.run_once("already ran once", self.fake_logger, cleanup=mock_clean) def f(): pass f() self.assertTrue(f.called) self.assertRaises(ValueError, f.reset) self.assertFalse(f.called) mock_clean.assert_called_once_with()
mikalstill/nova
nova/tests/unit/test_utils.py
Python
apache-2.0
57,298
#!/usr/bin/python # -*- coding: utf-8 -*- from collections import defaultdict import re, sys import traceback import cgi, cgitb import json from paths import ether_url from modules.gitdox_sql import * from modules.ether import get_socialcalc, make_spreadsheet, exec_via_temp, get_timestamps, parse_ether from modules.validation.xml_validator import XmlValidator from modules.validation.meta_validator import MetaValidator from modules.validation.ether_validator import EtherValidator from modules.validation.export_validator import ExportValidator from modules.validation.bulk_export_validator import BulkExportValidator import modules.redis_cache as cache def highlight_cells(cells, ether_url, ether_doc_name): old_ether = get_socialcalc(ether_url, ether_doc_name) old_ether_lines = old_ether.splitlines() new_ether_lines = [] old_color_numbers = [] new_color_number = '1' for line in old_ether_lines: color_line = re.match(r'color:(\d+):(rgb.*$)', line) if color_line is not None: if color_line.group(2) == 'rgb(242, 242, 142)': old_color_numbers.append(color_line.group(1)) else: new_color_number = str(1 + int(color_line.group(1))) if len(old_color_numbers) > 0: new_color_number = old_color_numbers[0] for line in old_ether_lines: parts = line.split(":") # Check for pure formatting cells, e.g. cell:K15:f:1 if len(parts) == 4: if parts[2] == "f": # Pure formatting cell, no content continue parsed_cell = re.match(r'cell:([A-Z]+)(\d+)(:.*)$', line) if parsed_cell is not None: col = parsed_cell.group(1) row = parsed_cell.group(2) col_row = col + row other = parsed_cell.group(3) bg = re.search(r':bg:(\d+)($|:)', other) if bg is not None: bg = bg.group(1) span = parts[-1] if "rowspan:" in line else "1" spanned_rows = [col + str(int(row) + x) for x in range(int(span))] highlighted_spanned_rows = [x for x in spanned_rows if x in cells] if len(highlighted_spanned_rows) > 0: if bg is not None: if bg != new_color_number: new_line = re.sub(r':bg:' + bg, r':bg:' + new_color_number, line) else: new_line = line else: new_line = line + ':bg:' + new_color_number else: if bg is not None: if bg in old_color_numbers: new_line = re.sub(r':bg:' + bg, r'', line) else: new_line = line else: new_line = line new_ether_lines.append(new_line) elif re.match(r'sheet:', line) is not None: new_ether_lines.append(line) if new_color_number not in old_color_numbers: new_ether_lines.append('color:' + new_color_number + ':rgb(242, 242, 142)') else: new_ether_lines.append(line) new_ether = '\n'.join(new_ether_lines) make_spreadsheet(new_ether, ether_url + "_/" + ether_doc_name, "socialcalc") def validate_doc_xml(doc_id, rules): doc_info = get_doc_info(doc_id) doc_name = doc_info[0] doc_corpus = doc_info[1] doc_content = get_doc_content(doc_id) cache_hit = cache.get_report(doc_id, "xml") if cache_hit: return cache_hit report = "" xml_rule_fired = False for rule in rules: if not rule.applies(doc_name, doc_corpus): continue xml_rule_fired = True res = rule.validate(doc_content) report += res if not xml_rule_fired: report = "<strong>No applicable XML schemas</strong><br>" elif report: report = "<strong>XML problems:</strong><br>" + report else: report = "<strong>XML is valid</strong><br>" cache.cache_validation_result(doc_id, "xml", report) return report def validate_doc_meta(doc_id, rules): # metadata validation report = "" meta = get_doc_meta(doc_id) doc_info = get_doc_info(doc_id) doc_name = doc_info[0] doc_corpus = doc_info[1] cache_hit = cache.get_report(doc_id, "meta") if cache_hit: return cache_hit meta_rule_fired = False for rule in rules: if not rule.applies(doc_name, doc_corpus): continue meta_rule_fired = True res = rule.validate(meta) if len(res['tooltip']) > 0: report += ("""<div class="tooltip">""" + res['report'][:-5] + """ <i class="fa fa-ellipsis-h"></i>""" + "<span class=\"msg\">" + res['tooltip'] + "</span>" + "</div>") else: report += res['report'] if not meta_rule_fired: report = "<strong>No applicable metadata rules</strong><br>" elif len(report) == 0: report = "<strong>Metadata is valid<br></strong>" else: report = "<strong>Metadata Problems:</strong><br>" + report cache.cache_validation_result(doc_id, "meta", report) return report def validate_doc_ether(doc_id, rules, timestamps=None, editor=False): doc_info = get_doc_info(doc_id) doc_name = doc_info[0] doc_corpus = doc_info[1] ether_doc_name = "gd_" + doc_corpus + "_" + doc_name if not timestamps: timestamps = get_timestamps(ether_url) last_edit = int(timestamps[ether_doc_name]) if last_edit <= int(cache.get_timestamp(doc_id, "ether")): return cache.get_report(doc_id, "ether") socialcalc = get_socialcalc(ether_url, ether_doc_name) parsed_ether = parse_ether(socialcalc,doc_id=doc_id) report = '' cells = [] ether_rule_fired = False for rule in rules: if not rule.applies(doc_name, doc_corpus): continue ether_rule_fired = True res = rule.validate(parsed_ether) if len(res['tooltip']) > 0: report += ("""<div class="tooltip">""" + res['report'][:-5] + """ <i class="fa fa-ellipsis-h"></i>""" + "<span class=\"msg\">" + res['tooltip'] + "</span>" + "</div>") else: report += res['report'] cells += res['cells'] if not ether_rule_fired: report = "<strong>No applicable spreadsheet validation rules</strong><br>" elif report: report = "<strong>Spreadsheet Problems:</strong><br>" + report else: report = "<strong>Spreadsheet is valid</strong><br>" cache.cache_timestamped_validation_result(doc_id, "ether", report, last_edit) if editor: highlight_cells(cells, ether_url, ether_doc_name) return report def validate_doc_export(doc_id, rules, timestamps=None): doc_info = get_doc_info(doc_id) doc_name = doc_info[0] doc_corpus = doc_info[1] doc_content = get_doc_content(doc_id) ether_doc_name = "gd_" + doc_corpus + "_" + doc_name if not timestamps: timestamps = get_timestamps(ether_url) last_edit = int(timestamps[ether_doc_name]) if last_edit <= int(cache.get_timestamp(doc_id, "export")): return cache.get_report(doc_id, "export") socialcalc = get_socialcalc(ether_url, ether_doc_name) report = "" export_rule_fired = False for rule in rules: if not rule.applies(doc_name, doc_corpus): continue export_rule_fired = True res = rule.validate(socialcalc, doc_id) report += res if not export_rule_fired: report = "<strong>No applicable export schemas</strong><br>" elif report: report = "<strong>Export problems:</strong><br>" + report else: report = "<strong>Export is valid</strong><br>" cache.cache_timestamped_validation_result(doc_id, "export", report, last_edit) return report def validate_doc(doc_id): _, _, _, _, _, doc_mode, _ = get_doc_info(doc_id) report = "" # metadata meta_rules = [MetaValidator(x) for x in get_meta_rules()] report += validate_doc_meta(doc_id, meta_rules) # data if doc_mode == "xml": xml_rules = [XmlValidator(x) for x in get_xml_rules()] report += validate_doc_xml(doc_id, xml_rules) else: ether_rules = [EtherValidator(x) for x in get_ether_rules()] report += validate_doc_ether(doc_id, ether_rules, editor=True) export_rules = [ExportValidator(x) for x in get_export_rules()] report += validate_doc_export(doc_id, export_rules) return report def validate_all_meta(docs): reports = {} rules = [MetaValidator(x) for x in get_meta_rules()] for doc in docs: doc_id, doc_name, corpus, doc_mode, doc_schema, validation, timestamp = doc reports[doc_id] = validate_doc_meta(doc_id, rules) return json.dumps(reports) def validate_all_xml(docs): reports = {} rules = [XmlValidator(x) for x in get_xml_rules()] for doc in docs: doc_id, doc_name, corpus, doc_mode, doc_schema, validation, timestamp = doc if doc_mode != "xml": continue reports[doc_id] = validate_doc_xml(doc_id, rules) return json.dumps(reports) def validate_all_ether(docs): reports = {} rules = [EtherValidator(x) for x in get_ether_rules()] timestamps = get_timestamps(ether_url) for doc in docs: doc_id, doc_name, corpus, doc_mode, doc_schema, validation, timestamp = doc if doc_mode != "ether": continue reports[doc_id] = validate_doc_ether(doc_id, rules, timestamps=timestamps) return json.dumps(reports) def validate_all_export(docs): reports = {} rules = [ExportValidator(x) for x in get_export_rules()] timestamps = get_timestamps(ether_url) for doc in docs: doc_id, doc_name, corpus, doc_mode, doc_schema, validation, timestamp = doc if doc_mode != "ether": continue reports[doc_id] = validate_doc_export(doc_id, rules, timestamps=timestamps) return json.dumps(reports) def validate_all_export_bulk(docs): cached_reports = {} reports = [] rules = [BulkExportValidator(x) for x in get_export_rules()] timestamps = get_timestamps(ether_url) doc_ids = [] for doc in docs: doc_id, doc_name, doc_corpus, doc_mode, doc_schema, validation, timestamp = doc if doc_mode != "ether": continue ether_doc_name = "gd_" + doc_corpus + "_" + doc_name last_edit = int(timestamps[ether_doc_name]) if last_edit <= int(cache.get_timestamp(doc_id, "export")): cached_reports[doc_id] = cache.get_report(doc_id, "export") continue doc_ids.append(doc_id) for rule in rules: report, fired = rule.validate(doc_ids) if fired: reports.append(report) def merge_dicts(dictlist): keys = apply(set().union, dictlist) ret_dict = {} for k in keys: for d in dictlist: ret_dict[k] = "".join(d.get(k,'')) #return {k: "".join(d.get(k, '') for d in dictlist) for k in keys} return ret_dict reports = merge_dicts([cached_reports] + reports) for doc_id in doc_ids: if doc_id not in reports: reports[doc_id] = "No applicable export schemas" for doc_id, report in reports.items(): if doc_id in cached_reports: continue doc_name, doc_corpus, _, _, _, _, _ = get_doc_info(doc_id) ether_doc_name = "gd_" + doc_corpus + "_" + doc_name last_edit = int(timestamps[ether_doc_name]) cache.cache_timestamped_validation_result(doc_id, "export", report, last_edit) return json.dumps(reports) #@profile def validate_all_docs(validation_type): docs = generic_query("SELECT id, name, corpus, mode, schema, validation, timestamp FROM docs", None) if validation_type == "meta": return validate_all_meta(docs) elif validation_type == "xml": return validate_all_xml(docs) elif validation_type == "ether": return validate_all_ether(docs) elif validation_type == "export": # Faster, but not correct at the moment because of xmllint output oddities # E.g.: when only one doc is supplied, output does not appear to contain the # "<docname> fails to validate" text #return validate_all_export_bulk(docs) return validate_all_export(docs) else: raise Exception("Unknown validation type: " + validation_type) if __name__ == "__main__": mode = "" schema = "" if len(sys.argv) > 1: from argparse import ArgumentParser p = ArgumentParser() p.add_argument("-d","--doc",help="doc ID in gitdox.db or 'all'", default="all") p.add_argument("-t","--type",help="if --doc is all, the kind of validation (meta, xml, ether, or export)", default="export") p.add_argument("-i","--invalidate",action="store_true",help="invalidate all documents before running validation") opts = p.parse_args() doc_id = opts.doc if opts.invalidate: invalidate_doc_by_name("%","%") if doc_id != "all": _, _, _, _, _, mode, schema = get_doc_info(doc_id) else: parameter = cgi.FieldStorage() doc_id = parameter.getvalue("doc_id") mode = parameter.getvalue("mode") schema = parameter.getvalue("schema") # Either we validate specific docs... if doc_id != "all": print("Content-type:text/html\n\n") try: print(validate_doc(doc_id).encode("utf8")) except Exception as e: print("""<html><body><h1>Loading Error</h1> <p>For some reason, this page failed to load.</p> <p>Please send this to your system administrator:</p> <pre>""") traceback.print_exc(e, file=sys.stdout) print("""</pre></body></html>""") # or we validate all docs, but only for one kind of validation else: print("Content-type:application/json\n\n") form = cgi.FieldStorage() validation_type = opts.type if len(sys.argv) > 1 else form['validation_type'].value print validate_all_docs(validation_type).encode('utf8') #print validate_all_docs().encode("utf8")
cligu/gitdox
validate.py
Python
apache-2.0
12,616
# dynamic authenticator setup; see config4.json as well from autobahn.wamp.types import Deny def _authenticate(realm, authid, details): """ Authenticate something connecting to the backend. This should only be other proxy processes. We have access to all the "real" front-end credentials the proxy negotiated with the "real" client .. so, we could base decisions here on that if we want. """ if details['authextra']['proxy_authid'] not in ["user1", "user2"]: return Deny("Unknown user '{}'".format(details['authextra']['proxy_authid'])) if details['authextra']['proxy_authrole'] != 'user': return Deny("only anonymous access") # this is the pubkey for our node (key.pub from .crossbar # directory) because the "client" (the proxy process) will use # key.priv by default return { "pubkey": "a1fd4c2c2954b92ef784b4d14442e2eb159cc74040bb59e43e84b3c56719256f", "role": "anonymous" } def setup(session, details): """ This hook is called because of this stanza from config4.json: "components": [ { "type": "function", "realm": "realm1", "role": "auth", "callbacks": { "join": "dynauth.setup" } } ] ...which configures the method 'dynauth.setup' (this function) as the 'on_join' callback. All we have to do here is register our authenticator function (we could do more work async if required). """ return session.register(_authenticate, "auth.backend_cryptosign")
crossbario/crossbar-examples
proxy/dynauth.py
Python
apache-2.0
1,665
"""Philips Hue sensors platform tests.""" import asyncio from collections import deque import logging from unittest.mock import Mock import aiohue from aiohue.sensors import Sensors import pytest from homeassistant import config_entries from homeassistant.components import hue from homeassistant.components.hue import sensor_base as hue_sensor_base _LOGGER = logging.getLogger(__name__) PRESENCE_SENSOR_1_PRESENT = { "state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "sensitivity": 2, "sensitivitymax": 2, "pending": [], }, "name": "Living room sensor", "type": "ZLLPresence", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue motion sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:77-02-0406", "capabilities": {"certified": True}, } LIGHT_LEVEL_SENSOR_1 = { "state": { "lightlevel": 1, "dark": True, "daylight": True, "lastupdated": "2019-01-01T01:00:00", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "tholddark": 12467, "tholdoffset": 7000, "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue ambient light sensor 1", "type": "ZLLLightLevel", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue ambient light sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:77-02-0400", "capabilities": {"certified": True}, } TEMPERATURE_SENSOR_1 = { "state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue temperature sensor 1", "type": "ZLLTemperature", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue temperature sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:77-02-0402", "capabilities": {"certified": True}, } PRESENCE_SENSOR_2_NOT_PRESENT = { "state": {"presence": False, "lastupdated": "2019-01-01T00:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "sensitivity": 2, "sensitivitymax": 2, "pending": [], }, "name": "Kitchen sensor", "type": "ZLLPresence", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue motion sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:88-02-0406", "capabilities": {"certified": True}, } LIGHT_LEVEL_SENSOR_2 = { "state": { "lightlevel": 10001, "dark": True, "daylight": True, "lastupdated": "2019-01-01T01:00:00", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "tholddark": 12467, "tholdoffset": 7000, "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue ambient light sensor 2", "type": "ZLLLightLevel", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue ambient light sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:88-02-0400", "capabilities": {"certified": True}, } TEMPERATURE_SENSOR_2 = { "state": {"temperature": 1875, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue temperature sensor 2", "type": "ZLLTemperature", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue temperature sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:88-02-0402", "capabilities": {"certified": True}, } PRESENCE_SENSOR_3_PRESENT = { "state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "sensitivity": 2, "sensitivitymax": 2, "pending": [], }, "name": "Bedroom sensor", "type": "ZLLPresence", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue motion sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:99-02-0406", "capabilities": {"certified": True}, } LIGHT_LEVEL_SENSOR_3 = { "state": { "lightlevel": 1, "dark": True, "daylight": True, "lastupdated": "2019-01-01T01:00:00", }, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "tholddark": 12467, "tholdoffset": 7000, "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue ambient light sensor 3", "type": "ZLLLightLevel", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue ambient light sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:99-02-0400", "capabilities": {"certified": True}, } TEMPERATURE_SENSOR_3 = { "state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"}, "swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"}, "config": { "on": True, "battery": 100, "reachable": True, "alert": "none", "ledindication": False, "usertest": False, "pending": [], }, "name": "Hue temperature sensor 3", "type": "ZLLTemperature", "modelid": "SML001", "manufacturername": "Philips", "productname": "Hue temperature sensor", "swversion": "6.1.1.27575", "uniqueid": "00:11:22:33:44:55:66:99-02-0402", "capabilities": {"certified": True}, } UNSUPPORTED_SENSOR = { "state": {"status": 0, "lastupdated": "2019-01-01T01:00:00"}, "config": {"on": True, "reachable": True}, "name": "Unsupported sensor", "type": "CLIPGenericStatus", "modelid": "PHWA01", "manufacturername": "Philips", "swversion": "1.0", "uniqueid": "arbitrary", "recycle": True, } SENSOR_RESPONSE = { "1": PRESENCE_SENSOR_1_PRESENT, "2": LIGHT_LEVEL_SENSOR_1, "3": TEMPERATURE_SENSOR_1, "4": PRESENCE_SENSOR_2_NOT_PRESENT, "5": LIGHT_LEVEL_SENSOR_2, "6": TEMPERATURE_SENSOR_2, } def create_mock_bridge(hass): """Create a mock Hue bridge.""" bridge = Mock( hass=hass, available=True, authorized=True, allow_unreachable=False, allow_groups=False, api=Mock(), reset_jobs=[], spec=hue.HueBridge, ) bridge.sensor_manager = hue_sensor_base.SensorManager(bridge) bridge.mock_requests = [] # We're using a deque so we can schedule multiple responses # and also means that `popleft()` will blow up if we get more updates # than expected. bridge.mock_sensor_responses = deque() async def mock_request(method, path, **kwargs): kwargs["method"] = method kwargs["path"] = path bridge.mock_requests.append(kwargs) if path == "sensors": return bridge.mock_sensor_responses.popleft() return None async def async_request_call(task): await task() bridge.async_request_call = async_request_call bridge.api.config.apiversion = "9.9.9" bridge.api.sensors = Sensors({}, mock_request) return bridge @pytest.fixture def mock_bridge(hass): """Mock a Hue bridge.""" return create_mock_bridge(hass) async def setup_bridge(hass, mock_bridge, hostname=None): """Load the Hue platform with the provided bridge.""" if hostname is None: hostname = "mock-host" hass.config.components.add(hue.DOMAIN) config_entry = config_entries.ConfigEntry( 1, hue.DOMAIN, "Mock Title", {"host": hostname}, "test", config_entries.CONN_CLASS_LOCAL_POLL, system_options={}, ) mock_bridge.config_entry = config_entry hass.data[hue.DOMAIN] = {config_entry.entry_id: mock_bridge} await hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor") await hass.config_entries.async_forward_entry_setup(config_entry, "sensor") # and make sure it completes before going further await hass.async_block_till_done() async def test_no_sensors(hass, mock_bridge): """Test the update_items function when no sensors are found.""" mock_bridge.allow_groups = True mock_bridge.mock_sensor_responses.append({}) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 0 async def test_sensors_with_multiple_bridges(hass, mock_bridge): """Test the update_items function with some sensors.""" mock_bridge_2 = create_mock_bridge(hass) mock_bridge_2.mock_sensor_responses.append( { "1": PRESENCE_SENSOR_3_PRESENT, "2": LIGHT_LEVEL_SENSOR_3, "3": TEMPERATURE_SENSOR_3, } ) mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) await setup_bridge(hass, mock_bridge_2, hostname="mock-bridge-2") assert len(mock_bridge.mock_requests) == 1 assert len(mock_bridge_2.mock_requests) == 1 # 3 "physical" sensors with 3 virtual sensors each assert len(hass.states.async_all()) == 9 async def test_sensors(hass, mock_bridge): """Test the update_items function with some sensors.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 "physical" sensors with 3 virtual sensors each assert len(hass.states.async_all()) == 6 presence_sensor_1 = hass.states.get("binary_sensor.living_room_sensor_motion") light_level_sensor_1 = hass.states.get("sensor.living_room_sensor_light_level") temperature_sensor_1 = hass.states.get("sensor.living_room_sensor_temperature") assert presence_sensor_1 is not None assert presence_sensor_1.state == "on" assert light_level_sensor_1 is not None assert light_level_sensor_1.state == "1.0" assert light_level_sensor_1.name == "Living room sensor light level" assert temperature_sensor_1 is not None assert temperature_sensor_1.state == "17.75" assert temperature_sensor_1.name == "Living room sensor temperature" presence_sensor_2 = hass.states.get("binary_sensor.kitchen_sensor_motion") light_level_sensor_2 = hass.states.get("sensor.kitchen_sensor_light_level") temperature_sensor_2 = hass.states.get("sensor.kitchen_sensor_temperature") assert presence_sensor_2 is not None assert presence_sensor_2.state == "off" assert light_level_sensor_2 is not None assert light_level_sensor_2.state == "10.0" assert light_level_sensor_2.name == "Kitchen sensor light level" assert temperature_sensor_2 is not None assert temperature_sensor_2.state == "18.75" assert temperature_sensor_2.name == "Kitchen sensor temperature" async def test_unsupported_sensors(hass, mock_bridge): """Test that unsupported sensors don't get added and don't fail.""" response_with_unsupported = dict(SENSOR_RESPONSE) response_with_unsupported["7"] = UNSUPPORTED_SENSOR mock_bridge.mock_sensor_responses.append(response_with_unsupported) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 2 "physical" sensors with 3 virtual sensors each assert len(hass.states.async_all()) == 6 async def test_new_sensor_discovered(hass, mock_bridge): """Test if 2nd update has a new sensor.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 6 new_sensor_response = dict(SENSOR_RESPONSE) new_sensor_response.update( { "7": PRESENCE_SENSOR_3_PRESENT, "8": LIGHT_LEVEL_SENSOR_3, "9": TEMPERATURE_SENSOR_3, } ) mock_bridge.mock_sensor_responses.append(new_sensor_response) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 9 presence = hass.states.get("binary_sensor.bedroom_sensor_motion") assert presence is not None assert presence.state == "on" temperature = hass.states.get("sensor.bedroom_sensor_temperature") assert temperature is not None assert temperature.state == "17.75" async def test_sensor_removed(hass, mock_bridge): """Test if 2nd update has removed sensor.""" mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 6 mock_bridge.mock_sensor_responses.clear() keys = ("1", "2", "3") mock_bridge.mock_sensor_responses.append({k: SENSOR_RESPONSE[k] for k in keys}) # Force updates to run again await mock_bridge.sensor_manager.coordinator.async_refresh() # To flush out the service call to update the group await hass.async_block_till_done() assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 3 sensor = hass.states.get("binary_sensor.living_room_sensor_motion") assert sensor is not None removed_sensor = hass.states.get("binary_sensor.kitchen_sensor_motion") assert removed_sensor is None async def test_update_timeout(hass, mock_bridge): """Test bridge marked as not available if timeout error during update.""" mock_bridge.api.sensors.update = Mock(side_effect=asyncio.TimeoutError) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 0 assert len(hass.states.async_all()) == 0 async def test_update_unauthorized(hass, mock_bridge): """Test bridge marked as not authorized if unauthorized during update.""" mock_bridge.api.sensors.update = Mock(side_effect=aiohue.Unauthorized) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 0 assert len(hass.states.async_all()) == 0 assert len(mock_bridge.handle_unauthorized_error.mock_calls) == 1
postlund/home-assistant
tests/components/hue/test_sensor_base.py
Python
apache-2.0
15,646
import os def pytest_configure(): from django.conf import settings BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) settings.configure( DEBUG_PROPAGATE_EXCEPTIONS=True, DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}}, BASE_DIR=BASE_DIR, SITE_ID=1, SECRET_KEY='not very secret in tests', USE_I18N=True, USE_L10N=True, STATIC_URL='/static/', ROOT_URLCONF='lisa_api.lisa.urls', TEMPLATE_LOADERS=( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ), MIDDLEWARE_CLASSES=( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ), INSTALLED_APPS=( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.staticfiles', 'lisa_api', 'lisa_api.api', 'rest_framework', 'rest_framework.authtoken', 'tests', 'lisa_plugins_test', ), PASSWORD_HASHERS=( 'django.contrib.auth.hashers.MD5PasswordHasher', ), ) # guardian is optional try: import guardian # NOQA except ImportError: pass else: settings.ANONYMOUS_USER_ID = -1 settings.AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'guardian.backends.ObjectPermissionBackend', ) settings.INSTALLED_APPS += ( 'guardian', ) try: import django django.setup() except AttributeError: pass
Seraf/lisa-plugins-test
tests/conftest.py
Python
apache-2.0
2,016
# -*- coding: utf-8 -*- import networkx as nx from ryu.base import app_manager from ryu.controller import ofp_event from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER from ryu.controller.handler import set_ev_cls from ryu.ofproto import ofproto_v1_3 from ryu.topology.api import get_all_switch, get_all_link ''' ###reduce_t### --> network topology monitor ''' class NetworkMonitor(app_manager.RyuApp): OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] def __init__(self, *args, **kwargs): super(NetworkMonitor, self).__init__(*args, **kwargs) self.name = 'NetworkMonitor' # {dpid:{port:mac,port:mac,...},dpid:{port:mac,port:mac,...},...} only switches'mac self.dpids_port_to_mac = dict() # [dpid,dpid,...] self.dpids = list() self.access_dpids = list() # {dpid:dp, dpid:dp, dpid:dp,...} self.dpid_to_dp = dict() # {dpid:[1],dpid:[1,2],dpid:[4],...} self.dpids_to_access_port = dict() #{(src_dpid,dst_dpid):(src_port,dst_port),():(),...} self.links_dpid_to_port = dict() # [(src_dpid,dst_dpid),(src_dpid,dst_dpid),...] self.links = list() # {(dpid,port):host_mac,(dpid,port):host_mac,...} only hosts'mac self.dpids_port_to_host = dict() #[host_mac,host_mac,host_mac,...] self.hosts = list() self.adjacency_matrix = dict() self.pre_adjacency_matrix = dict() self.dpid_ip_to_port = dict() # {dpid:{host_ip:port,host_ip:port,...},...} self.access_table = dict() # {(dpid,port):ip,(dpid,port):ip,...} @set_ev_cls(ofp_event.EventOFPStateChange,[MAIN_DISPATCHER, DEAD_DISPATCHER]) def state_change_handler(self, ev): datapath = ev.datapath if ev.state == MAIN_DISPATCHER: if not datapath.id in self.dpid_to_dp: self.logger.info('register datapath: %04x', datapath.id) self.dpid_to_dp[datapath.id] = datapath elif ev.state == DEAD_DISPATCHER: if datapath.id in self.dpid_to_dp: self.logger.info('un register datapath: %04x', datapath.id) del self.dpid_to_dp[datapath.id] def update_topology(self): switch_list = get_all_switch(self) if len(switch_list) != 0: self.dpids_port_to_mac = self._get_dpids_port_to_mac(switch_list) self.dpids = self._get_dpids(switch_list) #[dpid,dpid,dpid,...] link_dict = get_all_link(self) if len(link_dict) != 0: self.links_dpid_to_port = self._get_links_dpid_to_port(link_dict) self.links = self._get_links(self.links_dpid_to_port) #[(src.dpid,dst.dpid),(src.dpid,dst.dpid),...] if self.dpids_port_to_mac and self.links_dpid_to_port: self.dpids_to_access_port = self._get_access_port(self.links_dpid_to_port, self.dpids_port_to_mac) self.access_dpids = self.get_access_dpids(self.dpids_to_access_port) if self.dpids and self.links: self.adjacency_matrix = self._get_adjacency_matrix(self.dpids, self.links) def _get_dpids_port_to_mac(self,switch_list): table = dict() for switch in switch_list: dpid = switch.dp.id table.setdefault(dpid,{}) ports = switch.ports for port in ports: table[dpid][port.port_no] = port.hw_addr return table def _get_dpids(self,switch_list): dpid_list = list() for switch in switch_list: dpid_list.append(switch.dp.id) return dpid_list def _get_links_dpid_to_port(self,link_dict): table = dict() for link in link_dict.keys(): src = link.src #ryu.topology.switches.Port dst = link.dst table[(src.dpid,dst.dpid)] = (src.port_no, dst.port_no) return table def _get_links(self,link_ports_table): return link_ports_table.keys() def _get_access_port(self,links_dpid_to_port, dpids_port_to_mac): table = dict() for dpid in dpids_port_to_mac.keys(): table.setdefault(dpid,[]) all_ports = self.dpids_port_to_mac[dpid].keys() interior_ports = [] for dpid_pair in links_dpid_to_port.keys(): if dpid_pair[0] == dpid: port = links_dpid_to_port[dpid_pair][0] if port not in interior_ports: interior_ports.append(port) elif dpid_pair[1] == dpid: port = links_dpid_to_port[dpid_pair][1] if port not in interior_ports: interior_ports.append(port) for each_port in all_ports: if each_port not in interior_ports: table[dpid].append(each_port) return table # {dpid:[1],dpid:[1,2],dpid:[4],...} def get_access_dpids(self, dpids_to_access_port): access_dpids = list() for dpid in dpids_to_access_port.keys(): if len(dpids_to_access_port[dpid]) != 0: access_dpids.append(dpid) return access_dpids def _get_adjacency_matrix(self,dpids,links): graph = dict() for src in dpids: graph[src] = dict() for dst in dpids: graph[src][dst] = float('inf') if src == dst: graph[src][dst] = 0 elif (src, dst) in links: graph[src][dst] = 1 return graph def get_tree(self): g = nx.Graph() for i in self.adjacency_matrix: for j in self.adjacency_matrix[i]: if self.adjacency_matrix[i][j] == 1: g.add_edge(i, j) tree = nx.minimum_spanning_tree(g) return tree #---------------------Print_to_debug------------------------ def _show_matrix(self): switch_num = len(self.adjacency_matrix) print "---------------------adjacency_matrix---------------------" print '%10s' % ("switch"), for i in range(1, switch_num + 1): print '%10d' % i, print "" for i in self.adjacency_matrix.keys(): print '%10d' % i, for j in self.adjacency_matrix[i].values(): print '%10.0f' % j, print "" def _show_dpids(self): print "---------------------dpids---------------------" for each in self.dpids: print each, print"" def _show_hosts(self): print "---------------------!hosts!---------------------" for each in self.hosts: print each, print"" def _show_links(self): print "----------------------links--------------------" for each in self.links: print each, print"" def _show_dpid_port_to_mac(self): print "----------------------dpid_port_to_mac--------------------" for dpid in self.dpids_port_to_mac.keys(): print "dpid:",dpid for port in self.dpids_port_to_mac[dpid].keys(): print "port:",port,"->","mac",self.dpids_port_to_mac[dpid][port] print"" def _show_dpid_port_to_host(self): print "----------------------!dpid_port_to_host!--------------------" for sw in self.dpids_port_to_host.keys(): print "(sw_dpid:",sw[0],",","sw_port:",sw[1],") ->","host_mac:",self.dpids_port_to_host[sw] print"" def _show_links_dpid_to_port(self): print "----------------------links_dpid_to_port--------------------" for each in self.links_dpid_to_port: print "link_dpid:",each,"->","link_port:",self.links_dpid_to_port[each] print""
Zouyiran/ryu
ryu/app/reduce_t/network_monitor.py
Python
apache-2.0
7,749
""" Support for building models. Every model must inherit from `Model` and should inherit from the `EntityMixin`. """ from datetime import datetime from time import time from uuid import uuid4 from dateutil.tz import tzutc from pytz import utc from sqlalchemy import Column, Float, types from sqlalchemy.ext.declarative import declarative_base from sqlalchemy_utils import UUIDType EPOCH = datetime(1970, 1, 1) Model = declarative_base() def utcnow(): """ Create a non-naive UTC datetime for the current time. Needed when *updating* UTCDateTime values because result values are currently converted to non-naive datetimes and SQLAlchemy cannot compare these values with naive datetimes generated from `datetime.utcnow()` """ return datetime.now(utc) class UTCDateTime(types.TypeDecorator): """ SQLAlchemy type definition that converts stored datetime to UTC automatically. Source: http://stackoverflow.com/a/2528453 """ impl = types.DateTime def process_bind_param(self, value, engine): if value is not None: result = value.replace(tzinfo=None) return result else: return value def process_result_value(self, value, engine): if value is not None: result = datetime( value.year, value.month, value.day, value.hour, value.minute, value.second, value.microsecond, tzinfo=tzutc(), ) return result else: return value class PrimaryKeyMixin: """ Define a model with a randomized UUID primary key and tracking created/updated times. """ id = Column(UUIDType(), primary_key=True, default=uuid4) created_at = Column(UTCDateTime, default=utcnow, nullable=False) updated_at = Column(UTCDateTime, default=utcnow, onupdate=utcnow, nullable=False) def new_timestamp(self): return utcnow() @property def created_timestamp(self): return (self.created_at.replace(tzinfo=None) - EPOCH).total_seconds() @property def updated_timestamp(self): return (self.updated_at.replace(tzinfo=None) - EPOCH).total_seconds() class UnixTimestampPrimaryKeyMixin: """ Define a model with a randomized UUID primary key and tracking created/updated times. """ id = Column(UUIDType(), primary_key=True, default=uuid4) created_at = Column(Float, default=time, nullable=False) updated_at = Column(Float, default=time, onupdate=time, nullable=False) def new_timestamp(self): return time() @property def created_timestamp(self): return self.created_at @property def updated_timestamp(self): return self.updated_at class IdentityMixin: """ Define model identity in terms of members. This form of equality isn't always appropriate, but it's a good place to start, especially for writing test assertions. """ def _members(self): """ Return a dict of non-private members. """ return { key: value for key, value in self.__dict__.items() # NB: ignore internal SQLAlchemy state and nested relationships if not key.startswith("_") and not isinstance(value, Model) } def __eq__(self, other): return type(other) is type(self) and self._members() == other._members() def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return id(self) if self.id is None else hash(self.id) class SmartMixin: """ Define a model with short cuts for CRUD operations against its `Store`. These short cuts still delegate responsibility for persistence to the store (which must be instantiated first). """ def create(self): return self.__class__.store.create(self) def delete(self): return self.__class__.store.delete(self.id) def update(self): return self.__class__.store.update(self.id, self) def update_with_diff(self): return self.__class__.store.update_with_diff(self.id, self) def replace(self): return self.__class__.store.replace(self.id, self) @classmethod def search(cls, *criterion, **kwargs): return cls.store.search(*criterion, **kwargs) @classmethod def count(cls, *criterion): return cls.store.count(*criterion) @classmethod def retrieve(cls, identifier): return cls.store.retrieve(identifier) class EntityMixin(PrimaryKeyMixin, IdentityMixin, SmartMixin): """ Convention for persistent entities combining other mixins. """ pass class UnixTimestampEntityMixin(UnixTimestampPrimaryKeyMixin, IdentityMixin, SmartMixin): """ Convention for persistent entities combining other mixins. """ pass
globality-corp/microcosm-postgres
microcosm_postgres/models.py
Python
apache-2.0
4,867
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for UpdateSessionEntityType # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-dialogflowcx # [START dialogflow_v3_generated_SessionEntityTypes_UpdateSessionEntityType_sync] from google.cloud import dialogflowcx_v3 def sample_update_session_entity_type(): # Create a client client = dialogflowcx_v3.SessionEntityTypesClient() # Initialize request argument(s) session_entity_type = dialogflowcx_v3.SessionEntityType() session_entity_type.name = "name_value" session_entity_type.entity_override_mode = "ENTITY_OVERRIDE_MODE_SUPPLEMENT" session_entity_type.entities.value = "value_value" session_entity_type.entities.synonyms = ['synonyms_value_1', 'synonyms_value_2'] request = dialogflowcx_v3.UpdateSessionEntityTypeRequest( session_entity_type=session_entity_type, ) # Make the request response = client.update_session_entity_type(request=request) # Handle the response print(response) # [END dialogflow_v3_generated_SessionEntityTypes_UpdateSessionEntityType_sync]
googleapis/python-dialogflow-cx
samples/generated_samples/dialogflow_v3_generated_session_entity_types_update_session_entity_type_sync.py
Python
apache-2.0
1,895
#!/usr/bin/env python """Helper functions used by client building/repacking process.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import io import logging import os import shutil import struct import tempfile from future.builtins import str from future.utils import iteritems from future.utils import iterkeys from future.utils import itervalues from typing import Optional, Sequence, Text, Tuple from grr_response_client_builder import build from grr_response_core import config from grr_response_core import version from grr_response_core.config import contexts from grr_response_core.lib import config_validator_base from grr_response_core.lib import rdfvalue from grr_response_core.lib import utils # pylint: disable=unused-import # Pull in local config validators. from grr_response_core.lib.local import plugins from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import crypto as rdf_crypto from grr_response_core.lib.util.compat import yaml # pylint: enable=unused-import # pylint: disable=g-import-not-at-top,unused-import # This is a workaround so we don't need to maintain the whole PyInstaller # codebase as a full-fledged dependency. try: # pytype: disable=import-error from PyInstaller import __main__ as PyInstallerMain # pytype: enable=import-error except ImportError: # We ignore this failure since most people running the code don't build their # own clients and printing an error message causes confusion. Those building # their own clients will need PyInstaller installed. pass # pylint: enable=g-import-not-at-top,unused-import Context = Sequence[Text] def GenerateDirectory(input_dir = None, output_dir = None, replacements = None, context = None): """Copies an a directory rewriting file names according to spec.""" if context is None: raise ValueError("context must be provided") input_dir = utils.NormalizePath(input_dir) output_dir = utils.NormalizePath(output_dir) replacements = replacements or [] for (root, _, files) in os.walk(input_dir): for filename in files: in_file = utils.JoinPath(root, filename) out_file = in_file.replace(input_dir, output_dir) for (s, replacement) in replacements: out_file = out_file.replace(s, replacement) utils.EnsureDirExists(os.path.dirname(out_file)) GenerateFile(in_file, out_file, context=context) def GenerateFile(input_filename = None, output_filename = None, context = None): """Generates a file from a template, interpolating config values.""" if context is None: raise ValueError("context must be provided.") if input_filename is None: input_filename = output_filename + ".in" if output_filename[-3:] == ".in": output_filename = output_filename[:-3] logging.debug("Generating file %s from %s", output_filename, input_filename) with io.open(input_filename, "r") as fd: data = fd.read() with io.open(output_filename, "w") as fd: fd.write(config.CONFIG.InterpolateValue(data, context=context)) def CleanDirectory(directory): logging.info("Clearing directory %s", directory) try: shutil.rmtree(directory) except OSError: pass utils.EnsureDirExists(directory) def MakeBuildDirectory(context=None): """Prepares the build and work directories.""" if context is None: raise ValueError("context can't be None") build_dir = config.CONFIG.Get("PyInstaller.build_dir", context=context) work_path = config.CONFIG.Get("PyInstaller.workpath_dir", context=context) CleanDirectory(build_dir) CleanDirectory(work_path) def BuildWithPyInstaller(context=None): """Use pyinstaller to build a client package.""" if context is None: raise ValueError("context has to be specified") CleanDirectory(config.CONFIG.Get("PyInstaller.distpath", context=context)) logging.info("Copying pyinstaller support files") build_dir = config.CONFIG.Get("PyInstaller.build_dir", context=context) spec_file = os.path.join(build_dir, "grr.spec") with io.open(spec_file, "w") as fd: fd.write(config.CONFIG.Get("PyInstaller.spec", context=context)) with io.open(os.path.join(build_dir, "version.txt"), "w") as fd: fd.write(config.CONFIG.Get("PyInstaller.version", context=context)) shutil.copy( src=config.CONFIG.Get("PyInstaller.icon_path", context=context), dst=os.path.join(build_dir, "grr.ico")) # We expect the onedir (a one-folder bundle containing an executable) output # at this location. output_dir = os.path.join( config.CONFIG.Get("PyInstaller.distpath", context=context), "grr-client") args = [ "--distpath", config.CONFIG.Get("PyInstaller.distpath", context=context), "--workpath", config.CONFIG.Get("PyInstaller.workpath_dir", context=context), spec_file, ] logging.info("Running pyinstaller: %s", args) PyInstallerMain.run(pyi_args=args) # Clear out some crud that pyinstaller includes. for path in ["tcl", "tk", "pytz"]: dir_path = os.path.join(output_dir, path) try: shutil.rmtree(dir_path) except OSError: logging.error("Unable to remove directory: %s", dir_path) try: os.mkdir(dir_path) except OSError: logging.error("Unable to create directory: %s", dir_path) file_path = os.path.join(dir_path, path) try: # Create an empty file so the directories get put in the installers. with io.open(file_path, "wb"): pass except IOError: logging.error("Unable to create file: %s", file_path) version_ini = config.CONFIG.Get( "ClientBuilder.version_ini_path", default=version.VersionPath()) shutil.copy(version_ini, os.path.join(output_dir, "version.ini")) with io.open(os.path.join(output_dir, "build.yaml"), "wb") as fd: WriteBuildYaml(fd, context=context) return output_dir def WriteBuildYaml(fd, build_timestamp=True, context=None): """Write build spec to fd.""" if context is None: raise ValueError("context has to be specified") output = { "Client.build_environment": rdf_client.Uname.FromCurrentSystem().signature(), "Template.build_type": config.CONFIG.Get("ClientBuilder.build_type", context=context), "Template.version_major": config.CONFIG.Get("Source.version_major", context=context), "Template.version_minor": config.CONFIG.Get("Source.version_minor", context=context), "Template.version_revision": config.CONFIG.Get("Source.version_revision", context=context), "Template.version_release": config.CONFIG.Get("Source.version_release", context=context), "Template.arch": config.CONFIG.Get("Client.arch", context=context) } yaml_keys = set(build.REQUIRED_BUILD_YAML_KEYS) if build_timestamp: output["Client.build_time"] = rdfvalue.RDFDatetime.Now() else: yaml_keys.remove("Client.build_time") for key, value in iteritems(output): output[key] = str(value) output["Template.build_context"] = context output_keys = set(iterkeys(output)) if output_keys != yaml_keys: raise RuntimeError("Bad build.yaml: expected %s, got %s" % (yaml_keys, output_keys)) for k, v in output.items(): if v is None: raise RuntimeError("Bad build.yaml: expected %s to be not None" % k) fd.write(yaml.Dump(output).encode("utf-8")) def ValidateEndConfig(config_obj, errors_fatal=True, context=None): """Given a generated client config, attempt to check for common errors.""" if context is None: raise ValueError("context can't be None") errors = [] if not config.CONFIG["Client.fleetspeak_enabled"]: location = config_obj.Get("Client.server_urls", context=context) if not location: errors.append("Empty Client.server_urls") for url in location: if not url.startswith("http"): errors.append("Bad Client.server_urls specified %s" % url) certificate = config_obj.GetRaw( "CA.certificate", default=None, context=context) if certificate is None or not certificate.startswith("-----BEGIN CERTIF"): errors.append("CA certificate missing from config.") key_data = config_obj.GetRaw( "Client.executable_signing_public_key", default=None, context=context) if key_data is None: errors.append("Missing Client.executable_signing_public_key.") elif not key_data.startswith("-----BEGIN PUBLIC"): errors.append("Invalid Client.executable_signing_public_key: %s" % key_data) else: rdf_crypto.RSAPublicKey.FromHumanReadable(key_data) for bad_opt in ["Client.private_key"]: if config_obj.Get(bad_opt, context=context, default=""): errors.append("Client cert in conf, this should be empty at deployment" " %s" % bad_opt) if errors_fatal and errors: for error in errors: logging.error("Build Config Error: %s", error) raise RuntimeError("Bad configuration generated. Terminating.") else: return errors # Config options that have to make it to a deployable binary. _CONFIG_SECTIONS = [ "CA", "Client", "ClientRepacker", "Logging", "Config", "Nanny", "Osquery", "Installer", "Template" ] # Config options that should never make it to a deployable binary. _SKIP_OPTION_LIST = ["Client.private_key"] def GetClientConfig(context, validate=True, deploy_timestamp=True): """Generates the client config file for inclusion in deployable binaries.""" with utils.TempDirectory() as tmp_dir: # Make sure we write the file in yaml format. filename = os.path.join( tmp_dir, config.CONFIG.Get("ClientBuilder.config_filename", context=context)) new_config = config.CONFIG.MakeNewConfig() new_config.Initialize(reset=True, data="") new_config.SetWriteBack(filename) # Only copy certain sections to the client. We enumerate all # defined options and then resolve those from the config in the # client's context. The result is the raw option as if the # client read our config file. client_context = context[:] while contexts.CLIENT_BUILD_CONTEXT in client_context: client_context.remove(contexts.CLIENT_BUILD_CONTEXT) for descriptor in sorted(config.CONFIG.type_infos, key=lambda x: x.name): if descriptor.name in _SKIP_OPTION_LIST: continue if descriptor.section in _CONFIG_SECTIONS: value = config.CONFIG.GetRaw( descriptor.name, context=client_context, default=None) if value is not None: logging.debug("Copying config option to client: %s", descriptor.name) new_config.SetRaw(descriptor.name, value) if deploy_timestamp: deploy_time_string = str(rdfvalue.RDFDatetime.Now()) new_config.Set("Client.deploy_time", deploy_time_string) new_config.Write() if validate: ValidateEndConfig(new_config, context=context) private_validator = config.CONFIG.Get( "ClientBuilder.private_config_validator_class", context=context) if private_validator: try: validator = config_validator_base.PrivateConfigValidator.classes[ private_validator]() except KeyError: logging.error("Couldn't find config validator class %s", private_validator) raise validator.ValidateEndConfig(new_config, context) return io.open(filename, "r").read() def CopyFileInZip(from_zip, from_name, to_zip, to_name=None, signer=None): """Read a file from a ZipFile and write it to a new ZipFile.""" data = from_zip.read(from_name) if to_name is None: to_name = from_name if signer: logging.debug("Signing %s", from_name) data = signer.SignBuffer(data) to_zip.writestr(to_name, data) def CreateNewZipWithSignedLibs(z_in, z_out, ignore_files=None, signer=None, skip_signing_files=None): """Copies files from one zip to another, signing all qualifying files.""" ignore_files = ignore_files or [] skip_signing_files = skip_signing_files or [] extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"] to_sign = [] for template_file in z_in.namelist(): if template_file not in ignore_files: extension = os.path.splitext(template_file)[1].lower() if (signer and template_file not in skip_signing_files and extension in extensions_to_sign): to_sign.append(template_file) else: CopyFileInZip(z_in, template_file, z_out) temp_files = {} for filename in to_sign: fd, path = tempfile.mkstemp() with os.fdopen(fd, "wb") as temp_fd: temp_fd.write(z_in.read(filename)) temp_files[filename] = path try: signer.SignFiles(itervalues(temp_files)) except AttributeError: for f in itervalues(temp_files): signer.SignFile(f) for filename, tempfile_path in iteritems(temp_files): with io.open(tempfile_path, "rb") as fd: z_out.writestr(filename, fd.read()) def SetPeSubsystem(fd, console=True): """Takes file like obj and returns (offset, value) for the PE subsystem.""" current_pos = fd.tell() fd.seek(0x3c) # _IMAGE_DOS_HEADER.e_lfanew header_offset = struct.unpack("<I", fd.read(4))[0] # _IMAGE_NT_HEADERS.OptionalHeader.Subsystem ( 0x18 + 0x44) subsystem_offset = header_offset + 0x5c fd.seek(subsystem_offset) if console: fd.write(b"\x03") else: fd.write(b"\x02") fd.seek(current_pos)
dunkhong/grr
grr/client_builder/grr_response_client_builder/build_helpers.py
Python
apache-2.0
13,632
import sys,os import math import numpy as np def dcoptimize(dist): n = len(dist) m = 0 allds = [] for i in range(n): for j in range(i+1,n): allds.append(dist[i][j]) m += 1 allds.sort() maxd = allds[m-1] #calculate lower and upper boundaries for sigma counter = 0 while allds[counter]==0: counter += 1 lower = allds[counter] if lower<np.percentile(np.array(allds),1): lower = np.percentile(np.array(allds),1) upper = np.percentile(np.array(allds),5) if upper<lower: upper = lower + 0.01 hmin = 100000000000 dc = 0 sigma = lower h = 0 while(sigma<=upper): #calculate potentials potentials = [] z = 0.0 for i in range(n): pt = 0.0 for j in range(n): sq = (dist[i][j] / sigma) * (dist[i][j] / sigma) expsq = math.exp(-sq) if i!=j: pt += expsq potentials.append(pt) z += pt #calculate entropy h = 0.0 for i in range(n): a = potentials[i]/z if a>0.0: b = math.log(a) h += a*b h = h*(-1) #store dc and hmin if h<hmin: hmin = h dc = sigma sigma += 0.005 #print "dc = " + str(dc) return ((3/math.sqrt(2))*dc),maxd #dist matrix and wether to include halo or not in clusters def dclust(dist,haloflag,perc,dc): n = len(dist) #Initialize structures rho = [0.0 for k in range(n)] delta = [0.0 for k in range(n)] neigh = [0 for k in range(n)] cl = [-1 for k in range(n)] halo = [0 for k in range(n)] strhoaux = [(0,0.0) for k in range(n)] #calculate dc and maxdistance dc2,maxd = dcoptimize(dist) if dc==0: dc = dc2 #Calculate rho using gausian kernel for i in range(n): for j in range(i+1,n): sq = -(dist[i][j]/dc)*(dist[i][j]/dc) rho[i] += math.exp(sq) rho[j] += math.exp(sq) #Sort rho with keys descentdently for i in range(n): strhoaux[i] = (i,rho[i]) strho = sorted(strhoaux,key = lambda val:-val[1]) #Calculate delta for i in range(n): idxi,rhoidxi = strho[i] delta[idxi] = maxd for j in range(i): idxj,rhoidxj = strho[j] if delta[idxi]>dist[idxi][idxj]: delta[idxi] = dist[idxi][idxj] neigh[idxi] = idxj sortrho = sorted(rho) sortdelta = sorted(delta) rhothreshold = np.percentile(np.array(sortrho),perc) deltathreshold = np.percentile(np.array(sortdelta),75) #Identification of cluster centers nclust = 0 for i in range(n): if rho[i]>rhothreshold and delta[i]>deltathreshold: cl[i] = nclust #print i,nclust nclust+=1 #Assign clusters for i in range(n): idxi,rhoidxi = strho[i] if cl[idxi]==-1: cl[idxi] = cl[neigh[idxi]] #Find border densities bord_rho = [] for i in range(nclust): bord_rho.append(0) for j in range(n): if cl[j]==i: for k in range(n): if cl[k]!=cl[j] and dist[j][k]<=dc and rho[j]>bord_rho[i]: bord_rho[i] = rho[j] #Generate Halo sum = 0 #print n for i in range(n): #print i if rho[i]<bord_rho[cl[i]]: halo[i] = 1 elif cl[i]>=0: sum += 1 if haloflag==0: for i in range(n): if halo[i]==1: cl[i]=-1 return nclust,cl
idotu/SARNAclust
dpcluster.py
Python
apache-2.0
3,239
# Copyright 2016 James Hensman, Arno Solin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import numpy as np import GPflow import pandas as pd import tensorflow as tf import sys import time # Import the data data = pd.read_pickle('airline.pickle') # Convert time of day from hhmm to minutes since midnight data.ArrTime = 60*np.floor(data.ArrTime/100)+np.mod(data.ArrTime, 100) data.DepTime = 60*np.floor(data.DepTime/100)+np.mod(data.DepTime, 100) def subset(data, n): # Pick out the data Y = data['ArrDelay'].values names = ['Month', 'DayofMonth', 'DayOfWeek', 'plane_age', 'AirTime', 'Distance', 'ArrTime', 'DepTime'] X = data[names].values # Shuffle the data and only consider a subset of it perm = np.random.permutation(len(X)) X = X[perm] Y = Y[perm] XT = X[int(2*n/3):n] YT = Y[int(2*n/3):n] X = X[:int(2*n/3)] Y = Y[:int(2*n/3)] # Normalize Y scale and offset Ymean = Y.mean() Ystd = Y.std() Y = (Y - Ymean) / Ystd Y = Y.reshape(-1, 1) YT = (YT - Ymean) / Ystd YT = YT.reshape(-1, 1) # Normalize X on [0, 1] Xmin, Xmax = X.min(0), X.max(0) X = (X - Xmin) / (Xmax - Xmin) XT = (XT - Xmin) / (Xmax - Xmin) return X, Y, XT, YT # Number of repetitions repetitions = 10 # Sample sizes: [10000 100000 1000000 len(data)] sample_size = [10000, 100000, 1000000, len(data)] # MSE mse = np.zeros([repetitions, len(sample_size)]) nlpd = np.zeros([repetitions, len(sample_size)]) tc = np.zeros([repetitions, len(sample_size)]) tt = np.zeros([repetitions, len(sample_size)]) # For repetitions for i in range(repetitions): # Loop over the sample sizes for j in range(len(sample_size)): # Lock random seed np.random.seed(sample_size[j]+i) # Reset tensorflow tf.reset_default_graph() # Reset clocks tc0 = time.clock() tt0 = time.time() # Pick subset X, Y, XT, YT = subset(data, sample_size[j]) # get inducing point by k-means. Use a smallish randomsubset for kmeans else it's very slow. from scipy.cluster import vq ind = np.random.permutation(X.shape[0])[:10000] Z, _ = vq.kmeans(X[ind, :], 500) # Set up the model m = GPflow.svgp.SVGP(X, Y, kern=GPflow.kernels.RBF(X.shape[1], ARD=True), Z=Z, likelihood=GPflow.likelihoods.Gaussian(), minibatch_size=1000) # a callback so we can see what's happening _counter = 0 def cb(x): _counter += 1 if (_counter % 10) == 0: m.set_state(x) mu, _ = m.predict_y(XT) mse = ((mu-YT)**2).mean() print(_counter, m.compute_log_likelihood(), mse) sys.stdout.flush() # Optimise the hyperparameters o = tf.train.AdamOptimizer() m.optimize(o, maxiter=100000) # Evaluate test points in batches of 1e5 mu, var = np.zeros([XT.shape[0], 1]), np.zeros([XT.shape[0], 1]) for k in range(0, XT.shape[0], 100000): mu[k:k+100000], var[k:k+100000] = m.predict_y(XT[k:k+100000]) # Calculate MSE mse[i, j] = ((mu-YT)**2).mean() print(X.shape[0], mse[i, j]) # Calculate NLPD nlpd[i, j] = -np.mean(m.predict_density(XT, YT)) # Store time tc[i, j] = time.clock() - tc0 tt[i, j] = time.time() - tt0 # The results after this round print(mse[:i+1, :].mean(axis=0)) print(mse[:i+1, :].std(axis=0)) print('MSE:') print(mse.mean(axis=0)) print(mse.std(axis=0)) print('NLPD:') print(nlpd.mean(axis=0)) print(nlpd.std(axis=0)) print('Timing (clock):') print(tc.mean(axis=0)) print(tc.std(axis=0)) print('Timing (time):') print(tt.mean(axis=0)) print(tt.std(axis=0))
jameshensman/VFF
experiments/airline/airline_svigp_rbf.py
Python
apache-2.0
4,344
import uuid import hashlib from infosystem.common.subsystem import manager from infosystem.common.subsystem import operation class Create(operation.Operation): def pre(self, **kwargs): # FIXME(samueldmq): this method needs to receive the parameters # explicitly. if kwargs.get('user'): # FIXME(samueldmq): how to avoid someone simply passing the user # in the body and then having a valid token? self.user = kwargs['user'] else: domain_name = kwargs.get('domain_name', None) username = kwargs.get('username', None) email = kwargs.get('email', None) password = kwargs.get('password', None) # TODO(samueldmq): allow get by unique attrs domains = self.manager.api.domains.list(name=domain_name) if not domains: return False domain_id = domains[0].id password_hash = hashlib.sha256( password.encode('utf-8')).hexdigest() if (email is None): users = self.manager.api.users.list( domain_id=domain_id, name=username, password=password_hash) else: users = self.manager.api.users.list( domain_id=domain_id, email=email, password=password_hash) if not users: return False self.user = users[0] return self.user.is_stable() def do(self, session, **kwargs): # TODO(samueldmq): use self.user.id instead of self.user_id token = self.driver.instantiate( id=uuid.uuid4().hex, created_by=self.user.id, user_id=self.user.id) self.driver.create(token, session=session) return token class Manager(manager.Manager): def __init__(self, driver): super().__init__(driver) self.create = Create(self)
samueldmq/infosystem
infosystem/subsystem/token/manager.py
Python
apache-2.0
1,938
# -*- coding: utf-8 -*- # # tests/entidades/test_detalhamento.py # # Copyright 2019 Base4 Sistemas Ltda ME # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from decimal import Decimal from satcomum import constantes from satcfe.entidades import Detalhamento from satcfe.entidades import ProdutoServico from satcfe.entidades import Imposto from satcfe.entidades import PISSN from satcfe.entidades import COFINSSN from satcfe.entidades import ICMSSN102 def test_simples(): """XML esperado: .. sourcecode:: xml <det nItem="1"> <prod> <cProd>123456</cProd> <xProd>BORRACHA STAEDTLER</xProd> <CFOP>5102</CFOP> <uCom>UN</uCom> <qCom>1.0000</qCom> <vUnCom>5.75</vUnCom> <indRegra>A</indRegra> </prod> <imposto> <ICMS> <ICMSSN102> <Orig>2</Orig> <CSOSN>500</CSOSN> </ICMSSN102> </ICMS> <PIS> <PISSN> <CST>49</CST> </PISSN> </PIS> <COFINS> <COFINSSN> <CST>49</CST> </COFINSSN> </COFINS> </imposto> <infAdProd>Teste</infAdProd> </det> """ det = Detalhamento( produto=ProdutoServico( cProd='123456', xProd='BORRACHA STAEDTLER', CFOP='5102', uCom='UN', qCom=Decimal('1.0000'), vUnCom=Decimal('5.75'), indRegra=constantes.I11_ARREDONDAMENTO), imposto=Imposto( pis=PISSN(CST='49'), cofins=COFINSSN(CST='49'), icms=ICMSSN102(Orig='2', CSOSN='500')), infAdProd='Teste') el = det._xml(nItem=1) # xml.etree.ElementTree.Element assert el.tag == 'det' assert el.attrib['nItem'] == '1' assert el.find('infAdProd').text == 'Teste' prod = el.find('prod') assert prod.find('cProd').text == '123456' assert prod.find('xProd').text == 'BORRACHA STAEDTLER' assert prod.find('CFOP').text == '5102' assert prod.find('uCom').text == 'UN' assert prod.find('qCom').text == '1.0000' assert prod.find('vUnCom').text == '5.75' assert prod.find('indRegra').text == constantes.I11_ARREDONDAMENTO imposto = el.find('imposto') el_ICMS = imposto.find('ICMS') el_ICMSSN102 = el_ICMS.find('ICMSSN102') assert el_ICMSSN102.find('Orig').text == '2' assert el_ICMSSN102.find('CSOSN').text == '500' el_PIS = imposto.find('PIS') el_PISSN = el_PIS.find('PISSN') assert el_PISSN.find('CST').text == '49' el_COFINS = imposto.find('COFINS') el_COFINSSN = el_COFINS.find('COFINSSN') assert el_COFINSSN.find('CST').text == '49'
base4sistemas/satcfe
tests/entidades/test_detalhamento.py
Python
apache-2.0
3,651
# Lint as: python3 # # Copyright 2020 The XLS Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Multi-process fuzz driver library.""" import datetime import enum import hashlib import itertools import multiprocessing as mp import os import queue as queue_mod import shutil import sys import tempfile import time from typing import Text, Optional, Tuple, NamedTuple import termcolor from xls.common import gfile from xls.common import multiprocess from xls.fuzzer import run_fuzz from xls.fuzzer import sample_runner from xls.fuzzer.python import cpp_ast_generator as ast_generator from xls.fuzzer.python.cpp_sample import Sample from xls.fuzzer.python.cpp_sample import SampleOptions class Command(enum.Enum): """Command sent from generator process to worker processes.""" RUN = 1 # Run the accompanying payload. STOP = 2 # Terminate, no further work items. class QueueMessage(NamedTuple): """An message in the multiprocess queue.""" command: Command sample: Optional[Sample] = None sampleno: Optional[int] = None # The elapsed time to generate the sample in nanoseconds. generate_sample_ns: Optional[int] = None def record_crasher(workerno: int, sampleno: int, minimize_ir: bool, sample: Sample, run_dir: Text, crash_path: Text, num_crashers: int, exception: sample_runner.SampleError): """Records and writes details of a failing test as a crasher.""" print('--- Worker {} observed an exception for sampleno {}'.format( workerno, sampleno)) # Try to prune down the IR to a minimal reproducer as long as it isn't a # timeout. if minimize_ir and not exception.is_timeout: print('--- Worker {} attempting to minimize IR'.format(workerno)) minimized_ir_path = run_fuzz.minimize_ir(sample, run_dir) if minimized_ir_path: print('--- Worker {} minimized IR saved in {}'.format( workerno, os.path.basename(minimized_ir_path))) else: print('--- Worker {} unable to minimize IR'.format(workerno)) # Create a directory under crash_path containing the entire contents of # the run directory along with a crasher file. Name of directory is the # first eight characters of the hash of the code sample. digest = hashlib.sha256(sample.input_text.encode('utf-8')).hexdigest()[:8] sample_crasher_dir = os.path.join(crash_path, digest) termcolor.cprint( '--- Worker {} noted crasher #{} for sampleno {} in {}'.format( workerno, num_crashers, sampleno, sample_crasher_dir), color='red') sys.stdout.flush() gfile.recursively_copy_dir( run_dir, sample_crasher_dir, preserve_file_mask=True) crasher_path = os.path.join( sample_crasher_dir, 'crasher_{}_{}.x'.format(datetime.date.today().strftime('%Y-%m-%d'), digest[:4])) with gfile.open(crasher_path, 'w') as f: f.write(sample.to_crasher(str(exception))) def do_worker_task(workerno: int, queue: Optional[mp.Queue], crash_path: Text, summary_path: Optional[Text] = None, save_temps_path: Optional[Text] = None, minimize_ir: bool = True) -> None: """Runs worker task, receiving commands from generator and executing them.""" queue = queue or multiprocess.get_user_data()[workerno] crashers = 0 calls = 0 print('---- Started worker {}'.format(workerno)) sys.stdout.flush() start = datetime.datetime.now() # Local file to write the summary information to before writing out to the # potentially remote (i.e. CNS) summary file. Avoids a potential CNS write # with every sample. Instead data is written out in batches. summary_file = os.path.join(summary_path, 'summary_%d.binarypb' % workerno) if summary_path else None summary_temp_file = tempfile.mkstemp( prefix='temp_summary_')[1] if summary_path else None i = 0 # Silence pylint warning. for i in itertools.count(): message = queue.get() if message.command == Command.STOP: break assert message.command == Command.RUN, message.command calls += len(message.sample.args_batch) run_dir = None if save_temps_path: run_dir = os.path.join(save_temps_path, str(message.sampleno)) os.makedirs(run_dir) else: run_dir = tempfile.mkdtemp(prefix='run_fuzz_') try: run_fuzz.run_sample( message.sample, run_dir, summary_file=summary_temp_file, generate_sample_ns=message.generate_sample_ns) except sample_runner.SampleError as e: crashers += 1 record_crasher(workerno, message.sampleno, minimize_ir, message.sample, run_dir, crash_path, crashers, e) if summary_file and i % 25 == 0: # Append the local temporary summary file to the actual, potentially # remote one, and delete the temporary file. with gfile.open(summary_temp_file, 'rb') as f: summaries = f.read() with gfile.open(summary_file, 'ab+') as f: f.write(summaries) gfile.remove(summary_temp_file) if not save_temps_path: shutil.rmtree(run_dir) # TODO(leary): 2020-08-28 Turn this into an option. if i != 0 and i % 16 == 0: elapsed = (datetime.datetime.now() - start).total_seconds() print('---- Worker {:3}: {:8.2f} samples/s {:8.2f} calls/s'.format( workerno, i / elapsed, calls / elapsed)) sys.stdout.flush() elapsed = (datetime.datetime.now() - start).total_seconds() print( '---- Worker {:3} finished! {:3} crashers; {:8.2f} samples/s; {:8.2f} calls/s' .format(workerno, crashers, i / elapsed, calls / elapsed)) sys.stdout.flush() def print_with_linenos(text: Text): for i, line in enumerate(text.splitlines(), 1): print('{:04d} {}'.format(i, line)) def do_generator_task(queues: Tuple[mp.Queue, ...], seed: int, ast_generator_options: ast_generator.AstGeneratorOptions, sample_count: int, calls_per_sample: int, default_sample_options: SampleOptions, duration: Optional[datetime.timedelta] = None, print_samples: bool = False) -> int: """Makes DSLX text / args as fuzz samples and pushes them to workers.""" start = datetime.datetime.now() i = 0 queue_sweeps = 0 rng = ast_generator.RngState(seed) while True: if duration: # Note: duration overrides sample count. if datetime.datetime.now() - start >= duration: print('-- Hit target generator duration of {}'.format(duration)) sys.stdout.flush() break elif i >= sample_count: print('-- Hit target sample_count of {}'.format(sample_count)) sys.stdout.flush() break if i != 0 and i % len(queues) == 0: queue_sweeps += 1 # Every 16 (arbitrary) sweeps through the queue we print out the generator # rate and flush stdout. if queue_sweeps & 0xf == 0: delta = datetime.datetime.now() - start elapsed = delta.total_seconds() print(f'-- Generating sample {i:8,d}; elapsed: {delta}; ' f'aggregate generate samples/s: {i/elapsed:6.2f}') sys.stdout.flush() # Generate a command message. with sample_runner.Timer() as t: sample = ast_generator.generate_sample(ast_generator_options, calls_per_sample, default_sample_options, rng) if print_samples: print_with_linenos(sample.input_text) message = QueueMessage( command=Command.RUN, sample=sample, sampleno=i, generate_sample_ns=t.elapsed_ns) # Cycle through the queues seeing if we can find one to enqueue into. In the # common case where queues are not full it'll happen on the first one. This # helps avoid the case where a single worker gums up other (ready) workers # from receiving samples. queueno = i while True: queue = queues[queueno % len(queues)] try: queue.put_nowait(message) except queue_mod.Full: queueno += 1 else: break if (queueno - i) % len(queues) == 0: # Avoid burning this core on spin polling all the time by sleeping for a # millisecond after we've visited all the queues. time.sleep(0.001) # Bump the generated sample count. i += 1 print('-- Putting stop command in worker queues after generating {} samples' .format(i)) sys.stdout.flush() for queue in queues: queue.put(QueueMessage(command=Command.STOP)) print('-- Generator task complete') sys.stdout.flush() return i
google/xls
xls/fuzzer/run_fuzz_multiprocess.py
Python
apache-2.0
9,267
# Copyright 2015-2017 Cisco Systems, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Address family numbers, from http://www.iana.org/assignments/address-family-numbers """ class AFI(int): INET = 0x01 INET6 = 0x02 L2VPN = 0x19 BGPLS = 0x4004 hex_code = { 'ipv4': INET, 'ipv6': INET6, 'l2vpn': L2VPN, 'linkstate': BGPLS } def __str__(self): if self == self.INET: return 'ipv4' elif self == self.INET6: return 'ipv6' elif self == self.L2VPN: return 'l2vpn' elif self == self.BGPLS: return 'linkstate' class SAFI(int): UNICAST = 0x01 MPLS_LABEL = 0x04 EVPN = 70 BGPLS = 71 MPLS_VPN = 128 FLOWSPEC = 133 hex_code = { 'unicast': UNICAST, 'label_unicast': MPLS_LABEL, 'evpn': EVPN, 'linkstate': BGPLS, 'mplsvpn': MPLS_VPN, 'flowspec': FLOWSPEC } def __str__(self): if self == self.UNICAST: return 'unicast' elif self == self.MPLS_LABEL: return 'label_unicast' elif self == self.EVPN: return 'evpn' elif self == self.BGPLS: return 'linkstate' elif self == self.FLOWSPEC: return 'flowspec' elif self == self.MPLS_VPN: return 'mplsvpn' class Family (object): __slots__ = ['afi', 'safi'] def __init__(self, afi, safi): self.afi = AFI(afi) self.safi = SAFI(safi) def __str__(self): return '%s-%s' % (str(self.afi), str(self.safi)) @staticmethod def str_2_int(afi_safi): afi, safi = afi_safi.split('-') return AFI.hex_code.get(afi), SAFI.hex_code.get(safi)
smartbgp/libbgp
libbgp/net/family.py
Python
apache-2.0
2,325
#Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
facebook/IT-CPE
pantri/scripts/lib/__init__.py
Python
apache-2.0
71
#!/usr/bin/env python3 import subprocess import warnings import os import sys import utils def call_train_and_predict(settings_file, verbose=False): settings = utils.get_settings(settings_file) null = open(os.devnull, 'w') train_retcode = subprocess.call(['./train.py', '-s', settings_file], stdout=null, stderr=null) # Raise a warning if it was non-zero and return if train_retcode != 0: warnings.warn("train.py -s {0} did not complete successfully".format( settings_file)) return None # Start ./predict proc predict_retcode = subprocess.call(['./predict.py', '-s', settings_file], stdout=null, stderr=null) # Raise warning if predict failed and return if predict_retcode != 0: warnings.warn("predict.py -s {0} did not complete successfully".format( settings_file)) return None return None null.close() out_file.close() if __name__ == '__main__': call_train_and_predict(sys.argv[1])
Neuroglycerin/hail-seizure
python/train_and_predict.py
Python
apache-2.0
1,079
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import tensorflow.compat.v1 as tf from tensorflow.contrib import rnn as contrib_rnn from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest rnn = contrib_rnn _BIAS_VARIABLE_NAME = 'biases' _WEIGHTS_VARIABLE_NAME = 'weights' ACTIVATION_DICT = dict(sigmoid=tf.nn.sigmoid, tanh=tf.nn.tanh) def dice(x): """DiCE: The Infinitely Differentiable Monte-Carlo Estimator.""" return tf.exp(x - tf.stop_gradient(x)) def st_estimator(f, g): """Function which acts as f in the forward pass and g in the backward pass.""" return tf.stop_gradient(f - g) + g def create_var_and_placeholder(name, shape, dtype, trainable=False, initializer=None, default=None): """Creates a variable and a corresponding initializer op with placeholder.""" if default is not None: placeholder = tf.placeholder_with_default( tf.constant(default, dtype), shape=shape) else: placeholder = tf.placeholder( dtype, shape=shape, name='{}_init_pc'.format(name)) variable = tf.get_variable( name, dtype=dtype, shape=shape, initializer=initializer, trainable=trainable) init_op = variable.assign(placeholder) return variable, placeholder, init_op def tensormul(t1, t2): """Basically matmul, but t1 can have more dimensions than t2.""" dim1 = t1.get_shape().as_list()[-1] dim2 = t2.get_shape().as_list()[-1] result_shape_tensors = tf.unstack(tf.shape(t1)) result_shape_tensors[-1] = dim2 result_shape_tensor = tf.stack(result_shape_tensors) t1 = tf.reshape(t1, [-1, dim1]) result = tf.matmul(t1, t2) result = tf.reshape(result, result_shape_tensors) return result @contextlib.contextmanager def _checked_scope(cell, scope, reuse=None, **kwargs): if reuse is not None: kwargs['reuse'] = reuse with vs.variable_scope(scope, **kwargs) as checking_scope: scope_name = checking_scope.name if hasattr(cell, '_scope'): cell_scope = cell._scope # pylint: disable=protected-access if cell_scope.name != checking_scope.name: raise ValueError( 'Attempt to reuse RNNCell %s with a different variable scope than ' "its first use. First use of cell was with scope '%s', this " "attempt is with scope '%s'. Please create a new instance of the " 'cell if you would like it to use a different set of weights. ' 'If before you were using: MultiRNNCell([%s(...)] * num_layers), ' 'change to: MultiRNNCell([%s(...) for _ in range(num_layers)]). ' 'If before you were using the same cell instance as both the ' 'forward and reverse cell of a bidirectional RNN, simply create ' 'two instances (one for forward, one for reverse). ' "In May 2017, we will start transitioning this cell's behavior " 'to use existing stored weights, if any, when it is called ' 'with scope=None (which can lead to silent model degradation, so ' 'this error will remain until then.)' % (cell, cell_scope.name, scope_name, type(cell).__name__, type(cell).__name__)) else: weights_found = False try: with vs.variable_scope(checking_scope, reuse=True): vs.get_variable(_WEIGHTS_VARIABLE_NAME) weights_found = True except ValueError: pass if weights_found and reuse is None: raise ValueError( 'Attempt to have a second RNNCell use the weights of a variable ' "scope that already has weights: '%s'; and the cell was not " 'constructed as %s(..., reuse=True). ' 'To share the weights of an RNNCell, simply ' 'reuse it in your second calculation, or create a new one with ' 'the argument reuse=True.' % (scope_name, type(cell).__name__)) # Everything is OK. Update the cell's scope and yield it. cell._scope = checking_scope # pylint: disable=protected-access yield checking_scope class SeqAttentionCellWrapper(tf.nn.rnn_cell.RNNCell): """Basic attention cell wrapper. Implementation based on https://arxiv.org/abs/1409.0473. """ def __init__(self, cell, attn_inputs, attn_size, attn_vec_size, output_size=None, input_size=None, state_is_tuple=True, attn_masks=None, merge_output_attn='linear', reuse=None): """Create a cell with attention. Args: cell: an RNNCell, an attention is added to it. attn_inputs: a Tensor. attn_size: integer, the size of an attention vector. Equal to cell.output_size by default. attn_vec_size: integer, the number of convolutional features calculated on attention state and a size of the hidden layer built from base cell state. Equal to attn_size by default. input_size: integer, the size of a hidden linear layer, built from inputs and attention. Derived from the input tensor by default. state_is_tuple: If True, accepted and returned states are n-tuples, where `n = len(cells)`. By default (False), the states are all concatenated along the column axis. attn_mask: mask that should be applied to attention. If None, no masks will be applied. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: TypeError: if cell is not an RNNCell. ValueError: if cell returns a state tuple but the flag `state_is_tuple` is `False` or if attn_length is zero or less. """ if not isinstance(cell, rnn.RNNCell): raise TypeError('The parameter cell is not RNNCell.') if nest.is_sequence(cell.state_size) and not state_is_tuple: raise ValueError( 'Cell returns tuple of states, but the flag ' 'state_is_tuple is not set. State size is: %s' % str(cell.state_size)) if not state_is_tuple: logging.warn( '%s: Using a concatenated state is slower and will soon be ' 'deprecated. Use state_is_tuple=True.', self) self._state_is_tuple = state_is_tuple if not state_is_tuple: raise NotImplementedError self._cell = cell self._input_size = input_size self._output_size = output_size if output_size is None: self._output_size = cell.output_size self._attn_size = attn_size self._reuse = reuse self._attn_inputs = attn_inputs self._attn_vec_size = attn_vec_size self.attn_masks = attn_masks self.merge_output_attn = merge_output_attn @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._output_size def zero_state(self, batch_size, dtype=tf.float32): zero_state = self._cell.zero_state(batch_size, dtype=dtype) return zero_state def __call__(self, inputs, state, scope=None): """Seq Attention wrapper.""" with _checked_scope( self, scope or 'attention_cell_wrapper', reuse=self._reuse): inner_output, new_state = self._cell(inputs, state) new_attns = self._attention(inner_output, self._attn_inputs) if self.merge_output_attn == 'linear': with vs.variable_scope('attn_output_projection'): output = linear([inner_output, new_attns], self._output_size, True) elif self.merge_output_attn == 'concat': output = tf.concat([inner_output, new_attns], axis=-1) else: raise ValueError( 'Unknown method to merge output and attention: {}'.format( self.merge_output_attn)) return output, new_state def _attention(self, query, attn_inputs): with vs.variable_scope('attention'): attn_query = tf.layers.dense( inputs=query, units=self._attn_vec_size, use_bias=True) attn_keys = tf.layers.dense( inputs=attn_inputs, units=self._attn_vec_size, use_bias=True) attn_contents = tf.layers.dense( inputs=attn_inputs, units=self._attn_size, use_bias=True) v_attn = vs.get_variable('attn_v', [self._attn_vec_size]) scores = attn_sum_bahdanau(v_attn, attn_keys, attn_query) if self.attn_masks is not None: score_masks = self.attn_masks scores = scores * score_masks + (1.0 - score_masks) * tf.float32.min attn_weights = nn_ops.softmax(scores) new_attns = math_ops.reduce_sum( tf.expand_dims(attn_weights, -1) * attn_contents, [1]) return new_attns def attn_sum_bahdanau(v_attn, keys, query): """Calculates a batch and timewise dot product with a variable.""" return tf.reduce_sum(v_attn * tf.tanh(keys + tf.expand_dims(query, 1)), [2]) def attn_sum_dot(keys, query): """Calculates a batch and timewise dot product.""" return tf.reduce_sum(keys * tf.expand_dims(query, 1), [2]) def linear(args, output_size, bias, bias_start=0.0): """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. Args: args: a 2D Tensor or a list of 2D, batch x n, Tensors. output_size: int, second dimension of W[i]. bias: boolean, whether to add a bias term or not. bias_start: starting value to initialize the bias; 0 by default. Returns: A 2D Tensor with shape [batch x output_size] equal to sum_i(args[i] * W[i]), where W[i]s are newly created matrices. Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ if args is None or (nest.is_sequence(args) and not args): raise ValueError('`args` must be specified') if not nest.is_sequence(args): args = [args] # Calculate the total size of arguments on dimension 1. total_arg_size = 0 shapes = [a.get_shape() for a in args] for shape in shapes: if shape.ndims != 2: raise ValueError('linear is expecting 2D arguments: %s' % shapes) if shape[1].value is None: raise ValueError('linear expects shape[1] to be provided for shape %s, ' 'but saw %s' % (shape, shape[1])) else: total_arg_size += shape[1].value dtype = [a.dtype for a in args][0] # Now the computation. scope = vs.get_variable_scope() with vs.variable_scope(scope) as outer_scope: weights = vs.get_variable( _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype) if len(args) == 1: res = math_ops.matmul(args[0], weights) else: res = math_ops.matmul(array_ops.concat(args, 1), weights) if not bias: return res with vs.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) biases = vs.get_variable( _BIAS_VARIABLE_NAME, [output_size], dtype=dtype, initializer=init_ops.constant_initializer(bias_start, dtype=dtype)) return nn_ops.bias_add(res, biases) class ScoreWrapper(tf.nn.rnn_cell.RNNCell): """Creates a cell which outputs a scalar score value at each time step.""" def __init__(self, cell, activation='tanh'): self._cell = cell self._activation = ACTIVATION_DICT[activation] @property def state_size(self): return self._cell.state_size @property def output_size(self): return (self._cell.output_size, 1) def __call__(self, inputs, state, *args, **kwargs): inner_output, next_state = self._cell(inputs, state, *args, **kwargs) with vs.variable_scope('score_fn', reuse=tf.AUTO_REUSE): # No need for explicitly stop_gradient since the gradients contribution # from scores is stopped now using stop_gradients in `tf.gradients` score_input = inner_output # Moving to a 2 layer architechture since the one layer didn't improve # the meta learning loss much score_val = tf.layers.dense( score_input, units=16, use_bias=True, activation=tf.nn.relu, kernel_initializer=tf.ones_initializer(), bias_initializer=tf.ones_initializer()) score_val = tf.layers.dense( score_val, units=1, use_bias=True, activation=self._activation, kernel_initializer=tf.ones_initializer(), bias_initializer=tf.ones_initializer()) output = (inner_output, score_val) return output, next_state MemoryStateTuple = collections.namedtuple('MemoryStateTuple', ('memory', 'inner_state')) MemoryInputTuple = collections.namedtuple( 'MemoryInputTuple', ('read_ind', 'write_ind', 'valid_indices')) class MemoryWrapper(tf.nn.rnn_cell.RNNCell): """Augment RNNCell with a memory that the RNN can write to and read from. Each time step, 3 things are happening: 1) the RNNCell reads from one memory location (read_ind) as input to the inner RNN. 2) It also writes the output of the inner RNN to one memory location (write_ind). 1 indicates no writing. 3) It use the output of the inner RNN to compute the logits for the valid_indices, which will be used as input to compute a softmax distribution over them. Note that valid_indices always has the dimension max_n_valid_indices, use -1 to pad the dimensions the actual number of valid indices are less. """ def __init__(self, cell, mem_size, embed_size, max_n_valid_indices, use_score_wrapper=False, **kwargs): """Constructs a `ResidualWrapper` for `cell`. Args: cell: An instance of `RNNCell`. mem_size: size of the memory. embed_size: the size/dimension of the embedding in each memory location. max_n_valid_indices: maximum number of valid_indices. use_score_wrapper: Whether a score wrapper was used prior to passing the cell. **kwargs: Keyword arguments for score wrapper. """ self._use_score_wrapper = use_score_wrapper if use_score_wrapper: self._cell = ScoreWrapper(cell, **kwargs) else: self._cell = cell self._mem_size = mem_size self._embed_size = embed_size self._max_n_valid_indices = max_n_valid_indices @property def state_size(self): # This will be used to create zero states. return MemoryStateTuple( tf.TensorShape([self._mem_size, self._embed_size]), self._cell.state_size) @property def output_size(self): # The output is the logits of the valid_dices. if self._use_score_wrapper: return (self._max_n_valid_indices, 1) else: return self._max_n_valid_indices def __call__(self, inputs, state, scope=None, debug=False): """Unroll the memory augmented cell for one step. Args: inputs: (Possibly nested tuple of) Tensor, the input at this time step. state: An instance of MemoryStateTuple containing tensors from the previous time step. """ # B is batch size. # 1) Use read_ind to find memory location to read from # as input. # inputs.read_ind: (B, 1) # memory: (B, mem_size, embed_size) read_ind = tf.to_int32(inputs.read_ind) batch_size = tf.shape(read_ind)[0] if debug: print('batch size is', batch_size) print('read ind is', read_ind) mem_ind = tf.range(batch_size) # read_mem_ind: (B, 1) read_mem_ind = tf.expand_dims(mem_ind, axis=1) # read_ind: (B, 2) read_ind = tf.concat([read_mem_ind, read_ind], axis=1) if debug: print('processed read ind is', read_ind) # inner_inputs: (B, embed_size) inner_inputs = tf.gather_nd(state.memory, read_ind) if debug: print('inner_inputs is', inner_inputs) inner_state = state.inner_state # 2) Run the inner RNNCell. # inner_outputs: (B, embed_size) cell_outputs, new_inner_state = self._cell( inner_inputs, inner_state, scope=scope) if self._use_score_wrapper: inner_outputs, score_val = cell_outputs else: inner_outputs = cell_outputs if debug: print('inner_outputs is', inner_outputs) # 3) Compute logits for valid indices (using logit_masks # to mask out padded valid indices (-1)). # valid_indices: (B, max_n_valid_indices) valid_indices = tf.to_int32(inputs.valid_indices) if debug: print('valid_indices is', valid_indices) # Logit mask: (B, max_n_valid_indices) logit_masks = tf.greater_equal(inputs.valid_indices, 0) logit_masks = tf.cast(logit_masks, tf.float32) if debug: print('logit_masks is', logit_masks) # Normalize indices to be at least 0. valid_indices = tf.maximum(valid_indices, 0) # valid_indices: (B, max_n_valid_indices, 1) valid_indices = tf.expand_dims(valid_indices, -1) if debug: print('valid_indices is', valid_indices) print('mem_ind is', mem_ind) valid_mem_ind = tf.expand_dims(mem_ind, axis=1) # valid_mem_ind: (B, 1, 1) valid_mem_ind = tf.expand_dims(valid_mem_ind, axis=2) if debug: print('valid_mem_ind is', valid_mem_ind) # valid_mem_ind: (B, max_n_valid_indices, 1) valid_mem_ind = tf.tile(valid_mem_ind, [1, self._max_n_valid_indices, 1]) # valid_indices: (B, max_n_valid_indices, 2) # Third dimension of valid_indices is [b_i, valid_index] so that it can # index into the right memory location. valid_indices = tf.concat([valid_mem_ind, valid_indices], axis=2) if debug: print('valid_indices is', valid_indices) # select all the valid slots. # valid_values: (B, max_n_valid_indices, embed_size) valid_values = tf.gather_nd(state.memory, valid_indices) if debug: print('valid_values is', valid_values) # expanded_inner_outputs: (B, 1, embed_size) expanded_inner_outputs = tf.expand_dims(inner_outputs, 1) if debug: print('expanded_inner_outputs is', expanded_inner_outputs) # valid_values: (B, embed_size, max_n_valid_indices) valid_values = tf.transpose(valid_values, [0, 2, 1]) if debug: print('valid_values is', valid_values) # logits: (B, 1, max_n_valid_indices) logits = tf.matmul(expanded_inner_outputs, valid_values) if debug: print('logits is', logits) # logits: (B, max_n_valid_indices) logits = tf.squeeze(logits, axis=[1]) if debug: print('logits is', logits) # masked_logits = (logits * logit_masks) - (1 - logit_masks) * 1e6 masked_logits = logits * logit_masks + (1 - logit_masks) * tf.float32.min # masked_logits = tf.Print(masked_logits, [masked_logits], # message='masked_logits') outputs = masked_logits # 4) Write the output of the inner RNN to a memory # location (write_ind), using write_masks to mask out # padded write_ind (-1). # write_ind: (B, 1) write_ind = tf.cast(inputs.write_ind, tf.int32) if debug: print('write_ind is', write_ind) # write mask: (B, 1) write_masks = tf.greater_equal(inputs.write_ind, 0) if debug: print('write_masks greater_equal', write_masks) write_masks = tf.cast(write_masks, tf.float32) # write mask: (B, 1, 1) write_masks = tf.expand_dims(write_masks, [-1]) if debug: print('write_masks is', write_masks) # Normalize write_ind to be above 0. # write_ind: (B, 1) write_ind = tf.maximum(write_ind, 0) # write_mem_ind: (B, 1) write_mem_ind = tf.expand_dims(mem_ind, axis=1) # write_ind: (B, 2) # Second dimension is [b_i, write_index] write_ind = tf.concat([write_mem_ind, write_ind], axis=1) if debug: print('write_ind is', write_ind) if debug: print('masked_logits is', masked_logits) print('memory is', state.memory) # write_mat: (B, mem_size, embed_size) write_mat = tf.scatter_nd( write_ind, inner_outputs, shape=tf.shape(state.memory)) if debug: print('@' * 50) print('write_mat is', write_mat) print('write_mask is', write_masks) print('@' * 50) masked_write_mat = write_mat * write_masks new_memory = state.memory + masked_write_mat state = MemoryStateTuple(new_memory, new_inner_state) if debug: print('state is', state) if self._use_score_wrapper: outputs = (outputs, score_val) return (outputs, state)
google-research/google-research
meta_reward_learning/semantic_parsing/nsm/tf_utils.py
Python
apache-2.0
21,465
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Find similar items for a given query in the ANN index.""" import os import pickle import annoy import tensorflow as tf import tensorflow_hub as hub _INDEX_FILENAME = 'ann.index' _MAPPING_FILENAME = 'ann.index.mapping' _RANDOM_PROJECTION_FILENAME = 'random_projection.matrix' _METRIC = 'angular' class SimilarityFinder(object): """Similarity finder class.""" def __init__( self, module_url, index_file_path, mapping_file_path, dimensions, random_projection_matrix_file, ): # Load the TF-Hub module print('Loading the TF-Hub module...') self.embed_fn = hub.load(module_url) print('TF-hub module is loaded.') dimensions = self.embed_fn(['']).shape[1] self.random_projection_matrix = None if tf.io.gfile.exists(random_projection_matrix_file): with open(random_projection_matrix_file, 'rb') as handle: self.random_projection_matrix = pickle.load(handle) dimensions = self.random_projection_matrix.shape[1] self.index = annoy.AnnoyIndex(dimensions, metric=_METRIC) self.index.load(index_file_path, prefault=True) print('Annoy index is loaded.') with open(mapping_file_path, 'rb') as handle: self.mapping = pickle.load(handle) print('Mapping file is loaded.') def find_similar_items(self, query, num_matches=5): """Finds similar items to a given quey in the ANN index. Args: query: The query string num_matches: The number of similar items to retrieve. Returns: List of items. """ query_embedding = self.embed_fn([query])[0].numpy() if self.random_projection_matrix is not None: query_embedding = query_embedding.dot(self.random_projection_matrix) ids = self.index.get_nns_by_vector( query_embedding, num_matches, search_k=-1, include_distances=False) items = [self.mapping[i] for i in ids] return items def load(args): module_url = args.module_url index_file_path = os.path.join(args.index_output_dir, _INDEX_FILENAME) mapping_file_path = os.path.join(args.index_output_dir, _MAPPING_FILENAME) dimensions = args.dimensions random_projection_matrix_file = os.path.join( args.index_output_dir, _RANDOM_PROJECTION_FILENAME) return SimilarityFinder( module_url, index_file_path, mapping_file_path, dimensions, random_projection_matrix_file, )
tensorflow/hub
tensorflow_hub/tools/make_nearest_neighbour_index/similarity_finder.py
Python
apache-2.0
3,003
from django.conf.urls import patterns, include, url import os.path import tasks.views from django.conf import settings from django.contrib import admin admin.autodiscover() STATIC_PATH = os.path.abspath(os.path.join( os.path.dirname(os.path.abspath(__file__)), '../tasks/static')) if not os.path.exists(STATIC_PATH): raise ValueError(STATIC_PATH) urlpatterns = patterns('', # Examples: # url(r'^$', 'minitasks.views.home', name='home'), # url(r'^minitasks/', include('minitasks.foo.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)), url(r'^$', tasks.views.TaskIndex.as_view()), url(r'^tasks/$', tasks.views.TaskData.as_view(), name='tasks-data'), url(r'^claim/$', tasks.views.ClaimTask.as_view(), name='tasks-claim'), (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}), url(r'^secret-refresh-tasks/$', tasks.views.RefreshTaskData.as_view(), name='secret-refresh-tasks'), ) ### Attempt 1: if settings.DEBUG: urlpatterns += patterns('django.contrib.staticfiles.views', url(r'^static/(?P<path>.*)$', 'serve'), )
openhatch/new-mini-tasks
minitasks/urls.py
Python
apache-2.0
1,307
from .polytri import *
eeucalyptus/eeDA
app/dependencies/polytri/__init__.py
Python
apache-2.0
22
from django.conf import settings from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import render_to_response from django.template import RequestContext from django.utils.translation import ugettext from django.contrib import messages from django.contrib.admin.views.decorators import staff_member_required from account.utils import get_default_redirect, user_display from models import SignupCode from forms import SignupForm, InviteUserForm def group_and_bridge(request): """ Given the request we can depend on the GroupMiddleware to provide the group and bridge. """ # be group aware group = getattr(request, "group", None) if group: bridge = request.bridge else: bridge = None return group, bridge def group_context(group, bridge): # @@@ use bridge ctx = { "group": group, } if group: ctx["group_base"] = bridge.group_base_template() return ctx def signup(request, **kwargs): form_class = kwargs.pop("form_class", SignupForm) template_name = kwargs.pop("template_name", "account/signup.html") template_name_failure = kwargs.pop("template_name_failure", "signup_codes/failure.html") success_url = kwargs.pop("success_url", None) group, bridge = group_and_bridge(request) ctx = group_context(group, bridge) if success_url is None: if hasattr(settings, "SIGNUP_REDIRECT_URLNAME"): fallback_url = reverse(settings.SIGNUP_REDIRECT_URLNAME) else: if hasattr(settings, "LOGIN_REDIRECT_URLNAME"): fallback_url = reverse(settings.LOGIN_REDIRECT_URLNAME) else: fallback_url = settings.LOGIN_REDIRECT_URL success_url = get_default_redirect(request, fallback_url) code = request.GET.get("code") if request.method == "POST": form = form_class(request.POST, group=group) if form.is_valid(): user = form.save(request=request) signup_code = form.cleaned_data["signup_code"] if signup_code: signup_code.use(user) form.login(request, user) messages.add_message(request, messages.SUCCESS, ugettext("Successfully logged in as %(username)s.") % { "username": user_display(user), } ) return HttpResponseRedirect(success_url) else: signup_code = SignupCode.check(code) if signup_code: initial = { "signup_code": code, "email": signup_code.email, } form = form_class(initial=initial, group=group) else: if not settings.ACCOUNT_OPEN_SIGNUP: ctx.update({ "code": code, }) ctx = RequestContext(request, ctx) # if account signup is not open we want to fail when there is # no sign up code or what was provided failed. return render_to_response(template_name_failure, ctx) else: form = form_class(group=group) ctx.update({ "code": code, "form": form, }) return render_to_response(template_name, RequestContext(request, ctx)) @staff_member_required def admin_invite_user(request, **kwargs): """ This view, by default, works inside the Django admin. """ form_class = kwargs.pop("form_class", InviteUserForm) template_name = kwargs.pop("template_name", "signup_codes/admin_invite_user.html") group, bridge = group_and_bridge(request) if request.method == "POST": form = form_class(request.POST, group=group) if form.is_valid(): email = form.cleaned_data["email"] form.send_signup_code() messages.add_message(request, messages.INFO, ugettext("An email has been sent to %(email)s.") % { "email": email } ) form = form_class() # reset else: form = form_class(group=group) ctx = group_context(group, bridge) ctx.update({ "title": ugettext("Invite user"), "form": form, }) return render_to_response(template_name, RequestContext(request, ctx))
zhiwehu/IBookmark
webapp/apps/signup_codes/views.py
Python
apache-2.0
4,437
# detection anomalous clusters in the data set from ex import * from ex.geo.common import * from ex.ml import * import utils def ClusterFeature(cluster, features): '''fit a gaussian distribution for this cluster ''' X=features[:, cluster] return (X.mean(1), X.std(1, ddof=1)) if __name__ == '__main__': InitLog(log.DEBUG) input_file=sys.argv[1] data=LoadPickles(input_file) # data is {'target': target, 'pos': pos, 'z':z, 'xyz': xyz, 'feature': feature, 'clusters': clusters} clusters=data['clusters'] features=data['feature'].T pos=data['pos'] n=features.shape[1] nc=len(clusters) r=4./60 # maximum is 10./60 # normalize the feature log.debug('Normalizing features of size {0}...'.format(features.shape)) features=Normalize(features, 's1', 'row')[0] features *= features.shape[0] # retouch the clusters for i in range(nc): cluster=clusters[i] center=pos[:, cluster[0]] pp=pos[:, cluster] filter=Inside(pp, (center, r**2), 'c') clusters[i]=cluster[filter] # reduce the features if using KNN method # log.debug('Reducing Dim...') # energy=0.9 # U, L, M, R=ml.pca(features, energy) # features=mul(U[:, 0:min(R, 10)].T, features) # log.info('Dim={0} for {1} energy. Dim reduced to {2}'.format( # R, energy, features.shape[0])) log.debug('Extracting cluster features...') cluster_thresh=3 cs=[] ci=[] cf=[] for i in range(nc): if len(clusters[i]) > cluster_thresh: mu, sigma=ClusterFeature(clusters[i], features) cf.append(mu) cs.append(clusters[i]) ci=arr(ci) cf=np.vstack(cf).T print cf.shape log.debug('Scoring clusters...') # pca detection U, L, M, R=ml.pca(cf, 0.95) pca_model={'U':U, 'L':L, 'M':M, 'R':R} score=utils.PCAAnomalyScore(pca_model, cf.T, 'accum_err') pp=mul(U[:, 0:R].T, cf); figure() scatter(pp[0,:], pp[1,:], c=score) show() html_an, html_all=utils.GenReportCluster( cs, score, r, None, data['target'], -np.ones(n), data['pos'], data['z']) SaveText('clusters_mean_r{0:.5}_an.html'.format(r), html_an) SaveText('clusters_mean_r{0:.5}_all.html'.format(r), html_all)
excelly/xpy-ml
sdss/detection/detection_clusters.py
Python
apache-2.0
2,297
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved. from mldb import mldb, ResponseException import unittest class myTest(unittest.TestCase): def test_sequence(self): with self.assertRaisesRegex(ResponseException, "Executing builtin function exp: Can't convert value 'a' of type 'ASCII_STRING' to double") as re: query = "SELECT exp('a')" mldb.query(query) with self.assertRaisesRegex(ResponseException, "Binding builtin function sqrt: expected 1 argument, got 3") as re: query = "SELECT sqrt(1,2,3)" mldb.query(query) mldb.run_tests()
mldbai/mldb
testing/MLDB-1336-builtin-checks.py
Python
apache-2.0
636
import torch import torch.nn as nn from torch.utils.data import DataLoader import importlib import os import json import logging import pickle import re from unittest import mock import pytest import numpy as np import pandas as pd import sklearn.datasets as datasets import yaml import mlflow.pyfunc as pyfunc import mlflow.pytorch import mlflow.pyfunc.scoring_server as pyfunc_scoring_server from mlflow.pytorch import get_default_conda_env from mlflow.exceptions import MlflowException from mlflow.models import Model, infer_signature from mlflow.models.utils import _read_example from mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.utils.environment import _mlflow_conda_env, _mlflow_additional_pip_env from mlflow.utils.file_utils import TempDir from mlflow.utils.model_utils import _get_flavor_configuration from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS from tests.helper_functions import ( _compare_conda_env_requirements, _assert_pip_requirements, _is_available_on_pypi, _is_importable, _compare_logged_code_paths, ) _logger = logging.getLogger(__name__) # This test suite is included as a code dependency when testing PyTorch model scoring in new # processes and docker containers. In these environments, the `tests` module is not available. # Therefore, we attempt to import from `tests` and gracefully emit a warning if it's unavailable. try: from tests.helper_functions import pyfunc_serve_and_score_model from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import except ImportError: _logger.warning( "Failed to import test helper functions. Tests depending on these functions may fail!" ) EXTRA_PYFUNC_SERVING_TEST_ARGS = [] if _is_available_on_pypi("torch") else ["--no-conda"] @pytest.fixture(scope="module") def data(): iris = datasets.load_iris() data = pd.DataFrame( data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"] ) y = data["target"] x = data.drop("target", axis=1) return x, y def get_dataset(data): x, y = data dataset = [(xi.astype(np.float32), yi.astype(np.float32)) for xi, yi in zip(x.values, y.values)] return dataset def train_model(model, data): dataset = get_dataset(data) criterion = nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) batch_size = 16 num_workers = 4 dataloader = DataLoader( dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True, drop_last=False ) model.train() for _ in range(5): for batch in dataloader: optimizer.zero_grad() batch_size = batch[0].shape[0] y_pred = model(batch[0]).squeeze(dim=1) loss = criterion(y_pred, batch[1]) loss.backward() optimizer.step() def get_sequential_model(): return nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 1)) @pytest.fixture def sequential_model(data, scripted_model): model = get_sequential_model() if scripted_model: model = torch.jit.script(model) train_model(model=model, data=data) return model def get_subclassed_model_definition(): """ Defines a PyTorch model class that inherits from ``torch.nn.Module``. This method can be invoked within a pytest fixture to define the model class in the ``__main__`` scope. Alternatively, it can be invoked within a module to define the class in the module's scope. """ # pylint: disable=W0223 class SubclassedModel(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(4, 1) def forward(self, x): # pylint: disable=arguments-differ y_pred = self.linear(x) return y_pred return SubclassedModel @pytest.fixture(scope="module") def main_scoped_subclassed_model(data): """ A custom PyTorch model inheriting from ``torch.nn.Module`` whose class is defined in the "__main__" scope. """ model_class = get_subclassed_model_definition() model = model_class() train_model(model=model, data=data) return model # pylint: disable=W0223 class ModuleScopedSubclassedModel(get_subclassed_model_definition()): """ A custom PyTorch model class defined in the test module scope. This is a subclass of ``torch.nn.Module``. """ @pytest.fixture(scope="module") def module_scoped_subclassed_model(data): """ A custom PyTorch model inheriting from ``torch.nn.Module`` whose class is defined in the test module scope. """ model = ModuleScopedSubclassedModel() train_model(model=model, data=data) return model @pytest.fixture def model_path(tmpdir): return os.path.join(str(tmpdir), "model") @pytest.fixture def pytorch_custom_env(tmpdir): conda_env = os.path.join(str(tmpdir), "conda_env.yml") _mlflow_conda_env(conda_env, additional_pip_deps=["pytorch", "torchvision", "pytest"]) return conda_env def _predict(model, data): dataset = get_dataset(data) batch_size = 16 num_workers = 4 dataloader = DataLoader( dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False, drop_last=False ) predictions = np.zeros((len(dataloader.sampler),)) model.eval() with torch.no_grad(): for i, batch in enumerate(dataloader): y_preds = model(batch[0]).squeeze(dim=1).numpy() predictions[i * batch_size : (i + 1) * batch_size] = y_preds return predictions @pytest.fixture def sequential_predicted(sequential_model, data): return _predict(sequential_model, data) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_signature_and_examples_are_saved_correctly(sequential_model, data): model = sequential_model signature_ = infer_signature(*data) example_ = data[0].head(3) for signature in (None, signature_): for example in (None, example_): with TempDir() as tmp: path = tmp.path("model") mlflow.pytorch.save_model( model, path=path, signature=signature, input_example=example ) mlflow_model = Model.load(path) assert signature == mlflow_model.signature if example is None: assert mlflow_model.saved_input_example_info is None else: assert all((_read_example(mlflow_model, path) == example).all()) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_model(sequential_model, data, sequential_predicted): try: artifact_path = "pytorch" model_info = mlflow.pytorch.log_model(sequential_model, artifact_path=artifact_path) model_uri = "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path ) assert model_info.model_uri == model_uri sequential_model_loaded = mlflow.pytorch.load_model(model_uri=model_uri) test_predictions = _predict(sequential_model_loaded, data) np.testing.assert_array_equal(test_predictions, sequential_predicted) finally: mlflow.end_run() def test_log_model_calls_register_model(module_scoped_subclassed_model): custom_pickle_module = pickle artifact_path = "model" register_model_patch = mock.patch("mlflow.register_model") with mlflow.start_run(), register_model_patch: mlflow.pytorch.log_model( artifact_path=artifact_path, pytorch_model=module_scoped_subclassed_model, pickle_module=custom_pickle_module, registered_model_name="AdsModel1", ) model_uri = "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path ) mlflow.register_model.assert_called_once_with( model_uri, "AdsModel1", await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS ) def test_log_model_no_registered_model_name(module_scoped_subclassed_model): custom_pickle_module = pickle artifact_path = "model" register_model_patch = mock.patch("mlflow.register_model") with mlflow.start_run(), register_model_patch: mlflow.pytorch.log_model( artifact_path=artifact_path, pytorch_model=module_scoped_subclassed_model, pickle_module=custom_pickle_module, ) mlflow.register_model.assert_not_called() @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_raise_exception(sequential_model): with TempDir(chdr=True, remove_on_exit=True) as tmp: path = tmp.path("model") with pytest.raises(IOError, match="No such file or directory"): mlflow.pytorch.load_model(path) with pytest.raises(TypeError, match="Argument 'pytorch_model' should be a torch.nn.Module"): mlflow.pytorch.save_model([1, 2, 3], path) mlflow.pytorch.save_model(sequential_model, path) with pytest.raises(RuntimeError, match=f"Path '{os.path.abspath(path)}' already exists"): mlflow.pytorch.save_model(sequential_model, path) from mlflow import sklearn import sklearn.neighbors as knn path = tmp.path("knn.pkl") knn = knn.KNeighborsClassifier() with open(path, "wb") as f: pickle.dump(knn, f) path = tmp.path("knn") sklearn.save_model(knn, path=path) with pytest.raises(MlflowException, match='Model does not have the "pytorch" flavor'): mlflow.pytorch.load_model(path) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_save_and_load_model(sequential_model, model_path, data, sequential_predicted): mlflow.pytorch.save_model(sequential_model, model_path) # Loading pytorch model sequential_model_loaded = mlflow.pytorch.load_model(model_path) np.testing.assert_array_equal(_predict(sequential_model_loaded, data), sequential_predicted) # Loading pyfunc model pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) np.testing.assert_array_almost_equal( pyfunc_loaded.predict(data[0]).values[:, 0], sequential_predicted, decimal=4 ) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_pyfunc_model_works_with_np_input_type( sequential_model, model_path, data, sequential_predicted ): mlflow.pytorch.save_model(sequential_model, model_path) # Loading pyfunc model pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path) # predict works with dataframes df_result = pyfunc_loaded.predict(data[0]) assert type(df_result) == pd.DataFrame np.testing.assert_array_almost_equal(df_result.values[:, 0], sequential_predicted, decimal=4) # predict works with numpy ndarray np_result = pyfunc_loaded.predict(data[0].values.astype(np.float32)) assert type(np_result) == np.ndarray np.testing.assert_array_almost_equal(np_result[:, 0], sequential_predicted, decimal=4) # predict does not work with lists with pytest.raises( TypeError, match="The PyTorch flavor does not support List or Dict input types" ): pyfunc_loaded.predict([1, 2, 3, 4]) # predict does not work with scalars with pytest.raises(TypeError, match="Input data should be pandas.DataFrame or numpy.ndarray"): pyfunc_loaded.predict(4) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_load_model_from_remote_uri_succeeds( sequential_model, model_path, mock_s3_bucket, data, sequential_predicted ): mlflow.pytorch.save_model(sequential_model, model_path) artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket) artifact_path = "model" artifact_repo = S3ArtifactRepository(artifact_root) artifact_repo.log_artifacts(model_path, artifact_path=artifact_path) model_uri = artifact_root + "/" + artifact_path sequential_model_loaded = mlflow.pytorch.load_model(model_uri=model_uri) np.testing.assert_array_equal(_predict(sequential_model_loaded, data), sequential_predicted) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_save_persists_specified_conda_env_in_mlflow_model_directory( sequential_model, model_path, pytorch_custom_env ): mlflow.pytorch.save_model( pytorch_model=sequential_model, path=model_path, conda_env=pytorch_custom_env ) pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) assert os.path.exists(saved_conda_env_path) assert saved_conda_env_path != pytorch_custom_env with open(pytorch_custom_env, "r") as f: pytorch_custom_env_text = f.read() with open(saved_conda_env_path, "r") as f: saved_conda_env_text = f.read() assert saved_conda_env_text == pytorch_custom_env_text @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_save_persists_requirements_in_mlflow_model_directory( sequential_model, model_path, pytorch_custom_env ): mlflow.pytorch.save_model( pytorch_model=sequential_model, path=model_path, conda_env=pytorch_custom_env ) saved_pip_req_path = os.path.join(model_path, "requirements.txt") _compare_conda_env_requirements(pytorch_custom_env, saved_pip_req_path) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [False]) def test_save_model_with_pip_requirements(sequential_model, tmpdir): # Path to a requirements file tmpdir1 = tmpdir.join("1") req_file = tmpdir.join("requirements.txt") req_file.write("a") mlflow.pytorch.save_model(sequential_model, tmpdir1.strpath, pip_requirements=req_file.strpath) _assert_pip_requirements(tmpdir1.strpath, ["mlflow", "a"], strict=True) # List of requirements tmpdir2 = tmpdir.join("2") mlflow.pytorch.save_model( sequential_model, tmpdir2.strpath, pip_requirements=[f"-r {req_file.strpath}", "b"] ) _assert_pip_requirements(tmpdir2.strpath, ["mlflow", "a", "b"], strict=True) # Constraints file tmpdir3 = tmpdir.join("3") mlflow.pytorch.save_model( sequential_model, tmpdir3.strpath, pip_requirements=[f"-c {req_file.strpath}", "b"] ) _assert_pip_requirements( tmpdir3.strpath, ["mlflow", "b", "-c constraints.txt"], ["a"], strict=True ) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [False]) def test_save_model_with_extra_pip_requirements(sequential_model, tmpdir): default_reqs = mlflow.pytorch.get_default_pip_requirements() # Path to a requirements file tmpdir1 = tmpdir.join("1") req_file = tmpdir.join("requirements.txt") req_file.write("a") mlflow.pytorch.save_model( sequential_model, tmpdir1.strpath, extra_pip_requirements=req_file.strpath ) _assert_pip_requirements(tmpdir1.strpath, ["mlflow", *default_reqs, "a"]) # List of requirements tmpdir2 = tmpdir.join("2") mlflow.pytorch.save_model( sequential_model, tmpdir2.strpath, extra_pip_requirements=[f"-r {req_file.strpath}", "b"] ) _assert_pip_requirements(tmpdir2.strpath, ["mlflow", *default_reqs, "a", "b"]) # Constraints file tmpdir3 = tmpdir.join("3") mlflow.pytorch.save_model( sequential_model, tmpdir3.strpath, extra_pip_requirements=[f"-c {req_file.strpath}", "b"] ) _assert_pip_requirements( tmpdir3.strpath, ["mlflow", *default_reqs, "b", "-c constraints.txt"], ["a"] ) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_save_accepts_conda_env_as_dict(sequential_model, model_path): conda_env = dict(mlflow.pytorch.get_default_conda_env()) conda_env["dependencies"].append("pytest") mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path, conda_env=conda_env) pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) assert os.path.exists(saved_conda_env_path) with open(saved_conda_env_path, "r") as f: saved_conda_env_parsed = yaml.safe_load(f) assert saved_conda_env_parsed == conda_env @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_log_persists_specified_conda_env_in_mlflow_model_directory( sequential_model, pytorch_custom_env ): artifact_path = "model" with mlflow.start_run(): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path=artifact_path, conda_env=pytorch_custom_env, ) model_path = _download_artifact_from_uri( "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path ) ) pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]) assert os.path.exists(saved_conda_env_path) assert saved_conda_env_path != pytorch_custom_env with open(pytorch_custom_env, "r") as f: pytorch_custom_env_text = f.read() with open(saved_conda_env_path, "r") as f: saved_conda_env_text = f.read() assert saved_conda_env_text == pytorch_custom_env_text @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_log_persists_requirements_in_mlflow_model_directory( sequential_model, pytorch_custom_env ): artifact_path = "model" with mlflow.start_run(): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path=artifact_path, conda_env=pytorch_custom_env, ) model_path = _download_artifact_from_uri( "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path ) ) saved_pip_req_path = os.path.join(model_path, "requirements.txt") _compare_conda_env_requirements(pytorch_custom_env, saved_pip_req_path) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies( sequential_model, model_path ): mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path) _assert_pip_requirements(model_path, mlflow.pytorch.get_default_pip_requirements()) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies( sequential_model, ): artifact_path = "model" with mlflow.start_run(): mlflow.pytorch.log_model(sequential_model, artifact_path) model_uri = mlflow.get_artifact_uri(artifact_path) _assert_pip_requirements(model_uri, mlflow.pytorch.get_default_pip_requirements()) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_load_model_with_differing_pytorch_version_logs_warning(sequential_model, model_path): mlflow.pytorch.save_model(pytorch_model=sequential_model, path=model_path) saver_pytorch_version = "1.0" model_config_path = os.path.join(model_path, "MLmodel") model_config = Model.load(model_config_path) model_config.flavors[mlflow.pytorch.FLAVOR_NAME]["pytorch_version"] = saver_pytorch_version model_config.save(model_config_path) log_messages = [] def custom_warn(message_text, *args, **kwargs): log_messages.append(message_text % args % kwargs) loader_pytorch_version = "0.8.2" with mock.patch("mlflow.pytorch._logger.warning") as warn_mock, mock.patch( "torch.__version__", loader_pytorch_version ): warn_mock.side_effect = custom_warn mlflow.pytorch.load_model(model_uri=model_path) assert any( [ "does not match installed PyTorch version" in log_message and saver_pytorch_version in log_message and loader_pytorch_version in log_message for log_message in log_messages ] ) @pytest.mark.large def test_pyfunc_model_serving_with_module_scoped_subclassed_model_and_default_conda_env( module_scoped_subclassed_model, model_path, data ): mlflow.pytorch.save_model( path=model_path, pytorch_model=module_scoped_subclassed_model, code_paths=[__file__], ) scoring_response = pyfunc_serve_and_score_model( model_uri=model_path, data=data[0], content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, extra_args=["--no-conda"], ) assert scoring_response.status_code == 200 deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) np.testing.assert_array_almost_equal( deployed_model_preds.values[:, 0], _predict(model=module_scoped_subclassed_model, data=data), decimal=4, ) def test_save_model_with_wrong_codepaths_fails_correctly( module_scoped_subclassed_model, model_path, data ): # pylint: disable=unused-argument with pytest.raises(TypeError, match="Argument code_paths should be a list, not <class 'str'>"): mlflow.pytorch.save_model( path=model_path, pytorch_model=module_scoped_subclassed_model, code_paths="some string" ) @pytest.mark.large def test_pyfunc_model_serving_with_main_scoped_subclassed_model_and_custom_pickle_module( main_scoped_subclassed_model, model_path, data ): mlflow.pytorch.save_model( path=model_path, pytorch_model=main_scoped_subclassed_model, pickle_module=mlflow_pytorch_pickle_module, ) scoring_response = pyfunc_serve_and_score_model( model_uri=model_path, data=data[0], content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, extra_args=["--no-conda"], ) assert scoring_response.status_code == 200 deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) np.testing.assert_array_almost_equal( deployed_model_preds.values[:, 0], _predict(model=main_scoped_subclassed_model, data=data), decimal=4, ) @pytest.mark.large def test_load_model_succeeds_with_dependencies_specified_via_code_paths( module_scoped_subclassed_model, model_path, data ): # Save a PyTorch model whose class is defined in the current test suite. Because the # `tests` module is not available when the model is deployed for local scoring, we include # the test suite file as a code dependency mlflow.pytorch.save_model( path=model_path, pytorch_model=module_scoped_subclassed_model, code_paths=[__file__], ) # Define a custom pyfunc model that loads a PyTorch model artifact using # `mlflow.pytorch.load_model` class TorchValidatorModel(pyfunc.PythonModel): def load_context(self, context): # pylint: disable=attribute-defined-outside-init self.pytorch_model = mlflow.pytorch.load_model(context.artifacts["pytorch_model"]) def predict(self, context, model_input): with torch.no_grad(): input_tensor = torch.from_numpy(model_input.values.astype(np.float32)) output_tensor = self.pytorch_model(input_tensor) return pd.DataFrame(output_tensor.numpy()) pyfunc_artifact_path = "pyfunc_model" with mlflow.start_run(): pyfunc.log_model( artifact_path=pyfunc_artifact_path, python_model=TorchValidatorModel(), artifacts={"pytorch_model": model_path}, ) pyfunc_model_path = _download_artifact_from_uri( "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path ) ) # Deploy the custom pyfunc model and ensure that it is able to successfully load its # constituent PyTorch model via `mlflow.pytorch.load_model` scoring_response = pyfunc_serve_and_score_model( model_uri=pyfunc_model_path, data=data[0], content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, extra_args=["--no-conda"], ) assert scoring_response.status_code == 200 deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content)) np.testing.assert_array_almost_equal( deployed_model_preds.values[:, 0], _predict(model=module_scoped_subclassed_model, data=data), decimal=4, ) @pytest.mark.large def test_load_pyfunc_loads_torch_model_using_pickle_module_specified_at_save_time( module_scoped_subclassed_model, model_path ): custom_pickle_module = pickle mlflow.pytorch.save_model( path=model_path, pytorch_model=module_scoped_subclassed_model, pickle_module=custom_pickle_module, ) import_module_fn = importlib.import_module imported_modules = [] def track_module_imports(module_name): imported_modules.append(module_name) return import_module_fn(module_name) with mock.patch("importlib.import_module") as import_mock, mock.patch( "torch.load" ) as torch_load_mock: import_mock.side_effect = track_module_imports pyfunc.load_pyfunc(model_path) torch_load_mock.assert_called_with(mock.ANY, pickle_module=custom_pickle_module) assert custom_pickle_module.__name__ in imported_modules @pytest.mark.large def test_load_model_loads_torch_model_using_pickle_module_specified_at_save_time( module_scoped_subclassed_model, ): custom_pickle_module = pickle artifact_path = "pytorch_model" with mlflow.start_run(): mlflow.pytorch.log_model( artifact_path=artifact_path, pytorch_model=module_scoped_subclassed_model, pickle_module=custom_pickle_module, ) model_uri = "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path ) import_module_fn = importlib.import_module imported_modules = [] def track_module_imports(module_name): imported_modules.append(module_name) return import_module_fn(module_name) with mock.patch("importlib.import_module") as import_mock, mock.patch( "torch.load" ) as torch_load_mock: import_mock.side_effect = track_module_imports pyfunc.load_pyfunc(model_uri=model_uri) torch_load_mock.assert_called_with(mock.ANY, pickle_module=custom_pickle_module) assert custom_pickle_module.__name__ in imported_modules @pytest.mark.large def test_load_pyfunc_succeeds_when_data_is_model_file_instead_of_directory( module_scoped_subclassed_model, model_path, data ): """ This test verifies that PyTorch models saved in older versions of MLflow are loaded successfully by ``mlflow.pytorch.load_model``. The ``data`` path associated with these older models is serialized PyTorch model file, as opposed to the current format: a directory containing a serialized model file and pickle module information. """ mlflow.pytorch.save_model(path=model_path, pytorch_model=module_scoped_subclassed_model) model_conf_path = os.path.join(model_path, "MLmodel") model_conf = Model.load(model_conf_path) pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME) assert pyfunc_conf is not None model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA]) assert os.path.exists(model_data_path) assert mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME in os.listdir(model_data_path) pyfunc_conf[pyfunc.DATA] = os.path.join( model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME ) model_conf.save(model_conf_path) loaded_pyfunc = pyfunc.load_pyfunc(model_path) np.testing.assert_array_almost_equal( loaded_pyfunc.predict(data[0]), pd.DataFrame(_predict(model=module_scoped_subclassed_model, data=data)), decimal=4, ) @pytest.mark.large def test_load_model_succeeds_when_data_is_model_file_instead_of_directory( module_scoped_subclassed_model, model_path, data ): """ This test verifies that PyTorch models saved in older versions of MLflow are loaded successfully by ``mlflow.pytorch.load_model``. The ``data`` path associated with these older models is serialized PyTorch model file, as opposed to the current format: a directory containing a serialized model file and pickle module information. """ artifact_path = "pytorch_model" with mlflow.start_run(): mlflow.pytorch.log_model( artifact_path=artifact_path, pytorch_model=module_scoped_subclassed_model ) model_path = _download_artifact_from_uri( "runs:/{run_id}/{artifact_path}".format( run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path ) ) model_conf_path = os.path.join(model_path, "MLmodel") model_conf = Model.load(model_conf_path) pyfunc_conf = model_conf.flavors.get(pyfunc.FLAVOR_NAME) assert pyfunc_conf is not None model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA]) assert os.path.exists(model_data_path) assert mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME in os.listdir(model_data_path) pyfunc_conf[pyfunc.DATA] = os.path.join( model_data_path, mlflow.pytorch._SERIALIZED_TORCH_MODEL_FILE_NAME ) model_conf.save(model_conf_path) loaded_pyfunc = pyfunc.load_pyfunc(model_path) np.testing.assert_array_almost_equal( loaded_pyfunc.predict(data[0]), pd.DataFrame(_predict(model=module_scoped_subclassed_model, data=data)), decimal=4, ) @pytest.mark.large def test_load_model_allows_user_to_override_pickle_module_via_keyword_argument( module_scoped_subclassed_model, model_path ): mlflow.pytorch.save_model( path=model_path, pytorch_model=module_scoped_subclassed_model, pickle_module=pickle ) with mock.patch("torch.load") as torch_load_mock, mock.patch( "mlflow.pytorch._logger.warning" ) as warn_mock: mlflow.pytorch.load_model(model_uri=model_path, pickle_module=mlflow_pytorch_pickle_module) torch_load_mock.assert_called_with(mock.ANY, pickle_module=mlflow_pytorch_pickle_module) warn_mock.assert_any_call(mock.ANY, mlflow_pytorch_pickle_module.__name__, pickle.__name__) @pytest.mark.large def test_load_model_raises_exception_when_pickle_module_cannot_be_imported( main_scoped_subclassed_model, model_path ): mlflow.pytorch.save_model(path=model_path, pytorch_model=main_scoped_subclassed_model) bad_pickle_module_name = "not.a.real.module" pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME) model_data_path = os.path.join(model_path, pyfunc_conf[pyfunc.DATA]) assert os.path.exists(model_data_path) assert mlflow.pytorch._PICKLE_MODULE_INFO_FILE_NAME in os.listdir(model_data_path) with open( os.path.join(model_data_path, mlflow.pytorch._PICKLE_MODULE_INFO_FILE_NAME), "w" ) as f: f.write(bad_pickle_module_name) with pytest.raises( MlflowException, match=r"Failed to import the pickle module.+" + re.escape(bad_pickle_module_name), ): mlflow.pytorch.load_model(model_uri=model_path) @pytest.mark.large def test_pyfunc_serve_and_score(data): model = torch.nn.Linear(4, 1) train_model(model=model, data=data) with mlflow.start_run(): mlflow.pytorch.log_model(model, artifact_path="model") model_uri = mlflow.get_artifact_uri("model") resp = pyfunc_serve_and_score_model( model_uri, data[0], pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED, extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS, ) scores = pd.DataFrame(json.loads(resp.content)) np.testing.assert_array_almost_equal(scores.values[:, 0], _predict(model=model, data=data)) @pytest.mark.large @pytest.mark.skipif(not _is_importable("transformers"), reason="This test requires transformers") def test_pyfunc_serve_and_score_transformers(): from transformers import BertModel, BertConfig # pylint: disable=import-error class MyBertModel(BertModel): def forward(self, *args, **kwargs): # pylint: disable=arguments-differ return super().forward(*args, **kwargs).last_hidden_state model = MyBertModel( BertConfig( vocab_size=16, hidden_size=2, num_hidden_layers=2, num_attention_heads=2, intermediate_size=2, ) ) model.eval() with mlflow.start_run(): mlflow.pytorch.log_model(model, artifact_path="model") model_uri = mlflow.get_artifact_uri("model") input_ids = model.dummy_inputs["input_ids"] data = json.dumps({"inputs": input_ids.tolist()}) resp = pyfunc_serve_and_score_model( model_uri, data, pyfunc_scoring_server.CONTENT_TYPE_JSON, extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS, ) np.testing.assert_array_equal(json.loads(resp.content), model(input_ids).detach().numpy()) @pytest.fixture def create_requirements_file(tmpdir): requirement_file_name = "requirements.txt" fp = tmpdir.join(requirement_file_name) test_string = "mlflow" fp.write(test_string) return fp.strpath, test_string @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_requirements_file_log_model(create_requirements_file, sequential_model): requirements_file, content_expected = create_requirements_file with mlflow.start_run(): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path="models", requirements_file=requirements_file, ) model_uri = "runs:/{run_id}/{model_path}".format( run_id=mlflow.active_run().info.run_id, model_path="models" ) # Verify that explicitly specified requirements file overrides default requirements file conda_env = get_default_conda_env() pip_deps = conda_env["dependencies"][-1]["pip"] assert _mlflow_additional_pip_env(pip_deps) != content_expected with TempDir(remove_on_exit=True) as tmp: model_path = _download_artifact_from_uri(model_uri, tmp.path()) model_config_path = os.path.join(model_path, "MLmodel") model_config = Model.load(model_config_path) flavor_config = model_config.flavors["pytorch"] assert "requirements_file" in flavor_config loaded_requirements_file = flavor_config["requirements_file"] assert "path" in loaded_requirements_file requirements_file_path = loaded_requirements_file["path"] requirements_file_path = os.path.join(model_path, requirements_file_path) with open(requirements_file_path) as fp: assert fp.read() == content_expected @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_requirements_file_save_model(create_requirements_file, sequential_model): requirements_file, content_expected = create_requirements_file with TempDir(remove_on_exit=True) as tmp: model_path = os.path.join(tmp.path(), "models") mlflow.pytorch.save_model( pytorch_model=sequential_model, path=model_path, requirements_file=requirements_file ) # Verify that explicitly specified requirements file overrides default requirements file conda_env = get_default_conda_env() pip_deps = conda_env["dependencies"][-1]["pip"] assert _mlflow_additional_pip_env(pip_deps) != content_expected model_config_path = os.path.join(model_path, "MLmodel") model_config = Model.load(model_config_path) flavor_config = model_config.flavors["pytorch"] assert "requirements_file" in flavor_config loaded_requirements_file = flavor_config["requirements_file"] assert "path" in loaded_requirements_file requirements_file_path = loaded_requirements_file["path"] requirements_file_path = os.path.join(model_path, requirements_file_path) with open(requirements_file_path) as fp: assert fp.read() == content_expected @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_model_invalid_requirement_file_path(sequential_model): with mlflow.start_run(), pytest.raises(MlflowException, match="FileNotFoundError"): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path="models", requirements_file="non_existing_file.txt", ) @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_model_invalid_requirement_file_type(sequential_model): with mlflow.start_run(), pytest.raises( TypeError, match="Path to requirements file should be a string" ): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path="models", requirements_file=["non_existing_file.txt"], ) def test_save_model_emits_deprecation_warning_for_requirements_file(tmpdir): reqs_file = tmpdir.join("requirements.txt") reqs_file.write("torch") with pytest.warns(FutureWarning, match="`requirements_file` has been deprecated"): mlflow.pytorch.save_model( get_sequential_model(), tmpdir.join("model"), requirements_file=reqs_file.strpath, ) @pytest.fixture def create_extra_files(tmpdir): fp1 = tmpdir.join("extra1.txt") fp2 = tmpdir.join("extra2.txt") fp1.write("1") fp2.write("2") return [fp1.strpath, fp2.strpath], ["1", "2"] @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_extra_files_log_model(create_extra_files, sequential_model): extra_files, contents_expected = create_extra_files with mlflow.start_run(): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path="models", extra_files=extra_files ) model_uri = "runs:/{run_id}/{model_path}".format( run_id=mlflow.active_run().info.run_id, model_path="models" ) with TempDir(remove_on_exit=True) as tmp: model_path = _download_artifact_from_uri(model_uri, tmp.path()) model_config_path = os.path.join(model_path, "MLmodel") model_config = Model.load(model_config_path) flavor_config = model_config.flavors["pytorch"] assert "extra_files" in flavor_config loaded_extra_files = flavor_config["extra_files"] for loaded_extra_file, content_expected in zip(loaded_extra_files, contents_expected): assert "path" in loaded_extra_file extra_file_path = os.path.join(model_path, loaded_extra_file["path"]) with open(extra_file_path) as fp: assert fp.read() == content_expected @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_extra_files_save_model(create_extra_files, sequential_model): extra_files, contents_expected = create_extra_files with TempDir(remove_on_exit=True) as tmp: model_path = os.path.join(tmp.path(), "models") mlflow.pytorch.save_model( pytorch_model=sequential_model, path=model_path, extra_files=extra_files ) model_config_path = os.path.join(model_path, "MLmodel") model_config = Model.load(model_config_path) flavor_config = model_config.flavors["pytorch"] assert "extra_files" in flavor_config loaded_extra_files = flavor_config["extra_files"] for loaded_extra_file, content_expected in zip(loaded_extra_files, contents_expected): assert "path" in loaded_extra_file extra_file_path = os.path.join(model_path, loaded_extra_file["path"]) with open(extra_file_path) as fp: assert fp.read() == content_expected @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_model_invalid_extra_file_path(sequential_model): with mlflow.start_run(), pytest.raises(MlflowException, match="FileNotFoundError"): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path="models", extra_files=["non_existing_file.txt"], ) @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_model_invalid_extra_file_type(sequential_model): with mlflow.start_run(), pytest.raises( TypeError, match="Extra files argument should be a list" ): mlflow.pytorch.log_model( pytorch_model=sequential_model, artifact_path="models", extra_files="non_existing_file.txt", ) def state_dict_equal(state_dict1, state_dict2): for key1 in state_dict1: if key1 not in state_dict2: return False value1 = state_dict1[key1] value2 = state_dict2[key1] if type(value1) != type(value2): return False elif isinstance(value1, dict): if not state_dict_equal(value1, value2): return False elif isinstance(value1, torch.Tensor): if not torch.equal(value1, value2): return False elif value1 != value2: return False else: continue return True @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_save_state_dict(sequential_model, model_path, data): state_dict = sequential_model.state_dict() mlflow.pytorch.save_state_dict(state_dict, model_path) loaded_state_dict = mlflow.pytorch.load_state_dict(model_path) assert state_dict_equal(loaded_state_dict, state_dict) model = get_sequential_model() model.load_state_dict(loaded_state_dict) np.testing.assert_array_almost_equal( _predict(model, data), _predict(sequential_model, data), decimal=4, ) @pytest.mark.large def test_save_state_dict_can_save_nested_state_dict(model_path): """ This test ensures that `save_state_dict` supports a use case described in the page below where a user bundles multiple objects (e.g., model, optimizer, learning-rate scheduler) into a single nested state_dict and loads it back later for inference or re-training: https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html """ model = get_sequential_model() optim = torch.optim.Adam(model.parameters()) state_dict = {"model": model.state_dict(), "optim": optim.state_dict()} mlflow.pytorch.save_state_dict(state_dict, model_path) loaded_state_dict = mlflow.pytorch.load_state_dict(model_path) assert state_dict_equal(loaded_state_dict, state_dict) model.load_state_dict(loaded_state_dict["model"]) optim.load_state_dict(loaded_state_dict["optim"]) @pytest.mark.large @pytest.mark.parametrize("not_state_dict", [0, "", get_sequential_model()]) def test_save_state_dict_throws_for_invalid_object_type(not_state_dict, model_path): with pytest.raises(TypeError, match="Invalid object type for `state_dict`"): mlflow.pytorch.save_state_dict(not_state_dict, model_path) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_state_dict(sequential_model, data): artifact_path = "model" state_dict = sequential_model.state_dict() with mlflow.start_run(): mlflow.pytorch.log_state_dict(state_dict, artifact_path) state_dict_uri = mlflow.get_artifact_uri(artifact_path) loaded_state_dict = mlflow.pytorch.load_state_dict(state_dict_uri) assert state_dict_equal(loaded_state_dict, state_dict) model = get_sequential_model() model.load_state_dict(loaded_state_dict) np.testing.assert_array_almost_equal( _predict(model, data), _predict(sequential_model, data), decimal=4, ) @pytest.mark.large @pytest.mark.parametrize("scripted_model", [True, False]) def test_log_model_with_code_paths(sequential_model): artifact_path = "model" with mlflow.start_run(), mock.patch( "mlflow.pytorch._add_code_from_conf_to_system_path" ) as add_mock: mlflow.pytorch.log_model( sequential_model, artifact_path=artifact_path, code_paths=[__file__] ) model_uri = mlflow.get_artifact_uri(artifact_path) _compare_logged_code_paths(__file__, model_uri, mlflow.pytorch.FLAVOR_NAME) mlflow.pytorch.load_model(model_uri) add_mock.assert_called()
mlflow/mlflow
tests/pytorch/test_pytorch_model_export.py
Python
apache-2.0
45,474
from setuptools import setup setup(name='servicemanager', version='0.0.30', description='A python tool to manage developing and testing with lots of microservices', url='https://github.com/hmrc/service-manager', author='vaughansharman', license='Apache Licence 2.0', packages=['servicemanager', 'servicemanager.actions', 'servicemanager.server', 'servicemanager.service', 'servicemanager.thirdparty'], install_requires=['requests==2.8.1','pymongo==2.6.3','bottle==0.12.4','pytest==2.5.2','argcomplete==0.8.1'], scripts=['bin/sm', 'bin/smserver'], zip_safe=False)
andrewgee/service-manager
setup.py
Python
apache-2.0
619
#!/usr/bin/env python # Copyright 2013 Signal Analysis and Interpretation Laboratory, # University of Southern California # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Command line tool to test KaldiContext operations. """ import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), "..")) from time import time from context import KaldiContext def main(): kaldiDir = "/usr/skiptest/kaldi-trunk" # parameters to test sampleFreq = 16000 transTest = "/usr/skiptest/test_small/text" wavTest = "/usr/skiptest/test_small/wav.scp" utt2spkTest = "/usr/skiptest/test_small/utt2spk" spk2uttTest = "/usr/skiptest/test_small/spk2utt" newLm = "/usr/skiptest/skip/lm_bg5k.arpa" # parameters for creating new graphs and models newTransTrain = "/usr/skiptest/train_si84/text" newPhones = "/usr/skiptest/phones.txt" newWords = "/usr/skiptest/words.txt" newLexicon = "/usr/skiptest/lexicon.txt" newPhonesAlign = "/usr/skiptest/phones_align.txt" newWordsAlign = newWords newLexiconAlign = "/usr/skiptest/lexicon_align.txt" newPhonesDisambig = "/usr/skiptest/phones_disambig.txt" newWordsDisambig = newWords newLexiconDisambig = "/usr/skiptest/lexicon_disambig.txt" # parameters for using existing graphs and models exPhones = "/usr/skiptest/existing/phones.txt" exWords = "/usr/skiptest/existing/words.txt" exLexicon = "/usr/skiptest/existing/lexicon.int" exLexFst = "/usr/skiptest/existing/L.fst" exPhonesAlign = "/usr/skiptest/existing/phones_disambig.txt" exWordsAlign = exWords exLexFstAlign = "/usr/skiptest/existing/L_align.fst" exPhonesDisambig = "/usr/skiptest/existing/phones_disambig.txt" exWordsDisambig = exWords exLexFstDisambig = "/usr/skiptest/existing/L_disambig.fst" exHCLG = "/usr/skiptest/existing/HCLG_mono.fst" # exHCLG = "/usr/skiptest/existing/HCLG_tri.fst" exMdlFile = "/usr/skiptest/existing/mono.mdl" exTreeFile = "/usr/skiptest/existing/mono.tree" contextSize = 1 centerPos = 0 # exMdlFile = "/usr/skiptest/existing/tri.mdl" # exTreeFile = "/usr/skiptest/existing/tri.tree" # contextSize = 3 # centerPos = 1 # ======= EXISTING CONTEXT TEST ====================================== context = KaldiContext("ExistingContext", kaldiDir) print "Adding lexicon FSTs..." t0 = time() L = context.addL(exLexFst, exPhones, exWords) L_align = context.addL(exLexFstAlign, exPhonesAlign, exWordsAlign) L_disambig = context.addL(exLexFstDisambig, exPhonesDisambig, exWordsDisambig) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Adding gmm..." t0 = time() mdl = context.addGMM(exMdlFile, exTreeFile) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Adding decoding graph..." t0 = time() HCLG = context.addHCLG(exHCLG) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Computing test wave features..." t0 = time() feats = context.makeFeatures(wavTest, samplefreq=sampleFreq, utt2spk=utt2spkTest, spk2utt=spk2uttTest) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Generating nbest hypotheses..." t0 = time() hyp = context.decodeNbest(10, feats, HCLG, exWords, exLexicon, mdl, L_align) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Decoding test wave features..." t0 = time() hyp = context.decode(feats, HCLG, exWords, mdl, L_align) print "Done in {0:0.2f} seconds.".format(time() - t0) print with open(hyp.filename) as f: for line in f: print line with open(hyp.wordlens) as f: for line in f: print line print "Aligning test wave features to test transcripts..." t0 = time() hyp_ali = context.align(feats, transTest, L, L_align, mdl) print "Done in {0:0.2f} seconds.".format(time() - t0) print with open(hyp_ali.wordlens) as f: for line in f: print line # ======= NEW CONTEXT TEST =========================================== context = KaldiContext("NewContext", kaldiDir) print "Creating new lexicon FSTs..." t0 = time() L = context.makeL(newPhones, newWords, newLexicon) L_align = context.makeL(newPhonesAlign, newWordsAlign, newLexiconAlign) L_disambig = context.makeL(newPhonesDisambig, newWordsDisambig, newLexiconDisambig) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Creating new grammar..." t0 = time() G = context.makeGArpa(newWords, newLm) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Adding existing gmm..." t0 = time() mdl = context.addGMM(exMdlFile, exTreeFile) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Creating new decoding graph..." t0 = time() HCLG = context.makeHCLG(L_disambig, G, mdl, contextsize=contextSize, centralposition=centerPos) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Computing test wave features..." t0 = time() feats = context.makeFeatures(wavTest, samplefreq=sampleFreq, utt2spk=utt2spkTest, spk2utt=spk2uttTest) print "Done in {0:0.2f} seconds.".format(time() - t0) print print "Decoding test wave features..." t0 = time() hyp = context.decode(feats, HCLG, newWords, mdl, L_align) print "Done in {0:0.2f} seconds.".format(time() - t0) print with open(hyp.filename) as f: for line in f: print line with open(hyp.wordlens) as f: for line in f: print line print "Aligning test wave features to test transcripts..." t0 = time() hyp_ali = context.align(feats, transTest, L, L_align, mdl) print "Done in {0:0.2f} seconds.".format(time() - t0) print with open(hyp_ali.wordlens) as f: for line in f: print line if __name__ == "__main__": main()
mrfalcone/skip
tool.py
Python
apache-2.0
6,222
"""The Panasonic Viera integration.""" import asyncio from functools import partial import logging from urllib.request import URLError from panasonic_viera import EncryptionRequired, Keys, RemoteControl, SOAPError import voluptuous as vol from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON import homeassistant.helpers.config_validation as cv from homeassistant.helpers.script import Script from .const import ( ATTR_REMOTE, CONF_APP_ID, CONF_ENCRYPTION_KEY, CONF_ON_ACTION, DEFAULT_NAME, DEFAULT_PORT, DOMAIN, ) _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_ON_ACTION): cv.SCRIPT_SCHEMA, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) PLATFORMS = [MEDIA_PLAYER_DOMAIN] async def async_setup(hass, config): """Set up Panasonic Viera from configuration.yaml.""" if DOMAIN not in config: return True for conf in config[DOMAIN]: hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=conf ) ) return True async def async_setup_entry(hass, config_entry): """Set up Panasonic Viera from a config entry.""" panasonic_viera_data = hass.data.setdefault(DOMAIN, {}) config = config_entry.data host = config[CONF_HOST] port = config[CONF_PORT] on_action = config[CONF_ON_ACTION] if on_action is not None: on_action = Script(hass, on_action, config[CONF_NAME], DOMAIN) params = {} if CONF_APP_ID in config and CONF_ENCRYPTION_KEY in config: params["app_id"] = config[CONF_APP_ID] params["encryption_key"] = config[CONF_ENCRYPTION_KEY] remote = Remote(hass, host, port, on_action, **params) await remote.async_create_remote_control(during_setup=True) panasonic_viera_data[config_entry.entry_id] = {ATTR_REMOTE: remote} for component in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, component) ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(config_entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) return unload_ok class Remote: """The Remote class. It stores the TV properties and the remote control connection itself.""" def __init__( self, hass, host, port, on_action=None, app_id=None, encryption_key=None, ): """Initialize the Remote class.""" self._hass = hass self._host = host self._port = port self._on_action = on_action self._app_id = app_id self._encryption_key = encryption_key self.state = None self.available = False self.volume = 0 self.muted = False self.playing = True self._control = None async def async_create_remote_control(self, during_setup=False): """Create remote control.""" control_existed = self._control is not None try: params = {} if self._app_id and self._encryption_key: params["app_id"] = self._app_id params["encryption_key"] = self._encryption_key self._control = await self._hass.async_add_executor_job( partial(RemoteControl, self._host, self._port, **params) ) self.state = STATE_ON self.available = True except (TimeoutError, URLError, SOAPError, OSError) as err: if control_existed or during_setup: _LOGGER.debug("Could not establish remote connection: %s", err) self._control = None self.state = STATE_OFF self.available = self._on_action is not None except Exception as err: # pylint: disable=broad-except if control_existed or during_setup: _LOGGER.exception("An unknown error occurred: %s", err) self._control = None self.state = STATE_OFF self.available = self._on_action is not None async def async_update(self): """Update device data.""" if self._control is None: await self.async_create_remote_control() return await self._handle_errors(self._update) def _update(self): """Retrieve the latest data.""" self.muted = self._control.get_mute() self.volume = self._control.get_volume() / 100 async def async_send_key(self, key): """Send a key to the TV and handle exceptions.""" try: key = getattr(Keys, key) except (AttributeError, TypeError): key = getattr(key, "value", key) await self._handle_errors(self._control.send_key, key) async def async_turn_on(self, context): """Turn on the TV.""" if self._on_action is not None: await self._on_action.async_run(context=context) self.state = STATE_ON elif self.state != STATE_ON: await self.async_send_key(Keys.power) self.state = STATE_ON async def async_turn_off(self): """Turn off the TV.""" if self.state != STATE_OFF: await self.async_send_key(Keys.power) self.state = STATE_OFF await self.async_update() async def async_set_mute(self, enable): """Set mute based on 'enable'.""" await self._handle_errors(self._control.set_mute, enable) async def async_set_volume(self, volume): """Set volume level, range 0..1.""" volume = int(volume * 100) await self._handle_errors(self._control.set_volume, volume) async def async_play_media(self, media_type, media_id): """Play media.""" _LOGGER.debug("Play media: %s (%s)", media_id, media_type) await self._handle_errors(self._control.open_webpage, media_id) async def _handle_errors(self, func, *args): """Handle errors from func, set available and reconnect if needed.""" try: return await self._hass.async_add_executor_job(func, *args) except EncryptionRequired: _LOGGER.error( "The connection couldn't be encrypted. Please reconfigure your TV" ) except (TimeoutError, URLError, SOAPError, OSError): self.state = STATE_OFF self.available = self._on_action is not None except Exception as err: # pylint: disable=broad-except _LOGGER.exception("An unknown error occurred: %s", err) self.state = STATE_OFF self.available = self._on_action is not None
tchellomello/home-assistant
homeassistant/components/panasonic_viera/__init__.py
Python
apache-2.0
7,555
""" Test that the debugger can call a *really* new function. """ import os import lldb import lldbsuite.test.lldbplatformutil as lldbplatformutil import lldbsuite.test.lldbutil as lldbutil from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * def getOSName(os): if os == 'macosx': return 'macOS' if os == 'ios': return 'iOS' if os == 'tvos': return 'tvOS' if os == 'watchos': return 'watchOS' return os def getArch(os): if os == 'macosx': return 'x86_64' if os == 'ios': return 'arm64' if os == 'tvos': return 'arm64' if os == 'watchos': return 'armv7k' return os def getTriple(os, version): return getArch(os) + '-apple-' + os + version def getOlderVersion(major, minor): if minor != 0: return '%d.%d' % (major, minor-1) return '%d.%d' % (major-1, minor) class TestAvailability(TestBase): mydir = TestBase.compute_mydir(__file__) NO_DEBUG_INFO_TESTCASE = True def setUp(self): # Call super's setUp(). TestBase.setUp(self) @skipIf(oslist=['linux', 'windows']) def testAvailability(self): platform_name = lldbplatformutil.getPlatform() os_name = getOSName(platform_name) platform = lldb.DBG.GetSelectedPlatform() major = platform.GetOSMajorVersion() minor = platform.GetOSMinorVersion() version = '%d.%d'%(major, minor) program = """ @available(%s %s, *) func f() {} // --------------------------------------------------------------------- // Method context. // --------------------------------------------------------------------- class C1 { func method() { print("in method") // break_1 } } C1().method() // break_0 // --------------------------------------------------------------------- // Generic method context. // --------------------------------------------------------------------- class C2 { func method<T>(_ t: T) { print("in method") // break_2 } } C2().method(0) // --------------------------------------------------------------------- // Method in generic class context. // --------------------------------------------------------------------- class C3<T> { func method() { print("in method") // break_3 } } C3<Int>().method() // --------------------------------------------------------------------- // Generic method in generic class context. // --------------------------------------------------------------------- class C4<U> { func method<V>(_ v: V) { print("in method") // break_4 } } C4<Int>().method(0) // --------------------------------------------------------------------- // Function context. // --------------------------------------------------------------------- func f1() { print("in function") // break_5 } f1() // --------------------------------------------------------------------- // Generic function context. // --------------------------------------------------------------------- func f2<T>(_ t: T) { print("in function") // break_6 } f2(0) // --------------------------------------------------------------------- // Top-level context. // --------------------------------------------------------------------- print("in top_level") // break_7 """ with open(self.getBuildArtifact("main.swift"), 'w') as main: main.write(program %(os_name, version)) self.build(dictionary={'TRIPLE': getTriple(platform_name, getOlderVersion(major, minor))}) source_spec = lldb.SBFileSpec("main.swift") (target, process, thread, brk0) = \ lldbutil.run_to_source_breakpoint(self, "break_0", source_spec) # Create breakpoints. breakpoints = [] for i in range(1, 8): breakpoints.append(target.BreakpointCreateBySourceRegex( 'break_%d'%i, lldb.SBFileSpec("main.swift"))) self.assertTrue(breakpoints[-1] and breakpoints[-1].GetNumLocations() >= 1, VALID_BREAKPOINT) for breakpoint in breakpoints: threads = lldbutil.continue_to_breakpoint(process, breakpoint) self.expect("p f()", "can call")
apple/swift-lldb
packages/Python/lldbsuite/test/lang/swift/availability/TestAvailability.py
Python
apache-2.0
4,201
# -*- coding:utf-8 -*- # # Copyright (C) 2012 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from command import PagedCommand from color import Coloring from git_refs import R_M class _Coloring(Coloring): def __init__(self, config): Coloring.__init__(self, config, "status") class Info(PagedCommand): common = True helpSummary = "Get info on the manifest branch, current branch or unmerged branches" helpUsage = "%prog [-dl] [-o [-b]] [<project>...]" def _Options(self, p): p.add_option('-d', '--diff', dest='all', action='store_true', help="show full info and commit diff including remote branches") p.add_option('-o', '--overview', dest='overview', action='store_true', help='show overview of all local commits') p.add_option('-b', '--current-branch', dest="current_branch", action="store_true", help="consider only checked out branches") p.add_option('-l', '--local-only', dest="local", action="store_true", help="Disable all remote operations") def Execute(self, opt, args): self.out = _Coloring(self.manifest.globalConfig) self.heading = self.out.printer('heading', attr = 'bold') self.headtext = self.out.nofmt_printer('headtext', fg = 'yellow') self.redtext = self.out.printer('redtext', fg = 'red') self.sha = self.out.printer("sha", fg = 'yellow') self.text = self.out.nofmt_printer('text') self.dimtext = self.out.printer('dimtext', attr = 'dim') self.opt = opt manifestConfig = self.manifest.manifestProject.config mergeBranch = manifestConfig.GetBranch("default").merge manifestGroups = (manifestConfig.GetString('manifest.groups') or 'all,-notdefault') self.heading("Manifest branch: ") if self.manifest.default.revisionExpr: self.headtext(self.manifest.default.revisionExpr) self.out.nl() self.heading("Manifest merge branch: ") self.headtext(mergeBranch) self.out.nl() self.heading("Manifest groups: ") self.headtext(manifestGroups) self.out.nl() self.printSeparator() if not opt.overview: self.printDiffInfo(args) else: self.printCommitOverview(args) def printSeparator(self): self.text("----------------------------") self.out.nl() def printDiffInfo(self, args): # We let exceptions bubble up to main as they'll be well structured. projs = self.GetProjects(args) for p in projs: self.heading("Project: ") self.headtext(p.name) self.out.nl() self.heading("Mount path: ") self.headtext(p.worktree) self.out.nl() self.heading("Current revision: ") self.headtext(p.revisionExpr) self.out.nl() localBranches = list(p.GetBranches().keys()) self.heading("Local Branches: ") self.redtext(str(len(localBranches))) if len(localBranches) > 0: self.text(" [") self.text(", ".join(localBranches)) self.text("]") self.out.nl() if self.opt.all: self.findRemoteLocalDiff(p) self.printSeparator() def findRemoteLocalDiff(self, project): #Fetch all the latest commits if not self.opt.local: project.Sync_NetworkHalf(quiet=True, current_branch_only=True) logTarget = R_M + self.manifest.manifestProject.config.GetBranch("default").merge bareTmp = project.bare_git._bare project.bare_git._bare = False localCommits = project.bare_git.rev_list( '--abbrev=8', '--abbrev-commit', '--pretty=oneline', logTarget + "..", '--') originCommits = project.bare_git.rev_list( '--abbrev=8', '--abbrev-commit', '--pretty=oneline', ".." + logTarget, '--') project.bare_git._bare = bareTmp self.heading("Local Commits: ") self.redtext(str(len(localCommits))) self.dimtext(" (on current branch)") self.out.nl() for c in localCommits: split = c.split() self.sha(split[0] + " ") self.text(" ".join(split[1:])) self.out.nl() self.printSeparator() self.heading("Remote Commits: ") self.redtext(str(len(originCommits))) self.out.nl() for c in originCommits: split = c.split() self.sha(split[0] + " ") self.text(" ".join(split[1:])) self.out.nl() def printCommitOverview(self, args): all_branches = [] for project in self.GetProjects(args): br = [project.GetUploadableBranch(x) for x in project.GetBranches()] br = [x for x in br if x] if self.opt.current_branch: br = [x for x in br if x.name == project.CurrentBranch] all_branches.extend(br) if not all_branches: return self.out.nl() self.heading('Projects Overview') project = None for branch in all_branches: if project != branch.project: project = branch.project self.out.nl() self.headtext(project.relpath) self.out.nl() commits = branch.commits date = branch.date self.text('%s %-33s (%2d commit%s, %s)' % ( branch.name == project.CurrentBranch and '*' or ' ', branch.name, len(commits), len(commits) != 1 and 's' or '', date)) self.out.nl() for commit in commits: split = commit.split() self.text('{0:38}{1} '.format('','-')) self.sha(split[0] + " ") self.text(" ".join(split[1:])) self.out.nl()
gabbayo/git-repo
subcmds/info.py
Python
apache-2.0
6,084
from django.contrib.sites.models import Site from settings import * def get_domain(port=8010): try: #domain = Site.objects.all()[0].domain domain = Site.objects.get(id=SITE_ID).domain if 'localhost' in domain: domain = 'localhost:%s' %port if DEFAULT_PROTOCOL: domain = DEFAULT_PROTOCOL + domain else: domain = 'https://' + domain except: domain = '..' #print(domain) return domain
Ecotrust/COMPASS
mp/utils.py
Python
apache-2.0
500
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Latent-Adversarial Generator. """ import functools import os import numpy as np import tensorflow as tf from absl import app from absl import flags from easydict import EasyDict from libml import data, layers, utils from libml.layers import conv2d_scaled from libml.train import TrainSchedule from libml.train_sr import SRESPro FLAGS = flags.FLAGS class LAG(SRESPro): def stage_scopes(self, stage): return (['global_step'] + ['opt_%d/' % x for x in range(stage + 1)] + ['sres/stage_%d/' % x for x in range(stage + 1)] + ['disc/stage_%d/' % x for x in range(stage + 1)]) def sres(self, x0, colors, lod, lod_min, lod_start, lod_stop, blocks, lfilters, ema=None): getter = functools.partial(utils.getter_ema, ema) if ema else None scope_args = dict(custom_getter=getter, reuse=tf.AUTO_REUSE) lrelu_args = dict(activation=tf.nn.leaky_relu) relu_args = dict(activation=tf.nn.relu) with tf.variable_scope('sres', **scope_args): with tf.variable_scope('stage_0', **scope_args): y = conv2d_scaled(x0, lfilters[0], 3) for x in range(blocks): dy = conv2d_scaled(y, lfilters[0], 3, **relu_args) y += conv2d_scaled(dy, lfilters[0], 3) / blocks rgb = [] for stage in range(lod_min, lod_stop + 1): with tf.variable_scope('stage_%d' % stage, **scope_args): y = layers.upscale2d(y) y = conv2d_scaled(y, lfilters[stage], 3, **lrelu_args) y = conv2d_scaled(y, lfilters[stage], 3, **lrelu_args) with tf.variable_scope('to_rgb', **scope_args): rgb.append(conv2d_scaled(y, colors, 3)) im = rgb.pop(0) for stage in range(lod_min + 1, lod_start + 1): im = layers.upscale2d(im) + rgb.pop(0) if lod_start == lod_stop: return im return layers.upscale2d(im) + (lod - lod_start) * rgb[-1] def disc(self, x0, x0_lores_delta, lod, lod_min, lod_start, lod_stop, blocks, lfilters): leaky_relu = dict(activation=tf.nn.leaky_relu) def from_rgb(x, stage): with tf.variable_scope('from_rgb', reuse=tf.AUTO_REUSE): return conv2d_scaled(x, lfilters[stage], 3, **leaky_relu) with tf.variable_scope('disc', reuse=tf.AUTO_REUSE): y = None for stage in range(lod_stop, lod_min - 1, -1): with tf.variable_scope('stage_%d' % stage, reuse=tf.AUTO_REUSE): if stage == lod_stop: y = from_rgb(x0, stage) elif stage == lod_start: y0 = from_rgb(layers.downscale2d(x0), stage) y = y0 + (lod - lod_start) * y else: y += from_rgb(layers.downscale2d(x0, 1 << (lod_stop - stage)), stage) y = conv2d_scaled(y, lfilters[stage], 3, **leaky_relu) y = layers.space_to_channels(y) y = conv2d_scaled(y, lfilters[stage - 1], 3, **leaky_relu) y = tf.concat([y, x0_lores_delta], axis=1) with tf.variable_scope('stage_0', reuse=tf.AUTO_REUSE): for x in range(blocks): y = conv2d_scaled(y, lfilters[0], 3, **leaky_relu) center = np.ones(lfilters[0], 'f') center[::2] = -1 center = tf.constant(center, shape=[1, lfilters[0], 1, 1]) return y * center def model(self, dataset, lod_min, lod_max, lod_start, lod_stop, scale, blocks, filters, filters_min, wass_target, weight_avg, mse_weight, noise_dim, ttur, total_steps, **kwargs): assert lod_min == 1 del kwargs x = tf.placeholder(tf.float32, [None, dataset.colors, dataset.height, dataset.width], 'x') y = tf.placeholder(tf.float32, [None, dataset.colors, None, None], 'y') noise = tf.placeholder(tf.float32, [], 'noise') lod = tf.placeholder(tf.float32, [], 'lod') lfilters = [max(filters_min, filters >> stage) for stage in range(lod_max + 1)] disc = functools.partial(self.disc, lod=lod, lod_min=lod_min, lod_start=lod_start, lod_stop=lod_stop, blocks=blocks, lfilters=lfilters) sres = functools.partial(self.sres, lod=lod, lod_min=lod_min, lod_start=lod_start, lod_stop=lod_stop, blocks=blocks, lfilters=lfilters, colors=dataset.colors) ema = tf.train.ExponentialMovingAverage(decay=weight_avg) if weight_avg > 0 else None def pad_shape(x): return [tf.shape(x)[0], noise_dim, tf.shape(x)[2], tf.shape(x)[3]] def straight_through_round(x, r=127.5 / 4): xr = tf.round(x * r) / r return tf.stop_gradient(xr - x) + x def sres_op(y, noise): eps = tf.random_normal(pad_shape(y), stddev=noise) sres_op = sres(tf.concat([y, eps], axis=1), ema=ema) sres_op = layers.upscale2d(sres_op, 1 << (lod_max - lod_stop)) return sres_op def tower(x): lores = self.downscale(x) real = layers.downscale2d(x, 1 << (lod_max - lod_stop)) if lod_start != lod_stop: real = layers.blend_resolution(layers.remove_details2d(real), real, lod - lod_start) eps = tf.random_normal(pad_shape(lores)) fake = sres(tf.concat([lores, tf.zeros_like(eps)], axis=1)) fake_eps = sres(tf.concat([lores, eps], axis=1)) lores_fake = self.downscale(layers.upscale2d(fake, 1 << (lod_max - lod_stop))) lores_fake_eps = self.downscale(layers.upscale2d(fake_eps, 1 << (lod_max - lod_stop))) latent_real = disc(real, straight_through_round(tf.abs(lores - lores))) latent_fake = disc(fake, straight_through_round(tf.abs(lores - lores_fake))) latent_fake_eps = disc(fake_eps, straight_through_round(tf.abs(lores - lores_fake_eps))) # Gradient penalty. mix = tf.random_uniform([tf.shape(real)[0], 1, 1, 1], 0., 1.) mixed = real + mix * (fake_eps - real) mixed = layers.upscale2d(mixed, 1 << (lod_max - lod_stop)) mixed_round = straight_through_round(tf.abs(lores - self.downscale(mixed))) mixdown = layers.downscale2d(mixed, 1 << (lod_max - lod_stop)) grad = tf.gradients(tf.reduce_sum(tf.reduce_mean(disc(mixdown, mixed_round), 1)), [mixed])[0] grad_norm = tf.sqrt(tf.reduce_mean(tf.square(grad), axis=[1, 2, 3]) + 1e-8) loss_dreal = -tf.reduce_mean(latent_real) loss_dfake = tf.reduce_mean(latent_fake_eps) loss_gfake = -tf.reduce_mean(latent_fake_eps) loss_gmse = tf.losses.mean_squared_error(latent_real, latent_fake) loss_gp = 10 * tf.reduce_mean(tf.square(grad_norm - wass_target)) * wass_target ** -2 mse_ema = tf.losses.mean_squared_error(sres(tf.concat([lores, tf.zeros_like(eps)], axis=1), ema=ema), real) return loss_gmse, loss_gfake, loss_dreal, loss_dfake, loss_gp, mse_ema loss_gmse, loss_gfake, loss_dreal, loss_dfake, loss_gp, mse_ema = utils.para_mean(tower, x) loss_disc = loss_dreal + loss_dfake + loss_gp loss_gen = loss_gfake + mse_weight * loss_gmse utils.HookReport.log_tensor(loss_dreal, 'dreal') utils.HookReport.log_tensor(loss_dfake, 'dfake') utils.HookReport.log_tensor(loss_gp, 'gp') utils.HookReport.log_tensor(loss_gfake, 'gfake') utils.HookReport.log_tensor(loss_gmse, 'gmse') utils.HookReport.log_tensor(tf.sqrt(mse_ema) * 127.5, 'rmse_ema') utils.HookReport.log_tensor(lod, 'lod') update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_d, train_g = [], [] global_arg = dict(global_step=tf.train.get_global_step()) for stage in range(lod_stop + 1): g_arg = global_arg if stage == 0 else {} with tf.variable_scope('opt_%d' % stage): train_d.append(tf.train.AdamOptimizer(FLAGS.lr, 0, 0.99).minimize( loss_disc * ttur, var_list=utils.model_vars('disc/stage_%d' % stage), colocate_gradients_with_ops=True)) train_g.append(tf.train.AdamOptimizer(FLAGS.lr, 0, 0.99).minimize( loss_gen, var_list=utils.model_vars('sres/stage_%d' % stage), colocate_gradients_with_ops=True, **g_arg)) if ema is not None: ema_op = ema.apply(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'sres')) train_op = tf.group(*train_d, *train_g, ema_op) else: train_op = tf.group(*train_d, *train_g) return EasyDict(x=x, y=y, noise=noise, lod=lod, train_op=train_op, downscale_op=self.downscale(x), upscale_op=layers.upscale2d(y, self.scale, order=layers.NCHW), sres_op=sres_op(y, noise), eval_op=sres_op(self.downscale(x), 0)) def main(argv): del argv # Unused. dataset = data.get_dataset(FLAGS.dataset) schedule = TrainSchedule(2, FLAGS.scale, FLAGS.transition_kimg, FLAGS.training_kimg, FLAGS.total_kimg) if FLAGS.memtest: schedule.schedule = schedule.schedule[-2:] model = LAG( os.path.join(FLAGS.train_dir, dataset.name), lr=FLAGS.lr, batch=FLAGS.batch, lod_min=1, scale=FLAGS.scale, downscaler=FLAGS.downscaler, blocks=FLAGS.blocks, filters=FLAGS.filters, filters_min=FLAGS.filters_min, mse_weight=FLAGS.mse_weight, noise_dim=FLAGS.noise_dim, transition_kimg=FLAGS.transition_kimg, training_kimg=FLAGS.training_kimg, ttur=FLAGS.ttur, wass_target=FLAGS.wass_target, weight_avg=FLAGS.weight_avg) if FLAGS.reset: model.reset_files() model.train(dataset, schedule) if __name__ == '__main__': utils.setup_tf() flags.DEFINE_integer('blocks', 8, 'Number of residual layers in residual networks.') flags.DEFINE_integer('filters', 256, 'Filter size of first convolution.') flags.DEFINE_integer('filters_min', 64, 'Minimum filter size of convolution.') flags.DEFINE_integer('noise_dim', 64, 'Number of noise dimensions to concat to lores.') flags.DEFINE_integer('transition_kimg', 2048, 'Number of images during transition (in kimg).') flags.DEFINE_integer('training_kimg', 2048, 'Number of images during between transitions (in kimg).') flags.DEFINE_integer('ttur', 4, 'How much faster D is trained.') flags.DEFINE_float('wass_target', 1, 'Wasserstein gradient penalty target value.') flags.DEFINE_float('weight_avg', 0.999, 'Weight averaging.') flags.DEFINE_float('mse_weight', 10, 'Amount of mean square error loss for G.') flags.DEFINE_bool('reset', False, 'Retrain from the start.') flags.DEFINE_bool('memtest', False, 'Test if the parameters fit in memory (start at last stage).') FLAGS.set_default('batch', 16) FLAGS.set_default('lr', 0.001) FLAGS.set_default('total_kimg', 0) app.run(main)
google-research/lag
lag.py
Python
apache-2.0
12,034
''' MAP Client Plugin ''' __version__ = '0.1.0' __author__ = 'Hugh Sorby' __stepname__ = 'Zinc Model Source' __location__ = 'https://github.com/mapclient-plugins/zincdatasourcestep' # import class that derives itself from the step mountpoint. from mapclientplugins.zincmodelsourcestep import resources_rc from mapclientplugins.zincmodelsourcestep import step
mapclient-plugins/zincmodelsourcestep
mapclientplugins/zincmodelsourcestep/__init__.py
Python
apache-2.0
361
def sumInRange(nums, queries): dic = {} su = 0 for i, e in enumerate(nums): su += e dic["0_{}".format(i)] = su sum_r = 0 for i in queries: a, b = i if '{}_{}'.format(a,b) in dic: sum_r += dic['{}_{}'.format(a,b)] else: t = dic['0_{}'.format(b)] - dic['0_{}'.format(a-1)] dic['{}_{}'.format(a,b)] = t sum_r += t return sum_r % 1000000007
emirot/codefights
interviewPractice/sumInRange.py
Python
apache-2.0
472
import time from pprint import pformat import polling2 import pytest from domino import Domino from domino.helpers import domino_is_reachable from requests.exceptions import RequestException @pytest.fixture def mock_job_start_blocking_setup(requests_mock, dummy_hostname): """ Create mock replies to the chain of calls required before checking job status """ # Mock the /version API endpoint (GET) requests_mock.get(f"{dummy_hostname}/version", json={"version": "9.9.9"}) # Mock /findProjectByOwnerAndName API endpoint (GET) project_endpoint = "v4/gateway/projects/findProjectByOwnerAndName" project_query = "ownerName=anyuser&projectName=anyproject" requests_mock.get(f"{dummy_hostname}/{project_endpoint}?{project_query}", json={}) # Mock the jobs/start API endpoint (POST) and return run with ID 123 jobs_start_endpoint = "v4/jobs/start" requests_mock.post(f"{dummy_hostname}/{jobs_start_endpoint}", json={"id": "123"}) # Mock STDOUT for run with ID 123 stdout_endpoint = "v1/projects/anyuser/anyproject/run/123/stdout" requests_mock.get(f"{dummy_hostname}/{stdout_endpoint}", json={"stdout": "whatever"}) yield @pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup") def test_job_status_completes_with_default_params(requests_mock, dummy_hostname): """ Confirm that the happy path default case passes (no exceptions thrown) """ # Mock a typical response from the jobs status API endpoint (GET) jobs_status_endpoint = "v4/jobs/123" requests_mock.get(f"{dummy_hostname}/{jobs_status_endpoint}", json={"statuses": {"isCompleted": True, "executionStatus": "whatever"}}) d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever") job_status = d.job_start_blocking(command="foo.py", poll_freq=1, max_poll_time=1) assert job_status['statuses']['isCompleted'] is True @pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup") def test_job_status_ignores_RequestException_and_times_out(requests_mock, dummy_hostname): """ Test that the default behavior is to simply ignore RequestException being thrown. (In this case, timing out via polling2.TimeoutException is expected.) """ # Force the jobs status API endpoint to throw a RequestException when called jobs_status_endpoint = "v4/jobs/123" requests_mock.get(f"{dummy_hostname}/{jobs_status_endpoint}", exc=RequestException) d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever") with pytest.raises(polling2.TimeoutException): d.job_start_blocking(command="foo.py", poll_freq=1, max_poll_time=1) @pytest.mark.usefixtures("clear_token_file_from_env", "mock_job_start_blocking_setup") def test_job_status_without_ignoring_exceptions(requests_mock, dummy_hostname): """ Test that ignore_exceptions can be overridden by passing in an empty tuple. The call should fail immediately with RequestException. """ # Force the jobs status API endpoint to throw a RequestException when called jobs_status_endpoint = "v4/jobs/123" requests_mock.get(f"{dummy_hostname}/{jobs_status_endpoint}", exc=RequestException) d = Domino(host=dummy_hostname, project="anyuser/anyproject", api_key="whatever") with pytest.raises(RequestException): d.job_start_blocking(command="foo.py", ignore_exceptions=()) @pytest.mark.skipif(not domino_is_reachable(), reason="No access to a live Domino deployment") def test_job_start_blocking(default_domino_client): """ Confirm that we can start a job using the v4 API, and block until it succeeds. """ job = default_domino_client.job_start_blocking(command="main.py") assert job["statuses"]["executionStatus"] == "Succeeded" @pytest.mark.skipif(not domino_is_reachable(), reason="No access to a live Domino deployment") def test_runs_list(default_domino_client): """ Confirm that the v1 API endpoint to list jobs returns a list. """ runs = default_domino_client.runs_list() assert runs["objectType"] == "list", f"runs_list returned unexpected result:\n{pformat(runs)}" assert isinstance(runs["data"], list), \ f"runs_list returned unexpected result:\n{pformat(runs)}" @pytest.mark.skipif(not domino_is_reachable(), reason="No access to a live Domino deployment") def test_queue_job(default_domino_client): """ Queue a job, and then poll until the job completes (timeout at 240 seconds). """ job = default_domino_client.job_start("main.py") remaining_polling_seconds = 240 while remaining_polling_seconds > 0: job_status = default_domino_client.job_status(job['id']) if not job_status['statuses']['isCompleted']: time.sleep(3) remaining_polling_seconds -= 3 print(f"Job {job['id']} has not completed.") continue exec_status = job_status['statuses']['executionStatus'] try: assert exec_status == "Succeeded" print(f"Job {job['id']} succeeded.") break except AssertionError: print(f"Job failed: {pformat(job_status)}") raise else: pytest.fail(f"Job took too long to complete: {pformat(job_status)}")
dominodatalab/python-domino
tests/test_jobs.py
Python
apache-2.0
5,342
#!/usr/bin/env python import paramiko import time from getpass import getpass pynet_rtr2 = { 'device':'Cisco 881', 'ip_addr':'184.105.247.71', 'username':'pyclass', 'password':'', 'snmp_port':161, 'ssh_port':22, } def ssh_connect(): pynet_rtr2['password'] = getpass() remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: remote_conn_pre.connect(pynet_rtr2['ip_addr'], \ username=pynet_rtr2['username'], \ password=pynet_rtr2['password'], \ look_for_keys=False, \ allow_agent=False, \ port=pynet_rtr2['ssh_port']) except paramiko.ssh_exception.AuthenticationException: print "Authentication Failure" return "AuthenticationException" return remote_conn_pre.invoke_shell() def ssh_send(command_list): for command in command_list: remote_conn.send(command + "\n") time.sleep(1) output = remote_conn.recv(5000) print output if __name__ == '__main__': remote_conn_pre = paramiko.SSHClient() remote_conn = ssh_connect() while(remote_conn == "AuthenticationException"): remote_conn = ssh_connect() ssh_send(["show running-config | include ^logging buffered"]) ssh_send(["configure terminal", "logging buffered 10001", "exit"]) ssh_send(["show running-config | include ^logging buffered"])
gerards/pynet_network_automation_course
week4/q2_paramiko.py
Python
apache-2.0
1,497
# Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilies for user defined functions. TFX-internal use only and experimental, no backwards compatibilty guarantees. """ import hashlib import os import re import shutil import struct import subprocess import sys import tempfile from typing import Any, Callable, Dict, List, Optional, Tuple from absl import logging from tfx.dsl.components.base import base_component from tfx.dsl.io import fileio from tfx.utils import import_utils from tfx.utils import io_utils # Key for module file path. _MODULE_FILE_KEY = 'module_file' # Key for module python path. _MODULE_PATH_KEY = 'module_path' # Ephemeral setup.py file name. _EPHEMERAL_SETUP_PY_FILE_NAME = '_tfx_generated_setup.py' # TODO(b/157155972): improve user code support. def get_fn(exec_properties: Dict[str, Any], fn_name: str) -> Callable[..., Any]: """Loads and returns user-defined function.""" logging.info('udf_utils.get_fn %r %r', exec_properties, fn_name) has_module_file = bool(exec_properties.get(_MODULE_FILE_KEY)) has_module_path = bool(exec_properties.get(_MODULE_PATH_KEY)) has_fn = bool(exec_properties.get(fn_name)) if has_module_path: module_path = exec_properties[_MODULE_PATH_KEY] return import_utils.import_func_from_module(module_path, fn_name) elif has_module_file: if has_fn: return import_utils.import_func_from_source( exec_properties[_MODULE_FILE_KEY], exec_properties[fn_name]) else: return import_utils.import_func_from_source( exec_properties[_MODULE_FILE_KEY], fn_name) elif has_fn: fn_path_split = exec_properties[fn_name].split('.') return import_utils.import_func_from_module('.'.join(fn_path_split[0:-1]), fn_path_split[-1]) else: raise ValueError( 'Neither module file or user function have been supplied in `exec_properties`.' ) def try_get_fn(exec_properties: Dict[str, Any], fn_name: str) -> Optional[Callable[..., Any]]: """Loads and returns user-defined function if exists.""" try: return get_fn(exec_properties, fn_name) except (ValueError, AttributeError): # ValueError: module file or user function is unset. # AttributeError: the function doesn't exist in the module. return None def _get_ephemeral_setup_py_contents(package_name: str, version_string: str, module_names: List[str]): return f"""import setuptools setuptools.setup( name={repr(package_name)}, version={repr(version_string)}, author='TFX User', author_email='nobody@example.com', description='Auto-generated TFX user code package.', py_modules={repr(module_names)}, classifiers=[], python_requires='>=3.6', ) """ def should_package_user_modules(): """Whether to package user modules in the current execution environment.""" if os.environ.get('UNSUPPORTED_DO_NOT_PACKAGE_USER_MODULES'): return False return True class UserModuleFilePipDependency(base_component._PipDependencyFuture): # pylint: disable=protected-access """Specification of a user module dependency.""" def __init__(self, component: base_component.BaseComponent, module_file_key: str, module_path_key: str): self.component = component self.module_file_key = module_file_key self.module_path_key = module_path_key def resolve(self, pipeline_root: str): # Package the given user module file as a Python wheel. module_file = self.component.spec.exec_properties[self.module_file_key] # Perform validation on the given `module_file`. if not module_file: return None elif not isinstance(module_file, str): # TODO(b/187753042): Deprecate and remove usage of RuntimeParameters for # `module_file` parameters and remove this code path. logging.warning( 'Module file %r for component %s is not a path string; ' 'skipping Python user module wheel packaging.', module_file, self.component) return None elif not fileio.exists(module_file): raise ValueError( 'Specified module file %r for component %s does not exist.' % (module_file, self.component)) # Perform validation on the `pipeline_root`. if not pipeline_root: logging.warning( 'No pipeline root provided; skipping Python user module ' 'wheel packaging for component %s.', self.component) return None pipeline_root_exists = fileio.exists(pipeline_root) if not pipeline_root_exists: fileio.makedirs(pipeline_root) # Perform packaging of the user module. dist_file_path, user_module_path = package_user_module_file( self.component.id, module_file, pipeline_root) # Set the user module key to point to a module in this wheel, and clear the # module path key before returning. self.component.spec.exec_properties[self.module_path_key] = user_module_path self.component.spec.exec_properties[self.module_file_key] = None return dist_file_path def add_user_module_dependency(component: base_component.BaseComponent, module_file_key: str, module_path_key: str) -> None: """Adds a module file dependency to the current component.""" dependency = UserModuleFilePipDependency(component, module_file_key, module_path_key) component._add_pip_dependency(dependency) # pylint: disable=protected-access def _get_version_hash(user_module_dir: str, source_files: List[str]) -> str: """Compute a version hash based on user module directory contents.""" source_files = sorted(source_files) h = hashlib.sha256() for source_file in source_files: source_file_name_bytes = source_file.encode('utf-8') h.update(struct.pack('>Q', len(source_file_name_bytes))) h.update(source_file_name_bytes) with open(os.path.join(user_module_dir, source_file), 'rb') as f: file_contents = f.read() h.update(struct.pack('>Q', len(file_contents))) h.update(file_contents) return h.hexdigest() def package_user_module_file(instance_name: str, module_path: str, pipeline_root: str) -> Tuple[str, str]: """Package the given user module file into a Python Wheel package. Args: instance_name: Name of the component instance, for creating a unique wheel package name. module_path: Path to the module file to be packaged. pipeline_root: Text Returns: dist_file_path: Path to the generated wheel file. user_module_path: Path for referencing the user module when stored as the _MODULE_PATH_KEY execution property. Format should be treated as opaque by the user. Raises: RuntimeError: When wheel building fails. """ module_path = os.path.abspath(io_utils.ensure_local(module_path)) if not module_path.endswith('.py'): raise ValueError('Module path %r is not a ".py" file.' % module_path) if not os.path.exists(module_path): raise ValueError('Module path %r does not exist.' % module_path) user_module_dir, module_file_name = os.path.split(module_path) user_module_name = re.sub(r'\.py$', '', module_file_name) source_files = [] # Discover all Python source files in this directory for inclusion. for file_name in os.listdir(user_module_dir): if file_name.endswith('.py'): source_files.append(file_name) module_names = [] for file_name in source_files: if file_name in (_EPHEMERAL_SETUP_PY_FILE_NAME, '__init__.py'): continue module_name = re.sub(r'\.py$', '', file_name) module_names.append(module_name) # Set up build directory. build_dir = tempfile.mkdtemp() for source_file in source_files: shutil.copyfile( os.path.join(user_module_dir, source_file), os.path.join(build_dir, source_file)) # Generate an ephemeral wheel for this module. logging.info( 'Generating ephemeral wheel package for %r (including modules: %s).', module_path, module_names) version_hash = _get_version_hash(user_module_dir, source_files) logging.info('User module package has hash fingerprint version %s.', version_hash) setup_py_path = os.path.join(build_dir, _EPHEMERAL_SETUP_PY_FILE_NAME) with open(setup_py_path, 'w') as f: f.write( _get_ephemeral_setup_py_contents('tfx-user-code-%s' % instance_name, '0.0+%s' % version_hash, module_names)) temp_dir = tempfile.mkdtemp() dist_dir = tempfile.mkdtemp() bdist_command = [ sys.executable, setup_py_path, 'bdist_wheel', '--bdist-dir', temp_dir, '--dist-dir', dist_dir ] logging.info('Executing: %s', bdist_command) try: subprocess.check_call(bdist_command, cwd=build_dir) except subprocess.CalledProcessError as e: raise RuntimeError('Failed to build wheel.') from e dist_files = os.listdir(dist_dir) if len(dist_files) != 1: raise RuntimeError( 'Unexpectedly found %d output files in wheel output directory %s.' % (len(dist_files), dist_dir)) build_dist_file_path = os.path.join(dist_dir, dist_files[0]) # Copy wheel file atomically to wheel staging directory. dist_wheel_directory = os.path.join(pipeline_root, '_wheels') dist_file_path = os.path.join(dist_wheel_directory, dist_files[0]) temp_dist_file_path = dist_file_path + '.tmp' fileio.makedirs(dist_wheel_directory) fileio.copy(build_dist_file_path, temp_dist_file_path, overwrite=True) fileio.rename(temp_dist_file_path, dist_file_path, overwrite=True) logging.info( ('Successfully built user code wheel distribution at %r; target user ' 'module is %r.'), dist_file_path, user_module_name) # Encode the user module key as a specification of a user module name within # a packaged wheel path. assert '@' not in user_module_name, ('Unexpected invalid module name: %s' % user_module_name) user_module_path = '%s@%s' % (user_module_name, dist_file_path) logging.info('Full user module path is %r', user_module_path) return dist_file_path, user_module_path def decode_user_module_key(user_module_key: str) -> Tuple[str, List[str]]: """Decode the given user module key into module path and pip dependencies.""" if user_module_key and '@' in user_module_key: user_module_name, dist_file_path = user_module_key.split('@', maxsplit=1) return user_module_name, [dist_file_path] else: return user_module_key, [] class TempPipInstallContext: """Context manager for wrapped code and subprocesses to use pip package.""" def __init__(self, pip_dependencies: List[str]): if not isinstance(pip_dependencies, list): raise ValueError('Expected list of dependencies, got %r instead.' % (pip_dependencies,)) self.pip_dependencies = pip_dependencies self.temp_directory = None def __enter__(self) -> 'TempPipInstallContext': if self.pip_dependencies: self.temp_directory = tempfile.mkdtemp() for dependency in self.pip_dependencies: install_to_temp_directory(dependency, temp_dir=self.temp_directory) sys.path = sys.path + [self.temp_directory] os.environ['PYTHONPATH'] = ':'.join(sys.path) return self def __exit__(self, *unused_exc_info): if self.pip_dependencies: sys.path = list(path for path in sys.path if path != self.temp_directory) os.environ['PYTHONPATH'] = ':'.join(sys.path) def install_to_temp_directory(pip_dependency: str, temp_dir: Optional[str] = None) -> str: """Install the given pip dependency specifier to a temporary directory. Args: pip_dependency: Path to a wheel file or a pip dependency specifier (e.g. "setuptools==18.0"). temp_dir: Path to temporary installation location (optional). Returns: Temporary directory where the package was installed, that should be added to the Python import path. """ logging.info('Installing %r to a temporary directory.', pip_dependency) if not temp_dir: temp_dir = tempfile.mkdtemp() install_command = [ sys.executable, '-m', 'pip', 'install', '--target', temp_dir, pip_dependency ] logging.info('Executing: %s', install_command) subprocess.check_call(install_command) logging.info('Successfully installed %r.', pip_dependency) return temp_dir
tensorflow/tfx
tfx/components/util/udf_utils.py
Python
apache-2.0
13,018
# -*- coding: utf-8 -*- import sys import os import urllib import webapp2 import json import logging import datetime import core_util from google.appengine.ext import ndb from google.appengine.ext import blobstore from google.appengine.ext.webapp import blobstore_handlers from google.appengine.api import images import littlecircle LITTLECIRCLE_THUMBNAIL_W=200 LITTLECIRCLE_THUMBNAIL_H=LITTLECIRCLE_THUMBNAIL_W*3/4 LITTLECIRCLE_PREVIEW_W=LITTLECIRCLE_THUMBNAIL_W*2 LITTLECIRCLE_PREVIEW_H=LITTLECIRCLE_THUMBNAIL_H*2 LITTLECIRCLE_IMG_Q=90 class UploadUrlHandler(webapp2.RequestHandler): def get(self): link = blobstore.create_upload_url('/upload') link = link[link.index('/') + 2:] link = link[link.index('/'):] self.response.headers['Content-Type'] = 'text/plain' self.response.out.write(link) class UploadHandler(blobstore_handlers.BlobstoreUploadHandler): def post(self): # check user login sid = self.request.get('sid') login = littlecircle.Login.get_by_sid(sid) if login is None or login.is_valid() == False: logging.error("[UploadHandler] invalid session id: {}".format(sid)) self.error(401) return # check user exists user = login.user.get() if user is None: logging.error("[UploadHandler] cannot get user, sid: {}".format(sid)) self.error(401) return upload_files = self.get_uploads('file') # 'file' is file upload field in the form blob_info = upload_files[0] blob_key = blob_info.key() size = blob_info.size logging.info("[UploadHandler] file size: {}".format(size)) # make preview img = images.Image(blob_key=blob_key) ''' https://developers.google.com/appengine/docs/python/images/imageclass?csw=1#Image_resize The resize transform preserves the aspect ratio of the image. If both the width and the height arguments are provided, the transform uses the dimension that results in a smaller image ''' img.resize(width=LITTLECIRCLE_PREVIEW_W, height=LITTLECIRCLE_PREVIEW_H) preview = img.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q, parse_source_metadata=True) # try to get geo location and date time of the photo env = os.environ['SERVER_SOFTWARE'] dev = (core_util.is_missing(env) == False) and (env.split('/')[0] == 'Development') meta = img.get_original_metadata() logging.debug("[UploadHandler] env: {}, dev: {}, meta: {}".format(env, dev, meta)) # date time dt = None if dev == True: dt = datetime.datetime.now() elif ('DateTime' in meta): dt = core_util.exif_datetime_to_datetime(meta['DateTime']) # location loc = None if dev == True: loc = ndb.GeoPt(22.4182277, 114.2080536) # The Chinese University of Hong Kong elif ('GPSLatitude' in meta and 'GPSLongitude' in meta): loc = ndb.GeoPt(meta['GPSLatitude'], meta['GPSLongitude']) # orientation, default is 1 orientation = 1 if 'Orientation' in meta: orientation = meta['Orientation'] rotate = core_util.get_rotate(orientation) logging.info("[UploadHandler] rotate {}".format(rotate)) img.rotate(rotate) ''' Due to GAE's limitation, parse_source_metadata will only be done in execute_transforms it makes preview is generated before img.rotate, therefore, we need to rotate it. But no need to parse_source_metadata again ''' preview = img.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q, parse_source_metadata=False) # summary logging.info("[UploadHandler] photo taken at {} in location {}, orientation: {}".format(dt, loc, orientation)) # make thumbnail ''' https://developers.google.com/appengine/docs/python/images/imageclass?csw=1#Image_resize The resize transform preserves the aspect ratio of the image. If both the width and the height arguments are provided, the transform uses the dimension that results in a smaller image ''' logging.info("[UploadHandler] thumb: {} x {}".format(LITTLECIRCLE_THUMBNAIL_W, LITTLECIRCLE_THUMBNAIL_H)) img.resize(width=LITTLECIRCLE_THUMBNAIL_W, height=LITTLECIRCLE_THUMBNAIL_H) thumb = img.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q) # save photo information p = littlecircle.Photo( key=ndb.Key(littlecircle.Photo, str(blob_key)), owner=user.key, size=size, ori=orientation, geo=loc, photoDate=dt, preview=preview, thumbnail=thumb ) k = p.put() logging.info("[UploadHandler] photo saved, key: {}".format(k.string_id())) self.response.headers['Content-Type'] = 'application/json' self.response.out.write(json.dumps(p.to_dict(login))) class ImageSearchHandler(webapp2.RequestHandler): def get(self, url_sid): # check user login if (core_util.is_missing(url_sid)): logging.error("[ImageSearchHandler] missing session id") self.error(401) return sid = str(urllib.unquote(url_sid)) login = littlecircle.Login.get_by_sid(sid) if (login is None or login.is_valid() == False): logging.error("[ImageSearchHandler] invalid session id: {}".format(sid)) self.error(401) return list = littlecircle.Photo.query( littlecircle.Photo.deletedDate == None).order( littlecircle.Photo.uploadDate).fetch() array = [] for obj in list: array.append(obj.to_dict(login)) logging.info("[ImageSearchHandler] number of photo: {}".format(len(array))) self.response.headers['Content-Type'] = 'application/json' self.response.out.write(json.dumps(array)) class ImageViewHandler(webapp2.RequestHandler): def get(self): self.response.headers['Content-Type'] = 'image/jpeg' # get the photo ID blob_key = self.request.get('id') if (core_util.is_missing(blob_key)): logging.error("[ImageViewHandler] missing id") self.error(404) return # check user login if (littlecircle.Login.is_valid_sid(self.request.get('sid')) == False): logging.error("[ImageViewHandler] invalid session id") self.error(401) return # get the photo size required size = self.request.get('size') logging.info("[ImageViewHandler] key: {} size: {}".format(blob_key, size)) # get the stored photo record (not the image) img = ndb.Key(littlecircle.Photo, blob_key).get() if (img is None): logging.error("[ImageViewHandler] cannot find image record: {}".format(blob_key)) self.error(404) return # get the full image fullImg = images.Image(blob_key=blob_key) if (fullImg is None): logging.error("[ImageViewHandler] cannot find full image: {}".format(blob_key)) self.error(404) return if size == '2': logging.info("[ImageViewHandler] send full: {}".format(blob_key)) # logging.info("[ImageViewHandler] orientation: {}".format(img.ori)) # self.redirect("/download/{}?ori={}".format(blob_key, img.ori)) orientation = img.ori logging.debug("[ImageViewHandler] orientation: {}".format(orientation)) rotate = core_util.get_rotate(orientation) logging.info("[ImageViewHandler] rotate image {}".format(rotate)) fullImg.rotate(rotate) self.response.headers['Content-Type'] = 'image/jpeg' self.response.out.write(fullImg.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q)) elif size == '1': logging.info("[ImageViewHandler] send preview") try: # get the stored preview preview = img.preview if (preview is None): logging.info("[ImageViewHandler] preview not found, try to make it") fullImg.resize(width=LITTLECIRCLE_PREVIEW_W) preview = fullImg.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q) if (preview is None): raise Exception("[ImageViewHandler] cannot make preview: {}".format(blob_key)) logging.debug("[ImageViewHandler] save back the preview") img.preview = preview img.put() else: logging.debug("[ImageViewHandler] found stored preview") self.response.out.write(preview) except: logging.error("[ImageViewHandler] except: {}".format(sys.exc_info())) self.error(404) else: logging.info("[ImageViewHandler] send thumbnail") try: # get the stored thumbnail thumbnail = img.thumbnail if (thumbnail is None): logging.info("[ImageViewHandler] thumbnail not found, try to make it") fullImg.resize(width=LITTLECIRCLE_THUMBNAIL_W) thumbnail = fullImg.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q) if (thumbnail is None): raise Exception("[ImageViewHandler] cannot make thumbnail: {}".format(blob_key)) logging.debug("[ImageViewHandler] save back the thumbnail") img.thumbnail = thumbnail img.put() else: logging.debug("[ImageViewHandler] found stored thumbnail") self.response.out.write(thumbnail) except: logging.error("[ImageViewHandler] except: {}".format(sys.exc_info())) self.error(404) ''' class ImageDownloadHandler(webapp2.RequestHandler): #blobstore_handlers.BlobstoreDownloadHandler def get(self, resource): resource = str(urllib.unquote(resource)) if (core_util.is_missing(resource)): logging.error("[ImageDownloadHandler] missing resource") self.error(404) return blob_info = blobstore.BlobInfo.get(resource) if (blob_info is None): logging.error("[ImageDownloadHandler] cannot find image: {}".format(resource)) self.error(404) return rotate = 0 orientation = self.request.get('ori') logging.debug("[ImageDownloadHandler] orientation: {}".format(orientation)) img = images.Image(blob_key=blob_info) if orientation == '3': logging.info("[ImageDownloadHandler] rotate image") rotate = 180 size = blob_info.size name = blob_info.filename logging.info("[ImageDownloadHandler] {} ({})".format(name, size)) # self.send_blob(blob_info, save_as=name) img.rotate(rotate) self.response.headers['Content-Type'] = 'image/jpeg' self.response.out.write(img.execute_transforms(output_encoding=images.JPEG, quality=LITTLECIRCLE_IMG_Q)) ''' class ImageDeleteHandler(webapp2.RequestHandler): def get(self, url_sid, url_photo): # get the photo ID blob_key = str(urllib.unquote(url_photo)) if (core_util.is_missing(blob_key)): logging.error("[ImageDeleteHandler] missing id") self.error(404) return img = ndb.Key(littlecircle.Photo, blob_key).get() if (img is None): logging.error("[ImageDeleteHandler] missing image: {}".format(blob_key)) self.error(404) return # check user login sid = str(urllib.unquote(url_sid)) login = littlecircle.Login.get_by_sid(sid) if (login is None or login.is_valid() == False): logging.error("[ImageDeleteHandler] invalid session id: {}".format(sid)) self.error(401) return # check if the photo is belonged to the user k1 = img.owner k2 = login.user logging.info("[ImageDeleteHandler] owner: {}, login: {}".format(k1.id(), k2.id())) if (k1 != k2): logging.info("[ImageDeleteHandler] permission denied, image: {}".format(blob_key)) self.error(403) return # delete photo (set inactive) logging.info("[ImageDeleteHandler] delete image: {}".format(blob_key)) img.deletedBy = k2 img.deletedDate = datetime.datetime.now() img.put() self.response.headers['Content-Type'] = 'application/json' self.response.out.write(littlecircle.Resp(status=1).to_json())
copperoxide/littlecircle
gapp/photo_handler.py
Python
apache-2.0
13,084