code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def read_config(source, current_name):
    """Read the Sphinx config for one version.

    Runs sphinx-build in a child process (which aborts after reading config)
    so this process isn't killed by the abort.

    :raise HandledError: If sphinx-build fails. Will be logged before raising.

    :param str source: Source directory to pass to sphinx-build.
    :param str current_name: The ref name of the current version being built.

    :return: Specific Sphinx config values.
    :rtype: dict
    """
    log = logging.getLogger(__name__)
    queue = multiprocessing.Queue()
    config = Config.from_context()

    with TempDir() as temp_dir:
        argv = ('sphinx-build', source, temp_dir)
        log.debug('Running sphinx-build for config values with args: %s', str(argv))
        child = multiprocessing.Process(target=_read_config, args=(argv, config, current_name, queue))
        child.start()
        child.join()  # Block.
        if child.exitcode != 0:
            log.error('sphinx-build failed for branch/tag while reading config: %s', current_name)
            raise HandledError

    config = queue.get()
    return config
4.627587
3.949368
1.171728
def builder_inited(app):
    """Update the Sphinx builder.

    :param sphinx.application.Sphinx app: Sphinx application object.
    """
    # Add this extension's _templates directory to Sphinx.
    templates_dir = os.path.join(os.path.dirname(__file__), '_templates')
    app.builder.templates.pathchain.insert(0, templates_dir)
    app.builder.templates.loaders.insert(0, SphinxFileSystemLoader(templates_dir))
    app.builder.templates.templatepathlen += 1

    # Add versions.html to sidebar.
    if '**' not in app.config.html_sidebars:
        # No catch-all sidebar configured: start from the builder defaults.
        app.config.html_sidebars['**'] = StandaloneHTMLBuilder.default_sidebars + ['versions.html']
    elif 'versions.html' not in app.config.html_sidebars['**']:
        app.config.html_sidebars['**'].append('versions.html')
3.690526
3.871658
0.953216
def env_updated(cls, app, env):
    """Abort Sphinx after initializing config and discovering all pages to build.

    :param sphinx.application.Sphinx app: Sphinx application object.
    :param sphinx.environment.BuildEnvironment env: Sphinx build environment.
    """
    if not cls.ABORT_AFTER_READ:
        return
    # Collect only this extension's (scv_*) config values, plus the two
    # pieces of data the parent process needs to plan the real build.
    config = {name: getattr(app.config, name)
              for name in dir(app.config) if name.startswith('scv_')}
    config['found_docs'] = tuple(str(doc) for doc in env.found_docs)
    config['master_doc'] = str(app.config.master_doc)
    cls.ABORT_AFTER_READ.put(config)
    sys.exit(0)
4.89692
4.632906
1.056987
def html_page_context(cls, app, pagename, templatename, context, doctree):
    """Update the Jinja2 HTML context, exposes the Versions class instance to it.

    :param sphinx.application.Sphinx app: Sphinx application object.
    :param str pagename: Name of the page being rendered (without .html or any file extension).
    :param str templatename: Page name with .html.
    :param dict context: Jinja2 HTML context.
    :param docutils.nodes.document doctree: Tree of docutils nodes.
    """
    assert templatename or doctree  # Unused, for linting.
    cls.VERSIONS.context = context
    versions = cls.VERSIONS
    this_remote = versions[cls.CURRENT_VERSION]
    banner_main_remote = versions[cls.BANNER_MAIN_VERSION] if cls.SHOW_BANNER else None

    # Update Jinja2 context.
    context['bitbucket_version'] = cls.CURRENT_VERSION
    context['current_version'] = cls.CURRENT_VERSION
    context['github_version'] = cls.CURRENT_VERSION
    context['html_theme'] = app.config.html_theme
    context['scv_banner_greatest_tag'] = cls.BANNER_GREATEST_TAG
    context['scv_banner_main_ref_is_branch'] = banner_main_remote['kind'] == 'heads' if cls.SHOW_BANNER else None
    context['scv_banner_main_ref_is_tag'] = banner_main_remote['kind'] == 'tags' if cls.SHOW_BANNER else None
    context['scv_banner_main_version'] = banner_main_remote['name'] if cls.SHOW_BANNER else None
    context['scv_banner_recent_tag'] = cls.BANNER_RECENT_TAG
    context['scv_is_branch'] = this_remote['kind'] == 'heads'
    context['scv_is_greatest_tag'] = this_remote == versions.greatest_tag_remote
    context['scv_is_recent_branch'] = this_remote == versions.recent_branch_remote
    context['scv_is_recent_ref'] = this_remote == versions.recent_remote
    context['scv_is_recent_tag'] = this_remote == versions.recent_tag_remote
    context['scv_is_root'] = cls.IS_ROOT
    context['scv_is_tag'] = this_remote['kind'] == 'tags'
    context['scv_show_banner'] = cls.SHOW_BANNER
    context['versions'] = versions
    context['vhasdoc'] = versions.vhasdoc
    context['vpathto'] = versions.vpathto

    # Insert banner into body.
    if cls.SHOW_BANNER and 'body' in context:
        parsed = app.builder.templates.render('banner.html', context)
        context['body'] = parsed + context['body']
        # Handle overridden css_files.
        css_files = context.setdefault('css_files', list())
        if '_static/banner.css' not in css_files:
            css_files.append('_static/banner.css')
        # Handle overridden html_static_path.
        if STATIC_DIR not in app.config.html_static_path:
            app.config.html_static_path.append(STATIC_DIR)

    # Reset last_updated with file's mtime (will be last git commit authored date).
    if app.config.html_last_updated_fmt is not None:
        file_path = app.env.doc2path(pagename)
        if os.path.isfile(file_path):
            lufmt = app.config.html_last_updated_fmt or getattr(locale, '_')('%b %d, %Y')
            mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
            context['last_updated'] = format_date(lufmt, mtime, language=app.config.language, warn=app.warn)
2.885471
2.826024
1.021035
def cli(config, **options):
    """Build versioned Sphinx docs for every branch and tag pushed to origin.

    Supports only building locally with the "build" sub command or build and push to a remote with the "push" sub
    command. For more information for either run them with their own --help.

    The options below are global and must be specified before the sub command name (e.g. -N build ...).

    \f
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param dict options: Additional Click options.
    """
    def pre(rel_source):
        # Deferred setup run later by each sub command, once rel_source is known.
        # Setup logging.
        if not NO_EXECUTE:
            setup_logging(verbose=config.verbose, colors=not config.no_colors)
        log = logging.getLogger(__name__)

        # Change current working directory.
        if config.chdir:
            os.chdir(config.chdir)
            log.debug('Working directory: %s', os.getcwd())
        else:
            config.update(dict(chdir=os.getcwd()), overwrite=True)

        # Get and verify git root.
        try:
            config.update(dict(git_root=get_root(config.git_root or os.getcwd())), overwrite=True)
        except GitError as exc:
            log.error(exc.message)
            log.error(exc.output)
            raise HandledError

        # Look for local config.
        if config.no_local_conf:
            config.update(dict(local_conf=None), overwrite=True)
        elif not config.local_conf:
            candidates = [p for p in (os.path.join(s, 'conf.py') for s in rel_source) if os.path.isfile(p)]
            if candidates:
                config.update(dict(local_conf=candidates[0]), overwrite=True)
            else:
                log.debug("Didn't find a conf.py in any REL_SOURCE.")
        elif os.path.basename(config.local_conf) != 'conf.py':
            log.error('Path "%s" must end with conf.py.', config.local_conf)
            raise HandledError

    config['pre'] = pre  # To be called by Click sub commands.
    config.update(options)
3.454018
3.393955
1.017697
def build_options(func):
    """Add "build" Click options to function.

    Decorators are applied bottom-up by Click, so this order determines the
    displayed option order.

    :param function func: The function to wrap.

    :return: The wrapped function.
    :rtype: function
    """
    func = click.option('-a', '--banner-greatest-tag', is_flag=True,
                        help='Override banner-main-ref to be the tag with the highest version number.')(func)
    func = click.option('-A', '--banner-recent-tag', is_flag=True,
                        help='Override banner-main-ref to be the most recent committed tag.')(func)
    func = click.option('-b', '--show-banner', help='Show a warning banner.', is_flag=True)(func)
    func = click.option('-B', '--banner-main-ref',
                        help="Don't show banner on this ref and point banner URLs to this ref. Default master.")(func)
    func = click.option('-i', '--invert', help='Invert/reverse order of versions.', is_flag=True)(func)
    func = click.option('-p', '--priority', type=click.Choice(('branches', 'tags')),
                        help="Group these kinds of versions at the top (for themes that don't separate them).")(func)
    func = click.option('-r', '--root-ref',
                        help='The branch/tag at the root of DESTINATION. Will also be in subdir. Default master.')(func)
    func = click.option('-s', '--sort', multiple=True, type=click.Choice(('semver', 'alpha', 'time')),
                        help='Sort versions. Specify multiple times to sort equal values of one kind.')(func)
    func = click.option('-t', '--greatest-tag', is_flag=True,
                        help='Override root-ref to be the tag with the highest version number.')(func)
    func = click.option('-T', '--recent-tag', is_flag=True,
                        help='Override root-ref to be the most recent committed tag.')(func)
    func = click.option('-w', '--whitelist-branches', multiple=True,
                        help='Whitelist branches that match the pattern. Can be specified more than once.')(func)
    func = click.option('-W', '--whitelist-tags', multiple=True,
                        help='Whitelist tags that match the pattern. Can be specified more than once.')(func)
    return func
3.319322
3.41315
0.97251
def override_root_main_ref(config, remotes, banner):
    """Override root_ref or banner_main_ref with tags in config if user requested.

    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param iter remotes: List of dicts from Versions.remotes.
    :param bool banner: Evaluate banner main ref instead of root ref.

    :return: If root/main ref exists.
    :rtype: bool
    """
    log = logging.getLogger(__name__)
    greatest_tag = config.banner_greatest_tag if banner else config.greatest_tag
    recent_tag = config.banner_recent_tag if banner else config.recent_tag

    if greatest_tag or recent_tag:
        candidates = [r for r in remotes if r['kind'] == 'tags']
        if candidates:
            # Highest semver tag or most recently committed tag becomes the ref.
            multi_sort(candidates, ['semver' if greatest_tag else 'time'])
            config.update({'banner_main_ref' if banner else 'root_ref': candidates[0]['name']}, overwrite=True)
        else:
            flag = '--banner-main-ref' if banner else '--root-ref'
            log.warning('No git tags with docs found in remote. Falling back to %s value.', flag)

    ref = config.banner_main_ref if banner else config.root_ref
    return ref in [r['name'] for r in remotes]
4.576181
4.096185
1.117181
def build(config, rel_source, destination, **options):
    """Fetch branches/tags and build all locally.

    Doesn't push anything to remote. Just fetch all remote branches and tags, export them to a temporary directory,
    run sphinx-build on each one, and then store all built documentation in DESTINATION.

    REL_SOURCE is the path to the docs directory relative to the git root. If the source directory has moved around
    between git tags you can specify additional directories.

    DESTINATION is the path to the local directory that will hold all generated docs for all versions.

    To pass options to sphinx-build (run for every branch/tag) use a double hyphen
    (e.g. build docs docs/_build/html -- -D setting=value).

    \f
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py (e.g. docs).
    :param str destination: Destination directory to copy/overwrite built docs to. Does not delete old files.
    :param dict options: Additional Click options.
    """
    if 'pre' in config:
        config.pop('pre')(rel_source)
        config.update({k: v for k, v in options.items() if v})
        if config.local_conf:
            config.update(read_local_conf(config.local_conf), ignore_set=True)
    if NO_EXECUTE:
        raise RuntimeError(config, rel_source, destination)
    log = logging.getLogger(__name__)

    # Gather git data.
    log.info('Gathering info about the remote git repository...')
    conf_rel_paths = [os.path.join(s, 'conf.py') for s in rel_source]
    remotes = gather_git_info(config.git_root, conf_rel_paths, config.whitelist_branches, config.whitelist_tags)
    if not remotes:
        log.error('No docs found in any remote branch/tag. Nothing to do.')
        raise HandledError
    versions = Versions(
        remotes,
        sort=config.sort,
        priority=config.priority,
        invert=config.invert,
    )

    # Get root ref.
    if not override_root_main_ref(config, versions.remotes, False):
        log.error('Root ref %s not found in: %s', config.root_ref, ' '.join(r[1] for r in remotes))
        raise HandledError
    log.info('Root ref is: %s', config.root_ref)

    # Get banner main ref.
    if not config.show_banner:
        config.update(dict(banner_greatest_tag=False, banner_main_ref=None, banner_recent_tag=False), overwrite=True)
    elif not override_root_main_ref(config, versions.remotes, True):
        log.warning('Banner main ref %s not found in: %s', config.banner_main_ref, ' '.join(r[1] for r in remotes))
        log.warning('Disabling banner.')
        config.update(dict(banner_greatest_tag=False, banner_main_ref=None, banner_recent_tag=False,
                           show_banner=False), overwrite=True)
    else:
        log.info('Banner main ref is: %s', config.banner_main_ref)

    # Pre-build.
    log.info("Pre-running Sphinx to collect versions' master_doc and other info.")
    exported_root = pre_build(config.git_root, versions)
    if config.banner_main_ref and config.banner_main_ref not in [r['name'] for r in versions.remotes]:
        log.warning('Banner main ref %s failed during pre-run. Disabling banner.', config.banner_main_ref)
        config.update(dict(banner_greatest_tag=False, banner_main_ref=None, banner_recent_tag=False,
                           show_banner=False), overwrite=True)

    # Build.
    build_all(exported_root, destination, versions)

    # Cleanup.
    log.debug('Removing: %s', exported_root)
    shutil.rmtree(exported_root)

    # Store versions in state for push().
    config['versions'] = versions
3.499224
3.306177
1.05839
def push(ctx, config, rel_source, dest_branch, rel_dest, **options):
    """Build locally and then push to remote branch.

    First the build sub command is invoked which takes care of building all versions of your documentation in a
    temporary directory. If that succeeds then all built documents will be pushed to a remote branch.

    REL_SOURCE is the path to the docs directory relative to the git root. If the source directory has moved around
    between git tags you can specify additional directories.

    DEST_BRANCH is the branch name where generated docs will be committed to. The branch will then be pushed to
    remote. If there is a race condition with another job pushing to remote the docs will be re-generated and pushed
    again.

    REL_DEST is the path to the directory that will hold all generated docs for all versions relative to the git root
    of DEST_BRANCH.

    To pass options to sphinx-build (run for every branch/tag) use a double hyphen
    (e.g. push docs gh-pages . -- -D setting=value).

    \f
    :param click.core.Context ctx: Click context.
    :param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
    :param tuple rel_source: Possible relative paths (to git root) of Sphinx directory containing conf.py (e.g. docs).
    :param str dest_branch: Branch to clone and push to.
    :param str rel_dest: Relative path (to git root) to write generated docs to.
    :param dict options: Additional Click options.
    """
    if 'pre' in config:
        config.pop('pre')(rel_source)
        config.update({k: v for k, v in options.items() if v})
        if config.local_conf:
            config.update(read_local_conf(config.local_conf), ignore_set=True)
    if NO_EXECUTE:
        raise RuntimeError(config, rel_source, dest_branch, rel_dest)
    log = logging.getLogger(__name__)

    # Clone, build, push.
    for _ in range(PUSH_RETRIES):
        with TempDir() as temp_dir:
            log.info('Cloning %s into temporary directory...', dest_branch)
            try:
                clone(config.git_root, temp_dir, config.push_remote, dest_branch, rel_dest, config.grm_exclude)
            except GitError as exc:
                log.error(exc.message)
                log.error(exc.output)
                raise HandledError

            log.info('Building docs...')
            ctx.invoke(build, rel_source=rel_source, destination=os.path.join(temp_dir, rel_dest))
            versions = config.pop('versions')

            log.info('Attempting to push to branch %s on remote repository.', dest_branch)
            try:
                if commit_and_push(temp_dir, config.push_remote, versions):
                    return
            except GitError as exc:
                log.error(exc.message)
                log.error(exc.output)
                raise HandledError
            # Race with another pusher: rebuild against the fresh remote state.
            log.warning('Failed to push to remote repository. Retrying in %d seconds...', PUSH_SLEEP)
            time.sleep(PUSH_SLEEP)

    # Failed if this is reached.
    log.error('Ran out of retries, giving up.')
    raise HandledError
3.974051
3.797866
1.04639
def custom_sort(param):
    """Custom Click (Command|Group).params sorter.

    Case insensitive sort with capitals after lowercase. --version at the end
    since I can't sort --help.

    :param click.core.Option param: Parameter to evaluate.

    :return: Sort weight.
    :rtype: tuple
    """
    option_name = param.opts[0].lstrip('-')
    if param.param_type_name != 'option':
        # Non-options (e.g. arguments) sort before all options.
        return (False,)
    return (True, option_name == 'version', option_name.lower(), option_name.swapcase())
13.502499
11.148108
1.211192
def get_params(self, ctx):
    """Sort order of options before displaying.

    :param click.core.Context ctx: Click context.

    :return: super() return value.
    """
    # Sort in place so the help output lists options in our custom order.
    self.params.sort(key=self.custom_sort)
    return super(ClickGroup, self).get_params(ctx)
7.515274
6.649148
1.130261
def main(self, *args, **kwargs):
    """Main function called by setuptools.

    Splits command line arguments at the first "--"; everything after it is
    stashed on self.overflow for later injection into sub commands.

    :param list args: Passed to super().
    :param dict kwargs: Passed to super().

    :return: super() return value.
    """
    cli_args = kwargs.pop('args', click.get_os_args())
    try:
        split_at = cli_args.index('--')
    except ValueError:
        # No separator: nothing overflows.
        self.overflow = tuple()
    else:
        self.overflow = tuple(cli_args[split_at + 1:])
        cli_args = cli_args[:split_at]
    return super(ClickGroup, self).main(args=cli_args, *args, **kwargs)
3.915684
4.35261
0.899617
def invoke(self, ctx):
    """Inject overflow arguments into context state.

    :param click.core.Context ctx: Click context.

    :return: super() return value.
    """
    # Overflow args (everything after "--", captured in main()) are exposed
    # to sub commands through the shared Config object.
    if self.overflow:
        ctx.ensure_object(Config).update(dict(overflow=self.overflow))
    return super(ClickGroup, self).invoke(ctx)
9.649299
6.883634
1.401774
def from_context(cls):
    """Retrieve this class' instance from the current Click context.

    :return: Instance of this class.
    :rtype: Config
    """
    try:
        current_ctx = click.get_current_context()
    except RuntimeError:
        # No active Click context (e.g. called outside a command); use a
        # fresh instance instead.
        return cls()
    return current_ctx.find_object(cls)
4.405407
4.16613
1.057434
def update(self, params, ignore_set=False, overwrite=False):
    """Set instance values from dictionary.

    :param dict params: Click context params.
    :param bool ignore_set: Skip already-set values instead of raising AttributeError.
    :param bool overwrite: Allow overwriting already-set values.
    """
    log = logging.getLogger(__name__)
    # Iterating self yields config items; first element of each item is the
    # name -- presumably (name, value) pairs; confirm against __iter__.
    valid = {i[0] for i in self}
    for key, value in params.items():
        if not hasattr(self, key):
            raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, key))
        if key not in valid:
            # Attribute exists but isn't a public config item.
            message = "'{}' object does not support item assignment on '{}'"
            raise AttributeError(message.format(self.__class__.__name__, key))
        if key in self._already_set:
            if ignore_set:
                log.debug('%s already set in config, skipping.', key)
                continue
            if not overwrite:
                message = "'{}' object does not support item re-assignment on '{}'"
                raise AttributeError(message.format(self.__class__.__name__, key))
        setattr(self, key, value)
        self._already_set.add(key)
2.469022
2.467199
1.000739
def cleanup(self):
    """Recursively delete directory.

    Read-only entries (common on Windows) are made writable and retried, so
    the tree is removed even when it contains read-only files.

    :raise IOError: If the directory still exists after removal (errno 17).
    """
    import stat

    def _force_remove(_func, path, _exc_info):
        # rmtree failed on this path: clear the read-only bit and delete it.
        os.chmod(path, stat.S_IWRITE)
        os.unlink(path)

    shutil.rmtree(self.name, onerror=_force_remove)
    if os.path.exists(self.name):
        raise IOError(17, "File exists: '{}'".format(self.name))
4.192884
3.752319
1.117411
def get_resources(cls):
    """Returns Ext Resources.

    :return: List of extensions.ResourceExtension instances: the router
        scheduler resource nested under hosting devices, and the hosting
        device resource nested under routers.
    """
    exts = []

    # Router scheduler as a sub-resource of hosting devices (under /dev_mgr).
    parent = dict(member_name=ciscohostingdevicemanager.DEVICE,
                  collection_name=ciscohostingdevicemanager.DEVICES)
    controller = resource.Resource(
        RouterHostingDeviceSchedulerController(), cb_faults.FAULT_MAP)
    exts.append(extensions.ResourceExtension(
        DEVICE_L3_ROUTERS, controller, parent, path_prefix="/dev_mgr"))

    # Hosting devices as a sub-resource of routers.
    parent = dict(member_name="router", collection_name=l3_const.ROUTERS)
    controller = resource.Resource(
        HostingDevicesHostingRouterController(), cb_faults.FAULT_MAP)
    exts.append(extensions.ResourceExtension(L3_ROUTER_DEVICES, controller,
                                             parent))
    return exts
6.481988
6.164684
1.051471
def assign_hosting_device_to_cfg_agent(self, context, cfg_agent_id,
                                       hosting_device_id):
    """Make config agent handle an (unassigned) hosting device.

    :raise HostingDeviceAssignedToCfgAgent: If the device is already bound
        to a different agent.
    :raise InvalidCfgAgent: If the target agent is not an enabled Cisco
        cfg agent.
    """
    hd_db = self._get_hosting_device(context, hosting_device_id)
    if hd_db.cfg_agent_id:
        if hd_db.cfg_agent_id == cfg_agent_id:
            # Already bound to the requested agent; nothing to do.
            return
        LOG.debug('Hosting device %(hd_id)s has already been assigned to '
                  'Cisco cfg agent %(agent_id)s',
                  {'hd_id': hosting_device_id, 'agent_id': cfg_agent_id})
        raise ciscocfgagentscheduler.HostingDeviceAssignedToCfgAgent(
            hosting_device_id=hosting_device_id, agent_id=cfg_agent_id)
    cfg_agent_db = get_agent_db_obj(self._get_agent(context, cfg_agent_id))
    if (cfg_agent_db.agent_type != c_constants.AGENT_TYPE_CFG or
            cfg_agent_db.admin_state_up is not True):
        raise ciscocfgagentscheduler.InvalidCfgAgent(agent_id=cfg_agent_id)
    self._bind_hosting_device_to_cfg_agent(context, hd_db, cfg_agent_db)
    # Notify the agent so it starts managing the device.
    cfg_notifier = self.agent_notifiers.get(c_constants.AGENT_TYPE_CFG)
    if cfg_notifier:
        cfg_notifier.hosting_devices_assigned_to_cfg_agent(
            context, [hosting_device_id], cfg_agent_db.host)
2.052906
2.070042
0.991722
def unassign_hosting_device_from_cfg_agent(self, context, cfg_agent_id,
                                           hosting_device_id):
    """Make config agent stop handling an (assigned) hosting device.

    :raise HostingDeviceNotAssignedToCfgAgent: If the device is not bound
        to the given agent.
    """
    hd_db = self._get_hosting_device(context, hosting_device_id)
    if hd_db.cfg_agent_id is None and cfg_agent_id is None:
        # Nothing assigned and nothing requested; no-op.
        return
    elif hd_db.cfg_agent_id != cfg_agent_id:
        LOG.debug('Hosting device %(hd_id)s is not assigned to Cisco '
                  'cfg agent %(agent_id)s',
                  {'hd_id': hosting_device_id, 'agent_id': cfg_agent_id})
        raise ciscocfgagentscheduler.HostingDeviceNotAssignedToCfgAgent(
            hosting_device_id=hosting_device_id, agent_id=cfg_agent_id)
    cfg_agent_db = get_agent_db_obj(self._get_agent(context, cfg_agent_id))
    # Notify the agent before unbinding so it can clean up.
    cfg_notifier = self.agent_notifiers.get(c_constants.AGENT_TYPE_CFG)
    if cfg_notifier:
        cfg_notifier.hosting_devices_unassigned_from_cfg_agent(
            context, [hosting_device_id], cfg_agent_db.host)
    # Unbind by binding to None.
    self._bind_hosting_device_to_cfg_agent(context, hd_db, None)
2.220773
2.258433
0.983325
def update_subports(self, port):
    """Set port attributes for trunk subports.

    For baremetal deployments only, set the neutron port attributes
    during the bind_port event.
    """
    trunk_details = port.get('trunk_details')
    subports = trunk_details['sub_ports']
    # NOTE(review): host id is read from the DNSNAME attribute -- confirm
    # this key actually carries the host identifier for baremetal ports.
    host_id = port.get(bc.dns.DNSNAME)
    context = bc.get_context()
    el_context = context.elevated()
    for subport in subports:
        bc.get_plugin().update_port(
            el_context, subport['port_id'],
            {'port': {bc.portbindings.HOST_ID: host_id,
                      'device_owner': bc.trunk_consts.TRUNK_SUBPORT_OWNER}})

    # Set trunk to ACTIVE status.
    trunk_obj = bc.trunk_objects.Trunk.get_object(
        el_context, id=trunk_details['trunk_id'])
    trunk_obj.update(status=bc.trunk_consts.ACTIVE_STATUS)
3.746093
3.693715
1.01418
def read_static_uplink(self):
    """Read the static uplink from configuration, if given.

    Scans the configured node list for an entry matching this host and, on
    a match, records the corresponding uplink port.
    """
    nodes = self.node_list
    uplinks = self.node_uplink_list
    if nodes is None or uplinks is None:
        # No static uplink configuration supplied.
        return
    for node_name, uplink_port in zip(nodes.split(','), uplinks.split(',')):
        if node_name.strip() != self.host_name:
            continue
        self.static_uplink = True
        self.static_uplink_port = uplink_port.strip()
        return
3.074152
2.901511
1.0595
def vdp_vlan_change_cb(self, port_uuid, lvid, vdp_vlan, fail_reason):
    """Callback function for updating the VDP VLAN in DB.

    :param port_uuid: UUID of the port whose result row is updated.
    :param lvid: Local VLAN id.
    :param vdp_vlan: VLAN id reported by VDP.
    :param fail_reason: Failure reason string, if any.
    """
    LOG.info("Vlan change CB lvid %(lvid)s VDP %(vdp)s",
             {'lvid': lvid, 'vdp': vdp_vlan})
    self.update_vm_result(port_uuid, constants.RESULT_SUCCESS,
                          lvid=lvid, vdp_vlan=vdp_vlan,
                          fail_reason=fail_reason)
2.99568
3.156609
0.949018
def process_bulk_vm_event(self, msg, phy_uplink):
    """Process the VM bulk event usually after a restart."""
    LOG.info("In processing Bulk VM Event status %s", msg)
    # Give uplink processing time to settle before consuming the bulk list.
    time.sleep(3)
    if (not self.uplink_det_compl or
            phy_uplink not in self.ovs_vdp_obj_dict):
        # This condition shouldn't be hit as only when uplink is obtained,
        # save_uplink is called and that in turns calls this process_bulk.
        LOG.error("Uplink Port Event not received,"
                  "yet in bulk process")
        return
    ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
    for vm_dict in msg.msg_dict.get('vm_bulk_list'):
        if vm_dict['status'] == 'down':
            # Down VMs first get purged from the local cache.
            ovs_vdp_obj.pop_local_cache(vm_dict['port_uuid'],
                                        vm_dict['vm_mac'],
                                        vm_dict['net_uuid'],
                                        vm_dict['local_vlan'],
                                        vm_dict['vdp_vlan'],
                                        vm_dict['segmentation_id'])
        # Re-dispatch every VM in the bulk list through the normal VM
        # event path.
        vm_msg = VdpQueMsg(constants.VM_MSG_TYPE,
                           port_uuid=vm_dict['port_uuid'],
                           vm_mac=vm_dict['vm_mac'],
                           net_uuid=vm_dict['net_uuid'],
                           segmentation_id=vm_dict['segmentation_id'],
                           status=vm_dict['status'],
                           oui=vm_dict['oui'],
                           phy_uplink=phy_uplink)
        self.process_vm_event(vm_msg, phy_uplink)
4.14689
4.099261
1.011619
def is_openstack_running(self):
    """Check whether OpenStack appears to be running on this node.

    Currently it just checks for the presence of both the external and
    integration bridges.

    :return: True if both bridges are present, False otherwise (including
        on any error, which is logged).
    """
    try:
        # Return the condition directly instead of if/else True/False.
        return bool(
            ovs_vdp.is_bridge_present(self.br_ex, self.root_helper) and
            ovs_vdp.is_bridge_present(self.br_integ, self.root_helper))
    except Exception as exc:
        LOG.error("Exception in is_openstack_running %s", str(exc))
        return False
3.284983
2.804846
1.171181
cfg_dict = {} if topo_dict.bond_member_ports is not None: cfg_dict.update({'bond_member_ports': topo_dict.bond_member_ports}) if topo_dict.bond_interface is not None: cfg_dict.update({'bond_interface': topo_dict.bond_interface}) return cfg_dict
def _fill_topology_cfg(self, topo_dict)
Fills the extra configurations in the topology.
2.824811
2.750568
1.026992
def uplink_bond_intf_process(self):
    """Process the case when uplink interface becomes part of a bond.

    This is called to check if the phy interface became a part of the bond.
    If so, the following is done:
    a. Bring down the physical interface by sending a 'down' event
    b. Add the bond interface by sending an 'up' event

    Consequently, when the bond is added it is assigned to self.phy_uplink,
    after which get_bond_intf returns None and this function is a no-op.

    :return: True if a bond transition was processed, False otherwise.
    """
    bond_intf = sys_utils.get_bond_intf(self.phy_uplink)
    if bond_intf is None:
        return False

    # Step (a): bring down the old physical uplink.
    self.save_uplink(
        fail_reason=constants.port_transition_bond_down_reason)
    self.process_uplink_ongoing = True
    upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE,
                        status='down',
                        phy_uplink=self.phy_uplink,
                        br_int=self.br_integ, br_ex=self.br_ex,
                        root_helper=self.root_helper)
    self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
    self.phy_uplink = None
    self.veth_intf = None
    self.uplink_det_compl = False

    # No veth interface
    # Step (b): bring up the bond interface as the new uplink.
    self.save_uplink(
        uplink=bond_intf,
        fail_reason=constants.port_transition_bond_up_reason)
    self.phy_uplink = bond_intf
    self.process_uplink_ongoing = True
    upl_msg = VdpQueMsg(constants.UPLINK_MSG_TYPE,
                        status='up',
                        phy_uplink=self.phy_uplink,
                        br_int=self.br_integ, br_ex=self.br_ex,
                        root_helper=self.root_helper)
    self.que.enqueue(constants.Q_UPL_PRIO, upl_msg)
    return True
3.399033
3.324075
1.02255
def check_periodic_bulk_vm_notif_rcvd(self):
    """Bulk VM check handler called from periodic uplink detection.

    If the bulk VM event has not been received after one full check
    interval, save_uplink is called again, which prompts the server to
    resend the bulk VM list. Not mutex-protected: worst case the bulk VM
    event is sent twice, which is harmless.
    """
    if not self.bulk_vm_rcvd_flag:
        if self.bulk_vm_check_cnt >= 1:
            # Waited long enough; re-request the bulk VM list.
            self.bulk_vm_check_cnt = 0
            self.save_uplink(uplink=self.phy_uplink,
                             veth_intf=self.veth_intf)
            LOG.info("Doing save_uplink again to request "
                     "Bulk VM's")
        else:
            LOG.info("Bulk VM not received, incrementing count")
            self.bulk_vm_check_cnt += 1
5.031529
3.317792
1.516529
def static_uplink_detect(self, veth):
    """Return the static uplink based on argument passed.

    On the very first call after start, returns the configured static
    uplink port, or 'down' if the previously detected uplink no longer
    matches the configuration. On subsequent calls, returns 'normal' when
    a veth is present (uplink processing already succeeded).

    :param veth: Existing veth interface, or None.
    :return: 'down', 'normal', or the static uplink port name.
    """
    LOG.info("In static_uplink_detect %(veth)s", {'veth': veth})
    if self.static_uplink_first:
        self.static_uplink_first = False
        config_changed = (self.phy_uplink is not None and
                          self.phy_uplink != self.static_uplink_port)
        if config_changed:
            # Old uplink must be torn down before the new one is used.
            return 'down'
    return self.static_uplink_port if veth is None else 'normal'
3.867913
3.110379
1.24355
def get_hosting_device_config(self, client, hosting_device_id):
    """Get config of hosting_device.

    :param client: REST client used to issue the GET request.
    :param hosting_device_id: Id interpolated into the resource path.
    :return: Result of client.get on the hosting device config URL.
    """
    resource_url = (self.resource_path + HOSTING_DEVICE_CONFIG) % hosting_device_id
    return client.get(resource_url)
8.59275
8.822401
0.97397
def get_client_class(self, client_class_name):
    """Returns a specific client class details from CPNR server.

    :param client_class_name: Name of the client class to fetch.
    :return: Result of the GET request.
    """
    target_url = self._build_url(['ClientClass', client_class_name])
    return self._do_request('GET', target_url)
5.216326
4.257677
1.225158
def get_vpn(self, vpn_name):
    """Returns a specific VPN name details from CPNR server.

    :param vpn_name: Name of the VPN to fetch.
    :return: Result of the GET request.
    """
    target_url = self._build_url(['VPN', vpn_name])
    return self._do_request('GET', target_url)
4.793988
4.120327
1.163497
def get_scopes(self, vpnid='.*'):
    """Returns a list of all the scopes from CPNR server.

    :param vpnid: VPN id filter; defaults to matching all VPNs.
    :return: Result of the GET request.
    """
    target_url = self._build_url(['Scope'], vpn=vpnid)
    return self._do_request('GET', target_url)
6.627053
5.545443
1.195045
def get_scope(self, scope_name):
    """Returns a specific scope name details from CPNR server.

    :param scope_name: Name of the scope to fetch.
    :return: Result of the GET request.
    """
    target_url = self._build_url(['Scope', scope_name])
    return self._do_request('GET', target_url)
5.269098
4.458838
1.18172
def get_client_entry(self, client_entry_name):
    """Returns a specific client entry name details from CPNR server.

    :param client_entry_name: Name of the client entry to fetch.
    :return: Result of the GET request.
    """
    target_url = self._build_url(['ClientEntry', client_entry_name])
    return self._do_request('GET', target_url)
4.426147
3.776738
1.17195
def release_address(self, address, vpnid):
    """Release a specific lease, called after delete_client_entry.

    :param address: IP address of the lease to release.
    :param vpnid: VPN id the lease belongs to.
    :return: Result of the DELETE request.
    """
    lease_query = "{0}?action=releaseAddress&vpnId={1}".format(address, vpnid)
    target_url = self._build_url(['Lease', lease_query])
    return self._do_request('DELETE', target_url)
8.344298
7.513527
1.11057
def qsize(self, qname):
    """Return the approximate size of the named queue.

    :param qname: Name of the queue to inspect.
    :return: Approximate number of items in the queue.
    :raise ValueError: If no queue with that name is defined.
    """
    if qname in self._queues:
        return self._queues[qname].qsize()
    # Interpolate the queue name into the message; previously it was passed
    # as a second argument to ValueError, leaving the %s unformatted.
    raise ValueError(_("queue %s is not defined") % qname)
3.670556
3.465287
1.059236
def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id):
    """Lists a nexusport binding.

    :return: Bindings matching all four identifiers.
    """
    LOG.debug("get_nexusport_binding() called")
    filters = dict(port_id=port_id, vlan_id=vlan_id,
                   switch_ip=switch_ip, instance_id=instance_id)
    return _lookup_all_nexus_bindings(**filters)
2.812753
2.826337
0.995194
def get_nexus_switchport_binding(port_id, switch_ip):
    """Lists all bindings for this switch & port.

    :return: Bindings matching the switch ip and port id.
    """
    LOG.debug("get_nexus_switchport_binding() called")
    filters = dict(port_id=port_id, switch_ip=switch_ip)
    return _lookup_all_nexus_bindings(**filters)
4.164877
3.87333
1.07527
def get_nexusvlan_binding(vlan_id, switch_ip):
    """Lists a vlan and switch binding.

    :return: Bindings matching the vlan id and switch ip.
    """
    LOG.debug("get_nexusvlan_binding() called")
    filters = dict(vlan_id=vlan_id, switch_ip=switch_ip)
    return _lookup_all_nexus_bindings(**filters)
4.336457
4.059239
1.068293
def get_reserved_bindings(vlan_id, instance_id, switch_ip=None,
                          port_id=None):
    """Lists reserved bindings.

    The lookup filters mirror which optional arguments were supplied:
    port_id implies switch_ip is also included (even if falsy), matching
    the original call shapes.
    """
    LOG.debug("get_reserved_bindings() called")
    criteria = dict(vlan_id=vlan_id, instance_id=instance_id)
    if port_id:
        criteria.update(switch_ip=switch_ip, port_id=port_id)
    elif switch_ip:
        criteria['switch_ip'] = switch_ip
    return _lookup_all_nexus_bindings(**criteria)
1.839036
1.824124
1.008175
if not port_id: LOG.warning("update_reserved_binding called with no state") return LOG.debug("update_reserved_binding called") session = bc.get_writer_session() if is_switch_binding: # For reserved switch binding binding = _lookup_one_nexus_binding(session=session, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id) binding.port_id = port_id else: # For reserved port binding binding = _lookup_one_nexus_binding(session=session, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id, port_id=port_id) binding.is_native = is_native binding.channel_group = ch_grp session.merge(binding) session.flush() return binding
def update_reserved_binding(vlan_id, switch_ip, instance_id, port_id, is_switch_binding=True, is_native=False, ch_grp=0)
Updates reserved binding. This overloads port bindings to support reserved Switch binding used to maintain the state of a switch so it can be viewed by all other neutron processes. There's also the case of a reserved port binding to keep switch information on a given interface. The values of these arguments are as follows: :param vlan_id: 0 :param switch_ip: ip address of the switch :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE : port-expected port_id :param ch_grp: 0 if no port-channel else non-zero integer
2.336215
2.501079
0.934083
if not port_id: LOG.warning("remove_reserved_binding called with no state") return LOG.debug("remove_reserved_binding called") session = bc.get_writer_session() binding = _lookup_one_nexus_binding(session=session, vlan_id=vlan_id, switch_ip=switch_ip, instance_id=instance_id, port_id=port_id) for bind in binding: session.delete(bind) session.flush() return binding
def remove_reserved_binding(vlan_id, switch_ip, instance_id, port_id)
Removes reserved binding. This overloads port bindings to support reserved Switch binding used to maintain the state of a switch so it can be viewed by all other neutron processes. There's also the case of a reserved port binding to keep switch information on a given interface. The values of these arguments are as follows: :param vlan_id: 0 :param switch_ip: ip address of the switch :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 :param port_id: switch-state of ACTIVE, RESTORE_S1, RESTORE_S2, INACTIVE : port-expected port_id
3.330626
3.56302
0.934776
def add_reserved_switch_binding(switch_ip, state):
    """Add a reserved switch binding.

    The port_id column is overloaded to carry the switch state.
    """
    add_nexusport_binding(
        state,
        const.NO_VLAN_OR_VNI_ID,
        const.NO_VLAN_OR_VNI_ID,
        switch_ip,
        const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1)
Add a reserved switch binding.
9.52094
9.686724
0.982885
# overload port_id to contain switch state update_reserved_binding( const.NO_VLAN_OR_VNI_ID, switch_ip, const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1, state)
def update_reserved_switch_binding(switch_ip, state)
Update a reserved switch binding.
15.092273
15.575893
0.968951
LOG.debug("add_nexusport_binding() called") session = bc.get_writer_session() binding = nexus_models_v2.NexusPortBinding(port_id=port_id, vlan_id=vlan_id, vni=vni, switch_ip=switch_ip, instance_id=instance_id, is_native=is_native, channel_group=ch_grp) session.add(binding) session.flush() return binding
def add_nexusport_binding(port_id, vlan_id, vni, switch_ip, instance_id, is_native=False, ch_grp=0)
Adds a nexusport binding.
2.167875
2.138576
1.0137
def remove_nexusport_binding(port_id, vlan_id, vni, switch_ip, instance_id):
    """Removes a nexusport binding.

    Deletes every row matching the filter and returns the deleted rows.
    """
    LOG.debug("remove_nexusport_binding() called")
    session = bc.get_writer_session()
    rows = _lookup_all_nexus_bindings(
        session=session, vlan_id=vlan_id, vni=vni,
        switch_ip=switch_ip, port_id=port_id, instance_id=instance_id)
    for row in rows:
        session.delete(row)
    session.flush()
    return rows
Removes a nexusport binding.
2.710407
2.745982
0.987045
if not new_vlan_id: LOG.warning("update_nexusport_binding called with no vlan") return LOG.debug("update_nexusport_binding called") session = bc.get_writer_session() binding = _lookup_one_nexus_binding(session=session, port_id=port_id) binding.vlan_id = new_vlan_id session.merge(binding) session.flush() return binding
def update_nexusport_binding(port_id, new_vlan_id)
Updates nexusport binding.
2.911277
2.863865
1.016555
LOG.debug("remove_all_nexusport_bindings() called") session = bc.get_writer_session() session.query(nexus_models_v2.NexusPortBinding).delete() session.flush()
def remove_all_nexusport_bindings()
Removes all nexusport bindings.
4.280177
3.960137
1.080815
LOG.debug("get_nexusvm_bindings() called") return _lookup_all_nexus_bindings(instance_id=instance_id, vlan_id=vlan_id)
def get_nexusvm_bindings(vlan_id, instance_id)
Lists nexusvm bindings.
4.333057
4.067116
1.065388
LOG.debug("get_port_vlan_switch_binding() called") return _lookup_all_nexus_bindings(port_id=port_id, switch_ip=switch_ip, vlan_id=vlan_id)
def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip)
Lists bindings for the given port, vlan and switch.
3.73739
3.071851
1.216657
LOG.debug("get_port_switch_bindings() called, " "port:'%(port_id)s', switch:'%(switch_ip)s'", {'port_id': port_id, 'switch_ip': switch_ip}) try: return _lookup_all_nexus_bindings(port_id=port_id, switch_ip=switch_ip) except c_exc.NexusPortBindingNotFound: pass
def get_port_switch_bindings(port_id, switch_ip)
List all vm/vlan bindings on a Nexus switch port.
2.77066
2.77618
0.998012
def _lookup_nexus_bindings(query_type, session=None, **bfilter):
    """Look up 'query_type' Nexus bindings matching the filter.

    :param query_type: 'all', 'one' or 'first'
    :param session: db session
    :param bfilter: filter for bindings query
    :returns: bindings if query gave a result, else
              raise NexusPortBindingNotFound.
    """
    if session is None:
        session = bc.get_reader_session()
    query = session.query(
        nexus_models_v2.NexusPortBinding).filter_by(**bfilter)
    try:
        result = getattr(query, query_type)()
    except sa_exc.NoResultFound:
        result = None
    # An empty 'all' result is falsy and also treated as not-found.
    if result:
        return result
    raise c_exc.NexusPortBindingNotFound(**bfilter)
Look up 'query_type' Nexus bindings matching the filter. :param query_type: 'all', 'one' or 'first' :param session: db session :param bfilter: filter for bindings query :returns: bindings if query gave a result, else raise NexusPortBindingNotFound.
3.242469
2.909008
1.114631
LOG.debug("add_nexusnve_binding() called") session = bc.get_writer_session() binding = nexus_models_v2.NexusNVEBinding(vni=vni, switch_ip=switch_ip, device_id=device_id, mcast_group=mcast_group) session.add(binding) session.flush() return binding
def add_nexusnve_binding(vni, switch_ip, device_id, mcast_group)
Adds a nexus nve binding.
2.331349
2.30291
1.012349
LOG.debug("remove_nexusnve_binding() called") session = bc.get_writer_session() binding = (session.query(nexus_models_v2.NexusNVEBinding). filter_by(vni=vni, switch_ip=switch_ip, device_id=device_id).one()) if binding: session.delete(binding) session.flush() return binding
def remove_nexusnve_binding(vni, switch_ip, device_id)
Remove the nexus nve binding.
2.630441
2.703235
0.973071
def remove_all_nexusnve_bindings():
    """Removes all nexusnve bindings."""
    # Bug fix: the debug message previously named the wrong function
    # ("remove_all_nexusport_bindings() called").
    LOG.debug("remove_all_nexusnve_bindings() called")
    session = bc.get_writer_session()
    session.query(nexus_models_v2.NexusNVEBinding).delete()
    session.flush()
Removes all nexusnve bindings.
5.388842
5.00199
1.07734
LOG.debug("get_nve_vni_switch_bindings() called") session = bc.get_reader_session() try: return (session.query(nexus_models_v2.NexusNVEBinding). filter_by(vni=vni, switch_ip=switch_ip).all()) except sa_exc.NoResultFound: return None
def get_nve_vni_switch_bindings(vni, switch_ip)
Return the nexus nve binding(s) per switch.
3.00364
2.747939
1.093052
if session is None: session = bc.get_reader_session() query_method = getattr(session.query( nexus_models_v2.NexusHostMapping).filter_by(**bfilter), query_type) try: mappings = query_method() if mappings: return mappings except sa_exc.NoResultFound: pass raise c_exc.NexusHostMappingNotFound(**bfilter)
def _lookup_host_mappings(query_type, session=None, **bfilter)
Look up 'query_type' Nexus mappings matching the filter. :param query_type: 'all', 'one' or 'first' :param session: db session :param bfilter: filter for mappings query :returns: mappings if query gave a result, else raise NexusHostMappingNotFound.
3.344113
2.901472
1.152557
LOG.debug("add_nexusport_binding() called") session = bc.get_writer_session() mapping = nexus_models_v2.NexusHostMapping(host_id=host_id, if_id=interface, switch_ip=nexus_ip, ch_grp=ch_grp, is_static=is_static) try: session.add(mapping) session.flush() except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception() as ctxt: if is_static: ctxt.reraise = False LOG.debug("Duplicate static entry encountered " "host=%(host)s, if=%(if)s, ip=%(ip)s", {'host': host_id, 'if': interface, 'ip': nexus_ip}) return mapping
def add_host_mapping(host_id, nexus_ip, interface, ch_grp, is_static)
Add Host to interface mapping entry into mapping data base. :param host_id: is the name of the host to add :param interface: is the interface for this host :param nexus_ip: is the ip addr of the nexus switch for this interface :param ch_grp: is the port channel this interface belongs to :param is_static: whether this is from conf file or learned from baremetal.
2.593892
2.767703
0.9372
LOG.debug("update_host_mapping called") session = bc.get_writer_session() mapping = _lookup_one_host_mapping( session=session, host_id=host_id, if_id=interface, switch_ip=nexus_ip) mapping.ch_grp = new_ch_grp session.merge(mapping) session.flush() return mapping
def update_host_mapping(host_id, interface, nexus_ip, new_ch_grp)
Change channel_group in host/interface mapping data base.
3.728775
3.663207
1.017899
LOG.debug("remove_host_mapping() called") session = bc.get_writer_session() try: mapping = _lookup_one_host_mapping( session=session, if_id=interface, switch_ip=nexus_ip) session.delete(mapping) session.flush() except c_exc.NexusHostMappingNotFound: pass
def remove_host_mapping(interface, nexus_ip)
Remove host to interface mapping entry from mapping data base.
5.083319
5.031516
1.010296
def remove_all_static_host_mappings():
    """Remove all entries defined in config file from mapping data base."""
    # Bug fix: the debug message previously named the wrong function
    # ("remove_host_mapping() called").
    LOG.debug("remove_all_static_host_mappings() called")
    session = bc.get_writer_session()
    try:
        mappings = _lookup_all_host_mappings(session=session, is_static=True)
        for host in mappings:
            session.delete(host)
        session.flush()
    except c_exc.NexusHostMappingNotFound:
        # Nothing static configured; nothing to remove.
        pass
Remove all entries defined in config file from mapping data base.
5.840266
5.601339
1.042655
if session is None: session = bc.get_reader_session() if order: query_method = getattr(session.query( nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by( order), query_type) else: query_method = getattr(session.query( nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type) try: vpcs = query_method() if vpcs: return vpcs except sa_exc.NoResultFound: pass raise c_exc.NexusVPCAllocNotFound(**bfilter)
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter)
Look up 'query_type' Nexus VPC Allocs matching the filter. :param query_type: 'all', 'one' or 'first' :param session: db session :param order: select what field to order data :param bfilter: filter for mappings query :returns: VPCs if query gave a result, else raise NexusVPCAllocNotFound.
2.73715
2.220288
1.232791
if session is None: session = bc.get_reader_session() try: res = session.query( func.count(nexus_models_v2.NexusVPCAlloc.vpc_id), func.min(nexus_models_v2.NexusVPCAlloc.vpc_id), func.max(nexus_models_v2.NexusVPCAlloc.vpc_id), ).filter(nexus_models_v2.NexusVPCAlloc.switch_ip == bfilter['switch_ip']).one() count = res[0] sw_min = res[1] sw_max = res[2] return count, sw_min, sw_max except sa_exc.NoResultFound: pass raise c_exc.NexusVPCAllocNotFound(**bfilter)
def _lookup_vpc_count_min_max(session=None, **bfilter)
Look up count/min/max Nexus VPC Allocs for given switch. :param session: db session :param bfilter: filter for mappings query :returns: number of VPCs and min value if query gave a result, else raise NexusVPCAllocNotFound.
2.903746
2.208971
1.314524
'''Get intersect list of free vpcids in list of switches.''' session = bc.get_reader_session() prev_view = aliased(nexus_models_v2.NexusVPCAlloc) query = session.query(prev_view.vpc_id) prev_swip = switch_ip_list[0] for ip in switch_ip_list[1:]: cur_view = aliased(nexus_models_v2.NexusVPCAlloc) cur_swip = ip query = query.join(cur_view, sa.and_( prev_view.switch_ip == prev_swip, prev_view.active == False, # noqa cur_view.switch_ip == cur_swip, cur_view.active == False, # noqa prev_view.vpc_id == cur_view.vpc_id)) prev_view = cur_view prev_swip = cur_swip unique_vpcids = query.all() shuffle(unique_vpcids) return unique_vpcids
def _get_free_vpcids_on_switches(switch_ip_list)
Get intersect list of free vpcids in list of switches.
3.426439
3.140696
1.090981
LOG.debug("init_vpc_entries() called") if not vpc_list: return session = bc.get_writer_session() for vpc in vpc_list: vpc_alloc = nexus_models_v2.NexusVPCAlloc( switch_ip=nexus_ip, vpc_id=vpc, learned=False, active=False) session.add(vpc_alloc) session.flush()
def init_vpc_entries(nexus_ip, vpc_list)
Initialize switch/vpc entries in vpc alloc data base. param: nexus_ip ip addr of the nexus switch for this interface param: vpc_list list of vpc integers to create
4.662673
3.747136
1.24433
LOG.debug("update_vpc_entry called") session = bc.get_writer_session() with session.begin(): for n_ip in nexus_ips: flipit = not active x = session.execute( sa.update(nexus_models_v2.NexusVPCAlloc).values({ 'learned': learned, 'active': active}).where(sa.and_( nexus_models_v2.NexusVPCAlloc.switch_ip == n_ip, nexus_models_v2.NexusVPCAlloc.vpc_id == vpc_id, nexus_models_v2.NexusVPCAlloc.active == flipit ))) if x.rowcount != 1: raise c_exc.NexusVPCAllocNotFound( switch_ip=n_ip, vpc_id=vpc_id, active=active)
def update_vpc_entry(nexus_ips, vpc_id, learned, active)
Change active state in vpc_allocate data base.
3.057982
2.929809
1.043748
LOG.debug("alloc_vpc() called") vpc_id = 0 intersect = _get_free_vpcids_on_switches(nexus_ips) for intersect_tuple in intersect: try: update_vpc_entry(nexus_ips, intersect_tuple.vpc_id, False, True) vpc_id = intersect_tuple.vpc_id break except Exception: LOG.exception( "This exception is expected if another controller " "beat us to vpcid %(vpcid)s for nexus %(ip)s", {'vpcid': intersect_tuple.vpc_id, 'ip': ', '.join(map(str, nexus_ips))}) return vpc_id
def alloc_vpcid(nexus_ips)
Allocate a vpc id for the given list of switch_ips.
4.790106
4.5711
1.047911
def free_vpcid_for_switch_list(vpc_id, nexus_ips):
    """Free a vpc id for the given list of switch_ips."""
    LOG.debug("free_vpcid_for_switch_list() called")
    # A vpc_id of 0 means nothing was allocated.
    if vpc_id == 0:
        return
    update_vpc_entry(nexus_ips, vpc_id, False, False)
Free a vpc id for the given list of switch_ips.
4.460057
4.617698
0.965862
def free_vpcid_for_switch(vpc_id, nexus_ip):
    """Free a vpc id for the given switch_ip."""
    LOG.debug("free_vpcid_for_switch() called")
    # A vpc_id of 0 means nothing was allocated.
    if vpc_id == 0:
        return
    update_vpc_entry([nexus_ip], vpc_id, False, False)
Free a vpc id for the given switch_ip.
5.853302
5.851211
1.000357
LOG.debug("delete_vpcid_for_switch called") session = bc.get_writer_session() vpc = _lookup_one_vpc_allocs(vpc_id=vpc_id, switch_ip=switch_ip, active=False) session.delete(vpc) session.flush()
def delete_vpcid_for_switch(vpc_id, switch_ip)
Removes unused vpcid for a switch. :param vpc_id: vpc id to remove :param switch_ip: ip address of the switch
5.463362
5.437217
1.004809
exts = [] parent = dict(member_name="agent", collection_name="agents") controller = resource.Resource(HostingDeviceSchedulerController(), cb_faults.FAULT_MAP) exts.append(extensions.ResourceExtension(CFG_AGENT_HOSTING_DEVICES, controller, parent)) parent = dict(member_name=ciscohostingdevicemanager.DEVICE, collection_name=ciscohostingdevicemanager.DEVICES) controller = resource.Resource( CfgAgentsHandlingHostingDeviceController(), cb_faults.FAULT_MAP) exts.append(extensions.ResourceExtension(HOSTING_DEVICE_CFG_AGENTS, controller, parent, PATH_PREFIX)) return exts
def get_resources(cls)
Returns Ext Resources.
6.5343
6.121295
1.06747
def create_process(cmd, root_helper=None, addl_env=None, log_output=True):
    """Create a process object for the given command.

    :param cmd: list of command arguments.
    :param root_helper: optional helper command (e.g. sudo) prepended
        after being split with shlex.
    :param addl_env: optional dict of extra environment variables merged
        over a copy of the current environment.
    :param log_output: when True, log the command being run.
    :returns: tuple of the process object and the list of command
        arguments used to create it.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    # Bug fix: materialize the map() iterator.  Leaving it lazy meant the
    # log line printed an unhelpful '<map object ...>' and the command
    # sequence could not safely be iterated more than once.
    cmd = list(map(str, cmd))
    if log_output:
        LOG.info("Running command: %s", cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           env=env)
    return obj, cmd
Create a process object for the given command. The return value will be a tuple of the process object and the list of command arguments used to create it.
2.259328
2.476437
0.91233
def is_intf_up(intf):
    """Function to check if a interface is up.

    Reads /sys/class/net/<intf>/operstate and returns True only when the
    state is 'up'; missing interface or any read error yields False.
    """
    sysfs_dir = '/'.join(('/sys/class/net', intf))
    if not os.path.exists(sysfs_dir):
        LOG.error("Unable to get interface %(intf)s, Interface dir "
                  "%(dir)s does not exist",
                  {'intf': intf, 'dir': sysfs_dir})
        return False
    try:
        with open('/'.join((sysfs_dir, 'operstate')), 'r') as state_fd:
            return state_fd.read().strip('\n') == 'up'
    except Exception as exc:
        LOG.error("Exception in reading %s", str(exc))
        return False
Function to check if a interface is up.
2.60904
2.617372
0.996817
intf_list = [] base_dir = '/sys/class/net' dir_exist = os.path.exists(base_dir) if not dir_exist: LOG.error("Unable to get interface list :Base dir %s does not " "exist", base_dir) return intf_list dir_cont = os.listdir(base_dir) for subdir in dir_cont: dev_dir = base_dir + '/' + subdir + '/' + 'device' dev_exist = os.path.exists(dev_dir) if dev_exist: oper_state = is_intf_up(subdir) if oper_state is True: intf_list.append(subdir) else: LOG.info("Dev dir %s does not exist, not physical intf", dev_dir) return intf_list
def get_all_run_phy_intf()
Retrieve all physical interfaces that are operationally up.
2.800925
2.760655
1.014587
def check_vnic_type_and_vendor_info(self, vnic_type, profile):
    """Checks if this vnic_type and vendor device info are supported.

    Returns True if:
    1. the port vnic_type is direct or macvtap and
    2. the vendor_id and product_id of the port is supported by this MD
    Useful in determining if this MD should bind the current port.
    """
    if vnic_type not in self.supported_sriov_vnic_types:
        LOG.info('Non SR-IOV vnic_type: %s.', vnic_type)
        return False
    # No binding profile means there is no vendor info to inspect.
    return bool(profile) and self._check_for_supported_vendor(profile)
Checks if this vnic_type and vendor device info are supported. Returns True if: 1. the port vnic_type is direct or macvtap and 2. the vendor_id and product_id of the port is supported by this MD Useful in determining if this MD should bind the current port.
3.88301
3.906827
0.993904
vendor_info = profile.get('pci_vendor_info') if not vendor_info: return False if vendor_info not in self.supported_pci_devs: return False return True
def _check_for_supported_vendor(self, profile)
Checks if the port belongs to a supported vendor. Returns True for supported_pci_devs.
3.760791
2.903005
1.295482
# Check if SSL certificate checking has been disabled. # If so, warn the user before proceeding. if not CONF.ml2_cisco_ucsm.ucsm_https_verify: LOG.warning(const.SSL_WARNING) # Monkey patch the UCS sdk version of urllib2 to disable # https verify if required. from networking_cisco.ml2_drivers.ucsm import ucs_urllib2 ucsmsdkhandle = importutils.import_module('UcsSdk.UcsHandle') ucsmsdkhandle.urllib2 = ucs_urllib2 ucsmsdk = importutils.import_module('UcsSdk') return ucsmsdk
def _import_ucsmsdk(self)
Imports the Ucsm SDK module. This module is not installed as part of the normal Neutron distributions. It is imported dynamically in this module so that the import can be mocked, allowing unit testing without requiring the installation of UcsSdk.
5.830048
5.709992
1.021026
try: resolved_dest = handle.ConfigResolveDn(service_profile_mo.PnDn) server_list = resolved_dest.OutConfig.GetChild() if not server_list: return "" return server_list[0].Name except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)
def _get_server_name(self, handle, service_profile_mo, ucsm_ip)
Get the contents of the 'Name' field associated with UCS Server. When a valid connection handle to UCS Manager is handed in, the Name field associated with a UCS Server is returned.
7.470916
7.196132
1.038185
ucsm_ips = list(CONF.ml2_cisco_ucsm.ucsms) for ucsm_ip in ucsm_ips: with self.ucsm_connect_disconnect(ucsm_ip) as handle: try: sp_list_temp = handle.ConfigResolveClass('lsServer', None, inHierarchical=False) if sp_list_temp and sp_list_temp.OutConfigs is not None: sp_list = sp_list_temp.OutConfigs.GetChild() or [] for sp in sp_list: if sp.PnDn: server_name = self._get_server_name(handle, sp, ucsm_ip) if (server_name and not sp.OperSrcTemplName): LOG.debug('Server %s info retrieved ' 'from UCSM %s', server_name, ucsm_ip) key = (ucsm_ip, server_name) self.ucsm_sp_dict[key] = str(sp.Dn) self.ucsm_host_dict[server_name] = ucsm_ip except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)
def _create_ucsm_host_to_service_profile_mapping(self)
Reads list of Service profiles and finds associated Server.
4.322835
4.182076
1.033658
vlan_name = self.make_vlan_name(vlan_id) vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX + vlan_name) try: handle.StartTransaction() vp1 = handle.GetManagedObject( None, self.ucsmsdk.FabricLanCloud.ClassId(), {self.ucsmsdk.FabricLanCloud.DN: const.VLAN_PATH}) if not vp1: LOG.warning('UCS Manager network driver Vlan Profile ' 'path at %s missing', const.VLAN_PATH) return False # Create a vlan profile with the given vlan_id vp2 = handle.AddManagedObject( vp1, self.ucsmsdk.FabricVlan.ClassId(), {self.ucsmsdk.FabricVlan.COMPRESSION_TYPE: const.VLAN_COMPRESSION_TYPE, self.ucsmsdk.FabricVlan.DN: vlan_profile_dest, self.ucsmsdk.FabricVlan.SHARING: const.NONE, self.ucsmsdk.FabricVlan.PUB_NW_NAME: "", self.ucsmsdk.FabricVlan.ID: str(vlan_id), self.ucsmsdk.FabricVlan.MCAST_POLICY_NAME: "", self.ucsmsdk.FabricVlan.NAME: vlan_name, self.ucsmsdk.FabricVlan.DEFAULT_NET: "no"}) handle.CompleteTransaction() if vp2: LOG.debug('UCS Manager network driver Created Vlan ' 'Profile %s at %s', vlan_name, vlan_profile_dest) return True except Exception as e: return self._handle_ucsm_exception(e, 'Vlan Profile', vlan_name, ucsm_ip)
def _create_vlanprofile(self, handle, vlan_id, ucsm_ip)
Creates VLAN profile to be associated with the Port Profile.
3.109555
3.146004
0.988414
ucsm_ip = self.get_ucsm_ip_for_host(host_id) if not ucsm_ip: LOG.info('UCS Manager network driver does not have UCSM IP ' 'for Host_id %s', str(host_id)) return False with self.ucsm_connect_disconnect(ucsm_ip) as handle: # Create Vlan Profile if not self._create_vlanprofile(handle, vlan_id, ucsm_ip): LOG.error('UCS Manager network driver failed to create ' 'Vlan Profile for vlan %s', str(vlan_id)) return False if trunk_vlans: for vlan in trunk_vlans: if not self._create_vlanprofile(handle, vlan, ucsm_ip): LOG.error('UCS Manager network driver failed to ' 'create Vlan Profile for vlan %s', vlan) return False qos_policy = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sriov_qos_policy if qos_policy: LOG.debug('UCS Manager Network driver applying QoS Policy ' '%(qos)s to Port Profile %(port_profile)s', {'qos': qos_policy, 'port_profile': profile_name}) # Create Port Profile if not self._create_port_profile(handle, profile_name, vlan_id, vnic_type, ucsm_ip, trunk_vlans, qos_policy): LOG.error('UCS Manager network driver failed to create ' 'Port Profile %s', profile_name) return False return True
def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id, trunk_vlans)
Top level method to create Port Profiles on the UCS Manager. Calls all the methods responsible for the individual tasks that ultimately result in the creation of the Port Profile on the UCS Manager.
2.378867
2.42696
0.980184
virtio_port_list = ( CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports) eth_port_paths = ["%s%s" % (service_profile, ep) for ep in virtio_port_list] vlan_name = self.make_vlan_name(vlan_id) try: handle.StartTransaction() obj = handle.GetManagedObject( None, self.ucsmsdk.LsServer.ClassId(), {self.ucsmsdk.LsServer.DN: service_profile}) if not obj: LOG.debug('UCS Manager network driver could not find ' 'Service Profile %s in UCSM %s', service_profile, ucsm_ip) return False for eth_port_path in eth_port_paths: eth = handle.GetManagedObject( obj, self.ucsmsdk.VnicEther.ClassId(), {self.ucsmsdk.VnicEther.DN: eth_port_path}, True) if eth: vlan_path = (eth_port_path + const.VLAN_PATH_PREFIX + vlan_name) eth_if = handle.AddManagedObject(eth, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_path, self.ucsmsdk.VnicEtherIf.NAME: vlan_name, self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True) if not eth_if: LOG.debug('UCS Manager network driver could not ' 'update Service Profile %s with vlan %d', service_profile, vlan_id) return False else: LOG.debug('UCS Manager network driver did not find ' 'ethernet port at %s', eth_port_path) handle.CompleteTransaction() return True except Exception as e: return self._handle_ucsm_exception(e, 'Service Profile', vlan_name, ucsm_ip)
def _update_service_profile(self, handle, service_profile, vlan_id, ucsm_ip)
Updates Service Profile on the UCS Manager. Each of the ethernet ports on the Service Profile representing the UCS Server, is updated with the VLAN profile corresponding to the vlan_id passed in.
2.829095
2.819721
1.003324
ucsm_ip = self.get_ucsm_ip_for_host(host_id) if not ucsm_ip: LOG.info('UCS Manager network driver does not have UCSM IP ' 'for Host_id %s', str(host_id)) return False service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id)) if service_profile: LOG.debug('UCS Manager network driver Service Profile : %s', service_profile) else: LOG.info('UCS Manager network driver does not support ' 'Host_id %s', host_id) return False with self.ucsm_connect_disconnect(ucsm_ip) as handle: # Create Vlan Profile if not self._create_vlanprofile(handle, vlan_id, ucsm_ip): LOG.error('UCS Manager network driver failed to create ' 'Vlan Profile for vlan %s', str(vlan_id)) return False # Update Service Profile if not self._update_service_profile(handle, service_profile, vlan_id, ucsm_ip): LOG.error('UCS Manager network driver failed to update ' 'Service Profile %(service_profile)s in UCSM ' '%(ucsm_ip)s', {'service_profile': service_profile, 'ucsm_ip': ucsm_ip}) return False return True
def update_serviceprofile(self, host_id, vlan_id)
Top level method to update Service Profiles on UCS Manager. Calls all the methods responsible for the individual tasks that ultimately result in a vlan_id getting programed on a server's ethernet ports and the Fabric Interconnect's network ports.
2.455843
2.521883
0.973813
ucsm_ip = self.get_ucsm_ip_for_host(host_id) if not ucsm_ip: LOG.info('UCS Manager network driver does not have UCSM IP ' 'for Host_id %s', str(host_id)) return False vlan_name = self.make_vlan_name(vlan_id) with self.ucsm_connect_disconnect(ucsm_ip) as handle: # Create Vlan Profile if not self._create_vlanprofile(handle, vlan_id, ucsm_ip): LOG.error('UCS Manager network driver failed to create ' 'Vlan Profile for vlan %s', vlan_id) return False try: LOG.debug('VNIC Template Path: %s', vnic_template_path) vnic_template_full_path = (vnic_template_path + const.VNIC_TEMPLATE_PREFIX + str(vnic_template)) LOG.debug('VNIC Template Path: %s for physnet %s', vnic_template_full_path, physnet) handle.StartTransaction() mo = handle.GetManagedObject( None, self.ucsmsdk.VnicLanConnTempl.ClassId(), {self.ucsmsdk.VnicLanConnTempl.DN: vnic_template_full_path}, True) if not mo: LOG.error('UCS Manager network driver could ' 'not find VNIC template %s', vnic_template_full_path) return False vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX + vlan_name) LOG.debug('VNIC Template VLAN path: %s', vlan_dn) eth_if = handle.AddManagedObject(mo, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_dn, self.ucsmsdk.VnicEtherIf.NAME: vlan_name, self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True) if not eth_if: LOG.error('UCS Manager network driver could ' 'not add VLAN %(vlan_name)s to VNIC ' 'template %(vnic_template_full_path)s', {'vlan_name': vlan_name, 'vnic_template_full_path': vnic_template_full_path}) return False handle.CompleteTransaction() return True except Exception as e: return self._handle_ucsm_exception(e, 'VNIC Template', vlan_id, ucsm_ip)
def update_vnic_template(self, host_id, vlan_id, physnet, vnic_template_path, vnic_template)
Updates VNIC Template with the vlan_id.
2.521529
2.516076
1.002167
vlan_name = self.make_vlan_name(vlan_id) vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX + vlan_name) try: handle.StartTransaction() obj = handle.GetManagedObject( None, self.ucsmsdk.FabricVlan.ClassId(), {self.ucsmsdk.FabricVlan.DN: vlan_profile_dest}) if obj: handle.RemoveManagedObject(obj) handle.CompleteTransaction() except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigFailed(config=vlan_id, ucsm_ip=ucsm_ip, exc=e)
def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip)
Deletes VLAN Profile from UCS Manager.
4.558576
4.392517
1.037805
port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + port_profile) handle.StartTransaction() # Find port profile on the UCS Manager p_profile = handle.GetManagedObject( None, self.ucsmsdk.VnicProfile.ClassId(), {self.ucsmsdk.VnicProfile.NAME: port_profile, self.ucsmsdk.VnicProfile.DN: port_profile_dest}) if p_profile: handle.RemoveManagedObject(p_profile) else: LOG.warning('UCS Manager network driver did not find ' 'Port Profile %s to delete.', port_profile) handle.CompleteTransaction()
def _delete_port_profile_from_ucsm(self, handle, port_profile, ucsm_ip)
Deletes Port Profile from UCS Manager.
4.672278
4.488514
1.040941
try: self._delete_port_profile_from_ucsm(handle, port_profile, ucsm_ip) except Exception as e: # Add the Port Profile that we could not delete to the Port Profile # delete table. A periodic task will attempt to delete it. LOG.debug('Received Port Profile delete exception %s', e) self.ucsm_db.add_port_profile_to_delete_table(port_profile, ucsm_ip)
def _delete_port_profile(self, handle, port_profile, ucsm_ip)
Calls method to delete Port Profile from UCS Manager. If exception is raised by UCSM, then the PP is added to a DB table. The delete timer thread, tried to delete all PPs added to this table when it wakes up.
4.321809
3.698939
1.168391
service_profile_list = [] for key, value in six.iteritems(self.ucsm_sp_dict): if (ucsm_ip in key) and value: service_profile_list.append(value) if not service_profile_list: # Nothing to do return try: handle.StartTransaction() for service_profile in service_profile_list: virtio_port_list = ( CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports) eth_port_paths = ["%s%s" % (service_profile, ep) for ep in virtio_port_list] # 1. From the Service Profile config, access the # configuration for its ports. # 2. Check if that Vlan has been configured on each port # 3. If Vlan config found, remove it. obj = handle.GetManagedObject( None, self.ucsmsdk.LsServer.ClassId(), {self.ucsmsdk.LsServer.DN: service_profile}) if obj: # Check if this vlan_id has been configured on the # ports in this Service profile for eth_port_path in eth_port_paths: eth = handle.GetManagedObject( obj, self.ucsmsdk.VnicEther.ClassId(), {self.ucsmsdk.VnicEther.DN: eth_port_path}, True) if eth: vlan_name = self.make_vlan_name(vlan_id) vlan_path = eth_port_path + "/if-" + vlan_name vlan = handle.GetManagedObject(eth, self.ucsmsdk.VnicEtherIf.ClassId(), {self.ucsmsdk.VnicEtherIf.DN: vlan_path}) if vlan: # Found vlan config. Now remove it. handle.RemoveManagedObject(vlan) handle.CompleteTransaction() except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigDeleteFailed(config=vlan_id, ucsm_ip=ucsm_ip, exc=e)
def _remove_vlan_from_all_service_profiles(self, handle, vlan_id, ucsm_ip)
Deletes VLAN Profile config from server's ethernet ports.
3.589023
3.527579
1.017418
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN config from all SP Templates that have it.

    Walks every configured Service Profile template for this UCSM,
    looks up the VLAN interface under each of its virtio ethernet
    ports, and removes it where found. All removals happen inside a
    single UCSM transaction.

    :param handle: active UCS Manager connection handle.
    :param vlan_id: VLAN whose template config should be removed.
    :param ucsm_ip: IP address of the UCS Manager.
    :returns: True on success.
    :raises cexc.UcsmConfigDeleteFailed: if any UCSM call fails.
    """
    sp_template_info_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
    vlan_name = self.make_vlan_name(vlan_id)
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
    try:
        handle.StartTransaction()
        # sp_template_info_list is a list of tuples.
        # Each tuple is of the form :
        # (ucsm_ip, sp_template_path, sp_template)
        for sp_template_info in sp_template_info_list:
            sp_template_path = sp_template_info.path
            sp_template = sp_template_info.name
            # Full DN of the Service Profile template on the UCSM.
            sp_template_full_path = (sp_template_path +
                                     const.SP_TEMPLATE_PREFIX +
                                     sp_template)
            obj = handle.GetManagedObject(
                None,
                self.ucsmsdk.LsServer.ClassId(),
                {self.ucsmsdk.LsServer.DN: sp_template_full_path})
            if not obj:
                # Template missing on the device: log and keep going
                # so the remaining templates are still cleaned up.
                LOG.error('UCS Manager network driver could not '
                          'find Service Profile template %s',
                          sp_template_full_path)
                continue
            # DNs of the template's virtio ethernet ports.
            eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
                              for ep in virtio_port_list]
            for eth_port_path in eth_port_paths:
                eth = handle.GetManagedObject(
                    obj, self.ucsmsdk.VnicEther.ClassId(),
                    {self.ucsmsdk.VnicEther.DN: eth_port_path},
                    True)
                if eth:
                    vlan_path = (eth_port_path +
                                 const.VLAN_PATH_PREFIX + vlan_name)
                    vlan = handle.GetManagedObject(
                        eth,
                        self.ucsmsdk.VnicEtherIf.ClassId(),
                        {self.ucsmsdk.VnicEtherIf.DN: vlan_path})
                    if vlan:
                        # Found vlan config. Now remove it.
                        handle.RemoveManagedObject(vlan)
                    else:
                        LOG.debug('UCS Manager network driver did not '
                                  'find VLAN %s at %s',
                                  vlan_name, eth_port_path)
                else:
                    LOG.debug('UCS Manager network driver did not '
                              'find ethernet port at %s', eth_port_path)
        handle.CompleteTransaction()
        return True
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
2.828569
2.793706
1.012479
def _remove_vlan_from_vnic_templates(self, handle, vlan_id, ucsm_ip):
    """Removes VLAN from all VNIC templates that have it enabled.

    :param handle: active UCS Manager connection handle.
    :param vlan_id: VLAN to remove from the VNIC templates.
    :param ucsm_ip: IP address of the UCS Manager.
    :returns: True on success; on error, whatever
        ``_handle_ucsm_exception`` returns.
    """
    ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
    vnic_template_info = ucsm.vnic_template_list.values()
    vlan_name = self.make_vlan_name(vlan_id)

    if not vnic_template_info:
        # Nothing to do
        return

    try:
        handle.StartTransaction()
        for temp_info in vnic_template_info:
            vnic_template = temp_info.template
            vnic_template_path = temp_info.path
            vnic_template_full_path = (vnic_template_path +
                                       const.VNIC_TEMPLATE_PREFIX +
                                       str(vnic_template))
            LOG.debug('vnic_template_full_path: %s',
                      vnic_template_full_path)
            mo = handle.GetManagedObject(
                None,
                self.ucsmsdk.VnicLanConnTempl.ClassId(),
                {self.ucsmsdk.VnicLanConnTempl.DN: (
                    vnic_template_full_path)},
                True)
            if not mo:
                # Fixed: original message ended with a dangling ' at'.
                LOG.error('UCS Manager network driver could not '
                          'find VNIC template %s',
                          vnic_template_full_path)
                continue
            vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
                       vlan_name)
            LOG.debug('VNIC Template VLAN path; %s', vlan_dn)
            eth_if = handle.GetManagedObject(
                mo,
                self.ucsmsdk.VnicEtherIf.ClassId(),
                {self.ucsmsdk.VnicEtherIf.DN: vlan_dn})
            # Collapsed the original 'if not eth_if: ... if eth_if: ...'
            # pair into one if/else.
            if eth_if:
                handle.RemoveManagedObject(eth_if)
            else:
                LOG.error('UCS Manager network driver could not '
                          'delete VLAN %(vlan_name)s from VNIC '
                          'template %(vnic_template_full_path)s',
                          {'vlan_name': vlan_name,
                           'vnic_template_full_path':
                               vnic_template_full_path})
        handle.CompleteTransaction()
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'VNIC Template',
                                           vlan_id, ucsm_ip)
2.75686
2.746744
1.003683
def delete_all_config_for_vlan(self, vlan_id, port_profile, trunk_vlans):
    """Top level method to delete all config for vlan_id.

    :param vlan_id: VLAN whose config should be removed everywhere.
    :param port_profile: port profile to delete, if any (falsy skips).
    :param trunk_vlans: optional iterable of additional VLAN ids whose
        VLAN profiles should also be deleted.
    """
    ucsm_ips = list(CONF.ml2_cisco_ucsm.ucsms)
    for ucsm_ip in ucsm_ips:
        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            LOG.debug('Deleting config for VLAN %d from UCSM %s',
                      vlan_id, ucsm_ip)
            if port_profile:
                self._delete_port_profile(handle, port_profile, ucsm_ip)
            ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
            if ucsm.sp_template_list:
                self._remove_vlan_from_all_sp_templates(handle, vlan_id,
                                                        ucsm_ip)
            if ucsm.vnic_template_list:
                self._remove_vlan_from_vnic_templates(handle, vlan_id,
                                                      ucsm_ip)
            # NOTE(review): with `and`, this fallback also runs when
            # only ONE of the two template lists is configured. If it
            # is meant only for the no-templates case, this should be
            # `or` — confirm intent before changing.
            if not (ucsm.sp_template_list and ucsm.vnic_template_list):
                self._remove_vlan_from_all_service_profiles(handle,
                                                            vlan_id,
                                                            ucsm_ip)
            self._delete_vlan_profile(handle, vlan_id, ucsm_ip)
            if trunk_vlans:
                # Fixed: the original reused `vlan_id` as the loop
                # variable, clobbering the parameter for subsequent
                # UCSM iterations.
                for trunk_vlan in trunk_vlans:
                    self._delete_vlan_profile(handle, trunk_vlan,
                                              ucsm_ip)
2.432693
2.416476
1.006711
def ucs_manager_disconnect(self, handle, ucsm_ip):
    """Disconnects from the UCS Manager.

    After the disconnect, the handle associated with this
    connection is no longer valid.
    """
    try:
        handle.Logout()
    except Exception as err:
        # Wrap the SDK failure in a Neutron-style exception carrying
        # the UCSM address and the original error description.
        raise cexc.UcsmDisconnectFailed(ucsm_ip=ucsm_ip, exc=err)
7.732035
7.455419
1.037103
def add_events(self, **kwargs):
    """Add failure event into the queue.

    :param kwargs: expects ``event_queue`` (an object with ``put()``)
        and ``priority``; silently no-ops when either is missing.
    :raises Exception: re-raises any error hit while enqueueing,
        after logging it.
    """
    event_q = kwargs.get('event_queue')
    pri = kwargs.get('priority')
    # NOTE(review): a falsy priority (0) is treated the same as a
    # missing one and skips the event — confirm 0 is never valid.
    if not event_q or not pri:
        return
    # Defined outside try so the except-clause log can always use it.
    event_type = 'server.failure.recovery'
    try:
        payload = {}
        timestamp = time.ctime()
        data = (event_type, payload)
        event_q.put((pri, timestamp, data))
        LOG.debug('Added failure recovery event to the queue.')
    except Exception as exc:
        LOG.exception('Error: %(exc)s for event %(event)s',
                      {'exc': str(exc), 'event': event_type})
        # Fixed: bare `raise` preserves the original traceback;
        # `raise exc` would restart it from here.
        raise
4.27709
3.781242
1.131134
def init_params(self, protocol_interface, phy_interface):
    """Initializing parameters."""
    # Interfaces this instance operates on.
    self.local_intf = protocol_interface
    self.phy_interface = phy_interface
    self.bond_interface = None
    self.bond_member_ports = None
    # LLDP configuration flag.
    self.lldp_cfgd = False
    # Remote (peer) LLDP/EVB attributes.
    self.remote_evb_cfgd = False
    self.remote_evb_mode = None
    self.remote_mgmt_addr = None
    self.remote_system_desc = None
    self.remote_system_name = None
    self.remote_port = None
    self.remote_chassis_id_mac = None
    self.remote_port_id_mac = None
    # Local LLDP/EVB attributes.
    self.local_evb_cfgd = False
    self.local_evb_mode = None
    self.local_mgmt_address = None
    self.local_system_desc = None
    self.local_system_name = None
    self.local_port = None
    self.local_chassis_id_mac = None
    self.local_port_id_mac = None
    # Bookkeeping.
    self.db_retry_status = False
    self.topo_send_cnt = 0
2.524104
2.496473
1.011068
def cmp_update_bond_intf(self, bond_interface):
    """Update the bond interface and its members.

    Update the bond interface, if this interface is a part of a bond.
    Return True if there's a change.
    """
    # No change: nothing to update.
    if bond_interface == self.bond_interface:
        return False
    self.bond_interface = bond_interface
    self.bond_member_ports = sys_utils.get_member_ports(bond_interface)
    return True
3.590972
3.384933
1.06087
def remote_evb_mode_uneq_store(self, remote_evb_mode):
    """Saves the EVB mode, if it is not the same as stored."""
    if remote_evb_mode == self.remote_evb_mode:
        return False
    self.remote_evb_mode = remote_evb_mode
    return True
1.943822
1.844731
1.053715
def remote_evb_cfgd_uneq_store(self, remote_evb_cfgd):
    """This saves the EVB cfg, if it is not the same as stored."""
    if remote_evb_cfgd == self.remote_evb_cfgd:
        return False
    self.remote_evb_cfgd = remote_evb_cfgd
    return True
1.799477
1.72814
1.04128
def remote_mgmt_addr_uneq_store(self, remote_mgmt_addr):
    """This function saves the MGMT address, if different from stored."""
    if remote_mgmt_addr == self.remote_mgmt_addr:
        return False
    self.remote_mgmt_addr = remote_mgmt_addr
    return True
2.019674
1.844388
1.095038
def remote_sys_desc_uneq_store(self, remote_system_desc):
    """This function saves the system desc, if different from stored."""
    if remote_system_desc == self.remote_system_desc:
        return False
    self.remote_system_desc = remote_system_desc
    return True
2.16249
1.93225
1.119157
def remote_sys_name_uneq_store(self, remote_system_name):
    """This function saves the system name, if different from stored."""
    if remote_system_name == self.remote_system_name:
        return False
    self.remote_system_name = remote_system_name
    return True
2.175895
1.9327
1.125832