code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def list_config_agents_handling_hosting_device(
        self, client, hosting_device_id, **_params):
    """Fetches a list of config agents handling a hosting device."""
    # Build the full resource URL up front, then issue the GET.
    url = ('/dev_mgr/hosting_devices/%s' +
           HOSTING_DEVICE_CFG_AGENTS) % hosting_device_id
    return client.get(url, params=_params)
11.233226
9.772735
1.149445
def _parse_nexus_vni_range(self, tunnel_range):
    """Raise an exception for invalid tunnel range or malformed range."""
    # Each endpoint must be a valid Nexus VNI value.
    for ident in tunnel_range:
        if self._is_valid_nexus_vni(ident):
            continue
        raise exc.NetworkTunnelRangeError(
            tunnel_range=tunnel_range,
            error=_("%(id)s is not a valid Nexus VNI value.") %
            {'id': ident})
    # The range must not be inverted.
    if tunnel_range[1] < tunnel_range[0]:
        raise exc.NetworkTunnelRangeError(
            tunnel_range=tunnel_range,
            error=_("End of tunnel range is less than start of "
                    "tunnel range."))
2.56186
2.478465
1.033648
def sync_allocations(self):
    """Synchronize vxlan_allocations table with configured tunnel ranges.

    Ensures the NexusVxlanAllocation table contains exactly the VNIs in
    the configured tunnel ranges: stale unallocated rows are deleted and
    missing rows are bulk-inserted, all in one transaction.
    """
    # determine current configured allocatable vnis
    vxlan_vnis = set()
    for tun_min, tun_max in self.tunnel_ranges:
        vxlan_vnis |= set(six.moves.range(tun_min, tun_max + 1))
    session = bc.get_writer_session()
    with session.begin(subtransactions=True):
        # remove from table unallocated tunnels not currently allocatable
        # fetch results as list via all() because we'll be iterating
        # through them twice
        allocs = (session.query(nexus_models_v2.NexusVxlanAllocation).
                  with_lockmode("update").all())
        # collect all vnis present in db
        existing_vnis = set(alloc.vxlan_vni for alloc in allocs)
        # collect those vnis that needs to be deleted from db
        vnis_to_remove = [alloc.vxlan_vni for alloc in allocs
                          if (alloc.vxlan_vni not in vxlan_vnis and
                              not alloc.allocated)]
        # Immediately delete vnis in chunks. This leaves no work for
        # flush at the end of transaction
        bulk_size = 100
        chunked_vnis = (vnis_to_remove[i:i + bulk_size] for i in
                        range(0, len(vnis_to_remove), bulk_size))
        for vni_list in chunked_vnis:
            session.query(nexus_models_v2.NexusVxlanAllocation).filter(
                nexus_models_v2.NexusVxlanAllocation.
                vxlan_vni.in_(vni_list)).delete(
                    synchronize_session=False)
        # collect vnis that need to be added
        vnis = list(vxlan_vnis - existing_vnis)
        chunked_vnis = (vnis[i:i + bulk_size] for i in
                        range(0, len(vnis), bulk_size))
        for vni_list in chunked_vnis:
            # Bulk-insert new rows as plain dicts to bypass ORM overhead.
            bulk = [{'vxlan_vni': vni, 'allocated': False}
                    for vni in vni_list]
            session.execute(nexus_models_v2.NexusVxlanAllocation.
                            __table__.insert(), bulk)
3.052741
2.808191
1.087085
def _setup_notification_listener(self, topic_name, url):
    """Setup notification listener for a service.

    Stores the listener on the instance; messages received on
    *topic_name* at *url* are dispatched to DfaNotificationEndpoints
    bound to this object.
    """
    # NOTE(review): "DfaNotifcationListener" is the (misspelled) name of
    # the project class -- do not "correct" it here without renaming the
    # class itself.
    self.notify_listener = rpc.DfaNotifcationListener(
        topic_name, url, rpc.DfaNotificationEndpoints(self))
13.549211
15.944277
0.849785
def callback(self, timestamp, event_type, payload):
    """Callback method for processing events in notification queue.

    :param timestamp: time the message is received.
    :param event_type: event type in the notification queue such as
        identity.project.created, identity.project.deleted.
    :param payload: Contains information of an event
    """
    try:
        LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, '
                  'payload: %(payload)s\n', (
                      {'event': event_type, 'payload': payload}))
        # Map the operation embedded in the event type name to a queue
        # priority; anything unrecognized falls back to delete priority.
        if 'create' in event_type:
            priority = self._create_pri
        elif 'delete' in event_type:
            priority = self._delete_pri
        elif 'update' in event_type:
            priority = self._update_pri
        else:
            priority = self._delete_pri
        self._pq.put((priority, timestamp, (event_type, payload)))
    except Exception as exc:
        LOG.exception('Error: %(err)s for event %(event)s',
                      {'err': str(exc), 'event': event_type})
3.227012
3.242894
0.995103
def event_handler(self):
    """Wait on queue for listening to the events."""
    if self._notify_queue:
        LOG.debug('calling event handler for %s', self)
        self.start()
        self.wait()
    else:
        LOG.error('event_handler: no notification queue for %s',
                  self._service_name)
6.487397
5.398397
1.201726
def set_driver(self, resource):
    """Set the driver for a neutron resource.

    :param resource: Neutron resource in dict format. Expected keys::

        { 'id': <value>,
          'hosting_device': { 'id': <value>, },
          'router_type': {'cfg_agent_driver': <value>, } }

    :returns: driver object
    :raises DriverNotExist: if the configured driver cannot be imported
    :raises DriverNotSetForMissingParameter: if a required key is absent
    """
    try:
        resource_id = resource['id']
        hosting_device = resource['hosting_device']
        hd_id = hosting_device['id']
        if hd_id in self._hosting_device_routing_drivers_binding:
            # Reuse the driver already bound to this hosting device.
            driver = self._hosting_device_routing_drivers_binding[hd_id]
            self._drivers[resource_id] = driver
        else:
            driver_class = resource['router_type']['cfg_agent_driver']
            # save a copy of the obfuscated credentials
            # NOTE(review): dict() raises TypeError when 'credentials'
            # is None/absent -- confirm callers always supply it.
            obfusc_creds = dict(hosting_device.get('credentials'))
            if obfusc_creds:
                # get un-obfuscated password
                real_pw = self._cfg_agent.get_hosting_device_password(
                    obfusc_creds.get('credentials_id'))
                hosting_device['credentials']['password'] = real_pw
            driver = importutils.import_object(driver_class,
                                               **hosting_device)
            self._hosting_device_routing_drivers_binding[hd_id] = driver
            if obfusc_creds:
                # Restore the obfuscated credentials after instantiation
                # so the real password does not linger in the resource.
                hosting_device['credentials'] = obfusc_creds
            self._drivers[resource_id] = driver
        return driver
    except ImportError:
        with excutils.save_and_reraise_exception(reraise=False):
            LOG.exception("Error loading cfg agent driver %(driver)s "
                          "for hosting device template %(t_name)s"
                          "(%(t_id)s)",
                          {'driver': driver_class, 't_id': hd_id,
                           't_name': resource['name']})
            raise cfg_exceptions.DriverNotExist(driver=driver_class)
    except KeyError as e:
        with excutils.save_and_reraise_exception(reraise=False):
            raise cfg_exceptions.DriverNotSetForMissingParameter(e)
3.147786
2.844426
1.106651
def _import_ucsmsdk(self):
    """Imports the ucsmsdk module.

    This module is not installed as part of the normal Neutron
    distributions. It is imported dynamically in this module so that
    the import can be mocked, allowing unit testing without requiring
    the installation of ucsmsdk.

    :returns: a namespace class exposing the SDK entry points used by
        this driver (handle, fabricVlan, vnicProfile, vnicEtherIf,
        vmVnicProfCl)
    """
    # Check if SSL certificate checking has been disabled.
    # If so, warn the user before proceeding.
    if not CONF.ml2_cisco_ucsm.ucsm_https_verify:
        LOG.warning(const.SSL_WARNING)
    # Monkey patch the ucsmsdk version of ssl to enable https_verify if
    # required
    from networking_cisco.ml2_drivers.ucsm import ucs_ssl
    ucs_driver = importutils.import_module('ucsmsdk.ucsdriver')
    ucs_driver.ssl = ucs_ssl

    class ucsmsdk(object):
        # Class attributes hold the dynamically imported SDK classes so
        # callers can use e.g. self.ucsmsdk.handle(...).
        handle = importutils.import_class(
            'ucsmsdk.ucshandle.UcsHandle')
        fabricVlan = importutils.import_class(
            'ucsmsdk.mometa.fabric.FabricVlan.FabricVlan')
        vnicProfile = importutils.import_class(
            'ucsmsdk.mometa.vnic.VnicProfile.VnicProfile')
        vnicEtherIf = importutils.import_class(
            'ucsmsdk.mometa.vnic.VnicEtherIf.VnicEtherIf')
        vmVnicProfCl = importutils.import_class(
            'ucsmsdk.mometa.vm.VmVnicProfCl.VmVnicProfCl')
    return ucsmsdk
3.496112
3.437041
1.017187
def ucs_manager_connect(self, ucsm_ip):
    """Connects to a UCS Manager.

    :param ucsm_ip: IP address of the UCS Manager to log in to
    :returns: a logged-in handle, or None when no credentials are
        configured for *ucsm_ip*
    :raises UcsmConnectFailed: when the login attempt fails
    """
    # Lazily import the SDK the first time a connection is needed.
    if not self.ucsmsdk:
        self.ucsmsdk = self._import_ucsmsdk()
    ucsm = CONF.ml2_cisco_ucsm.ucsms.get(ucsm_ip)
    if not ucsm or not ucsm.ucsm_username or not ucsm.ucsm_password:
        LOG.error('UCS Manager network driver failed to get login '
                  'credentials for UCSM %s', ucsm_ip)
        return None
    handle = self.ucsmsdk.handle(ucsm_ip, ucsm.ucsm_username,
                                 ucsm.ucsm_password)
    try:
        handle.login()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConnectFailed(ucsm_ip=ucsm_ip, exc=e)
    return handle
3.490328
3.484288
1.001733
def _create_ucsm_host_to_service_profile_mapping(self):
    """Reads list of Service profiles and finds associated Server.

    Populates self.ucsm_sp_dict ((ucsm_ip, server) -> SP dn) and
    self.ucsm_host_dict (server -> ucsm_ip) for every UCSM that does
    not have an explicit host list configured.

    :raises UcsmConfigReadFailed: when querying a UCSM fails
    """
    # Get list of UCSMs without host list given in the config
    ucsm_ips = [ip for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
                if not ucsm.ucsm_host_list]
    for ucsm_ip in ucsm_ips:
        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                sp_list = handle.query_classid('lsServer')
                if sp_list is not None:
                    for sp in sp_list:
                        if sp.pn_dn:
                            server_name = handle.query_dn(sp.pn_dn).name
                            # Skip SPs spawned from a template
                            # (oper_src_templ_name set).
                            if (server_name and
                                    not sp.oper_src_templ_name):
                                LOG.debug('Server %s info retrieved '
                                          'from UCSM %s', server_name,
                                          ucsm_ip)
                                key = (ucsm_ip, server_name)
                                self.ucsm_sp_dict[key] = str(sp.dn)
                                self.ucsm_host_dict[server_name] = ucsm_ip
            except Exception as e:
                # Raise a Neutron exception. Include a description of
                # the original exception.
                raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)
4.082227
3.995788
1.021633
def _create_vlanprofile(self, handle, vlan_id, ucsm_ip):
    """Creates VLAN profile to be associated with the Port Profile."""
    vlan_name = self.make_vlan_name(vlan_id)
    vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                         vlan_name)
    try:
        vp1 = handle.query_dn(const.VLAN_PATH)
        if not vp1:
            LOG.warning('UCS Manager network driver Vlan Profile '
                        'path at %s missing', const.VLAN_PATH)
            return False
        # Create a vlan profile with the given vlan_id
        vp2 = self.ucsmsdk.fabricVlan(
            parent_mo_or_dn=vp1,
            name=vlan_name,
            compression_type=const.VLAN_COMPRESSION_TYPE,
            sharing=const.NONE,
            pub_nw_name="",
            id=str(vlan_id),
            mcast_policy_name="",
            default_net="no")
        handle.add_mo(vp2)
        handle.commit()
        if vp2:
            LOG.debug('UCS Manager network driver Created Vlan '
                      'Profile %s at %s', vlan_name, vlan_profile_dest)
            return True
        # NOTE(review): when vp2 is falsy (unlikely -- it is a freshly
        # constructed object) the function falls through and returns
        # None; confirm before making the False return explicit.
    except Exception as e:
        return self._handle_ucsm_exception(e, 'Vlan Profile',
                                           vlan_name, ucsm_ip)
3.78984
3.831928
0.989016
def _update_service_profile(self, handle, service_profile, vlan_id,
                            ucsm_ip):
    """Updates Service Profile on the UCS Manager.

    Each of the ethernet ports on the Service Profile representing
    the UCS Server, is updated with the VLAN profile corresponding
    to the vlan_id passed in.

    :returns: True on success, False when the SP or the update fails
    """
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
    eth_port_paths = ["%s%s" % (service_profile, ep)
                      for ep in virtio_port_list]
    vlan_name = self.make_vlan_name(vlan_id)
    try:
        obj = handle.query_dn(service_profile)
        if not obj:
            LOG.debug('UCS Manager network driver could not find '
                      'Service Profile %s in UCSM %s',
                      service_profile, ucsm_ip)
            return False
        for eth_port_path in eth_port_paths:
            eth = handle.query_dn(eth_port_path)
            if eth:
                eth_if = self.ucsmsdk.vnicEtherIf(
                    parent_mo_or_dn=eth,
                    name=vlan_name,
                    default_net="no")
                handle.add_mo(eth_if)
                # NOTE(review): eth_if is a freshly constructed object
                # and should always be truthy -- confirm this branch is
                # reachable.
                if not eth_if:
                    LOG.debug('UCS Manager network driver could not '
                              'update Service Profile %s with vlan %d',
                              service_profile, vlan_id)
                    return False
            else:
                LOG.debug('UCS Manager network driver did not find '
                          'ethernet port at %s', eth_port_path)
        handle.commit()
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'Service Profile',
                                           vlan_name, ucsm_ip)
3.245501
3.224639
1.00647
def update_vnic_template(self, host_id, vlan_id, physnet,
                         vnic_template_path, vnic_template):
    """Updates VNIC Template with the vlan_id.

    :param host_id: compute host whose UCSM should be updated
    :param vlan_id: VLAN to add to the VNIC template
    :param physnet: physical network name (logged only)
    :param vnic_template_path: DN path containing the template
    :param vnic_template: template name
    :returns: True on success, False otherwise
    """
    ucsm_ip = self.get_ucsm_ip_for_host(host_id)
    if not ucsm_ip:
        LOG.info('UCS Manager network driver does not have UCSM IP '
                 'for Host_id %s', str(host_id))
        return False
    vlan_name = self.make_vlan_name(vlan_id)
    with self.ucsm_connect_disconnect(ucsm_ip) as handle:
        # Create Vlan Profile
        if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
            LOG.error('UCS Manager network driver failed to create '
                      'Vlan Profile for vlan %s', vlan_id)
            return False
        try:
            LOG.debug('VNIC Template Path: %s', vnic_template_path)
            vnic_template_full_path = (vnic_template_path +
                                       const.VNIC_TEMPLATE_PREFIX +
                                       str(vnic_template))
            LOG.debug('VNIC Template Path: %s for physnet %s',
                      vnic_template_full_path, physnet)
            mo = handle.query_dn(vnic_template_full_path)
            if not mo:
                LOG.error('UCS Manager network driver could '
                          'not find VNIC template %s',
                          vnic_template_full_path)
                return False
            vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
                       vlan_name)
            LOG.debug('VNIC Template VLAN path: %s', vlan_dn)
            eth_if = self.ucsmsdk.vnicEtherIf(
                parent_mo_or_dn=mo,
                name=vlan_name,
                default_net="no")
            handle.add_mo(eth_if)
            if not eth_if:
                LOG.error('UCS Manager network driver could '
                          'not add VLAN %(vlan_name)s to VNIC '
                          'template %(vnic_template_full_path)s',
                          {'vlan_name': vlan_name,
                           'vnic_template_full_path':
                               vnic_template_full_path})
                return False
            handle.commit()
            return True
        except Exception as e:
            return self._handle_ucsm_exception(e, 'VNIC Template',
                                               vlan_id, ucsm_ip)
2.586516
2.583846
1.001034
def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN Profile from UCS Manager."""
    profile_dn = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                  self.make_vlan_name(vlan_id))
    try:
        profile_mo = handle.query_dn(profile_dn)
        if profile_mo:
            handle.remove_mo(profile_mo)
            handle.commit()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigFailed(config=vlan_id,
                                    ucsm_ip=ucsm_ip, exc=e)
4.227641
4.142558
1.020539
def _delete_port_profile_from_ucsm(self, handle, port_profile, ucsm_ip):
    """Deletes Port Profile from UCS Manager."""
    # Find port profile on the UCS Manager
    profile_dn = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                  port_profile)
    profile_mo = handle.query_dn(profile_dn)
    if not profile_mo:
        LOG.warning('UCS Manager network driver did not find '
                    'Port Profile %s to delete.', port_profile)
    else:
        handle.remove_mo(profile_mo)
    handle.commit()
5.357034
5.128131
1.044637
def _remove_vlan_from_all_service_profiles(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN Profile config from server's ethernet ports.

    :param handle: open UCSM connection handle
    :param vlan_id: VLAN to remove
    :param ucsm_ip: UCSM whose service profiles are scanned
    :raises UcsmConfigDeleteFailed: on any UCSM error
    """
    service_profile_list = []
    for key, value in six.iteritems(self.ucsm_sp_dict):
        if (ucsm_ip in key) and value:
            service_profile_list.append(value)
    if not service_profile_list:
        # Nothing to do
        return
    # Hoisted out of the loop: the virtio port list and the VLAN name
    # do not depend on the service profile being processed.
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
    vlan_name = self.make_vlan_name(vlan_id)
    try:
        for service_profile in service_profile_list:
            eth_port_paths = ["%s%s" % (service_profile, ep)
                              for ep in virtio_port_list]
            # 1. From the Service Profile config, access the
            #    configuration for its ports.
            # 2. Check if that Vlan has been configured on each port
            # 3. If Vlan config found, remove it.
            obj = handle.query_dn(service_profile)
            if obj:
                # Check if this vlan_id has been configured on the
                # ports in this Service profile
                for eth_port_path in eth_port_paths:
                    eth = handle.query_dn(eth_port_path)
                    if eth:
                        vlan_path = eth_port_path + "/if-" + vlan_name
                        vlan = handle.query_dn(vlan_path)
                        if vlan:
                            # Found vlan config. Now remove it.
                            handle.remove_mo(vlan)
                            handle.commit()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
3.865917
3.79031
1.019947
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN config from all SP Templates that have it.

    :returns: True on success
    :raises UcsmConfigDeleteFailed: on any UCSM error
    """
    sp_template_info_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
    vlan_name = self.make_vlan_name(vlan_id)
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
    try:
        # sp_template_info_list is a list of tuples.
        # Each tuple is of the form :
        # (ucsm_ip, sp_template_path, sp_template)
        for sp_template_info in sp_template_info_list:
            sp_template_path = sp_template_info.path
            sp_template = sp_template_info.name
            sp_template_full_path = (sp_template_path +
                                     const.SP_TEMPLATE_PREFIX +
                                     sp_template)
            obj = handle.query_dn(sp_template_full_path)
            if not obj:
                LOG.error('UCS Manager network driver could not '
                          'find Service Profile template %s',
                          sp_template_full_path)
                continue
            eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
                              for ep in virtio_port_list]
            for eth_port_path in eth_port_paths:
                eth = handle.query_dn(eth_port_path)
                if eth:
                    vlan_path = (eth_port_path +
                                 const.VLAN_PATH_PREFIX + vlan_name)
                    vlan = handle.query_dn(vlan_path)
                    if vlan:
                        # Found vlan config. Now remove it.
                        handle.remove_mo(vlan)
                    else:
                        LOG.debug('UCS Manager network driver did not '
                                  'find VLAN %s at %s', vlan_name,
                                  eth_port_path)
                else:
                    LOG.debug('UCS Manager network driver did not '
                              'find ethernet port at %s', eth_port_path)
        # Commit all removals in one shot after scanning every template.
        handle.commit()
        return True
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
2.881916
2.850493
1.011023
def _remove_vlan_from_vnic_templates(self, handle, vlan_id, ucsm_ip):
    """Removes VLAN from all VNIC templates that have it enabled.

    :returns: True on success, None when there are no templates
    """
    ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
    vnic_template_info = ucsm.vnic_template_list.values()
    vlan_name = self.make_vlan_name(vlan_id)
    if not vnic_template_info:
        # Nothing to do
        return
    try:
        for temp_info in vnic_template_info:
            vnic_template = temp_info.template
            vnic_template_path = temp_info.path
            vnic_template_full_path = (vnic_template_path +
                                       const.VNIC_TEMPLATE_PREFIX +
                                       str(vnic_template))
            LOG.debug('vnic_template_full_path: %s',
                      vnic_template_full_path)
            mo = handle.query_dn(vnic_template_full_path)
            if not mo:
                LOG.error('UCS Manager network driver could '
                          'not find VNIC template %s at',
                          vnic_template_full_path)
                continue
            vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
                       vlan_name)
            LOG.debug('VNIC Template VLAN path; %s', vlan_dn)
            eth_if = handle.query_dn(vlan_dn)
            # NOTE(review): the two ifs below could be an if/else; kept
            # as-is to preserve the original statement order.
            if not eth_if:
                LOG.error('UCS Manager network driver could not '
                          'delete VLAN %(vlan_name)s from VNIC '
                          'template %(vnic_template_full_path)s',
                          {'vlan_name': vlan_name,
                           'vnic_template_full_path':
                               vnic_template_full_path})
            if eth_if:
                handle.remove_mo(eth_if)
        handle.commit()
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'VNIC Template',
                                           vlan_id, ucsm_ip)
2.661371
2.685705
0.990939
def nova_services_up(self):
    """Checks if required Nova services are up and running.

    returns: True if all needed Nova services are up, False otherwise
    """
    needed = {'nova-conductor', 'nova-cert', 'nova-scheduler',
              'nova-compute'}
    try:
        service_list = self._nclient.services.list()
    # There are several individual Nova client exceptions but they have
    # no other common base than Exception, hence the broad catch.
    except Exception as e:
        LOG.error('Failure determining running Nova services: %s', e)
        return False
    running = {svc.binary for svc in service_list
               if svc.status == 'enabled' and svc.state == 'up'}
    # Every needed service must be enabled and up.
    return needed.issubset(running)
6.594686
6.609022
0.997831
def _get_unscheduled_routers(self, plugin, context):
    """Get routers with no agent binding.

    :returns: list of router dicts of namespace router type that have
        no L3 agent binding; empty list when none
    """
    # Newton and earlier passed the arguments in (context, plugin)
    # order; swap to normalize.
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        context, plugin = plugin, context
    # TODO(gongysh) consider the disabled agent's router
    no_agent_binding = ~sql.exists().where(
        bc.Router.id == bc.rb_model.RouterL3AgentBinding.router_id)
    # Modified to only include routers of network namespace type
    ns_routertype_id = plugin.get_namespace_router_type_id(context)
    query = context.session.query(bc.Router.id)
    query = query.join(l3_models.RouterHostingDeviceBinding)
    query = query.filter(
        l3_models.RouterHostingDeviceBinding.router_type_id ==
        ns_routertype_id,
        no_agent_binding)
    unscheduled_router_ids = [router_id_[0] for router_id_ in query]
    if unscheduled_router_ids:
        return plugin.get_routers(
            context, filters={'id': unscheduled_router_ids})
    return []
4.749152
4.557323
1.042093
def _filter_unscheduled_routers(self, plugin, context, routers):
    """Filter from list of routers the ones that are not scheduled.

    Only for release < pike.

    :param routers: list of router dicts to examine
    :returns: the subset of namespace-type routers hosted by no L3 agent
    """
    # Newton and earlier passed the arguments in (context, plugin)
    # order; swap to normalize.
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        context, plugin = plugin, context
    unscheduled_routers = []
    # Hoisted out of the loop: the namespace router type id is
    # invariant across iterations.
    ns_routertype_id = plugin.get_namespace_router_type_id(context)
    for router in routers:
        if router[routertype.TYPE_ATTR] != ns_routertype_id:
            # ignore non-namespace routers
            continue
        l3_agents = plugin.get_l3_agents_hosting_routers(
            context, [router['id']])
        if l3_agents:
            LOG.debug('Router %(router_id)s has already been '
                      'hosted by L3 agent %(agent_id)s',
                      {'router_id': router['id'],
                       'agent_id': l3_agents[0]['id']})
        else:
            unscheduled_routers.append(router)
    return unscheduled_routers
2.865072
2.890664
0.991147
def _get_underscheduled_routers(self, plugin, context):
    """Get namespace routers scheduled to fewer agents than required.

    For release >= pike.

    :returns: list of router dicts that are unscheduled, or HA routers
        scheduled to fewer than the required number of agents
    """
    underscheduled_routers = []
    max_agents_for_ha = plugin.get_number_of_agents_for_scheduling(context)
    # Hoisted out of the loop: the namespace router type id is
    # invariant across iterations.
    ns_routertype_id = plugin.get_namespace_router_type_id(context)
    for router, count in plugin.get_routers_l3_agents_count(context):
        if router[routertype.TYPE_ATTR] != ns_routertype_id:
            # ignore non-namespace routers
            continue
        # Parentheses added for clarity; same and/or precedence as before.
        if (count < 1 or
                (router.get('ha', False) and count < max_agents_for_ha)):
            # Either the router was un-scheduled (scheduled to 0 agents),
            # or it's an HA router and it was under-scheduled (scheduled to
            # less than max_agents_for_ha). Either way, it should be added
            # to the list of routers we want to handle.
            underscheduled_routers.append(router)
    return underscheduled_routers
4.153149
4.189255
0.991381
def auto_schedule_hosting_devices(self, plugin, context, agent_host):
    """Schedules unassociated hosting devices to Cisco cfg agent.

    Schedules hosting devices to agent running on <agent_host>.

    :returns: the cfg agent DB record for <agent_host> when it is
        enabled and alive, otherwise None
    """
    query = context.session.query(bc.Agent)
    query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
                            host=agent_host, admin_state_up=True)
    try:
        cfg_agent_db = query.one()
    except (exc.MultipleResultsFound, exc.NoResultFound):
        # NOTE(review): MultipleResultsFound is reported with the same
        # "no enabled agent" message -- confirm duplicate agent rows per
        # host are genuinely impossible here.
        LOG.debug('No enabled Cisco cfg agent on host %s', agent_host)
        return
    if cfg_agentschedulers_db.CfgAgentSchedulerDbMixin.is_agent_down(
            cfg_agent_db.heartbeat_timestamp):
        LOG.warning('Cisco cfg agent %s is not alive', cfg_agent_db.id)
        return
    return cfg_agent_db
3.927946
3.680235
1.067308
def schedule_hosting_device(self, plugin, context, hosting_device):
    """Selects Cisco cfg agent that will configure <hosting_device>.

    :returns: a randomly chosen active cfg agent, or None when there
        are no active agents
    """
    active_cfg_agents = plugin.get_cfg_agents(context, active=True)
    if not active_cfg_agents:
        LOG.warning('There are no active Cisco cfg agents')
        # No worries, once a Cisco cfg agent is started and
        # announces itself any "dangling" hosting devices
        # will be scheduled to it.
        return
    # Use lazy %-style arguments instead of eager "%" interpolation so
    # the message is only rendered when debug logging is enabled.
    LOG.debug('Randomly selecting a Cisco cfg agent among %d candidates',
              len(active_cfg_agents))
    return random.choice(active_cfg_agents)
5.78946
4.988242
1.160621
def _get_external_network_dict(self, context, port_db):
    """Get external network information

    Get the information about the external network,
    so that it can be used to create the hidden port,
    subnet, and network.

    :returns: (transit network dict, network dict) tuple; ({}, None)
        when the router has no external gateway
    """
    if port_db.device_owner == DEVICE_OWNER_ROUTER_GW:
        network = self._core_plugin.get_network(context,
                                                port_db.network_id)
    else:
        router = self.l3_plugin.get_router(context, port_db.device_id)
        ext_gw_info = router.get(EXTERNAL_GW_INFO)
        if not ext_gw_info:
            return {}, None
        network = self._core_plugin.get_network(
            context, ext_gw_info['network_id'])
    # network names in GBP workflow need to be reduced, since
    # the network may contain UUIDs
    external_network = self.get_ext_net_name(network['name'])
    # TODO(tbachman): see if we can get rid of the default
    # Work on a copy: mutating the looked-up dict in place would
    # pollute self.transit_nets_cfg (or the shared default dict) and
    # leak this call's network_name into later calls.
    transit_net = dict(self.transit_nets_cfg.get(
        external_network) or self._default_ext_dict)
    transit_net['network_name'] = external_network
    return transit_net, network
3.746837
3.814406
0.982286
def apic_driver(self):
    """Get APIC driver

    There are different drivers for the GBP workflow
    and Neutron workflow for APIC. First see if the GBP
    workflow is active, and if so get the APIC driver for it.
    If the GBP service isn't installed, try to get the driver
    from the Neutron (APIC ML2) workflow.

    :returns: the cached APIC policy/mechanism driver object
    :raises AciDriverNoAciDriverInstalledOrConfigured: when neither
        workflow provides an APIC driver
    """
    if not self._apic_driver:
        # Try the GBP workflow first; also bind the matching helper
        # implementations for name/VRF lookups.
        try:
            self._apic_driver = (bc.get_plugin(
                'GROUP_POLICY').policy_driver_manager.
                policy_drivers['apic'].obj)
            self._get_ext_net_name = self._get_ext_net_name_gbp
            self._get_vrf_context = self._get_vrf_context_gbp
        except AttributeError:
            LOG.info("GBP service plugin not present -- will "
                     "try APIC ML2 plugin.")
    if not self._apic_driver:
        # Fall back to the Neutron (APIC ML2) workflow.
        try:
            self._apic_driver = (
                self._core_plugin.mechanism_manager.mech_drivers[
                    'cisco_apic_ml2'].obj)
            self._get_ext_net_name = self._get_ext_net_name_neutron
            self._get_vrf_context = self._get_vrf_context_neutron
        except KeyError:
            LOG.error("APIC ML2 plugin not present: "
                      "no APIC ML2 driver could be found.")
            raise AciDriverNoAciDriverInstalledOrConfigured()
    return self._apic_driver
3.941596
3.46863
1.136355
if subnet['network_id'] == net['id']: return True network = self._core_plugin.get_network( context.elevated(), subnet['network_id']) ext_net_name = network['name'] if (APIC_SNAT_NET + '-') in ext_net_name: # This is APIC ML2 mode -- we need to strip the prefix ext_net_name = ext_net_name[len(APIC_SNAT_NET + '-'):] if net['id'] == ext_net_name: return True return False
def _snat_subnet_for_ext_net(self, context, subnet, net)
Determine if an SNAT subnet is for this external network. This method determines if a given SNAT subnet is intended for the passed external network. For APIC ML2/Neutron workflow, SNAT subnets are created on a separate network from the external network. The association with an external network is made by putting the name of the external network in the name of the SNAT network name, using a well-known prefix.
3.486148
3.134507
1.112184
def extend_hosting_port_info(self, context, port_db, hosting_device,
                             hosting_info):
    """Get the segmentation ID and interface

    This extends the hosting info attribute with the segmentation ID
    and physical interface used on the external router to connect to
    the ACI fabric. The segmentation ID should have been set already
    by the call to allocate_hosting_port, but if it's not present,
    use the value from the port resource.
    """
    if hosting_info.get('segmentation_id') is None:
        LOG.debug('No segmentation ID in hosting_info -- assigning')
        hosting_info['segmentation_id'] = (
            port_db.hosting_info.get('segmentation_id'))
    is_external = (port_db.device_owner == DEVICE_OWNER_ROUTER_GW)
    hosting_info['physical_interface'] = self._get_interface_info(
        hosting_device['id'], port_db.network_id, is_external)
    ext_dict, net = self._get_external_network_dict(context, port_db)
    if is_external and ext_dict:
        # Gateway port: expose the external network parameters plus the
        # VRF and SNAT details obtained from the APIC driver.
        hosting_info['network_name'] = ext_dict['network_name']
        hosting_info['cidr_exposed'] = ext_dict['cidr_exposed']
        hosting_info['gateway_ip'] = ext_dict['gateway_ip']
        details = self.get_vrf_context(context, port_db['device_id'],
                                      port_db)
        router_id = port_db.device_id
        router = self.l3_plugin.get_router(context, router_id)
        # skip routers not created by the user -- they will have
        # empty-string tenant IDs
        if router.get(ROUTER_ROLE_ATTR):
            return
        hosting_info['vrf_id'] = details['vrf_id']
        if ext_dict.get('global_config'):
            hosting_info['global_config'] = (
                ext_dict['global_config'])
        self._add_snat_info(context, router, net, hosting_info)
    else:
        # Internal interface port: only pass through any per-interface
        # configuration from the transit network settings.
        if ext_dict.get('interface_config'):
            hosting_info['interface_config'] = ext_dict['interface_config']
3.009115
2.93402
1.025594
def allocate_hosting_port(self, context, router_id, port_db,
                          network_type, hosting_device_id):
    """Get the VLAN and port for this hosting device

    The VLAN used between the APIC and the external router is stored
    by the APIC driver. This calls into the APIC driver to first get
    the ACI VRF information associated with this port, then uses that
    to look up the VLAN to use for this port to the external router
    (kept as part of the L3 Out policy in ACI).
    """
    # If this is a router interface, the VLAN comes from APIC.
    # If it's the gateway, the VLAN comes from the segment ID
    if port_db.get('device_owner') == DEVICE_OWNER_ROUTER_GW:
        ext_dict, net = self._get_external_network_dict(context, port_db)
        # If an OpFlex network is used on the external network,
        # the actual segment ID comes from the config file
        if net and net.get('provider:network_type') == 'opflex':
            if ext_dict.get('segmentation_id'):
                return {'allocated_port_id': port_db.id,
                        'allocated_vlan': ext_dict['segmentation_id']}
            else:
                raise AciDriverConfigMissingSegmentationId(ext_net=net)
        return super(AciVLANTrunkingPlugDriver, self).allocate_hosting_port(
            context, router_id, port_db, network_type, hosting_device_id)
    # shouldn't happen, but just in case
    if port_db.get('device_owner') != DEVICE_OWNER_ROUTER_INTF:
        return
    # get the external network that this port connects to.
    # if there isn't an external gateway yet on the router,
    # then don't allocate a port
    router = self.l3_plugin.get_router(context, router_id)
    gw_info = router[EXTERNAL_GW_INFO]
    if not gw_info:
        return
    network_id = gw_info.get('network_id')
    networks = self._core_plugin.get_networks(
        context.elevated(), {'id': [network_id]})
    l3out_network = networks[0]
    l3out_name = self.get_ext_net_name(l3out_network['name'])
    # For VLAN apic driver provides VLAN tag
    details = self.get_vrf_context(context, router_id, port_db)
    if details is None:
        LOG.debug('aci_vlan_trunking_driver: No vrf_details')
        return
    vrf_name = details.get('vrf_name')
    vrf_tenant = details.get('vrf_tenant')
    allocated_vlan = self.apic_driver.l3out_vlan_alloc.get_vlan_allocated(
        l3out_name, vrf_name, vrf_tenant=vrf_tenant)
    if allocated_vlan is None:
        if not vrf_tenant:
            # TODO(tbachman): I can't remember why this is here
            return super(AciVLANTrunkingPlugDriver,
                         self).allocate_hosting_port(
                context, router_id, port_db, network_type,
                hosting_device_id
            )
        # Database must have been messed up if this happens ...
        return
    return {'allocated_port_id': port_db.id,
            'allocated_vlan': allocated_vlan}
3.468568
3.395487
1.021523
def _get_ext_net_name_gbp(self, network_name):
    """Get the external network name

    The name of the external network used in the APIC
    configuration file can be different from the name of
    the external network in Neutron, especially using
    the GBP workflow
    """
    # Drop the trailing "-<uuid>" suffix that GBP appends to the name.
    # NOTE(review): assumes UUID_REGEX always matches -- confirm callers
    # never pass a name without a UUID suffix.
    prefix = network_name[:re.search(UUID_REGEX, network_name).start() - 1]
    # BUG FIX: str.strip(APIC_OWNED) strips any *characters* found in
    # APIC_OWNED from both ends of the string, which can eat legitimate
    # leading/trailing characters of the network name.  Remove the
    # APIC_OWNED marker only when it is an actual prefix.
    if prefix.startswith(APIC_OWNED):
        return prefix[len(APIC_OWNED):]
    return prefix
12.348358
13.480124
0.916042
def is_valid_mac(addr):
    """Check the syntax of a given mac address.

    The acceptable format is xx:xx:xx:xx:xx:xx, i.e. exactly six
    colon-separated octets of two hexadecimal digits each.

    :param addr: candidate MAC address string
    :returns: True when addr is a well-formed MAC address, else False
    """
    octets = addr.split(':')
    if len(octets) != 6:
        return False
    hex_digits = '0123456789abcdefABCDEF'
    for octet in octets:
        # BUG FIX: the previous int(octet, 16) > 255 check wrongly
        # accepted values such as '-1', '+f', '0x1' and single-digit
        # octets that do not match the documented xx:xx format.
        # Require exactly two hex digits per octet.
        if (len(octet) != 2 or octet[0] not in hex_digits or
                octet[1] not in hex_digits):
            return False
    return True
2.115443
2.135142
0.990774
def make_cidr(gw, mask):
    """Create network address in CIDR format.

    Return network address for a given gateway address and netmask,
    or None when the inputs cannot be parsed.
    """
    try:
        prefix_len = int(mask)
        # Build the 32-bit netmask from the prefix length.
        netmask = (0xFFFFFFFF << (32 - prefix_len)) & 0xFFFFFFFF
        gw_int = struct.unpack('>L', socket.inet_aton(gw))[0]
        network = socket.inet_ntoa(struct.pack("!I", gw_int & netmask))
        return network + '/' + str(mask)
    except (socket.error, struct.error, ValueError, TypeError):
        return
2.548349
2.62084
0.972341
def find_agent_host_id(this_host):
    """Returns the neutron agent host id for RHEL-OSP6 HA setup.

    Scans /run/resource-agents for a "neutron-scale-<n>" file dropped
    by the resource agent and derives the host id from its numeric
    component; falls back to *this_host* on a malformed file name.
    """
    host_id = this_host
    try:
        for root, dirs, files in os.walk('/run/resource-agents'):
            for fi in files:
                if 'neutron-scale-' in fi:
                    host_id = 'neutron-n-' + fi.split('-')[2]
                    break
            # NOTE(review): returns after the first directory walked --
            # confirm subdirectories never need to be scanned.
            return host_id
        # NOTE(review): an empty walk falls through and returns None,
        # not this_host -- confirm this is intended.
    except IndexError:
        # Malformed "neutron-scale-" file name; keep the fallback.
        return host_id
5.411738
4.910706
1.102029
def _build_credentials(self, nexus_switches):
    """Build credential table for Rest API Client.

    :param nexus_switches: switch config
    :returns credentials: switch credentials list
    """
    creds = {}
    for ip, cfg in nexus_switches.items():
        # Warn loudly whenever certificate checking is turned off.
        if not cfg[const.HTTPS_VERIFY]:
            LOG.warning("HTTPS Certificate verification is "
                        "disabled. Your connection to Nexus "
                        "Switch %(ip)s is insecure.",
                        {'ip': ip})
        creds[ip] = (cfg[const.USERNAME], cfg[const.PASSWORD],
                     cfg[const.HTTPS_VERIFY], cfg[const.HTTPS_CERT],
                     None)
    return creds
3.714598
4.108396
0.904148
def capture_and_print_timeshot(self, start_time, which, other=99,
                               switch="x.x.x.x"):
    """Determine delta, keep track, and print results."""
    elapsed = time.time() - start_time
    stats = self.time_stats.get(which)
    if stats is None:
        # First sample for this operation: seed the running stats.
        stats = {
            "total_time": elapsed,
            "total_count": 1,
            "min": elapsed,
            "max": elapsed}
        self.time_stats[which] = stats
    else:
        stats["total_time"] += elapsed
        stats["total_count"] += 1
        stats["min"] = min(stats["min"], elapsed)
        stats["max"] = max(stats["max"], elapsed)
    LOG.debug("NEXUS_TIME_STATS %(switch)s, pid %(pid)d, tid %(tid)d: "
              "%(which)s_timeout %(curr)f count %(count)d "
              "average %(ave)f other %(other)d min %(min)f max %(max)f",
              {'switch': switch,
               'pid': os.getpid(),
               'tid': threading.current_thread().ident,
               'which': which,
               'curr': elapsed,
               'count': stats["total_count"],
               'ave': stats["total_time"] / stats["total_count"],
               'other': other,
               'min': stats["min"],
               'max': stats["max"]})
1.788083
1.801104
0.992771
def get_interface_switch(self, nexus_host, intf_type, interface):
    """Get the interface data from host.

    :param nexus_host: IP address of Nexus switch
    :param intf_type: String which specifies interface type.
        example: ethernet
    :param interface: String indicating which interface.
        example: 1/19
    :returns response: Returns interface data
    """
    # Physical and port-channel interfaces use different DN forms.
    if intf_type == "ethernet":
        interface_dn = "phys-[eth" + interface + "]"
    else:
        interface_dn = "aggr-[po" + interface + "]"
    started = time.time()
    response = self.client.rest_get(
        snipp.PATH_IF % interface_dn, nexus_host)
    self.capture_and_print_timeshot(started, "getif",
                                    switch=nexus_host)
    LOG.debug("GET call returned interface %(if_type)s %(interface)s "
              "config", {'if_type': intf_type, 'interface': interface})
    return response
6.27968
6.225435
1.008714
result = self.get_interface_switch(nexus_host, intf_type, interface) if_type = 'l1PhysIf' if intf_type == "ethernet" else 'pcAggrIf' if_info = result['imdata'][0][if_type] try: mode_cfg = if_info['attributes']['mode'] except Exception: mode_cfg = None mode_found = (mode_cfg == "trunk") try: vlan_list = if_info['attributes']['trunkVlans'] except Exception: vlan_list = None vlan_configured = (vlan_list != const.UNCONFIGURED_VLAN) return mode_found, vlan_configured
def _get_interface_switch_trunk_present( self, nexus_host, intf_type, interface)
Check if 'switchport trunk' configs present. :param nexus_host: IP address of Nexus switch :param intf_type: String which specifies interface type. example: ethernet :param interface: String indicating which interface. example: 1/19 :returns mode_found: True if 'trunk mode' present :returns vlan_configured: True if trunk allowed vlan list present
3.54263
3.027247
1.170248
if if_type != "ethernet": LOG.error("Unexpected interface type %(iftype)s when " "adding change group", {'iftype': if_type}) return starttime = time.time() path_snip = snipp.PATH_ALL path_interface = "phys-[eth" + port + "]" body_snip = snipp.BODY_ADD_CH_GRP % (ch_grp, ch_grp, path_interface) self.send_edit_string(nexus_host, path_snip, body_snip) self.capture_and_print_timeshot( starttime, "add_ch_group", switch=nexus_host)
def add_ch_grp_to_interface( self, nexus_host, if_type, port, ch_grp)
Applies channel-group n to ethernet interface.
5.427577
5.523718
0.982595
cli_cmds = self._get_user_port_channel_config(nexus_host, vpc_nbr) if cli_cmds: self._send_cli_conf_string(nexus_host, cli_cmds) else: vpc_str = str(vpc_nbr) path_snip = snipp.PATH_ALL body_snip = snipp.BODY_ADD_PORT_CH_P2 % (vpc_str, vpc_str) self.send_edit_string(nexus_host, path_snip, body_snip)
def _apply_user_port_channel_config(self, nexus_host, vpc_nbr)
Adds STP and no lacp suspend config to port channel.
4.721729
4.742059
0.995713
starttime = time.time() vpc_str = str(vpc_nbr) path_snip = snipp.PATH_ALL body_snip = snipp.BODY_ADD_PORT_CH % (vpc_str, vpc_str, vpc_str) self.send_edit_string(nexus_host, path_snip, body_snip) self._apply_user_port_channel_config(nexus_host, vpc_nbr) self.capture_and_print_timeshot( starttime, "create_port_channel", switch=nexus_host)
def create_port_channel(self, nexus_host, vpc_nbr)
Creates port channel n on Nexus switch.
5.510673
5.347096
1.030592
starttime = time.time() path_snip = snipp.PATH_ALL body_snip = snipp.BODY_DEL_PORT_CH % (vpc_nbr) self.send_edit_string(nexus_host, path_snip, body_snip) self.capture_and_print_timeshot( starttime, "delete_port_channel", switch=nexus_host)
def delete_port_channel(self, nexus_host, vpc_nbr)
Deletes delete port channel on Nexus switch.
6.61795
6.308422
1.049066
ch_grp = 0 # channel-group only applied to ethernet, # otherwise, return 0 if intf_type != 'ethernet': return ch_grp match_key = "eth" + interface action = snipp.PATH_GET_PC_MEMBERS starttime = time.time() result = self.client.rest_get(action, nexus_host) self.capture_and_print_timeshot(starttime, "getpc", switch=nexus_host) try: for pcmbr in result['imdata']: mbr_data = pcmbr['pcRsMbrIfs']['attributes'] if mbr_data['tSKey'] == match_key: _, nbr = mbr_data['parentSKey'].split("po") ch_grp = int(nbr) break except Exception: # Valid when there is no channel-group configured. ch_grp = 0 LOG.debug("GET interface %(key)s port channel is %(pc)d", {'key': match_key, 'pc': ch_grp}) return ch_grp
def _get_port_channel_group(self, nexus_host, intf_type, interface)
Look for 'channel-group x' config and return x. :param nexus_host: IP address of Nexus switch :param intf_type: String which specifies interface type. example: ethernet :param interface: String indicating which interface. example: 1/19 :returns pc_group: Returns port channel group if present else 0
6.495403
6.545672
0.99232
if not interfaces: return max_ifs = len(interfaces) starttime = time.time() learned, nexus_ip_list = self._build_host_list_and_verify_chgrp( interfaces) if not nexus_ip_list: return if max_ifs > 1: # update vpc db with learned vpcid or get new one. if learned: ch_grp = interfaces[0][-1] self._configure_learned_port_channel( nexus_ip_list, ch_grp) else: ch_grp = self._get_new_baremetal_portchannel_id(nexus_ip_list) else: ch_grp = 0 for i, (nexus_host, intf_type, nexus_port, is_native, ch_grp_saved) in enumerate(interfaces): if max_ifs > 1: if learned: ch_grp = ch_grp_saved else: self._config_new_baremetal_portchannel( ch_grp, nexus_host, intf_type, nexus_port) self._replace_interface_ch_grp(interfaces, i, ch_grp) # init port-channel instead of the provided ethernet intf_type = 'port-channel' nexus_port = str(ch_grp) else: self._replace_interface_ch_grp(interfaces, i, ch_grp) trunk_mode_present, vlan_present = ( self._get_interface_switch_trunk_present( nexus_host, intf_type, nexus_port)) if not vlan_present: self.send_enable_vlan_on_trunk_int( nexus_host, "", intf_type, nexus_port, False, not trunk_mode_present) elif not trunk_mode_present: LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host, nexus_help.format_interface_name( intf_type, nexus_port)) self.capture_and_print_timeshot( starttime, "init_bmif", switch=nexus_host)
def initialize_baremetal_switch_interfaces(self, interfaces)
Initialize Nexus interfaces and for initial baremetal event. This get/create port channel number, applies channel-group to ethernet interface, and initializes trunking on interface. :param interfaces: Receive a list of interfaces containing: nexus_host: IP address of Nexus switch intf_type: String which specifies interface type. example: ethernet interface: String indicating which interface. example: 1/19 is_native: Whether native vlan must be configured. ch_grp: May replace port channel to each entry. channel number is 0 if none
4.714894
4.202497
1.121927
if not interfaces: return starttime = time.time() if replay: try: vpcs = nxos_db.get_active_switch_vpc_allocs(switch_ip) except cexc.NexusVPCAllocNotFound: vpcs = [] for vpc in vpcs: # if this is an allocated vpc, then recreate it if not vpc.learned: self.create_port_channel(switch_ip, vpc.vpc_id) for i, (nexus_host, intf_type, nexus_port, is_native, ch_grp) in enumerate(interfaces): if replay and ch_grp != 0: try: vpc = nxos_db.get_switch_vpc_alloc(switch_ip, ch_grp) self.add_ch_grp_to_interface( nexus_host, intf_type, nexus_port, ch_grp) except cexc.NexusVPCAllocNotFound: pass # if channel-group exists, switch to port-channel # instead of the provided ethernet interface intf_type = 'port-channel' nexus_port = str(ch_grp) #substitute content of ch_grp no_chgrp_len = len(interfaces[i]) - 1 interfaces[i] = interfaces[i][:no_chgrp_len] + (ch_grp,) trunk_mode_present, vlan_present = ( self._get_interface_switch_trunk_present( nexus_host, intf_type, nexus_port)) if not vlan_present: self.send_enable_vlan_on_trunk_int( nexus_host, "", intf_type, nexus_port, False, not trunk_mode_present) elif not trunk_mode_present: LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host, nexus_help.format_interface_name( intf_type, nexus_port)) self.capture_and_print_timeshot( starttime, "get_allif", switch=nexus_host)
def initialize_all_switch_interfaces(self, interfaces, switch_ip=None, replay=True)
Configure Nexus interface and get port channel number. Called during switch replay or just init if no replay is configured. For latter case, only configured interfaces are affected by this method. During switch replay, the change group from the host mapping data base is used. There is no attempt to relearn port-channel from the Nexus switch. What we last knew it to be will persist. :param interfaces: List of interfaces for a given switch. ch_grp can be altered as last arg to each interface. If no ch_grp, this arg will be zero. :param switch_ip: IP address of Nexus switch :param replay: Whether in replay path
4.564939
4.416839
1.033531
starttime = time.time() response = self.client.rest_get( snipp.PATH_GET_NEXUS_TYPE, nexus_host) self.capture_and_print_timeshot( starttime, "gettype", switch=nexus_host) if response: try: result = response['imdata'][0]["eqptCh"]['attributes']['descr'] except Exception: # Nexus Type is not depended on at this time so it's ok # if can't get the Nexus type. The real purpose # of this method is to determine if the connection is active. result = '' nexus_type = re.findall( "Nexus\s*(\d)\d+\s*[0-9A-Z]+\s*" "[cC]hassis", result) if len(nexus_type) > 0: LOG.debug("GET call returned Nexus type %d", int(nexus_type[0])) return int(nexus_type[0]) else: result = '' LOG.debug("GET call failed to return Nexus type. Received %s.", result) return -1
def get_nexus_type(self, nexus_host)
Given the nexus host, get the type of Nexus switch. :param nexus_host: IP address of Nexus switch :returns: Nexus type
6.417047
6.169565
1.040113
starttime = time.time() if vni: body_snip = snipp.BODY_VXLAN_ALL_INCR % (vlanid, vni) else: body_snip = snipp.BODY_VLAN_ALL_INCR % vlanid conf_str += body_snip + snipp.BODY_VLAN_ALL_CONT self.capture_and_print_timeshot( starttime, "get_create_vlan", switch=nexus_host) return conf_str
def get_create_vlan(self, nexus_host, vlanid, vni, conf_str)
Returns an XML snippet for create VLAN on a Nexus Switch.
4.9621
4.827258
1.027933
starttime = time.time() if not vlanid_range: LOG.warning("Exiting set_all_vlan_states: " "No vlans to configure") return # Eliminate possible whitespace and separate vlans by commas vlan_id_list = re.sub(r'\s', '', vlanid_range).split(',') if not vlan_id_list or not vlan_id_list[0]: LOG.warning("Exiting set_all_vlan_states: " "No vlans to configure") return path_str, body_vlan_all = self.start_create_vlan() while vlan_id_list: rangev = vlan_id_list.pop(0) if '-' in rangev: fr, to = rangev.split('-') max = int(to) + 1 for vlan_id in range(int(fr), max): body_vlan_all = self.get_create_vlan( nexus_host, vlan_id, 0, body_vlan_all) else: body_vlan_all = self.get_create_vlan( nexus_host, rangev, 0, body_vlan_all) body_vlan_all = self.end_create_vlan(body_vlan_all) self.send_edit_string( nexus_host, path_str, body_vlan_all) self.capture_and_print_timeshot( starttime, "set_all_vlan_states", switch=nexus_host)
def set_all_vlan_states(self, nexus_host, vlanid_range)
Set the VLAN states to active.
2.908648
2.889199
1.006732
starttime = time.time() path_snip, body_snip = self.start_create_vlan() body_snip = self.get_create_vlan(nexus_host, vlanid, vni, body_snip) body_snip = self.end_create_vlan(body_snip) self.send_edit_string(nexus_host, path_snip, body_snip) self.capture_and_print_timeshot( starttime, "create_vlan_seg", switch=nexus_host)
def create_vlan(self, nexus_host, vlanid, vni)
Given switch, vlanid, vni, Create a VLAN on Switch.
4.642064
4.505816
1.030238
starttime = time.time() path_snip = snipp.PATH_VLAN % vlanid self.client.rest_delete(path_snip, nexus_host) self.capture_and_print_timeshot( starttime, "del_vlan", switch=nexus_host)
def delete_vlan(self, nexus_host, vlanid)
Delete a VLAN on Nexus Switch given the VLAN ID.
8.419439
7.845522
1.073152
starttime = time.time() LOG.debug("NexusDriver get if body config for host %s: " "if_type %s port %s", nexus_host, intf_type, interface) if intf_type == "ethernet": body_if_type = "l1PhysIf" path_interface = "phys-[eth" + interface + "]" else: body_if_type = "pcAggrIf" path_interface = "aggr-[po" + interface + "]" path_snip = (snipp.PATH_IF % (path_interface)) mode = snipp.BODY_PORT_CH_MODE if add_mode else '' if is_delete: increment_it = "-" debug_desc = "delif" native_vlan = "" else: native_vlan = 'vlan-' + str(vlanid) debug_desc = "createif" if vlanid is "": increment_it = "" else: increment_it = "+" if is_native: body_snip = (snipp.BODY_NATIVE_TRUNKVLAN % (body_if_type, mode, increment_it + str(vlanid), str(native_vlan))) else: body_snip = (snipp.BODY_TRUNKVLAN % (body_if_type, mode, increment_it + str(vlanid))) self.capture_and_print_timeshot( starttime, debug_desc, switch=nexus_host) return path_snip, body_snip
def _get_vlan_body_on_trunk_int(self, nexus_host, vlanid, intf_type, interface, is_native, is_delete, add_mode)
Prepares an XML snippet for VLAN on a trunk interface. :param nexus_host: IP address of Nexus switch :param vlanid: Vlanid(s) to add to interface :param intf_type: String which specifies interface type. example: ethernet :param interface: String indicating which interface. example: 1/19 :param is_native: Is native vlan config desired? :param is_delete: Is this a delete operation? :param add_mode: Add mode trunk :returns path_snippet, body_snippet
4.380177
4.115145
1.064404
starttime = time.time() path_snip, body_snip = self._get_vlan_body_on_trunk_int( nexus_host, vlanid, intf_type, interface, is_native, True, False) self.send_edit_string(nexus_host, path_snip, body_snip) self.capture_and_print_timeshot( starttime, "delif", switch=nexus_host)
def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, interface, is_native)
Disable a VLAN on a trunk interface.
5.588688
5.505629
1.015086
starttime = time.time() LOG.debug("NexusDriver edit config for host %s: path: %s body: %s", nexus_host, path_snip, body_snip) self.client.rest_post(path_snip, nexus_host, body_snip) self.capture_and_print_timeshot( starttime, "send_edit", switch=nexus_host)
def send_edit_string(self, nexus_host, path_snip, body_snip, check_to_close_session=True)
Sends rest Post request to Nexus switch.
5.086856
4.856843
1.047359
starttime = time.time() path_snip = snipp.PATH_USER_CMDS body_snip = snipp.BODY_USER_CONF_CMDS % ('1', cli_str) LOG.debug("NexusDriver CLI config for host %s: path: %s body: %s", nexus_host, path_snip, body_snip) self.nxapi_client.rest_post(path_snip, nexus_host, body_snip) self.capture_and_print_timeshot( starttime, "send_cliconf", switch=nexus_host)
def _send_cli_conf_string(self, nexus_host, cli_str)
Sends CLI Config commands to Nexus switch using NXAPI.
6.010205
5.660395
1.0618
path_snip, body_snip = self._get_vlan_body_on_trunk_int( nexus_host, vlanid, intf_type, interface, is_native, False, add_mode) self.send_edit_string(nexus_host, path_snip, body_snip)
def send_enable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, interface, is_native, add_mode=False)
Gathers and sends an interface trunk XML snippet.
3.732432
3.708582
1.006431
starttime = time.time() self.create_vlan(nexus_host, vlan_id, vni) LOG.debug("NexusDriver created VLAN: %s", vlan_id) if nexus_port: self.send_enable_vlan_on_trunk_int( nexus_host, vlan_id, intf_type, nexus_port, is_native) self.capture_and_print_timeshot( starttime, "create_all", switch=nexus_host)
def create_and_trunk_vlan(self, nexus_host, vlan_id, intf_type, nexus_port, vni, is_native)
Create VLAN and trunk it on the specified ports.
4.520105
4.472147
1.010724
# Configure the "feature" commands and NVE interface # (without "member" subcommand configuration). # The Nexus 9K will not allow the "interface nve" configuration # until the "feature nv overlay" command is issued and installed. # To get around the N9K failing on the "interface nve" command # send the two XML snippets down separately. starttime = time.time() # Do CLI 'feature nv overlay' self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE, (snipp.BODY_VXLAN_STATE % "enabled")) # Do CLI 'feature vn-segment-vlan-based' self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE, (snipp.BODY_VNSEG_STATE % "enabled")) # Do CLI 'int nve1' to Create nve1 self.send_edit_string( nexus_host, (snipp.PATH_NVE_CREATE % nve_int_num), (snipp.BODY_NVE_CREATE % nve_int_num)) # Do CLI 'no shut # source-interface loopback %s' # beneath int nve1 self.send_edit_string( nexus_host, (snipp.PATH_NVE_CREATE % nve_int_num), (snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf))) self.capture_and_print_timeshot( starttime, "enable_vxlan", switch=nexus_host)
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf)
Enable VXLAN on the switch.
5.590399
5.576273
1.002533
# Removing the "feature nv overlay" configuration also # removes the "interface nve" configuration. starttime = time.time() # Do CLI 'no feature nv overlay' self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE, (snipp.BODY_VXLAN_STATE % "disabled")) # Do CLI 'no feature vn-segment-vlan-based' self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE, (snipp.BODY_VNSEG_STATE % "disabled")) self.capture_and_print_timeshot( starttime, "disable_vxlan", switch=nexus_host)
def disable_vxlan_feature(self, nexus_host)
Disable VXLAN on the switch.
7.296796
6.958428
1.048627
# Do CLI [no] member vni %s mcast-group %s # beneath int nve1 starttime = time.time() path = snipp.PATH_VNI_UPDATE % (nve_int_num, vni) body = snipp.BODY_VNI_UPDATE % (vni, vni, vni, mcast_group) self.send_edit_string(nexus_host, path, body) self.capture_and_print_timeshot( starttime, "create_nve", switch=nexus_host)
def create_nve_member(self, nexus_host, nve_int_num, vni, mcast_group)
Add a member configuration to the NVE interface.
7.658323
8.16593
0.937838
starttime = time.time() path_snip = snipp.PATH_VNI_UPDATE % (nve_int_num, vni) self.client.rest_delete(path_snip, nexus_host) self.capture_and_print_timeshot( starttime, "delete_nve", switch=nexus_host)
def delete_nve_member(self, nexus_host, nve_int_num, vni)
Delete a member configuration on the NVE interface.
7.224102
7.363157
0.981115
router_id = ri.router_name()[:self.DEV_NAME_LEN] is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region if is_multi_region_enabled: region_id = cfg.CONF.multi_region.region_id vrf_name = "%s-%s" % (router_id, region_id) else: vrf_name = router_id return vrf_name
def _get_vrf_name(self, ri)
overloaded method for generating a vrf_name that supports region_id
2.939233
2.789083
1.053835
try: vlan = port['hosting_info']['segmentation_id'] int_prefix = port['hosting_info']['physical_interface'] return '%s.%s' % (int_prefix, vlan) except KeyError as e: params = {'key': e} raise cfg_exc.DriverExpectedKeyNotSetException(**params)
def _get_interface_name_from_hosting_port(self, port)
Extract the underlying subinterface name for a port e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
5.281352
5.086143
1.03838
for item in list_containing_dicts_entries: if item.get(attribute_name) == attribute_value: return item return {}
def _get_item(list_containing_dicts_entries, attribute_value, attribute_name='subnet_id')
Searches a list of dicts and returns the first matching entry The dict entry returned contains the attribute 'attribute_name' whose value equals 'attribute_value'. If no such dict is found in the list an empty dict is returned.
2.217845
2.35703
0.940949
acl_present = self._check_acl(acl_no, network, netmask) if not acl_present: conf_str = snippets.CREATE_ACL % (acl_no, network, netmask) self._edit_running_config(conf_str, 'CREATE_ACL') pool_name = "%s_nat_pool" % vrf_name conf_str = asr1k_snippets.SET_DYN_SRC_TRL_POOL % (acl_no, pool_name, vrf_name) try: self._edit_running_config(conf_str, 'SET_DYN_SRC_TRL_POOL') except Exception as dyn_nat_e: LOG.info("Ignore exception for SET_DYN_SRC_TRL_POOL: %s. " "The config seems to be applied properly but netconf " "seems to report an error.", dyn_nat_e) conf_str = snippets.SET_NAT % (inner_itfc, 'inside') self._edit_running_config(conf_str, 'SET_NAT') conf_str = snippets.SET_NAT % (outer_itfc, 'outside') self._edit_running_config(conf_str, 'SET_NAT')
def _nat_rules_for_internet_access(self, acl_no, network, netmask, inner_itfc, outer_itfc, vrf_name)
Configure the NAT rules for an internal network. Configuring NAT rules in the ASR1k is a three step process. First create an ACL for the IP range of the internal network. Then enable dynamic source NATing on the external interface of the ASR1k for this ACL and VRF of the neutron router. Finally enable NAT on the interfaces of the ASR1k where the internal and external networks are connected. :param acl_no: ACL number of the internal network. :param network: internal network :param netmask: netmask of the internal network. :param inner_itfc: (name of) interface connected to the internal network :param outer_itfc: (name of) interface connected to the external network :param vrf_name: VRF corresponding to this virtual router :return: True if configuration succeeded :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions. IOSXEConfigException
3.265395
3.031259
1.077241
acls = [] # first disable nat in all inner ports for port in ports: in_itfc_name = self._get_interface_name_from_hosting_port(port) acls.append(self._generate_acl_num_from_port(port)) is_alone = len(port['change_details']['current_ports']) == 1 if not intf_deleted and is_alone is True: self._remove_interface_nat(in_itfc_name, 'inside') # There is a possibility that the dynamic NAT rule cannot be removed # from the running config, if there is still traffic in the inner # interface causing a rule to be present in the NAT translation # table. For this we give 2 seconds for the 'inside NAT rule' to # expire and then clear the NAT translation table manually. This can # be costly and hence is not enabled here, pending further # sinvestigation. # LOG.debug("Sleep for 2 seconds before clearing NAT rules") # time.sleep(2) # clear the NAT translation table # self._remove_dyn_nat_translations() # remove dynamic nat rules and acls vrf_name = self._get_vrf_name(ri) ext_itfc_name = self._get_interface_name_from_hosting_port(ext_port) for acl in acls: self._remove_dyn_nat_rule(acl, ext_itfc_name, vrf_name)
def _remove_internal_nw_nat_rules(self, ri, ports, ext_port, intf_deleted=False)
Removes the NAT rules already configured when an internal network is removed. :param ri -- router-info object :param ports -- list of affected ports where network nat rules was affected :param ext_port -- external facing port :param intf_deleted-- If True, indicates that the subinterface was deleted.
6.494328
6.596666
0.984486
vlan = ex_gw_port['hosting_info']['segmentation_id'] hsrp_grp = ex_gw_port[ha.HA_INFO]['group'] LOG.debug("add floating_ip: %(fip)s, fixed_ip: %(fixed_ip)s, " "vrf: %(vrf)s, ex_gw_port: %(port)s", {'fip': floating_ip, 'fixed_ip': fixed_ip, 'vrf': vrf, 'port': ex_gw_port}) confstr = (asr1k_snippets.SET_STATIC_SRC_TRL_NO_VRF_MATCH % (fixed_ip, floating_ip, vrf, hsrp_grp, vlan)) self._edit_running_config(confstr, 'SET_STATIC_SRC_TRL_NO_VRF_MATCH')
def _do_add_floating_ip_asr1k(self, floating_ip, fixed_ip, vrf, ex_gw_port)
To implement a floating ip, an ip static nat is configured in the underlying router ex_gw_port contains data to derive the vlan associated with related subnet for the fixed ip. The vlan in turn is applied to the redundancy parameter for setting the IP NAT.
3.355476
3.23641
1.03679
# let the generic status update callback function handle this callback self.update_hosting_device_status(context, host, {const.HD_DEAD: hosting_device_ids})
def report_non_responding_hosting_devices(self, context, host, hosting_device_ids)
Report that a hosting device is determined to be dead. :param context: contains user information :param host: originator of callback :param hosting_device_ids: list of non-responding hosting devices
11.976209
9.898925
1.209849
for status, hd_ids in six.iteritems(status_info): # update hosting device entry in db to new status hd_spec = {'hosting_device': {'status': status}} for hd_id in hd_ids: self._dmplugin.update_hosting_device(context, hd_id, hd_spec) if status == const.HD_DEAD or status == const.HD_ERROR: self._dmplugin.handle_non_responding_hosting_devices( context, host, hd_ids)
def update_hosting_device_status(self, context, host, status_info)
Report status changes for hosting devices. :param context: contains user information :param host: originator of callback :param status_info: Dictionary with list of hosting device ids for each type of hosting device status to be updated i.e.:: { HD_ACTIVE: list_of_ids_of_active_hds, HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds, HD_DEAD: list_of_ids_of_dead_hds, ... }
4.078886
4.087251
0.997953
agent_ids = self._dmplugin.get_cfg_agents(context, active=None, filters={'host': [host]}) if agent_ids: return [self._dmplugin.get_device_info_for_agent(context, hd_db) for hd_db in self._dmplugin.get_hosting_devices_db( context, filters={'cfg_agent_id': [agent_ids[0].id]})] return []
def get_hosting_devices_for_agent(self, context, host)
Fetches routers that a Cisco cfg agent is managing. This function is supposed to be called when the agent has started, is ready to take on assignments and before any callbacks to fetch logical resources are issued. :param context: contains user information :param host: originator of callback :returns: dict of hosting devices managed by the cfg agent
4.319911
4.570486
0.945175
e_context = context.elevated() r_hd_binding_db = self._get_router_binding_info(e_context, router_id) if r_hd_binding_db.hosting_device_id: if r_hd_binding_db.hosting_device_id == hosting_device_id: return raise routertypeawarescheduler.RouterHostedByHostingDevice( router_id=router_id, hosting_device_id=hosting_device_id) rt_info = self.validate_hosting_device_router_combination( context, r_hd_binding_db, hosting_device_id) result = self.schedule_router_on_hosting_device( e_context, r_hd_binding_db, hosting_device_id, rt_info['slot_need']) if result: # refresh so that we get latest contents from DB e_context.session.expire(r_hd_binding_db) router = self.get_router(e_context, router_id) self.add_type_and_hosting_device_info( e_context, router, r_hd_binding_db, schedule=False) l3_cfg_notifier = self.agent_notifiers.get(AGENT_TYPE_L3_CFG) if l3_cfg_notifier: l3_cfg_notifier.router_added_to_hosting_device(context, router) else: raise routertypeawarescheduler.RouterSchedulingFailed( router_id=router_id, hosting_device_id=hosting_device_id)
def add_router_to_hosting_device(self, context, hosting_device_id, router_id)
Add a (non-hosted) router to a hosting device.
2.830357
2.814617
1.005592
e_context = context.elevated() r_hd_binding_db = self._get_router_binding_info(e_context, router_id) if r_hd_binding_db.hosting_device_id != hosting_device_id: raise routertypeawarescheduler.RouterNotHostedByHostingDevice( router_id=router_id, hosting_device_id=hosting_device_id) router = self.get_router(context, router_id) self.add_type_and_hosting_device_info( e_context, router, r_hd_binding_db, schedule=False) # conditionally remove router from backlog ensure it does not get # scheduled automatically self.remove_router_from_backlog(id) l3_cfg_notifier = self.agent_notifiers.get(AGENT_TYPE_L3_CFG) if l3_cfg_notifier: l3_cfg_notifier.router_removed_from_hosting_device(context, router) LOG.debug("Unscheduling router %s", r_hd_binding_db.router_id) self.unschedule_router_from_hosting_device(context, r_hd_binding_db) # now unbind the router from the hosting device with e_context.session.begin(subtransactions=True): r_hd_binding_db.hosting_device_id = None e_context.session.add(r_hd_binding_db)
def remove_router_from_hosting_device(self, context, hosting_device_id, router_id)
Remove the router from hosting device. After removal, the router will be non-hosted until there is update which leads to re-schedule or be added to another hosting device manually.
2.892442
2.930907
0.986876
num_agents = len(self.get_l3_agents(context, active=True, filters={'agent_modes': [bc.constants.L3_AGENT_MODE_LEGACY, bc.constants.L3_AGENT_MODE_DVR_SNAT]})) max_agents = cfg.CONF.max_l3_agents_per_router if max_agents: if max_agents > num_agents: LOG.info("Number of active agents lower than " "max_l3_agents_per_router. L3 agents " "available: %s", num_agents) else: num_agents = max_agents return num_agents
def get_number_of_agents_for_scheduling(self, context)
Return number of agents on which the router will be scheduled.
3.149342
3.03742
1.036848
context = kwargs['context'] subnet = kwargs['subnet'] l3plugin = bc.get_plugin(L3_ROUTER_NAT) for router in l3plugin.get_routers(context): if (router['external_gateway_info'] and (router['external_gateway_info']['network_id'] == subnet['network_id'])): router_data = {'router': router} l3plugin.update_router(context, router['id'], router_data)
def _notify_subnet_create(resource, event, trigger, **kwargs)
Called when a new subnet is created in the external network
2.537733
2.420301
1.048519
original_port = kwargs.get('original_port') updated_port = kwargs.get('port') if (updated_port is not None and original_port is not None and ( updated_port.get('admin_state_up')) != ( original_port.get('admin_state_up'))): new_port_data = {'port': {}} new_port_data['port']['admin_state_up'] = ( updated_port.get('admin_state_up')) original_device_owner = original_port.get('device_owner', '') if original_device_owner.startswith('network'): router_id = original_port.get('device_id') context = kwargs.get('context') l3plugin = bc.get_plugin(L3_ROUTER_NAT) if l3plugin and router_id: l3plugin._notify_port_update_routers(context, router_id, original_port, new_port_data, 'update_port_status_cfg')
def _notify_cfg_agent_port_update(resource, event, trigger, **kwargs)
Called when router port/interface is enabled/disabled
2.589232
2.532113
1.022558
entry = self.session.query(ucsm_model.PortProfile).filter_by( vlan_id=vlan_id, device_id=device_id).first() return entry and entry.created_on_ucs
def is_port_profile_created(self, vlan_id, device_id)
Indicates if port profile has been created on UCS Manager.
3.987273
3.288019
1.212667
entry = self.session.query(ucsm_model.PortProfile).filter_by( vlan_id=vlan_id, device_id=device_id).first() return entry.profile_id if entry else None
def get_port_profile_for_vlan(self, vlan_id, device_id)
Returns Vlan id associated with the port profile.
3.16261
2.931802
1.078726
if not self.get_port_profile_for_vlan(vlan_id, device_id): port_profile = ucsm_model.PortProfile(profile_id=profile_name, vlan_id=vlan_id, device_id=device_id, created_on_ucs=False) with self.session.begin(subtransactions=True): self.session.add(port_profile) return port_profile
def add_port_profile(self, profile_name, vlan_id, device_id)
Adds a port profile and its vlan_id to the table.
2.749694
2.763689
0.994936
with self.session.begin(subtransactions=True): port_profile = self.session.query( ucsm_model.PortProfile).filter_by( vlan_id=vlan_id, profile_id=profile_name, device_id=device_id).first() if port_profile: port_profile.created_on_ucs = True self.session.merge(port_profile) else: new_profile = ucsm_model.PortProfile(profile_id=profile_name, vlan_id=vlan_id, device_id=device_id, created_on_ucs=True) self.session.add(new_profile)
def set_port_profile_created(self, vlan_id, profile_name, device_id)
Sets created_on_ucs flag to True.
1.949106
1.683755
1.157595
with self.session.begin(subtransactions=True): try: self.session.query(ucsm_model.PortProfile).filter_by( vlan_id=vlan_id).delete() except orm.exc.NoResultFound: return
def delete_vlan_entry(self, vlan_id)
Deletes entry for a vlan_id if it exists.
3.279679
3.271039
1.002641
if not self.get_sp_template_vlan_entry(vlan_id, sp_template, ucsm_ip): entry = ucsm_model.ServiceProfileTemplate(vlan_id=vlan_id, sp_template=sp_template, device_id=ucsm_ip, updated_on_ucs=False) self.session.add(entry)
def add_service_profile_template(self, vlan_id, sp_template, ucsm_ip)
Adds an entry for a vlan_id on a SP template to the table.
3.522088
3.357837
1.048916
entry = self.get_sp_template_vlan_entry(vlan_id, sp_template, device_id) if entry: entry.updated_on_ucs = True self.session.merge(entry) return entry else: return False
def set_sp_template_updated(self, vlan_id, sp_template, device_id)
Sets update_on_ucs flag to True.
4.023263
3.019053
1.332624
def delete_sp_template_for_vlan(self, vlan_id):
    """Delete SP Template entries for a vlan_id, if any exist.

    Query.delete() never raises NoResultFound (it just deletes zero or
    more rows), so the original try/except was dead code and has been
    removed.
    """
    with self.session.begin(subtransactions=True):
        self.session.query(
            ucsm_model.ServiceProfileTemplate).filter_by(
            vlan_id=vlan_id).delete()
3.081105
2.994014
1.029088
def add_vnic_template(self, vlan_id, ucsm_ip, vnic_template, physnet):
    """Add an entry for a vlan_id on a VNIC template to the table.

    :returns: the new VnicTemplate row, or None when an entry for this
        vlan_id/template/ucsm_ip/physnet combination already exists
    """
    if self.get_vnic_template_vlan_entry(vlan_id, vnic_template,
                                         ucsm_ip, physnet):
        return None
    entry = ucsm_model.VnicTemplate(vlan_id=vlan_id,
                                    vnic_template=vnic_template,
                                    device_id=ucsm_ip,
                                    physnet=physnet,
                                    updated_on_ucs=False)
    with self.session.begin(subtransactions=True):
        self.session.add(entry)
    return entry
2.889266
2.862706
1.009278
def set_vnic_template_updated(self, vlan_id, ucsm_ip, vnic_template,
                              physnet):
    """Set the updated_on_ucs flag to True for a Vnic Template entry.

    :returns: the updated entry when one matches, otherwise None
    """
    with self.session.begin(subtransactions=True):
        entry = self.get_vnic_template_vlan_entry(
            vlan_id, vnic_template, ucsm_ip, physnet)
        if not entry:
            return None
        entry.updated_on_ucs = True
        self.session.merge(entry)
        return entry
3.262332
2.600658
1.254426
def delete_vnic_template_for_vlan(self, vlan_id):
    """Delete VNIC Template entries for a vlan_id, if any exist.

    NOTE(review): the original docstring mentioned physnet, but the
    query filters on vlan_id only — every VnicTemplate row for the
    vlan is removed regardless of physnet.

    Query.delete() never raises NoResultFound, so the original
    try/except was dead code and has been removed.
    """
    with self.session.begin(subtransactions=True):
        self.session.query(ucsm_model.VnicTemplate).filter_by(
            vlan_id=vlan_id).delete()
2.722914
2.64979
1.027596
def has_port_profile_to_delete(self, profile_name, device_id):
    """Return True if the port-profile delete table contains the PP."""
    matches = (self.session.query(ucsm_model.PortProfileDelete)
               .filter_by(profile_id=profile_name,
                          device_id=device_id)
               .count())
    return bool(matches)
4.30599
3.493562
1.23255
def add_port_profile_to_delete_table(self, profile_name, device_id):
    """Add a port profile to the delete table.

    :returns: the new PortProfileDelete row, or None when the profile
        is already queued for deletion
    """
    if self.has_port_profile_to_delete(profile_name, device_id):
        return None
    entry = ucsm_model.PortProfileDelete(profile_id=profile_name,
                                         device_id=device_id)
    with self.session.begin(subtransactions=True):
        self.session.add(entry)
    return entry
2.994318
2.874777
1.041583
def remove_port_profile_to_delete(self, profile_name, device_id):
    """Remove a queued port-profile deletion entry from the table."""
    with self.session.begin(subtransactions=True):
        (self.session.query(ucsm_model.PortProfileDelete)
         .filter_by(profile_id=profile_name, device_id=device_id)
         .delete())
3.396217
3.1466
1.079329
def parse(cls, buf):
    """Parse a DHCP packet (modifies ``buf`` in place).

    1. Extracts the client IP address (ciaddr).
    2. Extracts the relaying gateway IP address (giaddr), then zeroes
       the giaddr field in the buffer.
    3. Collects the DHCP Relay Agent Information Option suboptions
       (such as Link Selection, VSS, Server Identifier Override) into
       ``pkt.relay_options`` and blanks that option out of the buffer.
    """
    pkt = DhcpPacket()
    # Fixed BOOTP header offsets: ciaddr at byte 12, giaddr at byte 24.
    (pkt.ciaddr,) = cls.struct('4s').unpack_from(buf, 12)
    (pkt.giaddr,) = cls.struct('4s').unpack_from(buf, 24)
    # Clear giaddr in place; packing b'' into a '4s' field writes four
    # NUL bytes.
    cls.struct('4s').pack_into(buf, 24, b'')
    # DHCP options start at offset 240, right after the magic cookie.
    pos = 240
    while pos < len(buf):
        (opttag,) = cls.struct('B').unpack_from(buf, pos)
        if opttag == 0:
            # Pad option: a single byte with no length field.
            pos += 1
            continue
        if opttag == END:
            # Remember where the END option sits so the packet can be
            # rewritten later.
            pkt.end = pos
            break
        (optlen,) = cls.struct('B').unpack_from(buf, pos + 1)
        startpos = pos
        pos += 2
        if opttag != RELAY_AGENT_INFO:
            # Not the option we care about; skip its payload.
            pos += optlen
            continue
        # Walk the relay-agent-info suboptions: (tag, len, value) runs.
        optend = pos + optlen
        while pos < optend:
            (subopttag, suboptlen) = cls.struct('BB').unpack_from(buf, pos)
            fmt = '%is' % (suboptlen,)
            (val,) = cls.struct(fmt).unpack_from(buf, pos + 2)
            pkt.relay_options[subopttag] = val
            pos += suboptlen + 2
        # Blank the whole option (tag + length + payload) in the buffer.
        cls.struct('%is' % (optlen + 2)).pack_into(buf, startpos, b'')
    pkt.buf = buf
    return pkt
2.685149
2.393055
1.122059
def report_dead_hosting_devices(self, context, hd_ids=None):
    """Report that hosting devices cannot be contacted (presumed dead).

    :param context: session context
    :param hd_ids: list of ids of non-responding hosting devices
    :return: None
    """
    self.client.prepare().cast(
        context, 'report_non_responding_hosting_devices',
        host=self.host, hosting_device_ids=hd_ids)
2.917577
2.817222
1.035622
def register_for_duty(self, context):
    """Report to the plugin that this config agent is ready for duty."""
    return self.client.prepare().call(
        context, 'register_for_duty', host=self.host)
3.260872
2.615996
1.246513
def get_hosting_devices_for_agent(self, context):
    """Fetch the list of hosting devices assigned to this agent."""
    return self.client.prepare().call(
        context, 'get_hosting_devices_for_agent', host=self.host)
2.496678
2.235285
1.116939
def process_services(self, device_ids=None, removed_devices_info=None):
    """Process services managed by this config agent.

    Invoked in three ways: (1) by the periodic task every
    `RPC_LOOP_INTERVAL` seconds with no arguments; (2) by
    `_process_backlogged_hosting_devices()` with the ids of backlogged
    hosting devices that became reachable, to sync services on them;
    (3) by `hosting_devices_removed()` with details of hosting devices
    about to be removed and the neutron resources hosted on them.

    Only the routing service is processed today; additional services
    may be added in the future.

    :param device_ids: hosting devices that are now available and need
        to be processed
    :param removed_devices_info: info about hosting devices which are
        going to be removed, expected format::

            {'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
                              'hd_id2': {'routers': [id3, id4, ...]},
                              ...},
             'deconfigure': True/False}

    :returns: None
    """
    LOG.debug("Processing services started")
    helper = self.routing_service_helper
    if helper:
        helper.process_service(device_ids, removed_devices_info)
    else:
        LOG.warning("No routing service helper loaded")
    LOG.debug("Processing services completed")
5.050018
4.803609
1.051297
def _process_backlogged_hosting_devices(self, context):
    """Process currently backlogged devices.

    Inspects the backlogged hosting devices and acts on the state
    transition reported for each:

    * res['reachable'] - device went from Unknown to Active:
      sync services on it via process_services(device_ids=...).
    * res['revived'] - device went from Dead to Active: notify the
      device manager (when heartbeat is enabled) and then sync
      services. Note there may be lag before the plugin reschedules
      its backlogged routers; if the initial sync is unsuccessful,
      subsequent device syncs will be retried.
    * res['dead'] - device went from Unknown to Dead: notify the
      device manager that it is non-responding.

    :param context: RPC context
    :return: None
    """
    driver_mgr = self.get_routing_service_helper().driver_manager
    res = self._dev_status.check_backlogged_hosting_devices(driver_mgr)
    if res['reachable']:
        self.process_services(device_ids=res['reachable'])
    if res['revived']:
        LOG.debug("Reporting revived hosting devices: %s " %
                  res['revived'])
        # trigger a sync only on the revived hosting-devices
        if self.conf.cfg_agent.enable_heartbeat is True:
            self.devmgr_rpc.report_revived_hosting_devices(
                context, hd_ids=res['revived'])
            self.process_services(device_ids=res['revived'])
    if res['dead']:
        LOG.debug("Reporting dead hosting devices: %s", res['dead'])
        self.devmgr_rpc.report_dead_hosting_devices(context,
                                                    hd_ids=res['dead'])
4.483235
3.498375
1.281519
def agent_updated(self, context, payload):
    """Deal with an `agent_updated` RPC message."""
    try:
        admin_up = payload['admin_state_up']
    except KeyError as e:
        LOG.error("Invalid payload format for received RPC message "
                  "`agent_updated`. Error is %(error)s. Payload is "
                  "%(payload)s", {'error': e, 'payload': payload})
        return
    if admin_up:
        #TODO(hareeshp): implement agent updated handling
        pass
6.326921
5.477472
1.15508
def hosting_devices_assigned_to_cfg_agent(self, context, payload):
    """Deal with hosting devices assigned to this config agent."""
    LOG.debug("Got hosting device assigned, payload: %s" % payload)
    try:
        assigned_ids = payload['hosting_device_ids']
    except KeyError as e:
        LOG.error("Invalid payload format for received RPC message "
                  "`hosting_devices_assigned_to_cfg_agent`. Error is "
                  "%(error)s. Payload is %(payload)s",
                  {'error': e, 'payload': payload})
        return
    if assigned_ids:
        #TODO(hareeshp): implement assignment of hosting devices
        self.routing_service_helper.fullsync = True
6.223515
6.166898
1.009181
def hosting_devices_unassigned_from_cfg_agent(self, context, payload):
    """Deal with hosting devices unassigned from this config agent."""
    try:
        unassigned_ids = payload['hosting_device_ids']
    except KeyError as e:
        LOG.error("Invalid payload format for received RPC message "
                  "`hosting_devices_unassigned_from_cfg_agent`. Error "
                  "is %(error)s. Payload is %(payload)s",
                  {'error': e, 'payload': payload})
        return
    if unassigned_ids:
        #TODO(hareeshp): implement unassignment of hosting devices
        pass
4.787107
4.718843
1.014466
def hosting_devices_removed(self, context, payload):
    """Deal with a hosting device removed RPC message.

    :param payload: expected to carry a (possibly empty) mapping under
        the 'hosting_data' key; services are only processed when that
        mapping is non-empty.
    """
    try:
        # A non-empty dict is truthy on its own, so the original's
        # additional `.keys()` check was redundant.
        if payload['hosting_data']:
            self.process_services(removed_devices_info=payload)
    except KeyError as e:
        LOG.error("Invalid payload format for received RPC message "
                  "`hosting_devices_removed`. Error is %(error)s. Payload "
                  "is %(payload)s", {'error': e, 'payload': payload})
5.542529
5.002872
1.107869
def _agent_registration(self):
    """Register this agent with the server.

    Registers the cfg agent with the neutron server so hosting devices
    can be assigned to it. When the server is not ready to accept the
    registration (it returns False), registration is retried up to
    `MAX_REGISTRATION_ATTEMPTS` times with a delay of
    `REGISTRATION_RETRY_DELAY` between attempts. If the server reports
    that no device manager exists (None), or registration still fails
    after the allowed number of attempts, the agent stops itself.
    """
    for attempts in range(MAX_REGISTRATION_ATTEMPTS):
        context = bc.context.get_admin_context_without_session()
        self.send_agent_report(self.agent_state, context)
        try:
            res = self.devmgr_rpc.register_for_duty(context)
        except Exception:
            # Treat an RPC failure like a "not ready" response so the
            # loop below retries after the standard delay.
            res = False
            LOG.warning("[Agent registration] Rpc exception. Neutron "
                        "may not be available or busy. Retrying "
                        "in %0.2f seconds ", REGISTRATION_RETRY_DELAY)
        if res is True:
            LOG.info("[Agent registration] Agent successfully registered")
            return
        elif res is False:
            LOG.warning("[Agent registration] Neutron server said "
                        "that device manager was not ready. Retrying "
                        "in %0.2f seconds ", REGISTRATION_RETRY_DELAY)
            time.sleep(REGISTRATION_RETRY_DELAY)
        elif res is None:
            LOG.error("[Agent registration] Neutron server said that "
                      "no device manager was found. Cannot continue. "
                      "Exiting!")
            raise SystemExit(_("Cfg Agent exiting"))
    LOG.error("[Agent registration] %d unsuccessful registration "
              "attempts. Exiting!", MAX_REGISTRATION_ATTEMPTS)
    raise SystemExit(_("Cfg Agent exiting"))
4.12517
3.541582
1.164782
def _report_state(self):
    """Report state to the plugin.

    Runs every `keepalive_interval` period. Most iterations send a
    lightweight keepalive; every `report_iteration`-th run a full
    report (including the 'configurations' dict collected from the
    service helpers) is prepared and sent instead.

    :return: None
    """
    LOG.debug("Report state task started")
    self.keepalive_iteration += 1
    if self.keepalive_iteration == self.report_iteration:
        # Time for a full report: rebuild the 'configurations' payload
        # and reset the keepalive counter.
        self._prepare_full_report_data()
        self.keepalive_iteration = 0
        LOG.debug("State report: %s", pprint.pformat(self.agent_state))
    else:
        # Keepalive only: drop the heavyweight configurations data and
        # just refresh the timestamp.
        self.agent_state.pop('configurations', None)
        self.agent_state['local_time'] = datetime.now().strftime(
            ISO8601_TIME_FORMAT)
        LOG.debug("State report: %s", self.agent_state)
    self.send_agent_report(self.agent_state, self.context)
3.844638
3.341642
1.150524
def send_agent_report(self, report, context):
    """Send the agent report via RPC.

    After the first successful send the 'start_flag' entry is removed
    from the report and `use_call` is cleared. If the server lacks
    report_state support (AttributeError), state reporting is disabled
    by stopping the heartbeat loop; any other failure is logged and
    swallowed so the next iteration can retry.
    """
    try:
        self.state_rpc.report_state(context, report, self.use_call)
        report.pop('start_flag', None)
        self.use_call = False
        LOG.debug("Send agent report successfully completed")
    except AttributeError:
        # This means the server does not support report_state
        LOG.warning("Neutron server does not support state report. "
                    "State report for this agent will be disabled.")
        self.heartbeat.stop()
        return
    except Exception:
        LOG.warning("Failed sending agent report!")
6.175443
5.992886
1.030462