sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_port_switch_bindings(port_id, switch_ip):
"""List all vm/vlan bindings on a Nexus switch port."""
LOG.debug("get_port_switch_bindings() called, "
"port:'%(port_id)s', switch:'%(switch_ip)s'",
{'port_id': port_id, 'switch_ip': switch_ip})
try:
return _lookup_all_nexus_bindings(port_id=port_id,
switch_ip=switch_ip)
except c_exc.NexusPortBindingNotFound:
pass | List all vm/vlan bindings on a Nexus switch port. | entailment |
def _lookup_nexus_bindings(query_type, session=None, **bfilter):
"""Look up 'query_type' Nexus bindings matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param bfilter: filter for bindings query
:returns: bindings if query gave a result, else
raise NexusPortBindingNotFound.
"""
if session is None:
session = bc.get_reader_session()
query_method = getattr(session.query(
nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type)
try:
bindings = query_method()
if bindings:
return bindings
except sa_exc.NoResultFound:
pass
raise c_exc.NexusPortBindingNotFound(**bfilter) | Look up 'query_type' Nexus bindings matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param bfilter: filter for bindings query
:returns: bindings if query gave a result, else
raise NexusPortBindingNotFound. | entailment |
def add_nexusnve_binding(vni, switch_ip, device_id, mcast_group):
"""Adds a nexus nve binding."""
LOG.debug("add_nexusnve_binding() called")
session = bc.get_writer_session()
binding = nexus_models_v2.NexusNVEBinding(vni=vni,
switch_ip=switch_ip,
device_id=device_id,
mcast_group=mcast_group)
session.add(binding)
session.flush()
return binding | Adds a nexus nve binding. | entailment |
def remove_nexusnve_binding(vni, switch_ip, device_id):
"""Remove the nexus nve binding."""
LOG.debug("remove_nexusnve_binding() called")
session = bc.get_writer_session()
binding = (session.query(nexus_models_v2.NexusNVEBinding).
filter_by(vni=vni, switch_ip=switch_ip,
device_id=device_id).one())
if binding:
session.delete(binding)
session.flush()
return binding | Remove the nexus nve binding. | entailment |
def remove_all_nexusnve_bindings():
"""Removes all nexusnve bindings."""
LOG.debug("remove_all_nexusport_bindings() called")
session = bc.get_writer_session()
session.query(nexus_models_v2.NexusNVEBinding).delete()
session.flush() | Removes all nexusnve bindings. | entailment |
def get_nve_vni_switch_bindings(vni, switch_ip):
"""Return the nexus nve binding(s) per switch."""
LOG.debug("get_nve_vni_switch_bindings() called")
session = bc.get_reader_session()
try:
return (session.query(nexus_models_v2.NexusNVEBinding).
filter_by(vni=vni, switch_ip=switch_ip).all())
except sa_exc.NoResultFound:
return None | Return the nexus nve binding(s) per switch. | entailment |
def _lookup_host_mappings(query_type, session=None, **bfilter):
"""Look up 'query_type' Nexus mappings matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param bfilter: filter for mappings query
:returns: mappings if query gave a result, else
raise NexusHostMappingNotFound.
"""
if session is None:
session = bc.get_reader_session()
query_method = getattr(session.query(
nexus_models_v2.NexusHostMapping).filter_by(**bfilter), query_type)
try:
mappings = query_method()
if mappings:
return mappings
except sa_exc.NoResultFound:
pass
raise c_exc.NexusHostMappingNotFound(**bfilter) | Look up 'query_type' Nexus mappings matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param bfilter: filter for mappings query
:returns: mappings if query gave a result, else
raise NexusHostMappingNotFound. | entailment |
def add_host_mapping(host_id, nexus_ip, interface, ch_grp, is_static):
"""Add Host to interface mapping entry into mapping data base.
:param host_id: is the name of the host to add
:param interface: is the interface for this host
:param nexus_ip: is the ip addr of the nexus switch for this interface
:param ch_grp: is the port channel this interface belos
:param is_static: whether this is from conf file or learned from baremetal.
"""
LOG.debug("add_nexusport_binding() called")
session = bc.get_writer_session()
mapping = nexus_models_v2.NexusHostMapping(host_id=host_id,
if_id=interface,
switch_ip=nexus_ip,
ch_grp=ch_grp,
is_static=is_static)
try:
session.add(mapping)
session.flush()
except db_exc.DBDuplicateEntry:
with excutils.save_and_reraise_exception() as ctxt:
if is_static:
ctxt.reraise = False
LOG.debug("Duplicate static entry encountered "
"host=%(host)s, if=%(if)s, ip=%(ip)s",
{'host': host_id, 'if': interface,
'ip': nexus_ip})
return mapping | Add Host to interface mapping entry into mapping data base.
:param host_id: is the name of the host to add
:param interface: is the interface for this host
:param nexus_ip: is the ip addr of the nexus switch for this interface
:param ch_grp: is the port channel this interface belos
:param is_static: whether this is from conf file or learned from baremetal. | entailment |
def update_host_mapping(host_id, interface, nexus_ip, new_ch_grp):
"""Change channel_group in host/interface mapping data base."""
LOG.debug("update_host_mapping called")
session = bc.get_writer_session()
mapping = _lookup_one_host_mapping(
session=session,
host_id=host_id,
if_id=interface,
switch_ip=nexus_ip)
mapping.ch_grp = new_ch_grp
session.merge(mapping)
session.flush()
return mapping | Change channel_group in host/interface mapping data base. | entailment |
def remove_host_mapping(interface, nexus_ip):
"""Remove host to interface mapping entry from mapping data base."""
LOG.debug("remove_host_mapping() called")
session = bc.get_writer_session()
try:
mapping = _lookup_one_host_mapping(
session=session,
if_id=interface,
switch_ip=nexus_ip)
session.delete(mapping)
session.flush()
except c_exc.NexusHostMappingNotFound:
pass | Remove host to interface mapping entry from mapping data base. | entailment |
def remove_all_static_host_mappings():
"""Remove all entries defined in config file from mapping data base."""
LOG.debug("remove_host_mapping() called")
session = bc.get_writer_session()
try:
mapping = _lookup_all_host_mappings(
session=session,
is_static=True)
for host in mapping:
session.delete(host)
session.flush()
except c_exc.NexusHostMappingNotFound:
pass | Remove all entries defined in config file from mapping data base. | entailment |
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter):
"""Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound.
"""
if session is None:
session = bc.get_reader_session()
if order:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by(
order),
query_type)
else:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type)
try:
vpcs = query_method()
if vpcs:
return vpcs
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) | Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound. | entailment |
def _lookup_vpc_count_min_max(session=None, **bfilter):
"""Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: number of VPCs and min value if query gave a result,
else raise NexusVPCAllocNotFound.
"""
if session is None:
session = bc.get_reader_session()
try:
res = session.query(
func.count(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.min(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.max(nexus_models_v2.NexusVPCAlloc.vpc_id),
).filter(nexus_models_v2.NexusVPCAlloc.switch_ip ==
bfilter['switch_ip']).one()
count = res[0]
sw_min = res[1]
sw_max = res[2]
return count, sw_min, sw_max
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) | Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: number of VPCs and min value if query gave a result,
else raise NexusVPCAllocNotFound. | entailment |
def _get_free_vpcids_on_switches(switch_ip_list):
'''Get intersect list of free vpcids in list of switches.'''
session = bc.get_reader_session()
prev_view = aliased(nexus_models_v2.NexusVPCAlloc)
query = session.query(prev_view.vpc_id)
prev_swip = switch_ip_list[0]
for ip in switch_ip_list[1:]:
cur_view = aliased(nexus_models_v2.NexusVPCAlloc)
cur_swip = ip
query = query.join(cur_view, sa.and_(
prev_view.switch_ip == prev_swip, prev_view.active == False, # noqa
cur_view.switch_ip == cur_swip, cur_view.active == False, # noqa
prev_view.vpc_id == cur_view.vpc_id))
prev_view = cur_view
prev_swip = cur_swip
unique_vpcids = query.all()
shuffle(unique_vpcids)
return unique_vpcids | Get intersect list of free vpcids in list of switches. | entailment |
def init_vpc_entries(nexus_ip, vpc_list):
"""Initialize switch/vpc entries in vpc alloc data base.
param: nexus_ip ip addr of the nexus switch for this interface
param: vpc_list list of vpc integers to create
"""
LOG.debug("init_vpc_entries() called")
if not vpc_list:
return
session = bc.get_writer_session()
for vpc in vpc_list:
vpc_alloc = nexus_models_v2.NexusVPCAlloc(
switch_ip=nexus_ip,
vpc_id=vpc,
learned=False,
active=False)
session.add(vpc_alloc)
session.flush() | Initialize switch/vpc entries in vpc alloc data base.
param: nexus_ip ip addr of the nexus switch for this interface
param: vpc_list list of vpc integers to create | entailment |
def update_vpc_entry(nexus_ips, vpc_id, learned, active):
"""Change active state in vpc_allocate data base."""
LOG.debug("update_vpc_entry called")
session = bc.get_writer_session()
with session.begin():
for n_ip in nexus_ips:
flipit = not active
x = session.execute(
sa.update(nexus_models_v2.NexusVPCAlloc).values({
'learned': learned,
'active': active}).where(sa.and_(
nexus_models_v2.NexusVPCAlloc.switch_ip == n_ip,
nexus_models_v2.NexusVPCAlloc.vpc_id == vpc_id,
nexus_models_v2.NexusVPCAlloc.active == flipit
)))
if x.rowcount != 1:
raise c_exc.NexusVPCAllocNotFound(
switch_ip=n_ip, vpc_id=vpc_id, active=active) | Change active state in vpc_allocate data base. | entailment |
def alloc_vpcid(nexus_ips):
"""Allocate a vpc id for the given list of switch_ips."""
LOG.debug("alloc_vpc() called")
vpc_id = 0
intersect = _get_free_vpcids_on_switches(nexus_ips)
for intersect_tuple in intersect:
try:
update_vpc_entry(nexus_ips, intersect_tuple.vpc_id,
False, True)
vpc_id = intersect_tuple.vpc_id
break
except Exception:
LOG.exception(
"This exception is expected if another controller "
"beat us to vpcid %(vpcid)s for nexus %(ip)s",
{'vpcid': intersect_tuple.vpc_id,
'ip': ', '.join(map(str, nexus_ips))})
return vpc_id | Allocate a vpc id for the given list of switch_ips. | entailment |
def free_vpcid_for_switch_list(vpc_id, nexus_ips):
"""Free a vpc id for the given list of switch_ips."""
LOG.debug("free_vpcid_for_switch_list() called")
if vpc_id != 0:
update_vpc_entry(nexus_ips, vpc_id, False, False) | Free a vpc id for the given list of switch_ips. | entailment |
def free_vpcid_for_switch(vpc_id, nexus_ip):
"""Free a vpc id for the given switch_ip."""
LOG.debug("free_vpcid_for_switch() called")
if vpc_id != 0:
update_vpc_entry([nexus_ip], vpc_id, False, False) | Free a vpc id for the given switch_ip. | entailment |
def delete_vpcid_for_switch(vpc_id, switch_ip):
"""Removes unused vpcid for a switch.
:param vpc_id: vpc id to remove
:param switch_ip: ip address of the switch
"""
LOG.debug("delete_vpcid_for_switch called")
session = bc.get_writer_session()
vpc = _lookup_one_vpc_allocs(vpc_id=vpc_id,
switch_ip=switch_ip,
active=False)
session.delete(vpc)
session.flush() | Removes unused vpcid for a switch.
:param vpc_id: vpc id to remove
:param switch_ip: ip address of the switch | entailment |
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
parent = dict(member_name="agent",
collection_name="agents")
controller = resource.Resource(HostingDeviceSchedulerController(),
cb_faults.FAULT_MAP)
exts.append(extensions.ResourceExtension(CFG_AGENT_HOSTING_DEVICES,
controller, parent))
parent = dict(member_name=ciscohostingdevicemanager.DEVICE,
collection_name=ciscohostingdevicemanager.DEVICES)
controller = resource.Resource(
CfgAgentsHandlingHostingDeviceController(), cb_faults.FAULT_MAP)
exts.append(extensions.ResourceExtension(HOSTING_DEVICE_CFG_AGENTS,
controller, parent,
PATH_PREFIX))
return exts | Returns Ext Resources. | entailment |
def create_process(cmd, root_helper=None, addl_env=None, log_output=True):
"""Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it.
"""
if root_helper:
cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd)
log_output and LOG.info("Running command: %s", cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
return obj, cmd | Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it. | entailment |
def is_intf_up(intf):
"""Function to check if a interface is up. """
intf_path = '/'.join(('/sys/class/net', intf))
intf_exist = os.path.exists(intf_path)
if not intf_exist:
LOG.error("Unable to get interface %(intf)s, Interface dir "
"%(dir)s does not exist",
{'intf': intf, 'dir': intf_path})
return False
try:
oper_file = '/'.join((intf_path, 'operstate'))
with open(oper_file, 'r') as fd:
oper_state = fd.read().strip('\n')
if oper_state == 'up':
return True
except Exception as e:
LOG.error("Exception in reading %s", str(e))
return False | Function to check if a interface is up. | entailment |
def get_all_run_phy_intf():
"""Retrieve all physical interfaces that are operationally up. """
intf_list = []
base_dir = '/sys/class/net'
dir_exist = os.path.exists(base_dir)
if not dir_exist:
LOG.error("Unable to get interface list :Base dir %s does not "
"exist", base_dir)
return intf_list
dir_cont = os.listdir(base_dir)
for subdir in dir_cont:
dev_dir = base_dir + '/' + subdir + '/' + 'device'
dev_exist = os.path.exists(dev_dir)
if dev_exist:
oper_state = is_intf_up(subdir)
if oper_state is True:
intf_list.append(subdir)
else:
LOG.info("Dev dir %s does not exist, not physical intf",
dev_dir)
return intf_list | Retrieve all physical interfaces that are operationally up. | entailment |
def check_vnic_type_and_vendor_info(self, vnic_type, profile):
"""Checks if this vnic_type and vendor device info are supported.
Returns True if:
1. the port vnic_type is direct or macvtap and
2. the vendor_id and product_id of the port is supported by
this MD
Useful in determining if this MD should bind the current
port.
"""
# Check for vnic_type
if vnic_type not in self.supported_sriov_vnic_types:
LOG.info('Non SR-IOV vnic_type: %s.', vnic_type)
return False
if not profile:
return False
# Check for vendor_info
return self._check_for_supported_vendor(profile) | Checks if this vnic_type and vendor device info are supported.
Returns True if:
1. the port vnic_type is direct or macvtap and
2. the vendor_id and product_id of the port is supported by
this MD
Useful in determining if this MD should bind the current
port. | entailment |
def _check_for_supported_vendor(self, profile):
"""Checks if the port belongs to a supported vendor.
Returns True for supported_pci_devs.
"""
vendor_info = profile.get('pci_vendor_info')
if not vendor_info:
return False
if vendor_info not in self.supported_pci_devs:
return False
return True | Checks if the port belongs to a supported vendor.
Returns True for supported_pci_devs. | entailment |
def _import_ucsmsdk(self):
"""Imports the Ucsm SDK module.
This module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of UcsSdk.
"""
# Check if SSL certificate checking has been disabled.
# If so, warn the user before proceeding.
if not CONF.ml2_cisco_ucsm.ucsm_https_verify:
LOG.warning(const.SSL_WARNING)
# Monkey patch the UCS sdk version of urllib2 to disable
# https verify if required.
from networking_cisco.ml2_drivers.ucsm import ucs_urllib2
ucsmsdkhandle = importutils.import_module('UcsSdk.UcsHandle')
ucsmsdkhandle.urllib2 = ucs_urllib2
ucsmsdk = importutils.import_module('UcsSdk')
return ucsmsdk | Imports the Ucsm SDK module.
This module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of UcsSdk. | entailment |
def _get_server_name(self, handle, service_profile_mo, ucsm_ip):
"""Get the contents of the 'Name' field associated with UCS Server.
When a valid connection hande to UCS Manager is handed in, the Name
field associated with a UCS Server is returned.
"""
try:
resolved_dest = handle.ConfigResolveDn(service_profile_mo.PnDn)
server_list = resolved_dest.OutConfig.GetChild()
if not server_list:
return ""
return server_list[0].Name
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e) | Get the contents of the 'Name' field associated with UCS Server.
When a valid connection hande to UCS Manager is handed in, the Name
field associated with a UCS Server is returned. | entailment |
def _create_ucsm_host_to_service_profile_mapping(self):
"""Reads list of Service profiles and finds associated Server."""
ucsm_ips = list(CONF.ml2_cisco_ucsm.ucsms)
for ucsm_ip in ucsm_ips:
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
try:
sp_list_temp = handle.ConfigResolveClass('lsServer', None,
inHierarchical=False)
if sp_list_temp and sp_list_temp.OutConfigs is not None:
sp_list = sp_list_temp.OutConfigs.GetChild() or []
for sp in sp_list:
if sp.PnDn:
server_name = self._get_server_name(handle, sp,
ucsm_ip)
if (server_name and not sp.OperSrcTemplName):
LOG.debug('Server %s info retrieved '
'from UCSM %s', server_name, ucsm_ip)
key = (ucsm_ip, server_name)
self.ucsm_sp_dict[key] = str(sp.Dn)
self.ucsm_host_dict[server_name] = ucsm_ip
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e) | Reads list of Service profiles and finds associated Server. | entailment |
def _create_vlanprofile(self, handle, vlan_id, ucsm_ip):
"""Creates VLAN profile to be assosiated with the Port Profile."""
vlan_name = self.make_vlan_name(vlan_id)
vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
vlan_name)
try:
handle.StartTransaction()
vp1 = handle.GetManagedObject(
None,
self.ucsmsdk.FabricLanCloud.ClassId(),
{self.ucsmsdk.FabricLanCloud.DN: const.VLAN_PATH})
if not vp1:
LOG.warning('UCS Manager network driver Vlan Profile '
'path at %s missing', const.VLAN_PATH)
return False
# Create a vlan profile with the given vlan_id
vp2 = handle.AddManagedObject(
vp1,
self.ucsmsdk.FabricVlan.ClassId(),
{self.ucsmsdk.FabricVlan.COMPRESSION_TYPE:
const.VLAN_COMPRESSION_TYPE,
self.ucsmsdk.FabricVlan.DN: vlan_profile_dest,
self.ucsmsdk.FabricVlan.SHARING: const.NONE,
self.ucsmsdk.FabricVlan.PUB_NW_NAME: "",
self.ucsmsdk.FabricVlan.ID: str(vlan_id),
self.ucsmsdk.FabricVlan.MCAST_POLICY_NAME: "",
self.ucsmsdk.FabricVlan.NAME: vlan_name,
self.ucsmsdk.FabricVlan.DEFAULT_NET: "no"})
handle.CompleteTransaction()
if vp2:
LOG.debug('UCS Manager network driver Created Vlan '
'Profile %s at %s', vlan_name, vlan_profile_dest)
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'Vlan Profile',
vlan_name, ucsm_ip) | Creates VLAN profile to be assosiated with the Port Profile. | entailment |
def _create_port_profile(self, handle, profile_name, vlan_id,
vnic_type, ucsm_ip, trunk_vlans, qos_policy):
"""Creates a Port Profile on the UCS Manager.
Significant parameters set in the port profile are:
1. Port profile name - Should match what was set in vif_details
2. High performance mode - For VM-FEX to be enabled/configured on
the port using this port profile, this mode should be enabled.
3. Vlan id - Vlan id used by traffic to and from the port.
"""
port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
profile_name)
vlan_name = self.make_vlan_name(vlan_id)
vlan_associate_path = (const.PORT_PROFILESETDN +
const.VNIC_PATH_PREFIX + profile_name +
const.VLAN_PATH_PREFIX + vlan_name)
cl_profile_name = const.CLIENT_PROFILE_NAME_PREFIX + str(vlan_id)
cl_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
profile_name + const.CLIENT_PROFILE_PATH_PREFIX +
cl_profile_name)
# Remove this Port Profile from the delete DB table if it was
# addded there due to a previous delete.
self.ucsm_db.remove_port_profile_to_delete(profile_name, ucsm_ip)
# Check if direct or macvtap mode
if vnic_type == bc.portbindings.VNIC_DIRECT:
port_mode = const.HIGH_PERF
else:
port_mode = const.NONE
try:
handle.StartTransaction()
port_profile = handle.GetManagedObject(
None,
self.ucsmsdk.VnicProfileSet.ClassId(),
{self.ucsmsdk.VnicProfileSet.DN: const.PORT_PROFILESETDN})
if not port_profile:
LOG.warning('UCS Manager network driver Port Profile '
'path at %s missing',
const.PORT_PROFILESETDN)
return False
# Create a port profile on the UCS Manager
p_profile = handle.AddManagedObject(
port_profile,
self.ucsmsdk.VnicProfile.ClassId(),
{self.ucsmsdk.VnicProfile.NAME: profile_name,
self.ucsmsdk.VnicProfile.POLICY_OWNER: "local",
self.ucsmsdk.VnicProfile.NW_CTRL_POLICY_NAME: "",
self.ucsmsdk.VnicProfile.PIN_TO_GROUP_NAME: "",
self.ucsmsdk.VnicProfile.DN: port_profile_dest,
self.ucsmsdk.VnicProfile.DESCR: const.DESCR,
self.ucsmsdk.VnicProfile.QOS_POLICY_NAME: qos_policy,
self.ucsmsdk.VnicProfile.HOST_NW_IOPERF: port_mode,
self.ucsmsdk.VnicProfile.MAX_PORTS: const.MAX_PORTS})
if not p_profile:
LOG.warning('UCS Manager network driver could not '
'create Port Profile %s.', profile_name)
return False
LOG.debug('UCS Manager network driver associating Vlan '
'Profile with Port Profile at %s',
vlan_associate_path)
# Associate port profile with vlan profile
mo = handle.AddManagedObject(
p_profile,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path,
self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "yes"}, True)
if not mo:
LOG.warning('UCS Manager network driver cannot '
'associate Vlan Profile to Port '
'Profile %s', profile_name)
return False
LOG.debug('UCS Manager network driver created Port Profile %s '
'at %s', profile_name, port_profile_dest)
# For Multi VLAN trunk support
if trunk_vlans:
for vlan in trunk_vlans:
vlan_name = self.make_vlan_name(vlan)
vlan_associate_path = (const.PORT_PROFILESETDN +
const.VNIC_PATH_PREFIX + profile_name +
const.VLAN_PATH_PREFIX + vlan_name)
# Associate port profile with vlan profile
# for the trunk vlans
mo = handle.AddManagedObject(
p_profile,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_associate_path,
self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)
if not mo:
LOG.warning('UCS Manager network driver cannot '
'associate Vlan %(vlan)d to Port '
'Profile %(profile)s',
{'vlan': vlan, 'profile': profile_name})
cl_profile = handle.AddManagedObject(
p_profile,
self.ucsmsdk.VmVnicProfCl.ClassId(),
{self.ucsmsdk.VmVnicProfCl.ORG_PATH: ".*",
self.ucsmsdk.VmVnicProfCl.DN: cl_profile_dest,
self.ucsmsdk.VmVnicProfCl.NAME: cl_profile_name,
self.ucsmsdk.VmVnicProfCl.POLICY_OWNER: "local",
self.ucsmsdk.VmVnicProfCl.SW_NAME: ".*",
self.ucsmsdk.VmVnicProfCl.DC_NAME: ".*",
self.ucsmsdk.VmVnicProfCl.DESCR: const.DESCR})
handle.CompleteTransaction()
if not cl_profile:
LOG.warning('UCS Manager network driver could not '
'create Client Profile %s.',
cl_profile_name)
return False
LOG.debug('UCS Manager network driver created Client Profile '
'%s at %s', cl_profile_name, cl_profile_dest)
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'Port Profile',
profile_name, ucsm_ip) | Creates a Port Profile on the UCS Manager.
Significant parameters set in the port profile are:
1. Port profile name - Should match what was set in vif_details
2. High performance mode - For VM-FEX to be enabled/configured on
the port using this port profile, this mode should be enabled.
3. Vlan id - Vlan id used by traffic to and from the port. | entailment |
def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id,
trunk_vlans):
"""Top level method to create Port Profiles on the UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in the creation of the Port Profile on the UCS
Manager.
"""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', str(vlan_id))
return False
if trunk_vlans:
for vlan in trunk_vlans:
if not self._create_vlanprofile(handle, vlan, ucsm_ip):
LOG.error('UCS Manager network driver failed to '
'create Vlan Profile for vlan %s', vlan)
return False
qos_policy = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sriov_qos_policy
if qos_policy:
LOG.debug('UCS Manager Network driver applying QoS Policy '
'%(qos)s to Port Profile %(port_profile)s',
{'qos': qos_policy, 'port_profile': profile_name})
# Create Port Profile
if not self._create_port_profile(handle, profile_name,
vlan_id, vnic_type,
ucsm_ip, trunk_vlans,
qos_policy):
LOG.error('UCS Manager network driver failed to create '
'Port Profile %s', profile_name)
return False
return True | Top level method to create Port Profiles on the UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in the creation of the Port Profile on the UCS
Manager. | entailment |
def _update_service_profile(self, handle, service_profile,
vlan_id, ucsm_ip):
"""Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in.
"""
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
eth_port_paths = ["%s%s" % (service_profile, ep)
for ep in virtio_port_list]
vlan_name = self.make_vlan_name(vlan_id)
try:
handle.StartTransaction()
obj = handle.GetManagedObject(
None,
self.ucsmsdk.LsServer.ClassId(),
{self.ucsmsdk.LsServer.DN: service_profile})
if not obj:
LOG.debug('UCS Manager network driver could not find '
'Service Profile %s in UCSM %s',
service_profile, ucsm_ip)
return False
for eth_port_path in eth_port_paths:
eth = handle.GetManagedObject(
obj, self.ucsmsdk.VnicEther.ClassId(),
{self.ucsmsdk.VnicEther.DN: eth_port_path}, True)
if eth:
vlan_path = (eth_port_path + const.VLAN_PATH_PREFIX +
vlan_name)
eth_if = handle.AddManagedObject(eth,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_path,
self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)
if not eth_if:
LOG.debug('UCS Manager network driver could not '
'update Service Profile %s with vlan %d',
service_profile, vlan_id)
return False
else:
LOG.debug('UCS Manager network driver did not find '
'ethernet port at %s', eth_port_path)
handle.CompleteTransaction()
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'Service Profile',
vlan_name, ucsm_ip) | Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in. | entailment |
def update_serviceprofile(self, host_id, vlan_id):
"""Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programed on a server's
ethernet ports and the Fabric Interconnect's network ports.
"""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
if service_profile:
LOG.debug('UCS Manager network driver Service Profile : %s',
service_profile)
else:
LOG.info('UCS Manager network driver does not support '
'Host_id %s', host_id)
return False
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', str(vlan_id))
return False
# Update Service Profile
if not self._update_service_profile(handle,
service_profile,
vlan_id,
ucsm_ip):
LOG.error('UCS Manager network driver failed to update '
'Service Profile %(service_profile)s in UCSM '
'%(ucsm_ip)s',
{'service_profile': service_profile, 'ucsm_ip': ucsm_ip})
return False
return True | Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programed on a server's
ethernet ports and the Fabric Interconnect's network ports. | entailment |
def update_vnic_template(self, host_id, vlan_id, physnet,
vnic_template_path, vnic_template):
"""Updates VNIC Template with the vlan_id."""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
vlan_name = self.make_vlan_name(vlan_id)
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', vlan_id)
return False
try:
LOG.debug('VNIC Template Path: %s', vnic_template_path)
vnic_template_full_path = (vnic_template_path +
const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
LOG.debug('VNIC Template Path: %s for physnet %s',
vnic_template_full_path, physnet)
handle.StartTransaction()
mo = handle.GetManagedObject(
None,
self.ucsmsdk.VnicLanConnTempl.ClassId(),
{self.ucsmsdk.VnicLanConnTempl.DN:
vnic_template_full_path}, True)
if not mo:
LOG.error('UCS Manager network driver could '
'not find VNIC template %s',
vnic_template_full_path)
return False
vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
vlan_name)
LOG.debug('VNIC Template VLAN path: %s', vlan_dn)
eth_if = handle.AddManagedObject(mo,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_dn,
self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)
if not eth_if:
LOG.error('UCS Manager network driver could '
'not add VLAN %(vlan_name)s to VNIC '
'template %(vnic_template_full_path)s',
{'vlan_name': vlan_name,
'vnic_template_full_path': vnic_template_full_path})
return False
handle.CompleteTransaction()
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'VNIC Template',
vlan_id, ucsm_ip) | Updates VNIC Template with the vlan_id. | entailment |
def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN Profile from UCS Manager."""
vlan_name = self.make_vlan_name(vlan_id)
vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
vlan_name)
try:
handle.StartTransaction()
obj = handle.GetManagedObject(
None,
self.ucsmsdk.FabricVlan.ClassId(),
{self.ucsmsdk.FabricVlan.DN: vlan_profile_dest})
if obj:
handle.RemoveManagedObject(obj)
handle.CompleteTransaction()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigFailed(config=vlan_id,
ucsm_ip=ucsm_ip, exc=e) | Deletes VLAN Profile from UCS Manager. | entailment |
def _delete_port_profile_from_ucsm(self, handle, port_profile, ucsm_ip):
"""Deletes Port Profile from UCS Manager."""
port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
port_profile)
handle.StartTransaction()
# Find port profile on the UCS Manager
p_profile = handle.GetManagedObject(
None,
self.ucsmsdk.VnicProfile.ClassId(),
{self.ucsmsdk.VnicProfile.NAME: port_profile,
self.ucsmsdk.VnicProfile.DN: port_profile_dest})
if p_profile:
handle.RemoveManagedObject(p_profile)
else:
LOG.warning('UCS Manager network driver did not find '
'Port Profile %s to delete.',
port_profile)
handle.CompleteTransaction() | Deletes Port Profile from UCS Manager. | entailment |
def _delete_port_profile(self, handle, port_profile, ucsm_ip):
"""Calls method to delete Port Profile from UCS Manager.
If exception is raised by UCSM, then the PP is added to
a DB table. The delete timer thread, tried to delete all
PPs added to this table when it wakes up.
"""
try:
self._delete_port_profile_from_ucsm(handle, port_profile, ucsm_ip)
except Exception as e:
# Add the Port Profile that we could not delete to the Port Profile
# delete table. A periodic task will attempt to delete it.
LOG.debug('Received Port Profile delete exception %s', e)
self.ucsm_db.add_port_profile_to_delete_table(port_profile,
ucsm_ip) | Calls method to delete Port Profile from UCS Manager.
If exception is raised by UCSM, then the PP is added to
a DB table. The delete timer thread, tried to delete all
PPs added to this table when it wakes up. | entailment |
def _remove_vlan_from_all_service_profiles(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN Profile config from server's ethernet ports."""
service_profile_list = []
for key, value in six.iteritems(self.ucsm_sp_dict):
if (ucsm_ip in key) and value:
service_profile_list.append(value)
if not service_profile_list:
# Nothing to do
return
try:
handle.StartTransaction()
for service_profile in service_profile_list:
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
eth_port_paths = ["%s%s" % (service_profile, ep)
for ep in virtio_port_list]
# 1. From the Service Profile config, access the
# configuration for its ports.
# 2. Check if that Vlan has been configured on each port
# 3. If Vlan config found, remove it.
obj = handle.GetManagedObject(
None,
self.ucsmsdk.LsServer.ClassId(),
{self.ucsmsdk.LsServer.DN: service_profile})
if obj:
# Check if this vlan_id has been configured on the
# ports in this Service profile
for eth_port_path in eth_port_paths:
eth = handle.GetManagedObject(
obj, self.ucsmsdk.VnicEther.ClassId(),
{self.ucsmsdk.VnicEther.DN: eth_port_path},
True)
if eth:
vlan_name = self.make_vlan_name(vlan_id)
vlan_path = eth_port_path + "/if-" + vlan_name
vlan = handle.GetManagedObject(eth,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_path})
if vlan:
# Found vlan config. Now remove it.
handle.RemoveManagedObject(vlan)
handle.CompleteTransaction()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
ucsm_ip=ucsm_ip,
exc=e) | Deletes VLAN Profile config from server's ethernet ports. | entailment |
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN config from all SP Templates that have it."""
sp_template_info_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
vlan_name = self.make_vlan_name(vlan_id)
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
try:
handle.StartTransaction()
# sp_template_info_list is a list of tuples.
# Each tuple is of the form :
# (ucsm_ip, sp_template_path, sp_template)
for sp_template_info in sp_template_info_list:
sp_template_path = sp_template_info.path
sp_template = sp_template_info.name
sp_template_full_path = (sp_template_path +
const.SP_TEMPLATE_PREFIX + sp_template)
obj = handle.GetManagedObject(
None,
self.ucsmsdk.LsServer.ClassId(),
{self.ucsmsdk.LsServer.DN: sp_template_full_path})
if not obj:
LOG.error('UCS Manager network driver could not '
'find Service Profile template %s',
sp_template_full_path)
continue
eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
for ep in virtio_port_list]
for eth_port_path in eth_port_paths:
eth = handle.GetManagedObject(
obj, self.ucsmsdk.VnicEther.ClassId(),
{self.ucsmsdk.VnicEther.DN: eth_port_path}, True)
if eth:
vlan_path = (eth_port_path +
const.VLAN_PATH_PREFIX + vlan_name)
vlan = handle.GetManagedObject(eth,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_path})
if vlan:
# Found vlan config. Now remove it.
handle.RemoveManagedObject(vlan)
else:
LOG.debug('UCS Manager network driver did not '
'find VLAN %s at %s', vlan_name, eth_port_path)
else:
LOG.debug('UCS Manager network driver did not '
'find ethernet port at %s', eth_port_path)
handle.CompleteTransaction()
return True
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
ucsm_ip=ucsm_ip,
exc=e) | Deletes VLAN config from all SP Templates that have it. | entailment |
def _remove_vlan_from_vnic_templates(self, handle, vlan_id, ucsm_ip):
"""Removes VLAN from all VNIC templates that have it enabled."""
ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
vnic_template_info = ucsm.vnic_template_list.values()
vlan_name = self.make_vlan_name(vlan_id)
if not vnic_template_info:
# Nothing to do
return
try:
handle.StartTransaction()
for temp_info in vnic_template_info:
vnic_template = temp_info.template
vnic_template_path = temp_info.path
vnic_template_full_path = (vnic_template_path +
const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
LOG.debug('vnic_template_full_path: %s',
vnic_template_full_path)
mo = handle.GetManagedObject(
None,
self.ucsmsdk.VnicLanConnTempl.ClassId(),
{self.ucsmsdk.VnicLanConnTempl.DN: (
vnic_template_full_path)},
True)
if not mo:
LOG.error('UCS Manager network driver could '
'not find VNIC template %s at',
vnic_template_full_path)
continue
vlan_dn = (vnic_template_full_path +
const.VLAN_PATH_PREFIX + vlan_name)
LOG.debug('VNIC Template VLAN path; %s', vlan_dn)
eth_if = handle.GetManagedObject(mo,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_dn})
if not eth_if:
LOG.error('UCS Manager network driver could not '
'delete VLAN %(vlan_name)s from VNIC '
'template %(vnic_template_full_path)s',
{'vlan_name': vlan_name,
'vnic_template_full_path':
vnic_template_full_path})
if eth_if:
handle.RemoveManagedObject(eth_if)
handle.CompleteTransaction()
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'VNIC Template',
vlan_id, ucsm_ip) | Removes VLAN from all VNIC templates that have it enabled. | entailment |
def delete_all_config_for_vlan(self, vlan_id, port_profile,
trunk_vlans):
"""Top level method to delete all config for vlan_id."""
ucsm_ips = list(CONF.ml2_cisco_ucsm.ucsms)
for ucsm_ip in ucsm_ips:
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
LOG.debug('Deleting config for VLAN %d from UCSM %s', vlan_id,
ucsm_ip)
if (port_profile):
self._delete_port_profile(handle, port_profile, ucsm_ip)
ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
if ucsm.sp_template_list:
self._remove_vlan_from_all_sp_templates(handle,
vlan_id,
ucsm_ip)
if ucsm.vnic_template_list:
self._remove_vlan_from_vnic_templates(handle,
vlan_id,
ucsm_ip)
if not (ucsm.sp_template_list and
ucsm.vnic_template_list):
self._remove_vlan_from_all_service_profiles(handle,
vlan_id,
ucsm_ip)
self._delete_vlan_profile(handle, vlan_id, ucsm_ip)
if trunk_vlans:
for vlan_id in trunk_vlans:
self._delete_vlan_profile(handle, vlan_id, ucsm_ip) | Top level method to delete all config for vlan_id. | entailment |
def ucs_manager_disconnect(self, handle, ucsm_ip):
"""Disconnects from the UCS Manager.
After the disconnect, the handle associated with this connection
is no longer valid.
"""
try:
handle.Logout()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmDisconnectFailed(ucsm_ip=ucsm_ip, exc=e) | Disconnects from the UCS Manager.
After the disconnect, the handle associated with this connection
is no longer valid. | entailment |
def add_events(self, **kwargs):
"""Add failure event into the queue."""
event_q = kwargs.get('event_queue')
pri = kwargs.get('priority')
if not event_q or not pri:
return
try:
event_type = 'server.failure.recovery'
payload = {}
timestamp = time.ctime()
data = (event_type, payload)
event_q.put((pri, timestamp, data))
LOG.debug('Added failure recovery event to the queue.')
except Exception as exc:
LOG.exception('Error: %(exc)s for event %(event)s',
{'exc': str(exc), 'event': event_type})
raise exc | Add failure event into the queue. | entailment |
def failure_recovery(self, fail_info):
"""Failure recovery task.
In case of failure in projects, network and VM create/delete, this
task goes through all failure cases and try the request.
"""
# Read failed entries from project database and send request
# (create/delete - depends on failure type) to DCNM
# 1. Try failure recovery for create project.
LOG.info("Started failure_recovery.")
projs = self.get_fialed_projects_entries(constants.CREATE_FAIL)
for proj in projs:
LOG.debug("Failure recovery for project %(name)s.", (
{'name': proj.name}))
# Try to create the project in DCNM
try:
self.dcnm_client.create_project(self.cfg.dcnm.orchestrator_id,
proj.name,
self.cfg.dcnm.
default_partition_name,
proj.dci_id)
except dexc.DfaClientRequestFailed as e:
LOG.error("failure_recovery: Failed to create %(proj)s "
"on DCNM : %(reason)s",
{'proj': proj.name, 'reason': str(e)})
else:
# Request is sent successfully, update the database.
self.update_project_info_cache(proj.id, dci_id=proj.dci_id,
name=proj.name,
opcode='update')
LOG.debug('Success on failure recovery for '
'project %(name)s', {'name': proj.name})
# 1.1 Try failure recovery for update project.
projs = self.get_fialed_projects_entries(constants.UPDATE_FAIL)
for proj in projs:
LOG.debug("Failure recovery for project %(name)s.", (
{'name': proj.name}))
# This was failure of updating DCI id of the project in DCNM.
try:
self.dcnm_client.update_project(proj.name,
self.cfg.dcnm.
default_partition_name,
proj.dci_id)
except dexc.DfaClientRequestFailed as exc:
LOG.error("failure_recovery: Failed to update %(proj)s "
"on DCNM : %(reason)s",
{'proj': proj.name, 'reason': str(exc)})
else:
# Request is sent successfully, update the database.
self.update_project_info_cache(proj.id,
dci_id=proj.dci_id,
name=proj.name,
opcode='update')
LOG.debug('Success on failure recovery update for '
'project %(name)s', {'name': proj.name})
# 2. Try failure recovery for create network.
nets = self.get_all_networks()
for net in nets:
if (net.result == constants.CREATE_FAIL and
net.source.lower() == 'openstack'):
net_id = net.network_id
try:
subnets = self.neutron_event.nclient.list_subnets(
network_id=net_id).get('subnets')
except dexc.ConnectionFailed:
LOG.exception('Failed to get subnets list.')
continue
for subnet in subnets:
tenant_name = self.get_project_name(subnet['tenant_id'])
snet = utils.Dict2Obj(subnet)
try:
# Check if config_profile is not NULL.
if not net.config_profile:
cfgp, fwd_mod = (
self.dcnm_client.
get_config_profile_for_network(net.name))
net.config_profile = cfgp
net.fwd_mod = fwd_mod
self.dcnm_client.create_network(tenant_name, net, snet,
self.dcnm_dhcp)
except dexc.DfaClientRequestFailed:
# Still is failure, only log the error.
LOG.error('Failed to create network %(net)s.',
{'net': net.name})
else:
# Request is sent to DCNM, update the database
params = dict(
columns=dict(config_profile=net.config_profile,
fwd_mod=net.fwd_mod,
result=constants.RESULT_SUCCESS))
self.update_network(net_id, **params)
LOG.debug("Success on failure recovery to create "
"%(net)s", {'net': net.name})
# 3. Try Failure recovery for VM create and delete.
instances = self.get_vms()
for vm in instances:
vm_info = dict(status=vm.status,
vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host,
port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=vm.ip,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco'))
if vm.result == constants.CREATE_FAIL:
try:
self.neutron_event.send_vm_info(str(vm.host), str(vm_info))
except Exception as e:
# Failed to send info to the agent. Keep the data in the
# database as failure to send it later.
LOG.error('Failed to send VM info to agent. '
'Reason %s', str(e))
else:
params = dict(columns=dict(
result=constants.RESULT_SUCCESS))
self.update_vm_db(vm.port_id, **params)
LOG.info('Created VM %(vm)s.', {'vm': vm.name})
for vm in instances:
if vm.result == constants.DELETE_FAIL:
vm_info['status'] = 'down'
try:
self.neutron_event.send_vm_info(str(vm.host), str(vm_info))
except Exception as e:
LOG.error('Failed to send VM info to agent. '
'Reason %s', str(e))
else:
self.delete_vm_db(vm.port_id)
LOG.info('Deleted VM %(vm)s from DB.',
{'vm': vm.name})
# 4. Try failure recovery for delete network.
for net in nets:
if (net.result == constants.DELETE_FAIL and
net.source.lower() == 'openstack'):
net_id = net.network_id
segid = net.segmentation_id
tenant_name = self.get_project_name(net.tenant_id)
try:
self.dcnm_client.delete_network(tenant_name, net)
except dexc.DfaClientRequestFailed:
# Still is failure, only log the error.
LOG.error('Failed to delete network %(net)s.',
{'net': net.name})
else:
# Request is sent to DCNM, delete the entry
# from database and return the segmentation id to the
# pool.
self.delete_network_db(net_id)
self.segmentation_pool.add(segid)
LOG.debug("Success on failure recovery to deleted "
"%(net)s", {'net': net.name})
# 5. Try failure recovery for delete project.
projs = self.get_fialed_projects_entries(constants.DELETE_FAIL)
for proj in projs:
LOG.debug("Failure recovery for project %(name)s.", (
{'name': proj.name}))
# Try to delete the project in DCNM
try:
self.dcnm_client.delete_project(proj.name,
self.cfg.dcnm.
default_partition_name)
except dexc.DfaClientRequestFailed as e:
# Failed to delete project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failure recovery is failed to delete "
"%(project)s on DCNM : %(reason)s",
{'project': proj.name, 'reason': str(e)})
else:
# Delete was successful, now update the database.
self.update_project_info_cache(proj.id, opcode='delete')
LOG.debug("Success on failure recovery to deleted "
"%(project)s", {'project': proj.name})
# 6. Do failure recovery for Firewall service
self.fw_retry_failures()
# 7. DHCP port consistency check for HA.
if self.need_dhcp_check():
nets = self.get_all_networks()
for net in nets:
net_id = net.network_id
LOG.debug("dhcp consistency check for net id %s", net_id)
self.correct_dhcp_ports(net_id)
self.decrement_dhcp_check()
LOG.info("Finished failure_recovery.") | Failure recovery task.
In case of failure in projects, network and VM create/delete, this
task goes through all failure cases and try the request. | entailment |
def init_params(self, protocol_interface, phy_interface):
"""Initializing parameters. """
self.lldp_cfgd = False
self.local_intf = protocol_interface
self.phy_interface = phy_interface
self.remote_evb_cfgd = False
self.remote_evb_mode = None
self.remote_mgmt_addr = None
self.remote_system_desc = None
self.remote_system_name = None
self.remote_port = None
self.remote_chassis_id_mac = None
self.remote_port_id_mac = None
self.local_evb_cfgd = False
self.local_evb_mode = None
self.local_mgmt_address = None
self.local_system_desc = None
self.local_system_name = None
self.local_port = None
self.local_chassis_id_mac = None
self.local_port_id_mac = None
self.db_retry_status = False
self.topo_send_cnt = 0
self.bond_interface = None
self.bond_member_ports = None | Initializing parameters. | entailment |
def cmp_update_bond_intf(self, bond_interface):
"""Update the bond interface and its members.
Update the bond interface, if this interface is a part of bond
Return True if there's a change.
"""
if bond_interface != self.bond_interface:
self.bond_interface = bond_interface
self.bond_member_ports = sys_utils.get_member_ports(bond_interface)
return True
return False | Update the bond interface and its members.
Update the bond interface, if this interface is a part of bond
Return True if there's a change. | entailment |
def remote_evb_mode_uneq_store(self, remote_evb_mode):
"""Saves the EVB mode, if it is not the same as stored. """
if remote_evb_mode != self.remote_evb_mode:
self.remote_evb_mode = remote_evb_mode
return True
return False | Saves the EVB mode, if it is not the same as stored. | entailment |
def remote_evb_cfgd_uneq_store(self, remote_evb_cfgd):
"""This saves the EVB cfg, if it is not the same as stored. """
if remote_evb_cfgd != self.remote_evb_cfgd:
self.remote_evb_cfgd = remote_evb_cfgd
return True
return False | This saves the EVB cfg, if it is not the same as stored. | entailment |
def remote_mgmt_addr_uneq_store(self, remote_mgmt_addr):
"""This function saves the MGMT address, if different from stored. """
if remote_mgmt_addr != self.remote_mgmt_addr:
self.remote_mgmt_addr = remote_mgmt_addr
return True
return False | This function saves the MGMT address, if different from stored. | entailment |
def remote_sys_desc_uneq_store(self, remote_system_desc):
"""This function saves the system desc, if different from stored. """
if remote_system_desc != self.remote_system_desc:
self.remote_system_desc = remote_system_desc
return True
return False | This function saves the system desc, if different from stored. | entailment |
def remote_sys_name_uneq_store(self, remote_system_name):
"""This function saves the system name, if different from stored. """
if remote_system_name != self.remote_system_name:
self.remote_system_name = remote_system_name
return True
return False | This function saves the system name, if different from stored. | entailment |
def remote_port_uneq_store(self, remote_port):
"""This function saves the port, if different from stored. """
if remote_port != self.remote_port:
self.remote_port = remote_port
return True
return False | This function saves the port, if different from stored. | entailment |
def remote_chassis_id_mac_uneq_store(self, remote_chassis_id_mac):
"""This function saves the Chassis MAC, if different from stored. """
if remote_chassis_id_mac != self.remote_chassis_id_mac:
self.remote_chassis_id_mac = remote_chassis_id_mac
return True
return False | This function saves the Chassis MAC, if different from stored. | entailment |
def remote_port_id_mac_uneq_store(self, remote_port_id_mac):
"""This function saves the port MAC, if different from stored. """
if remote_port_id_mac != self.remote_port_id_mac:
self.remote_port_id_mac = remote_port_id_mac
return True
return False | This function saves the port MAC, if different from stored. | entailment |
def get_lldp_status(cls, intf):
"""Retrieves the LLDP status. """
if intf not in cls.topo_intf_obj_dict:
LOG.error("Interface %s not configured at all", intf)
return False
intf_obj = cls.topo_intf_obj_dict.get(intf)
return intf_obj.get_lldp_status() | Retrieves the LLDP status. | entailment |
def _init_cfg_interfaces(self, cb, intf_list=None, all_intf=True):
"""Configure the interfaces during init time. """
if not all_intf:
self.intf_list = intf_list
else:
self.intf_list = sys_utils.get_all_run_phy_intf()
self.cb = cb
self.intf_attr = {}
self.cfg_lldp_interface_list(self.intf_list) | Configure the interfaces during init time. | entailment |
def cfg_intf(self, protocol_interface, phy_interface=None):
"""Called by application to add an interface to the list. """
self.intf_list.append(protocol_interface)
self.cfg_lldp_interface(protocol_interface, phy_interface) | Called by application to add an interface to the list. | entailment |
def create_attr_obj(self, protocol_interface, phy_interface):
"""Creates the local interface attribute object and stores it. """
self.intf_attr[protocol_interface] = TopoIntfAttr(
protocol_interface, phy_interface)
self.store_obj(protocol_interface, self.intf_attr[protocol_interface]) | Creates the local interface attribute object and stores it. | entailment |
def cmp_store_tlv_params(self, intf, tlv_data):
"""Compare and store the received TLV.
Compares the received TLV with stored TLV. Store the new TLV if it is
different.
"""
flag = False
attr_obj = self.get_attr_obj(intf)
remote_evb_mode = self.pub_lldp.get_remote_evb_mode(tlv_data)
if attr_obj.remote_evb_mode_uneq_store(remote_evb_mode):
flag = True
remote_evb_cfgd = self.pub_lldp.get_remote_evb_cfgd(tlv_data)
if attr_obj.remote_evb_cfgd_uneq_store(remote_evb_cfgd):
flag = True
remote_mgmt_addr = self.pub_lldp.get_remote_mgmt_addr(tlv_data)
if attr_obj.remote_mgmt_addr_uneq_store(remote_mgmt_addr):
flag = True
remote_sys_desc = self.pub_lldp.get_remote_sys_desc(tlv_data)
if attr_obj.remote_sys_desc_uneq_store(remote_sys_desc):
flag = True
remote_sys_name = self.pub_lldp.get_remote_sys_name(tlv_data)
if attr_obj.remote_sys_name_uneq_store(remote_sys_name):
flag = True
remote_port = self.pub_lldp.get_remote_port(tlv_data)
if attr_obj.remote_port_uneq_store(remote_port):
flag = True
remote_chassis_id_mac = self.pub_lldp.\
get_remote_chassis_id_mac(tlv_data)
if attr_obj.remote_chassis_id_mac_uneq_store(remote_chassis_id_mac):
flag = True
remote_port_id_mac = self.pub_lldp.get_remote_port_id_mac(tlv_data)
if attr_obj.remote_port_id_mac_uneq_store(remote_port_id_mac):
flag = True
return flag | Compare and store the received TLV.
Compares the received TLV with stored TLV. Store the new TLV if it is
different. | entailment |
def cfg_lldp_interface(self, protocol_interface, phy_interface=None):
"""Cfg LLDP on interface and create object. """
if phy_interface is None:
phy_interface = protocol_interface
self.create_attr_obj(protocol_interface, phy_interface)
ret = self.pub_lldp.enable_lldp(protocol_interface)
attr_obj = self.get_attr_obj(protocol_interface)
attr_obj.update_lldp_status(ret) | Cfg LLDP on interface and create object. | entailment |
def periodic_discovery_task(self):
"""Periodic task that checks the interface TLV attributes. """
try:
self._periodic_task_int()
except Exception as exc:
LOG.error("Exception caught in periodic discovery task %s",
str(exc)) | Periodic task that checks the interface TLV attributes. | entailment |
def _check_bond_interface_change(self, phy_interface, attr_obj):
"""Check if there's any change in bond interface.
First check if the interface passed itself is a bond-interface and then
retrieve the member list and compare.
Next, check if the interface passed is a part of the bond interface and
then retrieve the member list and compare.
"""
bond_phy = sys_utils.get_bond_intf(phy_interface)
if sys_utils.is_intf_bond(phy_interface):
bond_intf = phy_interface
else:
bond_intf = bond_phy
# This can be an addition or removal of the interface to a bond.
bond_intf_change = attr_obj.cmp_update_bond_intf(bond_intf)
return bond_intf_change | Check if there's any change in bond interface.
First check if the interface passed itself is a bond-interface and then
retrieve the member list and compare.
Next, check if the interface passed is a part of the bond interface and
then retrieve the member list and compare. | entailment |
def _periodic_task_int(self):
"""Internal periodic discovery task routine to check TLV attributes.
This routine retrieves the LLDP TLC's on all its configured interfaces.
If the retrieved TLC is different than the stored TLV, it invokes the
callback.
"""
for intf in self.intf_list:
attr_obj = self.get_attr_obj(intf)
status = attr_obj.get_lldp_status()
if not status:
ret = self.pub_lldp.enable_lldp(intf)
attr_obj.update_lldp_status(ret)
continue
bond_intf_change = self._check_bond_interface_change(
attr_obj.get_phy_interface(), attr_obj)
tlv_data = self.pub_lldp.get_lldp_tlv(intf)
# This should take care of storing the information of interest
if self.cmp_store_tlv_params(intf, tlv_data) or (
attr_obj.get_db_retry_status() or bond_intf_change or (
attr_obj.get_topo_disc_send_cnt() > (
constants.TOPO_DISC_SEND_THRESHOLD))):
# Passing the interface attribute object to CB
ret = self.cb(intf, attr_obj)
status = not ret
attr_obj.store_db_retry_status(status)
attr_obj.reset_topo_disc_send_cnt()
else:
attr_obj.incr_topo_disc_send_cnt() | Internal periodic discovery task routine to check TLV attributes.
This routine retrieves the LLDP TLC's on all its configured interfaces.
If the retrieved TLC is different than the stored TLV, it invokes the
callback. | entailment |
def _get_cookie(self, mgmt_ip, config, refresh=False):
"""Performs authentication and retries cookie."""
if mgmt_ip not in self.credentials:
return None
security_data = self.credentials[mgmt_ip]
verify = security_data[const.HTTPS_CERT_TUPLE]
if not verify:
verify = security_data[const.HTTPS_VERIFY_TUPLE]
if not refresh and security_data[const.COOKIE_TUPLE]:
return security_data[const.COOKIE_TUPLE], verify
payload = {"aaaUser": {"attributes": {
"name": security_data[const.UNAME_TUPLE],
"pwd": security_data[const.PW_TUPLE]}}}
headers = {"Content-type": "application/json", "Accept": "text/plain"}
url = "{0}://{1}/api/aaaLogin.json".format(DEFAULT_SCHEME, mgmt_ip)
try:
response = self.session.request('POST',
url,
data=jsonutils.dumps(payload),
headers=headers,
verify=verify,
timeout=self.timeout * 2)
except Exception as e:
raise cexc.NexusConnectFailed(nexus_host=mgmt_ip,
exc=e)
self.status = response.status_code
if response.status_code == requests.codes.OK:
cookie = response.headers.get('Set-Cookie')
security_data = (
security_data[const.UNAME_TUPLE:const.COOKIE_TUPLE] +
(cookie,))
self.credentials[mgmt_ip] = security_data
return cookie, verify
else:
e = "REST API connect returned Error code: "
e += str(self.status)
raise cexc.NexusConnectFailed(nexus_host=mgmt_ip,
exc=e) | Performs authentication and retries cookie. | entailment |
def send_request(self, method, action, body=None,
headers=None, ipaddr=None):
"""Perform the HTTP request.
The response is in either JSON format or plain text. A GET method will
invoke a JSON response while a PUT/POST/DELETE returns message from the
the server in plain text format.
Exception is raised when server replies with an INTERNAL SERVER ERROR
status code (500) i.e. an error has occurred on the server or SERVICE
UNAVAILABLE (404) i.e. server is not reachable.
:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict of arguments which are sent as part of the request
:param headers: header for the HTTP request
:param server_ip: server_ip for the HTTP request.
:returns: JSON or plain text in HTTP response
"""
action = ''.join([self.scheme, '://%s/', action])
if netaddr.valid_ipv6(ipaddr):
# Enclose IPv6 address in [] in the URL
action = action % ("[%s]" % ipaddr)
else:
# IPv4 address
action = action % ipaddr
config = action + " : " + body if body else action
# if cookie needed and one not previously created
if self.request_cookie:
cookie, verify = self._get_cookie(ipaddr, config)
headers = {"Content-type": "application/json",
"Accept": "text/plain", "Cookie": cookie}
else:
if ipaddr not in self.credentials:
raise cexc.NexusCredentialNotFound(switch_ip=ipaddr)
else:
headers = {'Content-Type': 'application/json'}
security_data = self.credentials[ipaddr]
verify = security_data[const.HTTPS_CERT_TUPLE]
if not verify:
verify = security_data[const.HTTPS_VERIFY_TUPLE]
self.session.auth = (security_data[0], security_data[1])
if self.status != requests.codes.OK:
return {}
for attempt in range(self.max_retries + 1):
try:
LOG.debug("[Nexus %(ipaddr)s attempt %(id)s]: Connecting.." %
{"ipaddr": ipaddr, "id": attempt})
response = self.session.request(
method,
action,
data=body,
headers=headers,
verify=verify,
timeout=self.timeout)
if (self.request_cookie and
response.status_code in CREDENTIAL_EXPIRED):
# if need new cookie
cookie, verify = self._get_cookie(
ipaddr, config, refresh=True)
headers = {"Content-type": "application/json",
"Accept": "text/plain", "Cookie": cookie}
continue
except Exception as e:
LOG.error(
"Exception raised %(err)s for Rest/NXAPI %(cfg)s",
{'err': str(e), 'cfg': config})
raise cexc.NexusConfigFailed(nexus_host=ipaddr,
config=config,
exc=e)
else:
break
status_string = requests.status_codes._codes[response.status_code][0]
if response.status_code in self.accepted_codes:
LOG.debug(
"Good status %(status)s(%(code)d) returned for %(url)s",
{'status': status_string,
'code': response.status_code,
'url': action})
# 'text/json' used with nxapi else application/json with restapi
output = {}
if ('application/json' in response.headers['content-type'] or
'text/json' in response.headers['content-type']):
try:
output = response.json()
except Exception as e:
LOG.exception(
"Unexpected error encountered extracting "
"json body from response.")
if 'ins_api' in output:
# do special nxapi response handling
try:
cli_resp = output['ins_api']['outputs']['output']
except Exception:
cli_resp = []
# Check results for each command
for cli in cli_resp:
try:
status = int((cli['code']))
except ValueError:
status = 'bad_status %s' % cli['code']
if status not in self.accepted_codes:
excpt = "ins_api CLI failure occurred "
"with cli return code %s" % str(status)
raise cexc.NexusConfigFailed(
nexus_host=ipaddr, config=config,
exc=excpt)
return output
else:
LOG.error(
"Bad status %(status)s(%(code)d) returned for %(url)s",
{'status': status_string,
'code': response.status_code,
'url': action})
LOG.error("Response text: %(txt)s",
{'txt': response.text})
raise cexc.NexusConfigFailed(nexus_host=ipaddr,
config=config,
exc=response.text) | Perform the HTTP request.
The response is in either JSON format or plain text. A GET method will
invoke a JSON response while a PUT/POST/DELETE returns message from the
the server in plain text format.
Exception is raised when server replies with an INTERNAL SERVER ERROR
status code (500) i.e. an error has occurred on the server or SERVICE
UNAVAILABLE (404) i.e. server is not reachable.
:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict of arguments which are sent as part of the request
:param headers: header for the HTTP request
:param server_ip: server_ip for the HTTP request.
:returns: JSON or plain text in HTTP response | entailment |
def _initialize_trunk_interfaces_to_none(self, switch_ip, replay=True):
"""Initialize all nexus interfaces to trunk allowed none."""
try:
# The following determines if the switch interfaces are
# in place. If so, make sure they have a basic trunk
# configuration applied to none.
switch_ifs = self._mdriver._get_switch_interfaces(
switch_ip, cfg_only=(False if replay else True))
if not switch_ifs:
LOG.debug("Skipping switch %s which has no configured "
"interfaces",
switch_ip)
return
self._driver.initialize_all_switch_interfaces(
switch_ifs, switch_ip)
except Exception:
with excutils.save_and_reraise_exception():
LOG.warning("Unable to initialize interfaces to "
"switch %(switch_ip)s",
{'switch_ip': switch_ip})
self._mdriver.register_switch_as_inactive(switch_ip,
'replay init_interface')
if self._mdriver.is_replay_enabled():
return | Initialize all nexus interfaces to trunk allowed none. | entailment |
def replay_config(self, switch_ip):
"""Sends pending config data in OpenStack to Nexus."""
LOG.debug("Replaying config for switch ip %(switch_ip)s",
{'switch_ip': switch_ip})
# Before replaying all config, initialize trunk interfaces
# to none as required. If this fails, the switch may not
# be up all the way. Quit and retry later.
try:
self._initialize_trunk_interfaces_to_none(switch_ip)
except Exception:
return
nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
# If configured to set global VXLAN values and
# there exists VXLAN data base entries, then configure
# the "interface nve" entry on the switch.
if (len(nve_bindings) > 0 and
cfg.CONF.ml2_cisco.vxlan_global_config):
LOG.debug("Nexus: Replay NVE Interface")
loopback = self._mdriver.get_nve_loopback(switch_ip)
self._driver.enable_vxlan_feature(switch_ip,
const.NVE_INT_NUM, loopback)
for x in nve_bindings:
try:
self._driver.create_nve_member(switch_ip,
const.NVE_INT_NUM, x.vni, x.mcast_group)
except Exception as e:
LOG.error("Failed to configure nve_member for "
"switch %(switch_ip)s, vni %(vni)s"
"Reason:%(reason)s ",
{'switch_ip': switch_ip, 'vni': x.vni,
'reason': e})
self._mdriver.register_switch_as_inactive(switch_ip,
'replay create_nve_member')
return
try:
port_bindings = nxos_db.get_nexusport_switch_bindings(switch_ip)
except excep.NexusPortBindingNotFound:
LOG.warning("No port entries found for switch ip "
"%(switch_ip)s during replay.",
{'switch_ip': switch_ip})
return
try:
self._mdriver.configure_switch_entries(
switch_ip, port_bindings)
except Exception as e:
LOG.error("Unexpected exception while replaying "
"entries for switch %(switch_ip)s, Reason:%(reason)s ",
{'switch_ip': switch_ip, 'reason': e})
self._mdriver.register_switch_as_inactive(switch_ip,
'replay switch_entries') | Sends pending config data in OpenStack to Nexus. | entailment |
def check_connections(self):
"""Check connection between OpenStack to Nexus device."""
switch_connections = self._mdriver.get_all_switch_ips()
for switch_ip in switch_connections:
state = self._mdriver.get_switch_ip_and_active_state(switch_ip)
config_failure = self._mdriver.get_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
contact_failure = self._mdriver.get_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
LOG.debug("check_connections() thread %(thid)d, switch "
"%(switch_ip)s state %(state)s "
"contact_failure %(contact_failure)d "
"config_failure %(config_failure)d ",
{'thid': threading.current_thread().ident,
'switch_ip': switch_ip, 'state': state,
'contact_failure': contact_failure,
'config_failure': config_failure})
try:
# Send a simple get nexus type to determine if
# the switch is up
nexus_type = self._driver.get_nexus_type(switch_ip)
except Exception:
if state != const.SWITCH_INACTIVE:
LOG.error("Lost connection to switch ip "
"%(switch_ip)s", {'switch_ip': switch_ip})
self._mdriver.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_INACTIVE)
else:
self._mdriver.incr_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
else:
if state == const.SWITCH_RESTORE_S2:
try:
self._mdriver.configure_next_batch_of_vlans(switch_ip)
except Exception as e:
LOG.error("Unexpected exception while replaying "
"entries for switch %(switch_ip)s, "
"Reason:%(reason)s ",
{'switch_ip': switch_ip, 'reason': e})
self._mdriver.register_switch_as_inactive(
switch_ip, 'replay next_vlan_batch')
continue
if state == const.SWITCH_INACTIVE:
self._configure_nexus_type(switch_ip, nexus_type)
LOG.info("Re-established connection to switch "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
self._mdriver.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_RESTORE_S1)
self.replay_config(switch_ip)
# If replay failed, it stops trying to configure db entries
# and sets switch state to inactive so this caller knows
# it failed. If it did fail, we increment the
# retry counter else reset it to 0.
if self._mdriver.get_switch_ip_and_active_state(
switch_ip) == const.SWITCH_INACTIVE:
self._mdriver.incr_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
LOG.warning("Replay config failed for "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
else:
self._mdriver.reset_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
self._mdriver.reset_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
LOG.info("Replay config successful for "
"ip %(switch_ip)s",
{'switch_ip': switch_ip}) | Check connection between OpenStack to Nexus device. | entailment |
def _load_nexus_cfg_driver(self):
"""Load Nexus Config driver.
:raises SystemExit of 1 if driver cannot be loaded
"""
try:
loaded_class = runtime_utils.load_class_by_alias_or_classname(
'networking_cisco.ml2.nexus_driver', 'restapi')
return loaded_class(CONF.ml2_cisco.nexus_switches)
except ImportError:
LOG.error("Error loading Nexus Config driver 'restapi'")
raise SystemExit(1) | Load Nexus Config driver.
:raises SystemExit of 1 if driver cannot be loaded | entailment |
def _switch_defined(self, switch_ip):
"""Verify this ip address is defined (for Nexus)."""
switch = cfg.CONF.ml2_cisco.nexus_switches.get(switch_ip)
if switch and switch.username and switch.password:
return True
else:
return False | Verify this ip address is defined (for Nexus). | entailment |
def _pop_vlan_range(self, switch_ip, size):
"""Extract a specific number of vlans from storage.
Purpose: Can only send a limited number of vlans
to Nexus at a time.
Sample Use Cases:
1) vlan_range is a list of vlans. If there is a
list 1000, 1001, 1002, thru 2000 and size is 6,
then the result is '1000-1005' and 1006 thru 2000
is pushed back into storage.
2) if the list is 1000, 1003, 1004, 1006 thru 2000
and size is 6, then the result is
'1000, 1003-1004, 1006-1008' and 1009 thru 2000
is pushed back into storage for next time.
"""
vlan_range = self._get_switch_vlan_range(switch_ip)
sized_range = ''
fr = 0
to = 0
# if vlan_range not empty and haven't met requested size
while size > 0 and vlan_range:
vlan_id, vni = vlan_range.pop(0)
size -= 1
if fr == 0 and to == 0:
fr = vlan_id
to = vlan_id
else:
diff = vlan_id - to
if diff == 1:
to = vlan_id
else:
if fr == to:
sized_range += str(to) + ','
else:
sized_range += str(fr) + '-'
sized_range += str(to) + ','
fr = vlan_id
to = vlan_id
if fr != 0:
if fr == to:
sized_range += str(to)
else:
sized_range += str(fr) + '-'
sized_range += str(to)
self._save_switch_vlan_range(switch_ip, vlan_range)
return sized_range | Extract a specific number of vlans from storage.
Purpose: Can only send a limited number of vlans
to Nexus at a time.
Sample Use Cases:
1) vlan_range is a list of vlans. If there is a
list 1000, 1001, 1002, thru 2000 and size is 6,
then the result is '1000-1005' and 1006 thru 2000
is pushed back into storage.
2) if the list is 1000, 1003, 1004, 1006 thru 2000
and size is 6, then the result is
'1000, 1003-1004, 1006-1008' and 1009 thru 2000
is pushed back into storage for next time. | entailment |
def get_all_switch_ips(self):
"""Using reserved switch binding get all switch ips."""
switch_connections = []
try:
bindings = nxos_db.get_reserved_switch_binding()
except excep.NexusPortBindingNotFound:
LOG.error("No switch bindings in the port data base")
bindings = []
for switch in bindings:
switch_connections.append(switch.switch_ip)
return switch_connections | Using reserved switch binding get all switch ips. | entailment |
def _get_baremetal_switch_info(self, link_info):
"""Get switch_info dictionary from context."""
try:
switch_info = link_info['switch_info']
if not isinstance(switch_info, dict):
switch_info = jsonutils.loads(switch_info)
except Exception as e:
LOG.error("switch_info can't be decoded: %(exp)s",
{"exp": e})
switch_info = {}
return switch_info | Get switch_info dictionary from context. | entailment |
def _supported_baremetal_transaction(self, context):
"""Verify transaction is complete and for us."""
port = context.current
if self.trunk.is_trunk_subport_baremetal(port):
return self._baremetal_set_binding(context)
if not nexus_help.is_baremetal(port):
return False
if bc.portbindings.PROFILE not in port:
return False
profile = port[bc.portbindings.PROFILE]
if 'local_link_information' not in profile:
return False
all_link_info = profile['local_link_information']
selected = False
for link_info in all_link_info:
if 'port_id' not in link_info:
return False
switch_info = self._get_baremetal_switch_info(
link_info)
if 'switch_ip' in switch_info:
switch_ip = switch_info['switch_ip']
else:
return False
if self._switch_defined(switch_ip):
selected = True
else:
LOG.warning("Skip switch %s. Not configured "
"in ini file" % switch_ip)
if not selected:
return False
selected = self._baremetal_set_binding(context, all_link_info)
if selected:
self._init_baremetal_trunk_interfaces(
context.current, context.top_bound_segment)
if self.trunk.is_trunk_parentport(port):
self.trunk.update_subports(port)
return selected | Verify transaction is complete and for us. | entailment |
def _get_baremetal_switches(self, port):
"""Get switch ip addresses from baremetal transaction.
This method is used to extract switch information
from the transaction where VNIC_TYPE is baremetal.
:param port: Received port transaction
:returns: list of all switches
:returns: list of only switches which are active
"""
all_switches = set()
active_switches = set()
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
switch_info = self._get_baremetal_switch_info(link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
# If not for Nexus
if not self._switch_defined(switch_ip):
continue
all_switches.add(switch_ip)
if self.is_switch_active(switch_ip):
active_switches.add(switch_ip)
return list(all_switches), list(active_switches) | Get switch ip addresses from baremetal transaction.
This method is used to extract switch information
from the transaction where VNIC_TYPE is baremetal.
:param port: Received port transaction
:returns: list of all switches
:returns: list of only switches which are active | entailment |
def _get_baremetal_connections(self, port,
only_active_switch=False,
from_segment=False):
"""Get switch ips and interfaces from baremetal transaction.
This method is used to extract switch/interface
information from transactions where VNIC_TYPE is
baremetal.
:param port: Received port transaction
:param only_active_switch: Indicator for selecting
connections with switches that are active
:param from_segment: only return interfaces from the
segment/transaction as opposed to
say port channels which are learned.
:Returns: list of switch_ip, intf_type, port_id, is_native
"""
connections = []
is_native = False if self.trunk.is_trunk_subport(port) else True
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
# Extract port info
intf_type, port = nexus_help.split_interface_name(
link_info['port_id'])
# Determine if this switch is to be skipped
switch_info = self._get_baremetal_switch_info(
link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
# If not for Nexus
if not self._switch_defined(switch_ip):
continue
# Requested connections for only active switches
if (only_active_switch and
not self.is_switch_active(switch_ip)):
continue
ch_grp = 0
if not from_segment:
try:
reserved = nxos_db.get_switch_if_host_mappings(
switch_ip,
nexus_help.format_interface_name(
intf_type, port))
if reserved[0].ch_grp > 0:
ch_grp = reserved[0].ch_grp
intf_type, port = nexus_help.split_interface_name(
'', ch_grp)
except excep.NexusHostMappingNotFound:
pass
connections.append((switch_ip, intf_type, port,
is_native, ch_grp))
return connections | Get switch ips and interfaces from baremetal transaction.
This method is used to extract switch/interface
information from transactions where VNIC_TYPE is
baremetal.
:param port: Received port transaction
:param only_active_switch: Indicator for selecting
connections with switches that are active
:param from_segment: only return interfaces from the
segment/transaction as opposed to
say port channels which are learned.
:Returns: list of switch_ip, intf_type, port_id, is_native | entailment |
def _init_baremetal_trunk_interfaces(self, port_seg, segment):
"""Initialize baremetal switch interfaces and DB entry.
With baremetal transactions, the interfaces are not
known during initialization so they must be initialized
when the transactions are received.
* Reserved switch entries are added if needed.
* Reserved port entries are added.
* Determine if port channel is configured on the
interface and store it so we know to create a port-channel
binding instead of that defined in the transaction.
In this case, the RESERVED binding is the ethernet interface
with port-channel stored in channel-group field.
When this channel-group is not 0, we know to create a port binding
as a port-channel instead of interface ethernet.
"""
# interfaces list requiring switch initialization and
# reserved port and port_binding db entry creation
list_to_init = []
# interfaces list requiring reserved port and port_binding
# db entry creation
inactive_switch = []
connections = self._get_baremetal_connections(
port_seg, False, True)
for switch_ip, intf_type, port, is_native, _ in connections:
try:
nxos_db.get_switch_if_host_mappings(
switch_ip,
nexus_help.format_interface_name(intf_type, port))
except excep.NexusHostMappingNotFound:
if self.is_switch_active(switch_ip):
# channel-group added later
list_to_init.append(
(switch_ip, intf_type, port, is_native, 0))
else:
inactive_switch.append(
(switch_ip, intf_type, port, is_native, 0))
# channel_group is appended to tuples in list_to_init
self.driver.initialize_baremetal_switch_interfaces(list_to_init)
host_id = port_seg.get('dns_name')
if host_id is None:
host_id = const.RESERVED_PORT_HOST_ID
# Add inactive list to list_to_init to create RESERVED
# port data base entries
list_to_init += inactive_switch
for switch_ip, intf_type, port, is_native, ch_grp in list_to_init:
nxos_db.add_host_mapping(
host_id,
switch_ip,
nexus_help.format_interface_name(intf_type, port),
ch_grp, False) | Initialize baremetal switch interfaces and DB entry.
With baremetal transactions, the interfaces are not
known during initialization so they must be initialized
when the transactions are received.
* Reserved switch entries are added if needed.
* Reserved port entries are added.
* Determine if port channel is configured on the
interface and store it so we know to create a port-channel
binding instead of that defined in the transaction.
In this case, the RESERVED binding is the ethernet interface
with port-channel stored in channel-group field.
When this channel-group is not 0, we know to create a port binding
as a port-channel instead of interface ethernet. | entailment |
def _get_host_switches(self, host_id):
"""Get switch IPs from configured host mapping.
This method is used to extract switch information
from transactions where VNIC_TYPE is normal.
Information is extracted from ini file which
is stored in _nexus_switches.
:param host_id: host_name from transaction
:returns: list of all switches
:returns: list of only switches which are active
"""
all_switches = set()
active_switches = set()
try:
host_list = nxos_db.get_host_mappings(host_id)
for mapping in host_list:
all_switches.add(mapping.switch_ip)
if self.is_switch_active(mapping.switch_ip):
active_switches.add(mapping.switch_ip)
except excep.NexusHostMappingNotFound:
pass
return list(all_switches), list(active_switches) | Get switch IPs from configured host mapping.
This method is used to extract switch information
from transactions where VNIC_TYPE is normal.
Information is extracted from ini file which
is stored in _nexus_switches.
:param host_id: host_name from transaction
:returns: list of all switches
:returns: list of only switches which are active | entailment |
def _get_host_connections(self, host_id,
only_active_switch=False):
"""Get switch IPs and interfaces from config host mapping.
This method is used to extract switch/interface
information from ini files when VNIC_TYPE is
normal. The ini files contain host to interface
mappings.
:param host_id: Host name from transaction
:param only_active_switch: Indicator for selecting only
connections for switches that are active
:returns: list of switch_ip, intf_type, port_id, is_native
"""
host_found = False
host_connections = []
try:
host_ifs = nxos_db.get_host_mappings(host_id)
except excep.NexusHostMappingNotFound:
host_ifs = []
for ifs in host_ifs:
host_found = True
if (only_active_switch and
not self.is_switch_active(ifs.switch_ip)):
continue
intf_type, port = nexus_help.split_interface_name(
ifs.if_id, ifs.ch_grp)
# is_native set to const.NOT_NATIVE for
# VNIC_TYPE of normal
host_connections.append((
ifs.switch_ip, intf_type, port,
const.NOT_NATIVE, ifs.ch_grp))
if not host_found:
LOG.warning(HOST_NOT_FOUND, host_id)
return host_connections | Get switch IPs and interfaces from config host mapping.
This method is used to extract switch/interface
information from ini files when VNIC_TYPE is
normal. The ini files contain host to interface
mappings.
:param host_id: Host name from transaction
:param only_active_switch: Indicator for selecting only
connections for switches that are active
:returns: list of switch_ip, intf_type, port_id, is_native | entailment |
def _get_switch_interfaces(self, requested_switch_ip, cfg_only=False):
"""Get switch interfaces from host mapping DB.
For a given switch, this returns all known port
interfaces for a given switch. These have been
learned from received baremetal transactions and
from configuration file.
:param requested_switch_ip: switch_ip
:returns: list of switch_ip, intf_type, port_id, is_native
"""
switch_ifs = []
try:
port_info = nxos_db.get_switch_host_mappings(
requested_switch_ip)
except excep.NexusHostMappingNotFound:
port_info = []
for binding in port_info:
if cfg_only and not binding.is_static:
continue
intf_type, port = nexus_help.split_interface_name(
binding.if_id)
switch_ifs.append(
(requested_switch_ip, intf_type, port,
const.NOT_NATIVE, binding.ch_grp))
return switch_ifs | Get switch interfaces from host mapping DB.
For a given switch, this returns all known port
interfaces for a given switch. These have been
learned from received baremetal transactions and
from configuration file.
:param requested_switch_ip: switch_ip
:returns: list of switch_ip, intf_type, port_id, is_native | entailment |
def _configure_nve_db(self, vni, device_id, mcast_group, host_id):
"""Create the nexus NVE database entry.
Called during update precommit port event.
"""
host_nve_connections = self._get_switch_nve_info(host_id)
for switch_ip in host_nve_connections:
if not nxos_db.get_nve_vni_member_bindings(vni, switch_ip,
device_id):
nxos_db.add_nexusnve_binding(vni, switch_ip, device_id,
mcast_group) | Create the nexus NVE database entry.
Called during update precommit port event. | entailment |
def _configure_nve_member(self, vni, device_id, mcast_group, host_id):
"""Add "member vni" configuration to the NVE interface.
Called during update postcommit port event.
"""
host_nve_connections = self._get_switch_nve_info(host_id)
for switch_ip in host_nve_connections:
# If configured to set global VXLAN values then
# If this is the first database entry for this switch_ip
# then configure the "interface nve" entry on the switch.
if cfg.CONF.ml2_cisco.vxlan_global_config:
nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
if len(nve_bindings) == 1:
LOG.debug("Nexus: create NVE interface")
loopback = self.get_nve_loopback(switch_ip)
self.driver.enable_vxlan_feature(switch_ip,
const.NVE_INT_NUM, loopback)
# If this is the first database entry for this (VNI, switch_ip)
# then configure the "member vni #" entry on the switch.
member_bindings = nxos_db.get_nve_vni_switch_bindings(vni,
switch_ip)
if len(member_bindings) == 1:
LOG.debug("Nexus: add member")
self.driver.create_nve_member(switch_ip, const.NVE_INT_NUM,
vni, mcast_group) | Add "member vni" configuration to the NVE interface.
Called during update postcommit port event. | entailment |
def _delete_nve_db(self, vni, device_id, mcast_group, host_id):
"""Delete the nexus NVE database entry.
Called during delete precommit port event.
"""
rows = nxos_db.get_nve_vni_deviceid_bindings(vni, device_id)
for row in rows:
nxos_db.remove_nexusnve_binding(vni, row.switch_ip, device_id) | Delete the nexus NVE database entry.
Called during delete precommit port event. | entailment |
def _delete_nve_member(self, vni, device_id, mcast_group, host_id):
"""Remove "member vni" configuration from the NVE interface.
Called during delete postcommit port event.
"""
host_nve_connections = self._get_switch_nve_info(host_id)
for switch_ip in host_nve_connections:
if not nxos_db.get_nve_vni_switch_bindings(vni, switch_ip):
self.driver.delete_nve_member(switch_ip,
const.NVE_INT_NUM, vni)
if (cfg.CONF.ml2_cisco.vxlan_global_config and
not nxos_db.get_nve_switch_bindings(switch_ip)):
self.driver.disable_vxlan_feature(switch_ip) | Remove "member vni" configuration from the NVE interface.
Called during delete postcommit port event. | entailment |
def _configure_nxos_db(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan):
"""Create the nexus database entry.
Called during update precommit port event.
"""
connections = self._get_port_connections(port, host_id)
for switch_ip, intf_type, nexus_port, is_native, ch_grp in connections:
port_id = nexus_help.format_interface_name(
intf_type, nexus_port, ch_grp)
try:
nxos_db.get_nexusport_binding(port_id, vlan_id, switch_ip,
device_id)
except excep.NexusPortBindingNotFound:
nxos_db.add_nexusport_binding(port_id, str(vlan_id), str(vni),
switch_ip, device_id,
is_native) | Create the nexus database entry.
Called during update precommit port event. | entailment |
def _gather_config_parms(self, is_provider_vlan, vlan_id):
"""Collect auto_create, auto_trunk from config."""
if is_provider_vlan:
auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create
auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk
else:
auto_create = True
auto_trunk = True
return auto_create, auto_trunk | Collect auto_create, auto_trunk from config. | entailment |
def _configure_port_binding(self, is_provider_vlan, duplicate_type,
is_native,
switch_ip, vlan_id,
intf_type, nexus_port, vni):
"""Conditionally calls vlan and port Nexus drivers."""
# This implies VLAN, VNI, and Port are all duplicate.
# Then there is nothing to configure in Nexus.
if duplicate_type == const.DUPLICATE_PORT:
return
auto_create, auto_trunk = self._gather_config_parms(
is_provider_vlan, vlan_id)
# if type DUPLICATE_VLAN, don't create vlan
if duplicate_type == const.DUPLICATE_VLAN:
auto_create = False
if auto_create and auto_trunk:
LOG.debug("Nexus: create vlan %s and add to interface", vlan_id)
self.driver.create_and_trunk_vlan(
switch_ip, vlan_id, intf_type,
nexus_port, vni, is_native)
elif auto_create:
LOG.debug("Nexus: create vlan %s", vlan_id)
self.driver.create_vlan(switch_ip, vlan_id, vni)
elif auto_trunk:
LOG.debug("Nexus: trunk vlan %s", vlan_id)
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, vlan_id,
intf_type, nexus_port, is_native) | Conditionally calls vlan and port Nexus drivers. | entailment |
def _get_compressed_vlan_list(self, pvlan_ids):
"""Generate a compressed vlan list ready for XML using a vlan set.
Sample Use Case:
Input vlan set:
--------------
1 - s = set([11, 50, 25, 30, 15, 16, 3, 8, 2, 1])
2 - s = set([87, 11, 50, 25, 30, 15, 16, 3, 8, 2, 1, 88])
Returned compressed XML list:
----------------------------
1 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30', '50']
2 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30',
'50', '87-88']
"""
if not pvlan_ids:
return []
pvlan_list = list(pvlan_ids)
pvlan_list.sort()
compressed_list = []
begin = -1
prev_vlan = -1
for port_vlan in pvlan_list:
if prev_vlan == -1:
prev_vlan = port_vlan
else:
if (port_vlan - prev_vlan) == 1:
if begin == -1:
begin = prev_vlan
prev_vlan = port_vlan
else:
if begin == -1:
compressed_list.append(str(prev_vlan))
else:
compressed_list.append("%d-%d" % (begin, prev_vlan))
begin = -1
prev_vlan = port_vlan
if begin == -1:
compressed_list.append(str(prev_vlan))
else:
compressed_list.append("%s-%s" % (begin, prev_vlan))
return compressed_list | Generate a compressed vlan list ready for XML using a vlan set.
Sample Use Case:
Input vlan set:
--------------
1 - s = set([11, 50, 25, 30, 15, 16, 3, 8, 2, 1])
2 - s = set([87, 11, 50, 25, 30, 15, 16, 3, 8, 2, 1, 88])
Returned compressed XML list:
----------------------------
1 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30', '50']
2 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30',
'50', '87-88'] | entailment |
def _restore_port_binding(self,
switch_ip, pvlan_ids,
port, native_vlan):
"""Restores a set of vlans for a given port."""
intf_type, nexus_port = nexus_help.split_interface_name(port)
# If native_vlan is configured, this is isolated since
# two configs (native + trunk) must be sent for this vlan only.
if native_vlan != 0:
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, native_vlan,
intf_type, nexus_port, True)
# If this is the only vlan
if len(pvlan_ids) == 1:
return
concat_vlans = ''
compressed_vlans = self._get_compressed_vlan_list(pvlan_ids)
for pvlan in compressed_vlans:
if concat_vlans == '':
concat_vlans = "%s" % pvlan
else:
concat_vlans += ",%s" % pvlan
# if string starts getting a bit long, send it.
if len(concat_vlans) >= const.CREATE_PORT_VLAN_LENGTH:
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, concat_vlans,
intf_type, nexus_port, False)
concat_vlans = ''
# Send remaining vlans if any
if len(concat_vlans):
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, concat_vlans,
intf_type, nexus_port, False) | Restores a set of vlans for a given port. | entailment |
def _restore_vxlan_entries(self, switch_ip, vlans):
"""Restore vxlan entries on a Nexus switch."""
count = 1
conf_str = ''
vnsegment_sent = 0
path_str, conf_str = self.driver.start_create_vlan()
# At this time, this will only configure vni information when needed
while vnsegment_sent < const.CREATE_VLAN_BATCH and vlans:
vlan_id, vni = vlans.pop(0)
# Add it to the batch
conf_str = self.driver.get_create_vlan(
switch_ip, vlan_id, vni, conf_str)
# batch size has been met
if (count == const.CREATE_VLAN_SEND_SIZE):
conf_str = self.driver.end_create_vlan(conf_str)
self.driver.send_edit_string(switch_ip, path_str, conf_str)
vnsegment_sent += count
conf_str = ''
count = 1
else:
count += 1
# batch size was not met
if conf_str:
vnsegment_sent += count
conf_str = self.driver.end_create_vlan(conf_str)
self.driver.send_edit_string(switch_ip, path_str, conf_str)
conf_str = ''
LOG.debug("Switch %s VLAN vn-segment replay summary: %d",
switch_ip, vnsegment_sent) | Restore vxlan entries on a Nexus switch. | entailment |
def _configure_port_entries(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan):
"""Create a nexus switch entry.
if needed, create a VLAN in the appropriate switch or port and
configure the appropriate interfaces for this VLAN.
Called during update postcommit port event.
"""
connections = self._get_active_port_connections(port, host_id)
# (nexus_port,switch_ip) will be unique in each iteration.
# But switch_ip will repeat if host has >1 connection to same switch.
# So track which switch_ips already have vlan created in this loop.
vlan_already_created = []
starttime = time.time()
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
try:
all_bindings = nxos_db.get_nexusvlan_binding(
vlan_id, switch_ip)
except excep.NexusPortBindingNotFound:
LOG.warning("Switch %(switch_ip)s and Vlan "
"%(vlan_id)s not found in port binding "
"database. Skipping this update",
{'switch_ip': switch_ip, 'vlan_id': vlan_id})
continue
previous_bindings = [row for row in all_bindings
if row.instance_id != device_id]
if previous_bindings and (switch_ip in vlan_already_created):
duplicate_type = const.DUPLICATE_VLAN
else:
vlan_already_created.append(switch_ip)
duplicate_type = const.NO_DUPLICATE
port_starttime = time.time()
try:
self._configure_port_binding(
is_provider_vlan, duplicate_type,
is_native,
switch_ip, vlan_id,
intf_type, nexus_port,
vni)
except Exception:
with excutils.save_and_reraise_exception():
self.driver.capture_and_print_timeshot(
port_starttime, "port_configerr",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "configerr",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
port_starttime, "port_config",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "config") | Create a nexus switch entry.
if needed, create a VLAN in the appropriate switch or port and
configure the appropriate interfaces for this VLAN.
Called during update postcommit port event. | entailment |
def configure_next_batch_of_vlans(self, switch_ip):
"""Get next batch of vlans and send them to Nexus."""
next_range = self._pop_vlan_range(
switch_ip, const.CREATE_VLAN_BATCH)
if next_range:
try:
self.driver.set_all_vlan_states(
switch_ip, next_range)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error encountered restoring vlans "
"for switch %(switch_ip)s",
{'switch_ip': switch_ip})
self._save_switch_vlan_range(switch_ip, [])
vxlan_range = self._get_switch_vxlan_range(switch_ip)
if vxlan_range:
try:
self._restore_vxlan_entries(switch_ip, vxlan_range)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error encountered restoring vxlans "
"for switch %(switch_ip)s",
{'switch_ip': switch_ip})
self._save_switch_vxlan_range(switch_ip, [])
# if no more vlans to restore, we're done. go active.
if (not self._get_switch_vlan_range(switch_ip) and
not self._get_switch_vxlan_range(switch_ip)):
self.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_ACTIVE)
LOG.info("Restore of Nexus switch "
"ip %(switch_ip)s is complete",
{'switch_ip': switch_ip})
else:
LOG.debug(("Restored batch of VLANS on "
"Nexus switch ip %(switch_ip)s"),
{'switch_ip': switch_ip}) | Get next batch of vlans and send them to Nexus. | entailment |
def configure_switch_entries(self, switch_ip, port_bindings):
"""Create a nexus switch entry in Nexus.
The port_bindings is sorted by vlan_id, vni, port_id.
When there is a change in vlan_id or vni, then vlan
data is configured in Nexus device.
Otherwise we check if there is a change in port_id
where we configure the port with vlan trunk config.
Called during switch replay event.
"""
prev_vlan = -1
prev_vni = -1
prev_port = None
prev_native_vlan = 0
starttime = time.time()
port_bindings.sort(key=lambda x: (x.port_id, x.vlan_id, x.vni))
self.driver.capture_and_print_timeshot(
starttime, "replay_t2_aft_sort",
switch=switch_ip)
# Let's make these lists a set to exclude duplicates
vlans = set()
pvlans = set()
interface_count = 0
duplicate_port = 0
vlan_count = 0
for port in port_bindings:
if nxos_db.is_reserved_binding(port):
continue
auto_create, auto_trunk = self._gather_config_parms(
nxos_db.is_provider_vlan(port.vlan_id), port.vlan_id)
if port.port_id == prev_port:
if port.vlan_id == prev_vlan and port.vni == prev_vni:
# Same port/Same Vlan - skip duplicate
duplicate_port += 1
continue
else:
# Same port/different Vlan - track it
vlan_count += 1
if auto_create:
vlans.add((port.vlan_id, port.vni))
if auto_trunk:
pvlans.add(port.vlan_id)
if port.is_native:
prev_native_vlan = port.vlan_id
else:
# Different port - write out interface trunk on previous port
if prev_port:
interface_count += 1
LOG.debug("Switch %s port %s replay summary: unique vlan "
"count %d, duplicate port entries %d",
switch_ip, prev_port, vlan_count, duplicate_port)
duplicate_port = 0
vlan_count = 0
if pvlans:
self._restore_port_binding(
switch_ip, pvlans, prev_port, prev_native_vlan)
pvlans.clear()
prev_native_vlan = 0
# Start tracking new port
if auto_create:
vlans.add((port.vlan_id, port.vni))
if auto_trunk:
pvlans.add(port.vlan_id)
prev_port = port.port_id
if port.is_native:
prev_native_vlan = port.vlan_id
if pvlans:
LOG.debug("Switch %s port %s replay summary: unique vlan "
"count %d, duplicate port entries %d",
switch_ip, port.port_id, vlan_count, duplicate_port)
self._restore_port_binding(
switch_ip, pvlans, prev_port, prev_native_vlan)
LOG.debug("Replayed total %d ports for Switch %s",
interface_count + 1, switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "replay_part_1",
switch=switch_ip)
vlans = list(vlans)
if vlans:
vlans.sort()
vlan, vni = vlans[0]
if vni == 0:
self._save_switch_vlan_range(switch_ip, vlans)
else:
self._save_switch_vxlan_range(switch_ip, vlans)
self.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_RESTORE_S2)
self.configure_next_batch_of_vlans(switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "replay_part_2",
switch=switch_ip) | Create a nexus switch entry in Nexus.
The port_bindings is sorted by vlan_id, vni, port_id.
When there is a change in vlan_id or vni, then vlan
data is configured in Nexus device.
Otherwise we check if there is a change in port_id
where we configure the port with vlan trunk config.
Called during switch replay event. | entailment |
def _delete_nxos_db(self, unused, vlan_id, device_id, host_id, vni,
is_provider_vlan):
"""Delete the nexus database entry.
Called during delete precommit port event.
"""
try:
rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id)
for row in rows:
nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id,
row.vni, row.switch_ip, row.instance_id)
except excep.NexusPortBindingNotFound:
return | Delete the nexus database entry.
Called during delete precommit port event. | entailment |
def _delete_port_channel_resources(self, host_id, switch_ip,
intf_type, nexus_port, port_id):
'''This determines if port channel id needs to be freed.'''
# if this connection is not a port-channel, nothing to do.
if intf_type != 'port-channel':
return
# Check if this driver created it and its no longer needed.
try:
vpc = nxos_db.get_switch_vpc_alloc(
switch_ip, nexus_port)
except excep.NexusVPCAllocNotFound:
# This can occur for non-baremetal configured
# port-channels. Nothing more to do.
LOG.debug("Switch %s portchannel %s vpc entry not "
"found in vpcid alloc table.",
switch_ip, nexus_port)
return
# if this isn't one which was allocated or learned,
# don't do any further processing.
if not vpc.active:
LOG.debug("Switch %s portchannel %s vpc entry not "
"active.",
switch_ip, nexus_port)
return
# Is this port-channel still in use?
# If so, nothing more to do.
try:
nxos_db.get_nexus_switchport_binding(port_id, switch_ip)
LOG.debug("Switch %s portchannel %s port entries "
"in use. Skipping port-channel clean-up.",
switch_ip, nexus_port)
return
except excep.NexusPortBindingNotFound:
pass
# need to get ethernet interface name
try:
mapping = nxos_db.get_switch_and_host_mappings(
host_id, switch_ip)
eth_type, eth_port = nexus_help.split_interface_name(
mapping[0].if_id)
except excep.NexusHostMappingNotFound:
LOG.warning("Switch %s hostid %s host_mapping not "
"found. Skipping port-channel clean-up.",
switch_ip, host_id)
return
# Remove the channel group from ethernet interface
# and remove port channel from this switch.
if not vpc.learned:
self.driver.delete_ch_grp_to_interface(
switch_ip, eth_type, eth_port,
nexus_port)
self.driver.delete_port_channel(switch_ip,
nexus_port)
try:
nxos_db.free_vpcid_for_switch(nexus_port, switch_ip)
LOG.info("Released portchannel %s resources for "
"switch %s",
nexus_port, switch_ip)
except excep.NexusVPCAllocNotFound:
# Not all learned port channels will be in this db when
# they're outside the configured vpc_pool so
# this exception may be possible.
LOG.warning("Failed to free vpcid %s for switch %s "
"since it did not exist in table.",
nexus_port, switch_ip) | This determines if port channel id needs to be freed. | entailment |
def _delete_switch_entry(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan):
"""Delete the nexus switch entry.
By accessing the current db entries determine if switch
configuration can be removed.
Called during delete postcommit port event.
"""
connections = self._get_active_port_connections(port, host_id)
# (nexus_port,switch_ip) will be unique in each iteration.
# But switch_ip will repeat if host has >1 connection to same switch.
# So track which switch_ips already have vlan removed in this loop.
vlan_already_removed = []
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
# if there are no remaining db entries using this vlan on this
# nexus switch port then remove vlan from the switchport trunk.
port_id = nexus_help.format_interface_name(intf_type, nexus_port)
auto_create = True
auto_trunk = True
if is_provider_vlan:
auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create
auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk
try:
nxos_db.get_port_vlan_switch_binding(port_id, vlan_id,
switch_ip)
except excep.NexusPortBindingNotFound:
pass
else:
continue
if auto_trunk:
self.driver.disable_vlan_on_trunk_int(
switch_ip, vlan_id, intf_type, nexus_port,
is_native)
# if there are no remaining db entries using this vlan on this
# nexus switch then remove the vlan.
if auto_create:
try:
nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
except excep.NexusPortBindingNotFound:
# Do not perform a second time on same switch
if switch_ip not in vlan_already_removed:
self.driver.delete_vlan(switch_ip, vlan_id)
vlan_already_removed.append(switch_ip)
self._delete_port_channel_resources(
host_id, switch_ip, intf_type, nexus_port, port_id)
if nexus_help.is_baremetal(port):
connections = self._get_baremetal_connections(
port, False, True)
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
if_id = nexus_help.format_interface_name(
intf_type, nexus_port)
try:
mapping = nxos_db.get_switch_if_host_mappings(
switch_ip, if_id)
ch_grp = mapping[0].ch_grp
except excep.NexusHostMappingNotFound:
ch_grp = 0
bind_port_id = nexus_help.format_interface_name(
intf_type, nexus_port, ch_grp)
binding = nxos_db.get_port_switch_bindings(
bind_port_id,
switch_ip)
if not binding:
nxos_db.remove_host_mapping(if_id, switch_ip) | Delete the nexus switch entry.
By accessing the current db entries determine if switch
configuration can be removed.
Called during delete postcommit port event. | entailment |
def _port_action_vlan(self, port, segment, func, vni):
"""Verify configuration and then process event."""
# Verify segment.
if not self._is_valid_segment(segment):
return
device_id = self._get_port_uuid(port)
if nexus_help.is_baremetal(port):
host_id = port.get('dns_name')
else:
host_id = port.get(bc.portbindings.HOST_ID)
vlan_id = segment.get(api.SEGMENTATION_ID)
is_provider = nxos_db.is_provider_vlan(vlan_id)
settings = {"vlan_id": vlan_id,
"device_id": device_id,
"host_id": host_id}
missing_fields = [field for field, value in settings.items()
if (field != 'host_id' and not value)]
if not missing_fields:
func(port, vlan_id, device_id, host_id, vni, is_provider)
else:
raise excep.NexusMissingRequiredFields(
fields=' '.join(missing_fields)) | Verify configuration and then process event. | entailment |
def _port_action_vxlan(self, port, segment, func):
"""Verify configuration and then process event."""
# If the segment is None, just log a warning message and return.
if segment is None:
self._log_missing_segment()
return
device_id = port.get('device_id')
mcast_group = segment.get(api.PHYSICAL_NETWORK)
host_id = port.get(bc.portbindings.HOST_ID)
vni = segment.get(api.SEGMENTATION_ID)
if vni and device_id and mcast_group and host_id:
func(vni, device_id, mcast_group, host_id)
return vni
else:
fields = "vni " if not vni else ""
fields += "device_id " if not device_id else ""
fields += "mcast_group " if not mcast_group else ""
fields += "host_id" if not host_id else ""
raise excep.NexusMissingRequiredFields(fields=fields) | Verify configuration and then process event. | entailment |
def create_port_postcommit(self, context):
"""Create port non-database commit event."""
# No new events are handled until replay
# thread has put the switch in active state.
# If a switch is in active state, verify
# the switch is still in active state
# before accepting this new event.
#
# If create_port_postcommit fails, it causes
# other openstack dbs to be cleared and
# retries for new VMs will stop. Subnet
# transactions will continue to be retried.
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
# Verify segment.
if not self._is_valid_segment(vlan_segment):
return
port = context.current
if self._is_supported_deviceowner(port):
if nexus_help.is_baremetal(context.current):
all_switches, active_switches = (
self._get_baremetal_switches(context.current))
else:
host_id = context.current.get(bc.portbindings.HOST_ID)
all_switches, active_switches = (
self._get_host_switches(host_id))
# Verify switch is still up before replay
# thread checks.
verified_active_switches = []
for switch_ip in active_switches:
try:
self.driver.get_nexus_type(switch_ip)
verified_active_switches.append(switch_ip)
except Exception as e:
LOG.error("Failed to ping "
"switch ip %(switch_ip)s error %(exp_err)s",
{'switch_ip': switch_ip, 'exp_err': e})
LOG.debug("Create Stats: thread %(thid)d, "
"all_switches %(all)d, "
"active %(active)d, verified %(verify)d",
{'thid': threading.current_thread().ident,
'all': len(all_switches),
'active': len(active_switches),
'verify': len(verified_active_switches)})
# if host_id is valid and there is no active
# switches remaining
if all_switches and not verified_active_switches:
raise excep.NexusConnectFailed(
nexus_host=all_switches[0], config="None",
exc="Create Failed: Port event can not "
"be processed at this time.") | Create port non-database commit event. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.